From 07c4efc6d9da585a5d8173d3101c918b839d867f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 3 Oct 2024 23:26:24 +0000
Subject: [PATCH] build(deps): bump github.com/sigstore/cosign/v2 from 2.2.3
 to 2.4.1

Bumps [github.com/sigstore/cosign/v2](https://github.com/sigstore/cosign) from 2.2.3 to 2.4.1.
- [Release notes](https://github.com/sigstore/cosign/releases)
- [Changelog](https://github.com/sigstore/cosign/blob/main/CHANGELOG.md)
- [Commits](https://github.com/sigstore/cosign/compare/v2.2.3...v2.4.1)

---
updated-dependencies:
- dependency-name: github.com/sigstore/cosign/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
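Note for reviewers (not part of the generated commit message): a minor-version
bump like this can typically be reproduced with the standard Go module
workflow, sketched below under the assumption that this repository vendors its
dependencies, as the vendor/ paths and vendor/modules.txt entry in the
diffstat indicate.

    go get github.com/sigstore/cosign/v2@v2.4.1
    go mod tidy
    go mod vendor

The go.mod, go.sum, and vendor/ changes summarized below should correspond to
what these commands regenerate.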
 go.mod | 112 +-
 go.sum | 345 +-
 vendor/cloud.google.com/go/auth/CHANGES.md | 261 +
 vendor/cloud.google.com/go/auth/LICENSE | 202 +
 vendor/cloud.google.com/go/auth/README.md | 4 +
 vendor/cloud.google.com/go/auth/auth.go | 612 ++
 .../go/auth/credentials/compute.go | 86 +
 .../go/auth/credentials/detect.go | 262 +
 .../go/auth/credentials/doc.go | 45 +
 .../go/auth/credentials/filetypes.go | 219 +
 .../go/auth/credentials/idtoken/cache.go | 133 +
 .../go/auth/credentials/idtoken/compute.go | 83 +
 .../go/auth/credentials/idtoken/file.go | 127 +
 .../go/auth/credentials/idtoken/idtoken.go | 136 +
 .../go/auth/credentials/idtoken/validate.go | 269 +
 .../go/auth/credentials/impersonate/doc.go | 45 +
 .../auth/credentials/impersonate/idtoken.go | 187 +
 .../credentials/impersonate/impersonate.go | 266 +
 .../go/auth/credentials/impersonate/user.go | 174 +
 .../internal/externalaccount/aws_provider.go | 522 +
 .../externalaccount/executable_provider.go | 284 +
 .../externalaccount/externalaccount.go | 407 +
 .../internal/externalaccount/file_provider.go | 78 +
 .../internal/externalaccount/info.go | 74 +
 .../externalaccount/programmatic_provider.go} | 26 +-
 .../internal/externalaccount/url_provider.go | 88 +
 .../internal/externalaccount/x509_provider.go | 63 +
 .../externalaccountuser.go | 110 +
 .../go/auth/credentials/internal/gdch/gdch.go | 184 +
 .../internal/impersonate/impersonate.go | 146 +
 .../internal/stsexchange/sts_exchange.go | 161 +
 .../go/auth/credentials/selfsignedjwt.go | 81 +
 .../go/auth/httptransport/httptransport.go | 226 +
 .../go/auth/httptransport/trace.go | 93 +
 .../go/auth/httptransport/transport.go | 220 +
 .../go/auth/internal/credsfile/credsfile.go | 107 +
 .../go/auth/internal/credsfile/filetype.go | 157 +
 .../go/auth/internal/credsfile/parse.go | 98 +
 .../go/auth/internal/internal.go | 213 +
 .../go/auth/internal/jwt/jwt.go | 171 +
 .../go/auth/internal/transport/cba.go | 356 +
 .../internal/transport/cert/default_cert.go | 65 +
 .../transport/cert/enterprise_cert.go | 56 +
 .../transport/cert/secureconnect_cert.go | 124 +
 .../internal/transport/cert/workload_cert.go | 117 +
 .../go/auth/internal/transport/s2a.go | 134 +
 .../go/auth/internal/transport/transport.go | 105 +
 .../go/auth/oauth2adapt/CHANGES.md | 54 +
 .../go/auth/oauth2adapt/LICENSE | 202 +
 .../go/auth/oauth2adapt/oauth2adapt.go | 164 +
 .../cloud.google.com/go/auth/threelegged.go | 368 +
 .../go/compute/metadata/CHANGES.md | 19 +
 .../go/compute/metadata/metadata.go | 381 +-
 .../go/compute/metadata/syscheck.go | 26 +
 .../go/compute/metadata/syscheck_linux.go} | 23 +-
 .../go/compute/metadata/syscheck_windows.go | 38 +
 .../cuelabs.dev/go/oci/ociregistry/README.md | 6 +-
 .../cuelabs.dev/go/oci/ociregistry/error.go | 316 +-
 vendor/cuelabs.dev/go/oci/ociregistry/func.go | 24 +-
 .../go/oci/ociregistry/interface.go | 20 +-
 .../ociregistry/internal/ocirequest/create.go | 111 +
 .../internal/ocirequest/request.go | 445 +
 vendor/cuelabs.dev/go/oci/ociregistry/iter.go | 79 +-
 .../go/oci/ociregistry/ociauth/auth.go | 519 +
 .../go/oci/ociregistry/ociauth/authfile.go | 358 +
 .../go/oci/ociregistry/ociauth/challenge.go | 167 +
 .../go/oci/ociregistry/ociauth/context.go | 53 +
 .../go/oci/ociregistry/ociauth/scope.go | 492 +
 .../go/oci/ociregistry/ociclient/client.go | 422 +
 .../go/oci/ociregistry/ociclient/deleter.go | 56 +
 .../go/oci/ociregistry/ociclient/error.go | 113 +
 .../go/oci/ociregistry/ociclient/lister.go | 180 +
 .../go/oci/ociregistry/ociclient/reader.go | 186 +
 .../go/oci/ociregistry/ociclient/writer.go | 433 +
 .../go/oci/ociregistry/ociref/reference.go | 233 +
 .../cuelabs.dev/go/oci/ociregistry/valid.go | 30 +
 vendor/cuelang.org/go/AUTHORS | 6 -
 vendor/cuelang.org/go/cue/ast/ast.go | 12 +-
 .../cuelang.org/go/cue/ast/astutil/apply.go | 56 +-
 .../cuelang.org/go/cue/ast/astutil/resolve.go | 64 +-
 .../go/cue/ast/astutil/sanitize.go | 10 +-
 vendor/cuelang.org/go/cue/ast/astutil/util.go | 4 +-
 vendor/cuelang.org/go/cue/ast/astutil/walk.go | 201 +-
 vendor/cuelang.org/go/cue/ast/ident.go | 72 +-
 vendor/cuelang.org/go/cue/ast/walk.go | 195 +-
 vendor/cuelang.org/go/cue/build.go | 11 +-
 vendor/cuelang.org/go/cue/build/doc.go | 2 +-
 vendor/cuelang.org/go/cue/build/import.go | 35 +-
 vendor/cuelang.org/go/cue/build/instance.go | 7 +-
 vendor/cuelang.org/go/cue/context.go | 4 +
 vendor/cuelang.org/go/cue/cue.go | 10 +-
 .../go/cue/cuecontext/cuecontext.go | 59 +
 vendor/cuelang.org/go/cue/decode.go | 47 +-
 vendor/cuelang.org/go/cue/errors/errors.go | 116 +-
 vendor/cuelang.org/go/cue/format/format.go | 13 +-
 vendor/cuelang.org/go/cue/format/node.go | 73 +-
 vendor/cuelang.org/go/cue/format/printer.go | 20 +-
 vendor/cuelang.org/go/cue/format/simplify.go | 6 +-
 vendor/cuelang.org/go/cue/instance.go | 39 +-
 vendor/cuelang.org/go/cue/literal/num.go | 5 +-
 vendor/cuelang.org/go/cue/literal/quote.go | 3 +-
 vendor/cuelang.org/go/cue/load/config.go | 191 +-
 vendor/cuelang.org/go/cue/load/doc.go | 2 +-
 vendor/cuelang.org/go/cue/load/errors.go | 4 +-
 vendor/cuelang.org/go/cue/load/fs.go | 258 +-
 vendor/cuelang.org/go/cue/load/import.go | 373 +-
 vendor/cuelang.org/go/cue/load/instances.go | 164 +-
 vendor/cuelang.org/go/cue/load/loader.go | 139 +-
 .../cuelang.org/go/cue/load/loader_common.go | 217 +-
 vendor/cuelang.org/go/cue/load/match.go | 112 +-
 vendor/cuelang.org/go/cue/load/module.go | 194 -
 .../cuelang.org/go/cue/load/moduleschema.cue | 23 -
 vendor/cuelang.org/go/cue/load/package.go | 21 -
 vendor/cuelang.org/go/cue/load/registry.go | 95 -
 vendor/cuelang.org/go/cue/load/search.go | 345 +-
 vendor/cuelang.org/go/cue/load/tags.go | 11 +-
 vendor/cuelang.org/go/cue/load/test.cue | 3 -
 vendor/cuelang.org/go/cue/marshal.go | 44 +-
 vendor/cuelang.org/go/cue/op.go | 121 -
 vendor/cuelang.org/go/cue/parser/doc.go | 2 +-
 vendor/cuelang.org/go/cue/parser/interface.go | 13 +-
 vendor/cuelang.org/go/cue/parser/parser.go | 19 +-
 vendor/cuelang.org/go/cue/path.go | 4 +-
 vendor/cuelang.org/go/cue/scanner/scanner.go | 2 +-
 vendor/cuelang.org/go/cue/token/position.go | 64 +-
 vendor/cuelang.org/go/cue/token/token.go | 141 +-
 .../cuelang.org/go/cue/token/token_string.go | 82 +
 vendor/cuelang.org/go/cue/types.go | 235 +-
 .../go/encoding/gocode/gocodec/codec.go | 191 -
 vendor/cuelang.org/go/encoding/json/json.go | 33 +-
 .../cuelang.org/go/encoding/openapi/decode.go | 6 +-
 vendor/cuelang.org/go/encoding/openapi/doc.go | 2 +-
 .../go/encoding/openapi/openapi.go | 6 +-
 .../cuelang.org/go/encoding/openapi/types.go | 2 +-
 .../go/encoding/protobuf/jsonpb/decoder.go | 5 +-
 .../cuelang.org/go/encoding/protobuf/parse.go | 4 +-
 .../go/encoding/protobuf/protobuf.go | 39 +-
 .../go/encoding/protobuf/textproto/decoder.go | 2 +-
 .../go/encoding/protobuf/textproto/encoder.go | 8 +-
 .../cuelang.org/go/encoding/protobuf/util.go | 4 +-
 .../go/internal/astinternal/debugstr.go | 2 +-
 vendor/cuelang.org/go/internal/attrs.go | 2 +-
 vendor/cuelang.org/go/internal/cli/cli.go | 6 +-
 .../cuelang.org/go/internal/core/adt/adt.go | 12 +-
 .../cuelang.org/go/internal/core/adt/binop.go | 7 +
 .../go/internal/core/adt/closed.go | 3 +
 .../go/internal/core/adt/closed2.go | 2 +
 .../go/internal/core/adt/composite.go | 406 +-
 .../go/internal/core/adt/comprehension.go | 120 +-
 .../go/internal/core/adt/conjunct.go | 677 ++
 .../go/internal/core/adt/constraints.go | 26 +-
 .../go/internal/core/adt/context.go | 228 +-
 .../cuelang.org/go/internal/core/adt/cycle.go | 199 +-
 .../cuelang.org/go/internal/core/adt/debug.go | 703 ++
 .../go/internal/core/adt/default.go | 17 +
 .../cuelang.org/go/internal/core/adt/dev.go | 79 +
 .../go/internal/core/adt/disjunct.go | 32 +-
 .../go/internal/core/adt/disjunct2.go | 763 ++
 .../cuelang.org/go/internal/core/adt/doc.go | 6 +-
 .../go/internal/core/adt/equality.go | 25 +-
 .../go/internal/core/adt/errors.go | 70 +-
 .../cuelang.org/go/internal/core/adt/eval.go | 179 +-
 .../cuelang.org/go/internal/core/adt/expr.go | 301 +-
 .../go/internal/core/adt/feature.go | 2 +-
 .../go/internal/core/adt/fields.go | 756 +-
 vendor/cuelang.org/go/internal/core/adt/op.go | 99 +-
 .../go/internal/core/adt/op_string.go | 49 +
 .../go/internal/core/adt/overlay.go | 515 +
 .../cuelang.org/go/internal/core/adt/prof.go | 7 +
 .../cuelang.org/go/internal/core/adt/sched.go | 752 ++
 .../cuelang.org/go/internal/core/adt/share.go | 183 +
 .../go/internal/core/adt/states.go | 328 +
 .../cuelang.org/go/internal/core/adt/tasks.go | 352 +
 .../cuelang.org/go/internal/core/adt/unify.go | 760 ++
 .../go/internal/core/convert/go.go | 21 +-
 .../go/internal/core/debug/compact.go | 44 +-
 .../go/internal/core/debug/debug.go | 162 +-
 .../cuelang.org/go/internal/core/dep/dep.go | 16 +-
 .../cuelang.org/go/internal/core/dep/mixed.go | 12 +-
 .../cuelang.org/go/internal/core/eval/eval.go | 19 +-
 .../go/internal/core/export/adt.go | 10 +
 .../go/internal/core/export/export.go | 34 +-
 .../go/internal/core/export/expr.go | 17 +-
 .../go/internal/core/export/extract.go | 30 +-
 .../go/internal/core/export/self.go | 2 +-
 .../go/internal/core/export/toposort.go | 5 +-
 .../go/internal/core/export/value.go | 9 +-
 .../go/internal/core/runtime/build.go | 3 +-
 .../go/internal/core/runtime/extern.go | 37 +-
 .../go/internal/core/runtime/runtime.go | 34 +-
 .../go/internal/core/subsume/value.go | 13 +-
 .../go/internal/core/subsume/vertex.go | 240 +-
 .../go/internal/core/validate/validate.go | 6 +-
 .../cuelang.org/go/internal/core/walk/walk.go | 5 +
 .../go/internal/cueconfig/config.go | 220 +
 .../cuelang.org/go/internal/cuedebug/debug.go | 47 +
 .../go/internal/cueexperiment/exp.go | 38 +
 .../{cue/load => internal/cueimports}/read.go | 62 +-
 .../go/internal/cueversion/transport.go | 34 +
 .../go/internal/cueversion/version.go | 107 +
 .../go/internal/encoding/detect.go | 8 +-
 .../go/internal/encoding/encoder.go | 43 +-
 .../go/internal/encoding/encoding.go | 98 +-
 .../go/internal/encoding/yaml/decode.go | 658 ++
 .../go/internal/encoding/yaml/encode.go | 2 +-
 .../cuelang.org/go/internal/envflag/flag.go | 95 +
 .../go/internal/filetypes/filetypes.go | 149 +-
 .../go/internal/filetypes/test.cue | 19 -
 .../go/internal/filetypes/types.cue | 345 +-
 .../go/internal/filetypes/types.go | 64 +-
 .../cuelang.org/go/internal/filetypes/util.go | 8 +-
 .../tools/robustio/gopls_windows.go | 14 +
 .../golangorgx/tools/robustio/robustio.go | 69 +
 .../tools/robustio/robustio_darwin.go | 21 +
 .../tools/robustio/robustio_flaky.go | 92 +
 .../tools/robustio/robustio_other.go | 28 +
 .../tools/robustio/robustio_plan9.go | 26 +
 .../tools/robustio/robustio_posix.go | 28 +
 .../tools/robustio/robustio_windows.go | 51 +
 vendor/cuelang.org/go/internal/internal.go | 68 +-
 .../go/internal/mod/modfile/modfile.go | 171 -
 .../go/internal/mod/modfile/schema.cue | 153 -
 .../go/internal/mod/modimports/modimports.go | 244 +
 .../go/internal/mod/modload/query.go | 135 +
 .../go/internal/mod/modload/tidy.go | 693 ++
 .../go/internal/mod/modload/update.go | 175 +
 .../go/internal/mod/modpkgload/import.go | 316 +
 .../go/internal/mod/modpkgload/pkgload.go | 370 +
 .../go/internal/mod/modregistry/client.go | 359 -
 .../mod/modrequirements/requirements.go | 420 +
 .../go/internal/mod/modresolve/resolve.go | 532 +
 .../go/internal/mod/modresolve/schema.cue | 110 +
 .../go/internal/mod/module/module.go | 181 -
 .../cuelang.org/go/internal/mod/mvs/graph.go | 3 +-
 vendor/cuelang.org/go/internal/mod/mvs/mvs.go | 16 +-
 .../go/internal/mod/semver/semver.go | 27 +-
 .../internal/{mod/internal => }/par/queue.go | 0
 .../internal/{mod/internal => }/par/work.go | 0
 vendor/cuelang.org/go/internal/pkg/context.go | 8 +-
 vendor/cuelang.org/go/internal/pkg/types.go | 8 +-
 .../cuelang.org/go/internal/slices/slices.go | 12 -
 .../cuelang.org/go/internal/source/source.go | 40 +-
 vendor/cuelang.org/go/internal/task/task.go | 14 +-
 .../go/internal/third_party/yaml/decode.go | 48 +-
 .../go/internal/third_party/yaml/yaml.go | 147 +-
 vendor/cuelang.org/go/mod/modcache/cache.go | 173 +
 vendor/cuelang.org/go/mod/modcache/fetch.go | 367 +
 .../cuelang.org/go/mod/modconfig/modconfig.go | 405 +
 vendor/cuelang.org/go/mod/modfile/modfile.go | 586 ++
 vendor/cuelang.org/go/mod/modfile/schema.cue | 147 +
 .../cuelang.org/go/mod/modregistry/client.go | 504 +
 .../go/mod/modregistry/metadata.go | 58 +
 vendor/cuelang.org/go/mod/module/dirfs.go | 56 +
 .../go/{internal => }/mod/module/error.go | 15 -
 vendor/cuelang.org/go/mod/module/escape.go | 68 +
 vendor/cuelang.org/go/mod/module/module.go | 271 +
 .../go/{internal => }/mod/module/path.go | 135 +-
 .../go/{internal => }/mod/module/versions.go | 0
 .../go/{internal => }/mod/modzip/zip.go | 74 +-
 .../go/pkg/encoding/yaml/manual.go | 19 +-
 vendor/cuelang.org/go/pkg/list/math.go | 18 +-
 vendor/cuelang.org/go/pkg/list/sort.go | 56 +-
 vendor/cuelang.org/go/pkg/math/manual.go | 15 +-
 vendor/cuelang.org/go/pkg/net/host.go | 2 +-
 vendor/cuelang.org/go/pkg/net/ip.go | 83 +-
 vendor/cuelang.org/go/pkg/net/pkg.go | 12 +
 vendor/cuelang.org/go/pkg/path/path.go | 10 +-
 vendor/cuelang.org/go/pkg/path/pkg.go | 1 +
 vendor/cuelang.org/go/pkg/tool/exec/exec.cue | 2 +-
 vendor/cuelang.org/go/pkg/tool/exec/exec.go | 33 +-
 vendor/cuelang.org/go/pkg/tool/exec/pkg.go | 6 +-
 vendor/cuelang.org/go/pkg/tool/http/http.cue | 10 +-
 vendor/cuelang.org/go/pkg/tool/http/http.go | 10 +-
 vendor/cuelang.org/go/pkg/tool/http/pkg.go | 10 +-
 vendor/cuelang.org/go/pkg/tool/os/env.go | 2 +-
 .../alibabacloudsdkgo/helper/helper.go | 51 -
 .../helper => provider}/LICENSE | 0
 .../provider/accesskey_provider.go | 27 +
 .../credentials/provider/chain_provider.go | 220 +
 .../pkg/credentials/provider/credentials.go | 22 +
 .../provider/ecsmetadata_provider.go | 198 +
 .../provider/encryptedfile_provider.go | 135 +
 .../pkg/credentials/provider/endpoint.go | 43 +
 .../pkg/credentials/provider/env.go | 155 +
 .../pkg/credentials/provider/env_provider.go | 128 +
 .../pkg/credentials/provider/error.go | 55 +
 .../pkg/credentials/provider/file_provider.go | 76 +
 .../credentials/provider/function_provider.go | 28 +
 .../pkg/credentials/provider/http.go | 91 +
 .../pkg/credentials/provider/log.go | 44 +
 .../pkg/credentials/provider/oidc_provider.go | 306 +
 .../pkg/credentials/provider/provider.go | 24 +
 .../pkg/credentials/provider/req.go | 174 +
 .../credentials/provider/rolearn_provider.go | 240 +
 .../provider/semaphore_provider.go | 48 +
 .../credentials/provider/ststoken_provider.go | 35 +
 .../pkg/credentials/provider/updater.go | 206 +
 .../pkg/credentials/provider/uri_provider.go | 117 +
 .../pkg/credentials/provider/v1sdk.go | 92 +
 .../pkg/credentials/provider/v2sdk.go | 87 +
 .../credentials/oidc_credential.go | 4 +
 .../aws/accountid_endpoint_mode.go | 18 +
 .../aws/aws-sdk-go-v2/aws/config.go | 3 +
 .../aws/aws-sdk-go-v2/aws/credentials.go | 3 +
 .../aws/aws-sdk-go-v2/aws/endpoints.go | 26 +-
 .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +-
 .../aws/middleware/private/metrics/metrics.go | 5 +-
 .../aws/middleware/user_agent.go | 44 +
 .../aws/aws-sdk-go-v2/aws/retry/middleware.go | 45 +-
 .../aws/retry/retryable_error.go | 21 +
 .../aws/signer/internal/v4/headers.go | 2 -
 .../aws-sdk-go-v2/aws/signer/v4/middleware.go | 29 -
 .../aws/aws-sdk-go-v2/aws/signer/v4/v4.go | 66 +-
 .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 98 +
 .../aws/aws-sdk-go-v2/config/config.go | 3 +
 .../aws/aws-sdk-go-v2/config/env_config.go | 37 +
 .../config/go_module_metadata.go | 2 +-
 .../aws/aws-sdk-go-v2/config/load_options.go | 29 +-
 .../aws/aws-sdk-go-v2/config/provider.go | 17 +
 .../aws/aws-sdk-go-v2/config/resolve.go | 16 +
 .../aws/aws-sdk-go-v2/config/shared_config.go | 34 +
 .../aws-sdk-go-v2/credentials/CHANGELOG.md | 93 +
 .../endpointcreds/internal/client/client.go | 1 +
 .../credentials/endpointcreds/provider.go | 1 +
 .../credentials/go_module_metadata.go | 2 +-
 .../credentials/processcreds/provider.go | 4 +
 .../credentials/ssocreds/sso_cached_token.go | 2 +-
 .../ssocreds/sso_credentials_provider.go | 1 +
 .../stscreds/assume_role_provider.go | 6 +
 .../stscreds/web_identity_provider.go | 19 +
 .../feature/ec2/imds/CHANGELOG.md | 53 +
 .../feature/ec2/imds/go_module_metadata.go | 2 +-
 .../internal/auth/smithy/v4signer_adapter.go | 6 +-
 .../internal/configsources/CHANGELOG.md | 53 +
 .../configsources/go_module_metadata.go | 2 +-
 .../aws-sdk-go-v2/internal/context/context.go | 52 +
 .../endpoints/awsrulesfn/partition.go | 11 +-
 .../endpoints/awsrulesfn/partitions.go | 94 +-
 .../endpoints/awsrulesfn/partitions.json | 9 +-
 .../internal/endpoints/v2/CHANGELOG.md | 53 +
 .../endpoints/v2/go_module_metadata.go | 2 +-
 .../aws-sdk-go-v2/internal/ini/CHANGELOG.md | 4 +
 .../internal/ini/go_module_metadata.go | 2 +-
 .../internal/middleware/middleware.go | 42 +
 .../internal/accept-encoding/CHANGELOG.md | 12 +
 .../accept-encoding/go_module_metadata.go | 2 +-
 .../internal/presigned-url/CHANGELOG.md | 53 +
 .../presigned-url/go_module_metadata.go | 2 +-
 .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 79 +
 .../aws-sdk-go-v2/service/sso/api_client.go | 108 +-
 .../service/sso/api_op_GetRoleCredentials.go | 13 +-
 .../service/sso/api_op_ListAccountRoles.go | 32 +-
 .../service/sso/api_op_ListAccounts.go | 37 +-
 .../service/sso/api_op_Logout.go | 37 +-
 .../aws/aws-sdk-go-v2/service/sso/auth.go | 8 +-
 .../service/sso/deserializers.go | 10 +
 .../aws/aws-sdk-go-v2/service/sso/doc.go | 22 +-
 .../aws-sdk-go-v2/service/sso/endpoints.go | 19 +-
 .../service/sso/go_module_metadata.go | 2 +-
 .../sso/internal/endpoints/endpoints.go | 26 +-
 .../aws/aws-sdk-go-v2/service/sso/options.go | 34 +-
 .../aws-sdk-go-v2/service/sso/types/types.go | 20 +-
 .../service/ssooidc/CHANGELOG.md | 79 +
 .../service/ssooidc/api_client.go | 108 +-
 .../service/ssooidc/api_op_CreateToken.go | 62 +-
 .../ssooidc/api_op_CreateTokenWithIAM.go | 69 +-
 .../service/ssooidc/api_op_RegisterClient.go | 25 +
 .../api_op_StartDeviceAuthorization.go | 17 +-
 .../aws/aws-sdk-go-v2/service/ssooidc/auth.go | 8 +-
 .../service/ssooidc/deserializers.go | 101 +
 .../aws/aws-sdk-go-v2/service/ssooidc/doc.go | 40 +-
 .../service/ssooidc/endpoints.go | 19 +-
 .../service/ssooidc/go_module_metadata.go | 2 +-
 .../ssooidc/internal/endpoints/endpoints.go | 26 +-
 .../aws-sdk-go-v2/service/ssooidc/options.go | 34 +-
 .../service/ssooidc/serializers.go | 56 +
 .../service/ssooidc/types/errors.go | 32 +-
 .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 75 +
 .../aws-sdk-go-v2/service/sts/api_client.go | 108 +-
 .../service/sts/api_op_AssumeRole.go | 457 +-
 .../service/sts/api_op_AssumeRoleWithSAML.go | 373 +-
 .../sts/api_op_AssumeRoleWithWebIdentity.go | 387 +-
 .../sts/api_op_DecodeAuthorizationMessage.go | 51 +-
 .../service/sts/api_op_GetAccessKeyInfo.go | 55 +-
 .../service/sts/api_op_GetCallerIdentity.go | 32 +-
 .../service/sts/api_op_GetFederationToken.go | 315 +-
 .../service/sts/api_op_GetSessionToken.go | 110 +-
 .../aws/aws-sdk-go-v2/service/sts/auth.go | 8 +-
 .../service/sts/deserializers.go | 9 +
 .../aws/aws-sdk-go-v2/service/sts/doc.go | 12 +-
 .../aws-sdk-go-v2/service/sts/endpoints.go | 19 +-
 .../service/sts/go_module_metadata.go | 2 +-
 .../sts/internal/endpoints/endpoints.go | 5 +-
 .../aws/aws-sdk-go-v2/service/sts/options.go | 34 +-
 .../aws-sdk-go-v2/service/sts/types/errors.go | 26 +-
 .../aws-sdk-go-v2/service/sts/types/types.go | 50 +-
 vendor/github.com/aws/smithy-go/CHANGELOG.md | 16 +
 vendor/github.com/aws/smithy-go/README.md | 74 +-
 .../aws/smithy-go/go_module_metadata.go | 2 +-
 .../github.com/buildkite/agent/v3/LICENSE.txt | 2 +-
 .../buildkite/agent/v3/api/annotations.go | 9 +-
 .../buildkite/agent/v3/api/artifacts.go | 2 +-
 .../buildkite/agent/v3/api/client.go | 42 +-
 .../agent/v3/api/github_code_access_token.go | 63 +
 .../buildkite/agent/v3/api/secrets.go | 40 +
 .../buildkite/agent/v3/logger/log.go | 8 +-
 .../buildkite/agent/v3/version/VERSION | 2 +-
 .../buildkite/go-pipeline/internal/env/env.go | 81 +
 .../buildkite/go-pipeline/ordered/map.go | 12 +-
 .../go-pipeline/ordered/unmarshal.go | 70 +-
 .../buildkite/go-pipeline/parser.go | 15 +
 .../buildkite/go-pipeline/pipeline.go | 73 +-
 .../buildkite/go-pipeline/plugin.go | 20 +-
 .../buildkite/go-pipeline/step_command.go | 1 +
 .../go-pipeline/step_command_cache.go | 70 +
 .../go-pipeline/step_command_matrix.go | 6 +
 .../buildkite/go-pipeline/step_input.go | 20 +-
 .../buildkite/go-pipeline/step_scalar.go | 12 +-
 .../buildkite/go-pipeline/step_unknown.go | 12 +-
 .../buildkite/go-pipeline/step_wait.go | 26 +-
 .../github.com/buildkite/go-pipeline/steps.go | 80 +-
 .../buildkite/go-pipeline/warning/warning.go | 143 +
 .../buildkite/interpolate/README.md | 6 +-
 .../buildkite/interpolate/interpolate.go | 13 +
 .../buildkite/interpolate/parser.go | 63 +-
 vendor/github.com/buildkite/roko/.gitignore | 21 +
 vendor/github.com/buildkite/roko/LICENSE.md | 7 +
 vendor/github.com/buildkite/roko/README.md | 208 +
 vendor/github.com/buildkite/roko/retrier.go | 354 +
 .../github.com/coreos/go-oidc/v3/oidc/jwks.go | 16 +-
 .../coreos/go-oidc/v3/oidc/verify.go | 4 +-
 .../github.com/dustin/go-humanize/.travis.yml | 21 +
 .../unique => dustin/go-humanize}/LICENSE | 9 +-
 .../dustin/go-humanize/README.markdown | 124 +
 vendor/github.com/dustin/go-humanize/big.go | 31 +
 .../github.com/dustin/go-humanize/bigbytes.go | 189 +
 vendor/github.com/dustin/go-humanize/bytes.go | 143 +
 vendor/github.com/dustin/go-humanize/comma.go | 116 +
 .../github.com/dustin/go-humanize/commaf.go | 41 +
 vendor/github.com/dustin/go-humanize/ftoa.go | 49 +
 .../github.com/dustin/go-humanize/humanize.go | 8 +
 .../github.com/dustin/go-humanize/number.go | 192 +
 .../github.com/dustin/go-humanize/ordinals.go | 25 +
 vendor/github.com/dustin/go-humanize/si.go | 127 +
 vendor/github.com/dustin/go-humanize/times.go | 117 +
 .../go-jose/go-jose/v4/CHANGELOG.md | 24 +
 .../github.com/go-jose/go-jose/v4/crypter.go | 10 +-
 vendor/github.com/go-jose/go-jose/v4/jwk.go | 21 +-
 .../go-jose/go-jose/v4/jwt/builder.go | 315 +
 .../go-jose/go-jose/v4/jwt/claims.go | 130 +
 .../github.com/go-jose/go-jose/v4/jwt/doc.go | 20 +
 .../go-jose/go-jose/v4/jwt/errors.go | 53 +
 .../github.com/go-jose/go-jose/v4/jwt/jwt.go | 198 +
 .../go-jose/go-jose/v4/jwt/validation.go | 127 +
 .../github.com/go-jose/go-jose/v4/opaque.go | 3 +
 .../github.com/go-jose/go-jose/v4/signing.go | 10 +-
 .../.golangci.yaml | 10 +-
 .../certificate-transparency-go/AUTHORS | 1 +
 .../certificate-transparency-go/CHANGELOG.md | 143 +
 .../certificate-transparency-go/CONTRIBUTORS | 1 +
 .../client/configpb/multilog.pb.go | 278 +
 .../client/configpb/multilog.proto | 45 +
 .../client/getentries.go | 68 +
 .../client/logclient.go | 226 +
 .../client/multilog.go | 223 +
 .../cloudbuild.yaml | 7 +
 .../cloudbuild_master.yaml | 7 +
 .../ctutil/ctutil.go | 211 +
 .../ctutil/loginfo.go | 174 +
 .../jsonclient/backoff.go | 72 +
 .../jsonclient/client.go | 335 +
 .../loglist3/logfilter.go | 129 +
 .../loglist3/loglist3.go | 391 +
 .../loglist3/logstatus_string.go | 29 +
 .../certificate-transparency-go/types.go | 15 +
 .../proto/common_go_proto/common.pb.go | 99 +-
 .../s2a_context_go_proto/s2a_context.pb.go | 6 +-
 .../internal/proto/s2a_go_proto/s2a.pb.go | 28 +-
 .../proto/s2a_go_proto/s2a_grpc.pb.go | 13 +-
 .../proto/v2/common_go_proto/common.pb.go | 296 +-
 .../v2/s2a_context_go_proto/s2a_context.pb.go | 73 +-
 .../internal/proto/v2/s2a_go_proto/s2a.pb.go | 772 +-
 .../proto/v2/s2a_go_proto/s2a_grpc.pb.go | 13 +-
 .../google/s2a-go/internal/record/record.go | 34 +-
 .../internal/tokenmanager/tokenmanager.go | 15 +-
 .../testdata/client_intermediate_cert.der | Bin 998 -> 0 bytes
 .../testdata/client_leaf_cert.der | Bin 1147 -> 0 bytes
 .../testdata/client_root_cert.der | Bin 1013 -> 0 bytes
 .../testdata/server_intermediate_cert.der | Bin 998 -> 0 bytes
 .../testdata/server_leaf_cert.der | Bin 1147 -> 0 bytes
 .../testdata/server_root_cert.der | Bin 1013 -> 0 bytes
 .../v2/remotesigner/testdata/client_cert.der | Bin 1013 -> 0 bytes
 .../v2/remotesigner/testdata/client_cert.pem | 24 -
 .../v2/remotesigner/testdata/client_key.pem | 27 -
 .../v2/remotesigner/testdata/server_cert.der | Bin 1013 -> 0 bytes
 .../v2/remotesigner/testdata/server_cert.pem | 24 -
 .../v2/remotesigner/testdata/server_key.pem | 27 -
 .../google/s2a-go/internal/v2/s2av2.go | 40 +-
 .../internal/v2/testdata/client_cert.pem | 24 -
 .../internal/v2/testdata/client_key.pem | 27 -
 .../internal/v2/testdata/server_cert.pem | 24 -
 .../internal/v2/testdata/server_key.pem | 27 -
 .../tlsconfigstore/testdata/client_cert.pem | 24 -
 .../v2/tlsconfigstore/testdata/client_key.pem | 27 -
 .../tlsconfigstore/testdata/server_cert.pem | 24 -
 .../v2/tlsconfigstore/testdata/server_key.pem | 27 -
 .../v2/tlsconfigstore/tlsconfigstore.go | 15 +-
 vendor/github.com/google/s2a-go/s2a.go | 103 +-
 .../github.com/google/s2a-go/s2a_options.go | 28 +-
 .../google/s2a-go/testdata/client_cert.pem | 24 -
 .../google/s2a-go/testdata/client_key.pem | 27 -
 .../s2a-go/testdata/mds_client_cert.pem | 19 -
 .../google/s2a-go/testdata/mds_client_key.pem | 28 -
 .../google/s2a-go/testdata/mds_root_cert.pem | 21 -
 .../s2a-go/testdata/mds_server_cert.pem | 21 -
 .../google/s2a-go/testdata/mds_server_key.pem | 28 -
 .../s2a-go/testdata/self_signed_cert.pem | 19 -
 .../s2a-go/testdata/self_signed_key.pem | 28 -
 .../google/s2a-go/testdata/server_cert.pem | 24 -
 .../google/s2a-go/testdata/server_key.pem | 27 -
 vendor/github.com/in-toto/attestation/LICENSE | 13 +
 .../attestation/go/v1/resource_descriptor.go | 61 +
 .../go/v1/resource_descriptor.pb.go | 233 +
 .../in-toto/attestation/go/v1/statement.go | 50 +
 .../in-toto/attestation/go/v1/statement.pb.go | 197 +
 .../letsencrypt/boulder/core/objects.go | 93 +-
 .../letsencrypt/boulder/core/util.go | 4 +-
 .../letsencrypt/boulder/goodkey/good_key.go | 129 +-
 .../pkg/acr/client.go | 7 +-
 .../pkg/acr/ee.go | 15 +-
 .../pkg/acr/openapiauth.go | 62 +-
 .../pkg/acr/person.go | 5 +-
 .../pkg/credhelper/helper.go | 2 +-
 vendor/github.com/mpvl/unique/.gitignore | 24 -
 vendor/github.com/mpvl/unique/unique.go | 98 -
 .../github.com/oleiade/reflections/.gitignore | 2 +
 .../oleiade/reflections/.golangci.yml | 76 +
 .../github.com/oleiade/reflections/.vale.ini | 9 +
 .../github.com/oleiade/reflections/README.md | 361 +-
 .../oleiade/reflections/reflections.go | 231 +-
 .../open-policy-agent/opa/ast/annotations.go | 44 +-
 .../open-policy-agent/opa/ast/builtins.go | 76 +-
 .../open-policy-agent/opa/ast/check.go | 7 +-
 .../open-policy-agent/opa/ast/compile.go | 206 +-
 .../open-policy-agent/opa/ast/env.go | 4 +-
 .../open-policy-agent/opa/ast/index.go | 4 +-
 .../opa/ast/internal/scanner/scanner.go | 17 +-
 .../opa/ast/location/location.go | 2 +
 .../open-policy-agent/opa/ast/parser.go | 71 +-
 .../open-policy-agent/opa/ast/parser_ext.go | 2 +
 .../open-policy-agent/opa/ast/policy.go | 136 +-
 .../open-policy-agent/opa/ast/rego_v1.go | 61 +-
 .../open-policy-agent/opa/ast/term.go | 8 +-
 .../opa/ast/version_index.json | 21 +
 .../open-policy-agent/opa/ast/visit.go | 12 +-
 .../open-policy-agent/opa/bundle/bundle.go | 314 +-
 .../open-policy-agent/opa/bundle/file.go | 39 +-
 .../open-policy-agent/opa/bundle/filefs.go | 16 +
 .../opa/capabilities/v0.62.0.json | 4737 +++++++++
 .../opa/capabilities/v0.62.1.json | 4737 +++++++++
 .../opa/capabilities/v0.63.0.json | 4781 +++++++++
 .../opa/capabilities/v0.64.0.json | 4826 +++++++++
 .../opa/capabilities/v0.64.1.json | 4826 +++++++++
 .../opa/capabilities/v0.65.0.json | 4826 +++++++++
 .../opa/capabilities/v0.66.0.json | 4826 +++++++++
 .../opa/capabilities/v0.67.0.json | 4843 +++++++++
 .../opa/capabilities/v0.67.1.json | 4843 +++++++++
 .../opa/capabilities/v0.68.0.json | 4843 +++++++++
 .../open-policy-agent/opa/config/config.go | 1 +
 .../open-policy-agent/opa/format/format.go | 65 +-
 .../opa/internal/bundle/utils.go | 7 +-
 .../internal/compiler/wasm/opa/callgraph.csv | 9 +-
 .../opa/internal/compiler/wasm/opa/opa.wasm | Bin 431632 -> 436563 bytes
 .../opa/internal/compiler/wasm/wasm.go | 13 +-
 .../opa/internal/errors/join.go | 53 -
 .../opa/internal/errors/join_go1.20.go | 7 -
 .../opa/internal/gojsonschema/schema.go | 2 +-
 .../opa/internal/gojsonschema/validation.go | 8 +-
 .../rules/fragments_on_composite_types.go | 2 +-
 .../validator/rules/known_argument_names.go | 4 +-
 .../validator/rules/known_directives.go | 2 +-
 .../validator/rules/known_fragment_names.go | 2 +-
 .../validator/rules/no_unused_fragments.go | 4 +-
 .../validator/rules/no_unused_variables.go | 2 +-
 .../rules/overlapping_fields_can_be_merged.go | 2 +-
 .../rules/provided_required_arguments.go | 4 +-
 .../validator/rules/unique_argument_names.go | 4 +-
 .../rules/unique_directives_per_location.go | 2 +-
 .../validator/rules/unique_fragment_names.go | 2 +-
 .../rules/unique_input_field_names.go | 2 +-
 .../validator/rules/unique_operation_names.go | 2 +-
 .../validator/rules/unique_variable_names.go | 2 +-
 .../validator/rules/values_of_correct_type.go | 2 +-
 .../rules/variables_are_input_types.go | 2 +-
 .../opa/internal/planner/planner.go | 36 +-
 .../opa/internal/providers/aws/signing_v4.go | 22 +-
 .../opa/internal/providers/aws/signing_v4a.go | 10 +-
 .../opa/internal/providers/aws/util.go | 18 +-
 .../opa/internal/providers/aws/v4/util.go | 9 +-
 .../opa/internal/runtime/init/init.go | 4 +-
 .../opa/internal/strings/strings.go | 8 +
 .../opa/internal/wasm/encoding/reader.go | 4 +-
 .../github.com/open-policy-agent/opa/ir/ir.go | 7 +
 .../open-policy-agent/opa/ir/pretty.go | 4 +-
 .../open-policy-agent/opa/loader/errors.go | 2 +-
 .../loader/internal/embedtest/bar/bar.rego | 3 -
 .../loader/internal/embedtest/bar/bar.yaml | 1 -
 .../internal/embedtest/baz/qux/qux.json | 1 -
 .../opa/loader/internal/embedtest/foo.json | 1 -
 .../open-policy-agent/opa/loader/loader.go | 63 +-
 .../open-policy-agent/opa/logging/logging.go | 41 +-
 .../open-policy-agent/opa/plugins/plugins.go | 9 +-
 .../opa/plugins/rest/auth.go | 115 +-
 .../open-policy-agent/opa/plugins/rest/aws.go | 229 +-
 .../open-policy-agent/opa/rego/rego.go | 97 +-
 .../open-policy-agent/opa/topdown/bindings.go | 2 +-
 .../open-policy-agent/opa/topdown/cache.go | 45 +-
 .../copypropagation/copypropagation.go | 6 +-
 .../open-policy-agent/opa/topdown/crypto.go | 161 +-
 .../open-policy-agent/opa/topdown/encoding.go | 87 +
 .../open-policy-agent/opa/topdown/eval.go | 494 +-
 .../open-policy-agent/opa/topdown/glob.go | 9 +
 .../open-policy-agent/opa/topdown/http.go | 125 +-
 .../opa/topdown/parse_bytes.go | 2 +-
 .../opa/topdown/providers.go | 16 +-
 .../open-policy-agent/opa/topdown/query.go | 30 +-
 .../opa/topdown/reachable.go | 4 +-
 .../open-policy-agent/opa/topdown/regex.go | 9 +
 .../open-policy-agent/opa/topdown/semver.go | 4 +-
 .../open-policy-agent/opa/topdown/strings.go | 28 +-
 .../open-policy-agent/opa/topdown/tokens.go | 68 +-
 .../open-policy-agent/opa/topdown/trace.go | 474 +-
 .../open-policy-agent/opa/types/decode.go | 6 +-
 .../open-policy-agent/opa/types/types.go | 8 +-
 .../opa/util/decoding/context.go | 31 +
 .../open-policy-agent/opa/util/hashmap.go | 2 +-
 .../open-policy-agent/opa/util/maps.go | 10 +
 .../opa/util/read_gzip_body.go | 81 +
 .../open-policy-agent/opa/version/version.go | 2 +-
 .../opa/version/version_go1.18.go | 8 -
 .../pelletier/go-toml/v2/.gitignore | 3 +-
 .../pelletier/go-toml/v2/CONTRIBUTING.md | 31 +-
 .../github.com/pelletier/go-toml/v2/README.md | 117 +-
 .../pelletier/go-toml/v2/SECURITY.md | 3 -
 vendor/github.com/pelletier/go-toml/v2/ci.sh | 22 +-
 .../go-toml/v2/internal/tracker/seen.go | 74 +-
 .../pelletier/go-toml/v2/marshaler.go | 39 +-
 .../pelletier/go-toml/v2/unmarshaler.go | 57 +-
 .../go-toml/v2/unstable/unmarshaler.go | 7 +
 .../github.com/rogpeppe/go-internal/LICENSE | 27 +
 .../internal/syscall/windows/mksyscall.go | 7 +
 .../internal/syscall/windows/psapi_windows.go | 20 +
 .../syscall/windows/reparse_windows.go | 64 +
 .../syscall/windows/security_windows.go | 128 +
 .../syscall/windows/symlink_windows.go | 39 +
 .../syscall/windows/syscall_windows.go | 307 +
 .../internal/syscall/windows/sysdll/sysdll.go | 28 +
 .../syscall/windows/zsyscall_windows.go | 363 +
 .../lockedfile/internal/filelock/filelock.go | 99 +
 .../internal/filelock/filelock_fcntl.go | 214 +
 .../internal/filelock/filelock_other.go | 36 +
 .../internal/filelock/filelock_unix.go | 44 +
 .../internal/filelock/filelock_windows.go | 67 +
 .../go-internal/lockedfile/lockedfile.go | 187 +
 .../lockedfile/lockedfile_filelock.go | 65 +
 .../lockedfile/lockedfile_plan9.go | 94 +
 .../rogpeppe/go-internal/lockedfile/mutex.go | 67 +
 .../rogpeppe/go-internal/robustio/robustio.go | 53 +
 .../go-internal/robustio/robustio_darwin.go | 21 +
 .../go-internal/robustio/robustio_flaky.go | 91 +
 .../go-internal/robustio/robustio_other.go | 27 +
 .../go-internal/robustio/robustio_windows.go | 28 +
 .../cosign/v2/cmd/cosign/cli/fulcio/fulcio.go | 9 +-
 .../v2/cmd/cosign/cli/generate/generate.go | 3 +-
 .../cosign/cli/generate/generate_key_pair.go | 2 +-
 .../v2/cmd/cosign/cli/options/attest.go | 24 +-
 .../v2/cmd/cosign/cli/options/attest_blob.go | 5 +
 .../v2/cmd/cosign/cli/options/certificate.go | 17 +-
 .../v2/cmd/cosign/cli/options/fulcio.go | 4 +
 .../cosign/v2/cmd/cosign/cli/options/key.go | 1 +
 .../cosign/v2/cmd/cosign/cli/options/save.go | 2 +
 .../cosign/v2/cmd/cosign/cli/options/sign.go | 43 +-
 .../v2/cmd/cosign/cli/options/signblob.go | 5 +
 .../v2/cmd/cosign/cli/options/verify.go | 36 +-
 .../cosign/v2/cmd/cosign/cli/sign/sign.go | 2 +-
 .../v2/cmd/cosign/cli/sign/sign_blob.go | 112 +-
 .../cosign/v2/cmd/cosign/cli/verify/verify.go | 151 +-
 .../cosign/cli/verify/verify_attestation.go | 59 +-
 .../v2/cmd/cosign/cli/verify/verify_blob.go | 91 +-
 .../cli/verify/verify_blob_attestation.go | 82 +-
 .../v2/cmd/cosign/cli/verify/verify_bundle.go | 163 +
 .../cosign/v2/internal/pkg/cosign/common.go | 3 +-
 .../cosign/fulcio/fulcioroots/fulcioroots.go | 7 +
 .../pkg/cosign/payload/size/errors.go | 31 +
 .../internal/pkg/cosign/payload/size/size.go | 38 +
 .../cosign/v2/internal/pkg/now/now.go | 38 +
 .../v2/pkg/cosign/bundle/protobundle.go | 65 +
 .../sigstore/cosign/v2/pkg/cosign/env/env.go | 14 +-
 .../cosign/v2/pkg/cosign/git/github/github.go | 38 +-
 .../sigstore/cosign/v2/pkg/cosign/keys.go | 17 +-
 .../sigstore/cosign/v2/pkg/cosign/tsa.go | 154 +
 .../sigstore/cosign/v2/pkg/cosign/verify.go | 8 +-
 .../cosign/v2/pkg/cosign/verify_sct.go | 2 +-
 .../sigstore/cosign/v2/pkg/oci/errors.go} | 27 +-
 .../v2/pkg/oci/internal/signature/layer.go | 10 +
 .../cosign/v2/pkg/oci/layout/signatures.go | 8 +-
 .../cosign/v2/pkg/oci/mutate/mutate.go | 2 +-
 .../cosign/v2/pkg/oci/mutate/options.go | 11 +-
 .../cosign/v2/pkg/oci/mutate/signatures.go | 22 +-
 .../cosign/v2/pkg/oci/remote/image.go | 7 +
 .../cosign/v2/pkg/oci/remote/remote.go | 10 +
 .../cosign/v2/pkg/oci/remote/signatures.go | 13 +
 .../sigstore/cosign/v2/pkg/oci/static/file.go | 23 +
 .../cosign/v2/pkg/oci/static/options.go | 22 +-
 .../cosign/v2/pkg/policy/attestation.go | 13 +-
 .../cosign/v2/pkg/providers/interface.go | 38 +-
 .../sigstore/protobuf-specs/COPYRIGHT.txt | 14 +
 .../sigstore/protobuf-specs/LICENSE | 202 +
 .../gen/pb-go/bundle/v1/sigstore_bundle.pb.go | 580 ++
 .../gen/pb-go/common/v1/sigstore_common.pb.go | 1451 +++
 .../gen/pb-go/dsse/envelope.pb.go | 273 +
 .../gen/pb-go/rekor/v1/sigstore_rekor.pb.go | 632 ++
 .../trustroot/v1/sigstore_trustroot.pb.go | 765 ++
 .../github.com/sigstore/rekor/pkg/tle/tle.go | 106 +
 .../sigstore/rekor/pkg/verify/verify.go | 234 +
 .../sigstore/sigstore-go/COPYRIGHT.txt | 13 +
 .../github.com/sigstore/sigstore-go/LICENSE | 201 +
 .../sigstore/sigstore-go/pkg/bundle/bundle.go | 406 +
 .../pkg/bundle/signature_content.go | 106 +
 .../pkg/bundle/verification_content.go | 88 +
 .../pkg/fulcio/certificate/extensions.go | 239 +
 .../pkg/fulcio/certificate/summarize.go | 90 +
 .../sigstore-go/pkg/root/trusted_material.go | 172 +
 .../sigstore-go/pkg/root/trusted_root.go | 459 +
 .../pkg/root/trusted_root_create.go | 235 +
 .../sigstore/sigstore-go/pkg/tlog/entry.go | 302 +
 .../sigstore/sigstore-go/pkg/tuf/client.go | 207 +
 .../sigstore/sigstore-go/pkg/tuf/config.go | 54 +
 .../sigstore/sigstore-go/pkg/tuf/options.go | 173 +
 .../sigstore-go/pkg/tuf/repository/root.json | 164 +
 .../pkg/tuf/repository/staging_root.json | 107 +
 .../sigstore/sigstore-go/pkg/util/util.go} | 32 +-
 .../sigstore-go/pkg/verify/certificate.go | 61 +
 .../pkg/verify/certificate_identity.go | 217 +
 .../sigstore-go/pkg/verify/errors.go} | 29 +-
 .../sigstore-go/pkg/verify/interface.go | 120 +
 .../sigstore/sigstore-go/pkg/verify/sct.go | 85 +
 .../sigstore-go/pkg/verify/signature.go | 267 +
 .../sigstore-go/pkg/verify/signed_entity.go | 746 ++
 .../sigstore/sigstore-go/pkg/verify/tlog.go | 210 +
 .../sigstore/sigstore-go/pkg/verify/tsa.go | 131 +
 .../sigstore/pkg/cryptoutils/publickey.go | 70 +-
 .../sigstore/sigstore/pkg/oauthflow/device.go | 3 +-
 .../sigstore/sigstore/pkg/oauthflow/flow.go | 30 +-
 vendor/github.com/spf13/viper/README.md | 8 +-
 .../github.com/spf13/viper/TROUBLESHOOTING.md | 4 +-
 vendor/github.com/spf13/viper/flake.lock | 62 +-
 vendor/github.com/spf13/viper/flake.nix | 1 +
 vendor/github.com/spf13/viper/viper.go | 8 +-
 .../go-spiffe/v2/bundle/jwtbundle/bundle.go | 6 +-
 .../v2/bundle/spiffebundle/bundle.go | 6 +-
 .../go-spiffe/v2/bundle/x509bundle/bundle.go | 14 +-
 .../go-spiffe/v2/internal/cryptoutil/keys.go | 5 +
 .../spiffe/go-spiffe/v2/svid/jwtsvid/svid.go | 48 +-
 .../spiffe/go-spiffe/v2/svid/x509svid/svid.go | 5 +
 .../spiffe/go-spiffe/v2/workloadapi/client.go | 6 +-
 .../theupdateframework/go-tuf/v2/LICENSE | 201 +
 .../theupdateframework/go-tuf/v2/NOTICE | 9 +
 .../go-tuf/v2/metadata/config/config.go | 89 +
 .../go-tuf/v2/metadata/errors.go | 246 +
 .../go-tuf/v2/metadata/fetcher/fetcher.go | 93 +
 .../go-tuf/v2/metadata/keys.go | 133 +
 .../go-tuf/v2/metadata/logger.go | 45 +
 .../go-tuf/v2/metadata/marshal.go | 567 ++
 .../go-tuf/v2/metadata/metadata.go | 922 ++
 .../trustedmetadata/trustedmetadata.go | 357 +
 .../go-tuf/v2/metadata/types.go | 179 +
 .../go-tuf/v2/metadata/updater/updater.go | 711 ++
 .../github.com/xanzy/go-gitlab/.golangci.yml | 3 +-
 vendor/github.com/xanzy/go-gitlab/README.md | 10 +-
 .../xanzy/go-gitlab/audit_events.go | 3 +
 vendor/github.com/xanzy/go-gitlab/commits.go | 49 +-
 .../xanzy/go-gitlab/container_registry.go | 1 +
 .../github.com/xanzy/go-gitlab/deployments.go | 53 +-
 .../xanzy/go-gitlab/dora_metrics.go | 110 +
 .../github.com/xanzy/go-gitlab/draft_notes.go | 233 +
 vendor/github.com/xanzy/go-gitlab/epics.go | 19 +-
 .../xanzy/go-gitlab/event_parsing.go | 62 +-
 .../xanzy/go-gitlab/event_webhook_types.go | 211 +-
 .../xanzy/go-gitlab/external_status_checks.go | 19 +
 vendor/github.com/xanzy/go-gitlab/gitlab.go | 42 +-
 .../github.com/xanzy/go-gitlab/group_hooks.go | 158 +-
 .../xanzy/go-gitlab/group_members.go | 39 +-
 .../xanzy/go-gitlab/group_milestones.go | 18 +-
 .../go-gitlab/group_protected_environments.go | 19 +-
 .../xanzy/go-gitlab/group_serviceaccounts.go | 81 +-
 .../xanzy/go-gitlab/group_variables.go | 28 +-
 vendor/github.com/xanzy/go-gitlab/groups.go | 223 +-
 vendor/github.com/xanzy/go-gitlab/import.go | 266 +
 .../xanzy/go-gitlab/instance_variables.go | 11 +-
 .../xanzy/go-gitlab/job_token_scope.go | 98 +
 vendor/github.com/xanzy/go-gitlab/jobs.go | 6 +-
 .../xanzy/go-gitlab/member_roles.go | 67 +-
 .../go-gitlab/merge_request_approvals.go | 21 +
 .../xanzy/go-gitlab/merge_requests.go | 41 +-
 vendor/github.com/xanzy/go-gitlab/packages.go | 17 +-
 .../xanzy/go-gitlab/personal_access_tokens.go | 71 +-
 .../xanzy/go-gitlab/pipeline_schedules.go | 11 +-
 .../github.com/xanzy/go-gitlab/pipelines.go | 12 +-
 .../xanzy/go-gitlab/project_members.go | 12 +-
 vendor/github.com/xanzy/go-gitlab/projects.go | 251 +-
 .../xanzy/go-gitlab/protected_environments.go | 21 +-
 .../xanzy/go-gitlab/releaselinks.go | 18 +-
 vendor/github.com/xanzy/go-gitlab/releases.go | 9 +-
 .../xanzy/go-gitlab/repository_files.go | 5 +-
 .../xanzy/go-gitlab/resource_group.go | 165 +
 .../go-gitlab/resource_iteration_events.go | 2 +-
 vendor/github.com/xanzy/go-gitlab/services.go | 452 +-
 vendor/github.com/xanzy/go-gitlab/settings.go | 1526 +--
 vendor/github.com/xanzy/go-gitlab/snippets.go | 4 +-
 vendor/github.com/xanzy/go-gitlab/todos.go | 5 +-
 vendor/github.com/xanzy/go-gitlab/types.go | 149 +-
 vendor/github.com/xanzy/go-gitlab/users.go | 182 +-
 vendor/github.com/xanzy/go-gitlab/validate.go | 2 +
 .../net/http/otelhttp/client.go | 15 +-
 .../net/http/otelhttp/common.go | 20 +-
 .../net/http/otelhttp/config.go | 30 +-
 .../instrumentation/net/http/otelhttp/doc.go | 13 +-
 .../net/http/otelhttp/handler.go | 140 +-
 .../otelhttp/internal/request/body_wrapper.go | 75 +
 .../internal/request/resp_writer_wrapper.go | 112 +
 .../net/http/otelhttp/internal/semconv/env.go | 165 +
 .../otelhttp/internal/semconv/httpconv.go | 348 +
 .../http/otelhttp/internal/semconv/util.go | 98 +
 .../http/otelhttp/internal/semconv/v1.20.0.go | 192 +
 .../http/otelhttp/internal/semconvutil/gen.go | 13 +-
 .../otelhttp/internal/semconvutil/httpconv.go | 13 +-
 .../otelhttp/internal/semconvutil/netconv.go | 18 +-
 .../net/http/otelhttp/labeler.go | 21 +-
 .../net/http/otelhttp/transport.go | 87 +-
 .../net/http/otelhttp/version.go | 15 +-
 .../instrumentation/net/http/otelhttp/wrap.go | 100 -
 .../go.opentelemetry.io/otel/.codespellignore | 2 +
 vendor/go.opentelemetry.io/otel/.codespellrc | 2 +-
 vendor/go.opentelemetry.io/otel/.gitmodules | 3 -
 vendor/go.opentelemetry.io/otel/.golangci.yml | 8 +
 vendor/go.opentelemetry.io/otel/CHANGELOG.md | 237 +-
 vendor/go.opentelemetry.io/otel/CODEOWNERS | 6 +-
 .../go.opentelemetry.io/otel/CONTRIBUTING.md | 25 +-
 vendor/go.opentelemetry.io/otel/Makefile | 96 +-
 vendor/go.opentelemetry.io/otel/README.md | 54 +-
 vendor/go.opentelemetry.io/otel/RELEASING.md | 7 +
 .../otel/attribute/README.md | 3 +
 .../go.opentelemetry.io/otel/attribute/doc.go | 13 +-
 .../otel/attribute/encoder.go | 13 +-
 .../otel/attribute/filter.go | 13 +-
 .../otel/attribute/iterator.go | 13 +-
 .../go.opentelemetry.io/otel/attribute/key.go | 13 +-
 .../go.opentelemetry.io/otel/attribute/kv.go | 13 +-
 .../go.opentelemetry.io/otel/attribute/set.go | 139 +-
 .../otel/attribute/value.go | 31 +-
 .../otel/baggage/README.md | 3 +
 .../otel/baggage/baggage.go | 392 +-
 .../otel/baggage/context.go | 13 +-
 .../go.opentelemetry.io/otel/baggage/doc.go | 13 +-
 .../go.opentelemetry.io/otel/codes/README.md | 3 +
 .../go.opentelemetry.io/otel/codes/codes.go | 15 +-
 .../go.opentelemetry.io/otel/codes/doc.go | 13 +-
 vendor/go.opentelemetry.io/otel/doc.go | 15 +-
 .../go.opentelemetry.io/otel/error_handler.go | 13 +-
 .../go.opentelemetry.io/otel/get_main_pkgs.sh | 13 +-
 vendor/go.opentelemetry.io/otel/handler.go | 25 +-
 .../otel/internal/attribute/attribute.go | 37 +-
 .../otel/internal/baggage/baggage.go | 13 +-
 .../otel/internal/baggage/context.go | 13 +-
 .../go.opentelemetry.io/otel/internal/gen.go | 13 +-
 .../otel/internal/global/handler.go | 84 +-
 .../otel/internal/global/instruments.go | 65 +-
 .../otel/internal/global/internal_logging.go | 39 +-
 .../otel/internal/global/meter.go | 36 +-
 .../otel/internal/global/propagator.go | 13 +-
 .../otel/internal/global/state.go | 67 +-
 .../otel/internal/global/trace.go | 22 +-
 .../otel/internal/rawhelpers.go | 22 +-
 .../otel/internal_logging.go | 13 +-
 vendor/go.opentelemetry.io/otel/metric.go | 13 +-
 .../go.opentelemetry.io/otel/metric/README.md | 3 +
 .../otel/metric/asyncfloat64.go | 19 +-
 .../otel/metric/asyncint64.go | 13 +-
 .../go.opentelemetry.io/otel/metric/config.go | 13 +-
 vendor/go.opentelemetry.io/otel/metric/doc.go | 31 +-
 .../otel/metric/embedded/README.md | 3 +
 .../otel/metric/embedded/embedded.go | 33 +-
 .../otel/metric/instrument.go | 35 +-
 .../go.opentelemetry.io/otel/metric/meter.go | 90 +-
 .../otel/metric/noop/README.md | 3 +
 .../otel/metric/noop/noop.go | 281 +
 .../otel/metric/syncfloat64.go | 73 +-
 .../otel/metric/syncint64.go | 67 +-
 .../go.opentelemetry.io/otel/propagation.go | 13 +-
 .../otel/propagation/README.md | 3 +
 .../otel/propagation/baggage.go | 13 +-
 .../otel/propagation/doc.go | 13 +-
 .../otel/propagation/propagation.go | 13 +-
 .../otel/propagation/trace_context.go | 15 +-
 vendor/go.opentelemetry.io/otel/renovate.json | 24 +
 .../go.opentelemetry.io/otel/requirements.txt | 2 +-
 vendor/go.opentelemetry.io/otel/sdk/README.md | 3 +
 .../otel/sdk/instrumentation/README.md | 3 +
 .../otel/sdk/instrumentation/doc.go | 13 +-
 .../otel/sdk/instrumentation/library.go | 16 +-
 .../otel/sdk/instrumentation/scope.go | 13 +-
 .../otel/sdk/internal/env/env.go | 15 +-
 .../otel/sdk/internal/gen.go | 29 -
 .../otel/sdk/internal/x/README.md | 46 +
 .../otel/sdk/internal/x/x.go | 66 +
 .../otel/sdk/resource/README.md | 3 +
 .../otel/sdk/resource/auto.go | 13 +-
 .../otel/sdk/resource/builtin.go | 36 +-
 .../otel/sdk/resource/config.go | 13 +-
 .../otel/sdk/resource/container.go | 15 +-
 .../otel/sdk/resource/doc.go | 13 +-
 .../otel/sdk/resource/env.go | 15 +-
 .../otel/sdk/resource/host_id.go | 15 +-
 .../otel/sdk/resource/host_id_bsd.go | 13 +-
 .../otel/sdk/resource/host_id_darwin.go | 13 +-
 .../otel/sdk/resource/host_id_exec.go | 13 +-
 .../otel/sdk/resource/host_id_linux.go | 13 +-
 .../otel/sdk/resource/host_id_readfile.go | 13 +-
 .../otel/sdk/resource/host_id_unsupported.go | 23 +-
 .../otel/sdk/resource/host_id_windows.go | 13 +-
 .../otel/sdk/resource/os.go | 15 +-
 .../otel/sdk/resource/os_release_darwin.go | 13 +-
 .../otel/sdk/resource/os_release_unix.go | 13 +-
 .../otel/sdk/resource/os_unix.go | 13 +-
 .../otel/sdk/resource/os_unsupported.go | 25 +-
 .../otel/sdk/resource/os_windows.go | 13 +-
 .../otel/sdk/resource/process.go | 15 +-
 .../otel/sdk/resource/resource.go | 24 +-
 .../otel/sdk/trace/README.md | 3 +
 .../otel/sdk/trace/batch_span_processor.go | 15 +-
 .../go.opentelemetry.io/otel/sdk/trace/doc.go | 13 +-
 .../otel/sdk/trace/event.go | 13 +-
 .../otel/sdk/trace/evictedqueue.go | 49 +-
 .../otel/sdk/trace/id_generator.go | 34 +-
 .../otel/sdk/trace/link.go | 13 +-
 .../otel/sdk/trace/provider.go | 15 +-
 .../otel/sdk/trace/sampler_env.go | 13 +-
 .../otel/sdk/trace/sampling.go | 13 +-
 .../otel/sdk/trace/simple_span_processor.go | 50 +-
 .../otel/sdk/trace/snapshot.go | 15 +-
 .../otel/sdk/trace/span.go | 118 +-
 .../otel/sdk/trace/span_exporter.go | 13 +-
 .../otel/sdk/trace/span_limits.go | 13 +-
 .../otel/sdk/trace/span_processor.go | 13 +-
 .../otel/sdk/trace/tracer.go | 19 +-
 .../otel/sdk/trace/version.go | 13 +-
 .../go.opentelemetry.io/otel/sdk/version.go | 15 +-
 .../otel/semconv/internal/v2/http.go | 13 +-
 .../otel/semconv/internal/v2/net.go | 19 +-
 .../otel/semconv/v1.17.0/README.md | 3 +
 .../otel/semconv/v1.17.0/doc.go | 13 +-
 .../otel/semconv/v1.17.0/event.go | 13 +-
 .../otel/semconv/v1.17.0/exception.go | 13 +-
 .../otel/semconv/v1.17.0/http.go | 13 +-
 .../otel/semconv/v1.17.0/httpconv/README.md | 3 +
 .../otel/semconv/v1.17.0/httpconv/http.go | 13 +-
 .../otel/semconv/v1.17.0/resource.go | 13 +-
 .../otel/semconv/v1.17.0/schema.go | 13 +-
 .../otel/semconv/v1.17.0/trace.go | 13 +-
 .../otel/semconv/v1.20.0/README.md | 3 +
 .../otel/semconv/v1.20.0/attribute_group.go | 13 +-
 .../otel/semconv/v1.20.0/doc.go | 13 +-
 .../otel/semconv/v1.20.0/event.go | 13 +-
 .../otel/semconv/v1.20.0/exception.go | 13 +-
 .../otel/semconv/v1.20.0/http.go | 13 +-
 .../otel/semconv/v1.20.0/resource.go | 13 +-
 .../otel/semconv/v1.20.0/schema.go | 13 +-
 .../otel/semconv/v1.20.0/trace.go | 13 +-
 .../otel/semconv/v1.24.0/attribute_group.go | 4398 --------
 .../otel/semconv/v1.24.0/event.go | 211 -
 .../otel/semconv/v1.24.0/resource.go | 2556 -----
 .../otel/semconv/v1.24.0/schema.go | 20 -
 .../otel/semconv/v1.24.0/trace.go | 1334 ---
 .../otel/semconv/v1.26.0/README.md | 3 +
 .../otel/semconv/v1.26.0/attribute_group.go | 8996 +++++++++++++++++
 .../otel/semconv/v1.26.0/doc.go | 9 +
 .../otel/semconv/v1.26.0/exception.go | 9 +
 .../otel/semconv/v1.26.0/metric.go | 1307 +++
 .../otel/semconv/v1.26.0/schema.go | 9 +
 vendor/go.opentelemetry.io/otel/trace.go | 13 +-
 .../go.opentelemetry.io/otel/trace/README.md | 3 +
 .../go.opentelemetry.io/otel/trace/config.go | 13 +-
 .../go.opentelemetry.io/otel/trace/context.go | 17 +-
 vendor/go.opentelemetry.io/otel/trace/doc.go | 13 +-
 .../otel/trace/embedded/README.md | 3 +
 .../otel/trace/embedded/embedded.go | 13 +-
 .../otel/trace/nonrecording.go | 13 +-
 vendor/go.opentelemetry.io/otel/trace/noop.go | 20 +-
 .../otel/trace/noop/README.md | 3 +
 .../otel/trace/noop/noop.go | 20 +-
 .../otel/trace/provider.go | 59 +
 vendor/go.opentelemetry.io/otel/trace/span.go | 177 +
 .../go.opentelemetry.io/otel/trace/trace.go | 256 +-
 .../go.opentelemetry.io/otel/trace/tracer.go | 37 +
 .../otel/trace/tracestate.go | 23 +-
 .../otel/verify_examples.sh | 13 +-
 .../otel/verify_readmes.sh | 21 +
 .../otel/verify_released_changelog.sh | 42 +
 vendor/go.opentelemetry.io/otel/version.go | 15 +-
 vendor/go.opentelemetry.io/otel/versions.yaml | 25 +-
 vendor/go.step.sm/crypto/internal/utils/io.go | 34 +-
 .../crypto/internal/utils/utfbom/LICENSE | 201 +
 .../crypto/internal/utils/utfbom/README.md | 66 +
 .../crypto/internal/utils/utfbom/utfbom.go | 195 +
 vendor/go.step.sm/crypto/jose/parse.go | 3 +
 vendor/go.step.sm/crypto/keyutil/key.go | 4 +-
 vendor/go.step.sm/crypto/pemutil/pem.go | 305 +-
 vendor/go.step.sm/crypto/pemutil/pkcs8.go | 1 -
 vendor/go.step.sm/crypto/pemutil/ssh.go | 10 +-
 .../x/net/context/ctxhttp/ctxhttp.go | 71 +
 vendor/golang.org/x/oauth2/LICENSE | 4 +-
 vendor/golang.org/x/oauth2/token.go | 7 +
 vendor/golang.org/x/time/LICENSE | 4 +-
 .../google.golang.org/api/idtoken/idtoken.go | 22 +
 .../google.golang.org/api/internal/creds.go | 103 +-
 .../api/internal/settings.go | 62 +-
 .../google.golang.org/api/internal/version.go | 2 +-
 vendor/google.golang.org/api/option/option.go | 14 +
 .../api/transport/http/dial.go | 105 +-
 .../genproto/googleapis/api/LICENSE | 202 +
 .../api/annotations/annotations.pb.go | 119 +
 .../googleapis/api/annotations/client.pb.go | 1940 ++++
 .../api/annotations/field_behavior.pb.go | 266 +
 .../api/annotations/field_info.pb.go | 392 +
 .../googleapis/api/annotations/http.pb.go | 774 ++
 .../googleapis/api/annotations/resource.pb.go | 660 ++
 .../googleapis/api/annotations/routing.pb.go | 693 ++
 .../googleapis/api/launch_stage.pb.go | 203 +
 vendor/k8s.io/utils/trace/trace.go | 2 +-
 vendor/modules.txt | 301 +-
 .../release-utils/command/command.go | 28 +-
 .../release-utils/command/global.go | 4 +-
 .../sigs.k8s.io/release-utils/util/common.go | 29 +-
 .../release-utils/version/command.go | 4 +-
 .../release-utils/version/version.go | 12 +-
 1046 files changed, 135727 insertions(+), 22008 deletions(-)
 create mode 100644 vendor/cloud.google.com/go/auth/CHANGES.md
 create mode 100644 vendor/cloud.google.com/go/auth/LICENSE
 create mode 100644 vendor/cloud.google.com/go/auth/README.md
 create mode 100644 vendor/cloud.google.com/go/auth/auth.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/compute.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/detect.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/doc.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/filetypes.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/idtoken/file.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/impersonate/doc.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/impersonate/user.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go
 rename vendor/{go.opentelemetry.io/otel/semconv/v1.24.0/doc.go => cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go} (54%)
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
 create mode 100644 vendor/cloud.google.com/go/auth/httptransport/httptransport.go
 create mode 100644 vendor/cloud.google.com/go/auth/httptransport/trace.go
 create mode 100644 vendor/cloud.google.com/go/auth/httptransport/transport.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/credsfile/parse.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/internal.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/cba.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/s2a.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/transport.go
 create mode 100644 vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
 create mode 100644 vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE
 create mode 100644 vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
 create mode 100644 vendor/cloud.google.com/go/auth/threelegged.go
 create mode 100644 vendor/cloud.google.com/go/compute/metadata/syscheck.go
 rename vendor/{cuelang.org/go/cue/parser/fuzz.go => cloud.google.com/go/compute/metadata/syscheck_linux.go} (60%)
 create mode 100644 vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go
 create mode 100644 vendor/cuelabs.dev/go/oci/ociregistry/internal/ocirequest/create.go
 create mode 100644 vendor/cuelabs.dev/go/oci/ociregistry/internal/ocirequest/request.go
 create mode 100644 vendor/cuelabs.dev/go/oci/ociregistry/ociauth/auth.go
 create mode 100644 vendor/cuelabs.dev/go/oci/ociregistry/ociauth/authfile.go
 create mode 100644 vendor/cuelabs.dev/go/oci/ociregistry/ociauth/challenge.go
 create mode 100644 vendor/cuelabs.dev/go/oci/ociregistry/ociauth/context.go
 create mode 100644 vendor/cuelabs.dev/go/oci/ociregistry/ociauth/scope.go
 create mode 100644 vendor/cuelabs.dev/go/oci/ociregistry/ociclient/client.go
 create mode 100644 vendor/cuelabs.dev/go/oci/ociregistry/ociclient/deleter.go
 create mode 100644 vendor/cuelabs.dev/go/oci/ociregistry/ociclient/error.go
 create mode 100644 vendor/cuelabs.dev/go/oci/ociregistry/ociclient/lister.go
 create mode 100644 vendor/cuelabs.dev/go/oci/ociregistry/ociclient/reader.go
 create mode 100644 vendor/cuelabs.dev/go/oci/ociregistry/ociclient/writer.go
 create mode 100644 vendor/cuelabs.dev/go/oci/ociregistry/ociref/reference.go
 create mode 100644 vendor/cuelabs.dev/go/oci/ociregistry/valid.go
 delete mode 100644 vendor/cuelang.org/go/AUTHORS
 delete mode 100644 vendor/cuelang.org/go/cue/load/module.go
 delete mode 100644 vendor/cuelang.org/go/cue/load/moduleschema.cue
 delete mode 100644 vendor/cuelang.org/go/cue/load/registry.go
 delete mode 100644 vendor/cuelang.org/go/cue/load/test.cue
 create mode 100644 vendor/cuelang.org/go/cue/token/token_string.go
 delete mode 100644 vendor/cuelang.org/go/encoding/gocode/gocodec/codec.go
 create mode 100644 vendor/cuelang.org/go/internal/core/adt/conjunct.go
 create mode 100644 vendor/cuelang.org/go/internal/core/adt/debug.go
 create mode 100644 vendor/cuelang.org/go/internal/core/adt/dev.go
 create mode 100644 vendor/cuelang.org/go/internal/core/adt/disjunct2.go
 create mode 100644 vendor/cuelang.org/go/internal/core/adt/op_string.go
 create mode 100644 vendor/cuelang.org/go/internal/core/adt/overlay.go
 create mode 100644 vendor/cuelang.org/go/internal/core/adt/sched.go
 create mode 100644 vendor/cuelang.org/go/internal/core/adt/share.go
 create mode 100644 vendor/cuelang.org/go/internal/core/adt/states.go
 create mode 100644 vendor/cuelang.org/go/internal/core/adt/tasks.go
 create mode 100644 vendor/cuelang.org/go/internal/core/adt/unify.go
 create mode 100644 vendor/cuelang.org/go/internal/cueconfig/config.go
 create mode 100644 vendor/cuelang.org/go/internal/cuedebug/debug.go
 create mode 100644 vendor/cuelang.org/go/internal/cueexperiment/exp.go
 rename vendor/cuelang.org/go/{cue/load => internal/cueimports}/read.go (79%)
 create mode 100644 vendor/cuelang.org/go/internal/cueversion/transport.go
 create mode 100644 vendor/cuelang.org/go/internal/cueversion/version.go
 create mode 100644 vendor/cuelang.org/go/internal/encoding/yaml/decode.go
 create mode 100644 vendor/cuelang.org/go/internal/envflag/flag.go
 delete mode 100644 vendor/cuelang.org/go/internal/filetypes/test.cue
 create mode 100644 vendor/cuelang.org/go/internal/golangorgx/tools/robustio/gopls_windows.go
 create mode 100644 vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio.go
 create mode 100644 vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_darwin.go
 create mode 100644 vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_flaky.go
 create mode 100644 vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_other.go
 create mode 100644 vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_plan9.go
 create mode 100644 vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_posix.go
 create mode 100644 vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_windows.go
 delete mode 100644 vendor/cuelang.org/go/internal/mod/modfile/modfile.go
 delete mode 100644 vendor/cuelang.org/go/internal/mod/modfile/schema.cue
 create mode 100644 vendor/cuelang.org/go/internal/mod/modimports/modimports.go
 create mode 100644 vendor/cuelang.org/go/internal/mod/modload/query.go
 create mode 100644 vendor/cuelang.org/go/internal/mod/modload/tidy.go
 create mode 100644 vendor/cuelang.org/go/internal/mod/modload/update.go
 create mode 100644 vendor/cuelang.org/go/internal/mod/modpkgload/import.go
 create mode 100644 vendor/cuelang.org/go/internal/mod/modpkgload/pkgload.go
 delete mode 100644 vendor/cuelang.org/go/internal/mod/modregistry/client.go
 create mode 100644 vendor/cuelang.org/go/internal/mod/modrequirements/requirements.go
 create mode 100644 vendor/cuelang.org/go/internal/mod/modresolve/resolve.go
 create mode 100644 vendor/cuelang.org/go/internal/mod/modresolve/schema.cue
 delete mode 100644 vendor/cuelang.org/go/internal/mod/module/module.go
 rename vendor/cuelang.org/go/internal/{mod/internal => }/par/queue.go (100%)
 rename vendor/cuelang.org/go/internal/{mod/internal => }/par/work.go (100%)
 delete mode 100644 vendor/cuelang.org/go/internal/slices/slices.go
 create mode 100644 vendor/cuelang.org/go/mod/modcache/cache.go
 create mode 100644 vendor/cuelang.org/go/mod/modcache/fetch.go
 create mode 100644 vendor/cuelang.org/go/mod/modconfig/modconfig.go
 create mode 100644 vendor/cuelang.org/go/mod/modfile/modfile.go
 create mode 100644 vendor/cuelang.org/go/mod/modfile/schema.cue
 create mode 100644 vendor/cuelang.org/go/mod/modregistry/client.go
 create mode 100644 vendor/cuelang.org/go/mod/modregistry/metadata.go
 create mode 100644 vendor/cuelang.org/go/mod/module/dirfs.go
 rename vendor/cuelang.org/go/{internal => }/mod/module/error.go (79%)
 create mode 100644 vendor/cuelang.org/go/mod/module/escape.go
 create mode 100644 vendor/cuelang.org/go/mod/module/module.go
 rename vendor/cuelang.org/go/{internal => }/mod/module/path.go (78%)
 rename vendor/cuelang.org/go/{internal => }/mod/module/versions.go (100%)
 rename vendor/cuelang.org/go/{internal => }/mod/modzip/zip.go (94%)
 delete mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/helper.go
 rename vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/{alibabacloudsdkgo/helper => provider}/LICENSE (100%)
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/accesskey_provider.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/chain_provider.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/credentials.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ecsmetadata_provider.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/encryptedfile_provider.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/endpoint.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env_provider.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/error.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/file_provider.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/function_provider.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/http.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/log.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/oidc_provider.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/provider.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/req.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/rolearn_provider.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/semaphore_provider.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ststoken_provider.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/updater.go
 create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/uri_provider.go
create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v1sdk.go
create mode 100644 vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v2sdk.go
create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go
create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go
create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go
create mode 100644 vendor/github.com/buildkite/agent/v3/api/github_code_access_token.go
create mode 100644 vendor/github.com/buildkite/agent/v3/api/secrets.go
create mode 100644 vendor/github.com/buildkite/go-pipeline/internal/env/env.go
create mode 100644 vendor/github.com/buildkite/go-pipeline/step_command_cache.go
create mode 100644 vendor/github.com/buildkite/go-pipeline/warning/warning.go
create mode 100644 vendor/github.com/buildkite/roko/.gitignore
create mode 100644 vendor/github.com/buildkite/roko/LICENSE.md
create mode 100644 vendor/github.com/buildkite/roko/README.md
create mode 100644 vendor/github.com/buildkite/roko/retrier.go
create mode 100644 vendor/github.com/dustin/go-humanize/.travis.yml
rename vendor/github.com/{mpvl/unique => dustin/go-humanize}/LICENSE (84%)
create mode 100644 vendor/github.com/dustin/go-humanize/README.markdown
create mode 100644 vendor/github.com/dustin/go-humanize/big.go
create mode 100644 vendor/github.com/dustin/go-humanize/bigbytes.go
create mode 100644 vendor/github.com/dustin/go-humanize/bytes.go
create mode 100644 vendor/github.com/dustin/go-humanize/comma.go
create mode 100644 vendor/github.com/dustin/go-humanize/commaf.go
create mode 100644 vendor/github.com/dustin/go-humanize/ftoa.go
create mode 100644 vendor/github.com/dustin/go-humanize/humanize.go
create mode 100644 vendor/github.com/dustin/go-humanize/number.go
create mode 100644 vendor/github.com/dustin/go-humanize/ordinals.go
create mode 100644 vendor/github.com/dustin/go-humanize/si.go
create mode 100644 vendor/github.com/dustin/go-humanize/times.go
create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwt/builder.go
create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwt/claims.go
create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwt/doc.go
create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwt/errors.go
create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwt/jwt.go
create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwt/validation.go
create mode 100644 vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go
create mode 100644 vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto
create mode 100644 vendor/github.com/google/certificate-transparency-go/client/getentries.go
create mode 100644 vendor/github.com/google/certificate-transparency-go/client/logclient.go
create mode 100644 vendor/github.com/google/certificate-transparency-go/client/multilog.go
create mode 100644 vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go
create mode 100644 vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go
create mode 100644 vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go
create mode 100644 vendor/github.com/google/certificate-transparency-go/jsonclient/client.go
create mode 100644 vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go
create mode 100644 vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go
create mode 100644 vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem
delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem
delete mode 100644 vendor/github.com/google/s2a-go/testdata/client_cert.pem
delete mode 100644 vendor/github.com/google/s2a-go/testdata/client_key.pem
delete mode 100644 vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem
delete mode 100644 vendor/github.com/google/s2a-go/testdata/mds_client_key.pem
delete mode 100644 vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem
delete mode 100644 vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem
delete mode 100644 vendor/github.com/google/s2a-go/testdata/mds_server_key.pem
delete mode 100644 vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem
delete mode 100644 vendor/github.com/google/s2a-go/testdata/self_signed_key.pem
delete mode 100644 vendor/github.com/google/s2a-go/testdata/server_cert.pem
delete mode 100644 vendor/github.com/google/s2a-go/testdata/server_key.pem
create mode 100644 vendor/github.com/in-toto/attestation/LICENSE
create mode 100644 vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go
create mode 100644 vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go
create mode 100644 vendor/github.com/in-toto/attestation/go/v1/statement.go
create mode 100644 vendor/github.com/in-toto/attestation/go/v1/statement.pb.go
delete mode 100644 vendor/github.com/mpvl/unique/.gitignore
delete mode 100644 vendor/github.com/mpvl/unique/unique.go
create mode 100644 vendor/github.com/oleiade/reflections/.golangci.yml
create mode 100644 vendor/github.com/oleiade/reflections/.vale.ini
create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.62.0.json
create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.62.1.json
create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.63.0.json
create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.64.0.json
create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.64.1.json
create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.65.0.json
create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.66.0.json
create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.67.0.json
create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.67.1.json
create mode 100644 vendor/github.com/open-policy-agent/opa/capabilities/v0.68.0.json
delete mode 100644 vendor/github.com/open-policy-agent/opa/internal/errors/join.go
delete mode 100644 vendor/github.com/open-policy-agent/opa/internal/errors/join_go1.20.go
delete mode 100644 vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/bar/bar.rego
delete mode 100644 vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/bar/bar.yaml
delete mode 100644 vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/baz/qux/qux.json
delete mode 100644 vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/foo.json
create mode 100644 vendor/github.com/open-policy-agent/opa/util/decoding/context.go
create mode 100644 vendor/github.com/open-policy-agent/opa/util/maps.go
create mode 100644 vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go
delete mode 100644 vendor/github.com/open-policy-agent/opa/version/version_go1.18.go
create mode 100644 vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/LICENSE
create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/mksyscall.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/psapi_windows.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/reparse_windows.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/security_windows.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/symlink_windows.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/syscall_windows.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll/sysdll.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/zsyscall_windows.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_fcntl.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_other.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_unix.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_windows.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_filelock.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_plan9.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/mutex.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/robustio/robustio.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/robustio/robustio_darwin.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/robustio/robustio_flaky.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/robustio/robustio_other.go
create mode 100644 vendor/github.com/rogpeppe/go-internal/robustio/robustio_windows.go
create mode 100644 vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_bundle.go
create mode 100644 vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/payload/size/errors.go
create mode 100644 vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/payload/size/size.go
create mode 100644 vendor/github.com/sigstore/cosign/v2/internal/pkg/now/now.go
create mode 100644 vendor/github.com/sigstore/cosign/v2/pkg/cosign/bundle/protobundle.go
create mode 100644 vendor/github.com/sigstore/cosign/v2/pkg/cosign/tsa.go
rename vendor/{go.opentelemetry.io/otel/sdk/internal/internal.go => github.com/sigstore/cosign/v2/pkg/oci/errors.go} (52%)
create mode 100644 vendor/github.com/sigstore/protobuf-specs/COPYRIGHT.txt
create mode 100644 vendor/github.com/sigstore/protobuf-specs/LICENSE
create mode 100644 vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go
create mode 100644 vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go
create mode 100644 vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go
create mode 100644 vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go
create mode 100644 vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go
create mode 100644 vendor/github.com/sigstore/rekor/pkg/tle/tle.go
create mode 100644 vendor/github.com/sigstore/rekor/pkg/verify/verify.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt
create mode 100644 vendor/github.com/sigstore/sigstore-go/LICENSE
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/bundle/bundle.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/bundle/signature_content.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/bundle/verification_content.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json
rename vendor/{cuelang.org/go/cue/scanner/fuzz.go => github.com/sigstore/sigstore-go/pkg/util/util.go} (61%)
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go
rename vendor/{go.opentelemetry.io/otel/semconv/v1.24.0/exception.go => github.com/sigstore/sigstore-go/pkg/verify/errors.go} (56%)
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go
create mode 100644 vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go
create mode 100644 vendor/github.com/theupdateframework/go-tuf/v2/LICENSE
create mode 100644 vendor/github.com/theupdateframework/go-tuf/v2/NOTICE
create mode 100644 vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go
create mode 100644 vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go
create mode 100644 vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go
create mode 100644 vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go
create mode 100644 vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go
create mode 100644 vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go
create mode 100644 vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go
create mode 100644 vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go
create mode 100644 vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go
create mode 100644 vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go
create mode 100644 vendor/github.com/xanzy/go-gitlab/dora_metrics.go
create mode 100644 vendor/github.com/xanzy/go-gitlab/draft_notes.go
create mode 100644 vendor/github.com/xanzy/go-gitlab/import.go
create mode 100644 vendor/github.com/xanzy/go-gitlab/resource_group.go
create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
delete mode 100644 vendor/go.opentelemetry.io/otel/.gitmodules
create mode 100644 vendor/go.opentelemetry.io/otel/attribute/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/baggage/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/codes/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/metric/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/metric/embedded/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/metric/noop/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/metric/noop/noop.go
create mode 100644 vendor/go.opentelemetry.io/otel/propagation/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/renovate.json
create mode 100644 vendor/go.opentelemetry.io/otel/sdk/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md
delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/gen.go
create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
create mode 100644 vendor/go.opentelemetry.io/otel/trace/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/trace/embedded/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/trace/noop/README.md
create mode 100644 vendor/go.opentelemetry.io/otel/trace/provider.go
create mode 100644 vendor/go.opentelemetry.io/otel/trace/span.go
create mode 100644 vendor/go.opentelemetry.io/otel/trace/tracer.go
create mode 100644 vendor/go.opentelemetry.io/otel/verify_readmes.sh
create mode 100644 vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
create mode 100644 vendor/go.step.sm/crypto/internal/utils/utfbom/LICENSE
create mode 100644 vendor/go.step.sm/crypto/internal/utils/utfbom/README.md
create mode 100644 vendor/go.step.sm/crypto/internal/utils/utfbom/utfbom.go
create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/LICENSE
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go

diff --git a/go.mod b/go.mod
index 687b679400..e2dd982e61 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,7 @@
 module sigs.k8s.io/security-profiles-operator
 
 go 1.21.0
+toolchain go1.22.8
 
 require (
 	github.com/acobaugh/osrelease v0.1.0
@@ -25,7 +26,7 @@
 	github.com/prometheus/client_golang v1.20.4
 	github.com/prometheus/client_model v0.6.1
 	github.com/seccomp/libseccomp-golang v0.10.0
-	github.com/sigstore/cosign/v2 v2.2.3
+	github.com/sigstore/cosign/v2 v2.4.1
 	github.com/stretchr/testify v1.9.0
 	github.com/urfave/cli/v2 v2.27.4
 	golang.org/x/mod v0.20.0
@@ -42,16 +43,18 @@
 	oras.land/oras-go/v2 v2.4.0
 	sigs.k8s.io/controller-runtime v0.17.3
 	sigs.k8s.io/controller-tools v0.14.0
-	sigs.k8s.io/release-utils v0.8.1
+	sigs.k8s.io/release-utils v0.8.4
 	sigs.k8s.io/yaml v1.4.0
 )
 
 require (
-	cloud.google.com/go/compute/metadata v0.3.0 // indirect
-	cuelabs.dev/go/oci/ociregistry v0.0.0-20231103182354-93e78c079a13 // indirect
-	cuelang.org/go v0.7.0 // indirect
+	cloud.google.com/go/auth v0.9.3 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
+	cloud.google.com/go/compute/metadata v0.5.0 // indirect
+	cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2 // indirect
+	cuelang.org/go v0.9.2 // indirect
 	filippo.io/edwards25519 v1.1.0 // indirect
-	github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 // indirect
+	github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 // indirect
 	github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
 	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
@@ -77,29 +80,30 @@
 	github.com/alibabacloud-go/tea v1.2.1 // indirect
 	github.com/alibabacloud-go/tea-utils v1.4.5 // indirect
 	github.com/alibabacloud-go/tea-xml v1.1.3 // indirect
-	github.com/aliyun/credentials-go v1.3.1 // indirect
+	github.com/aliyun/credentials-go v1.3.2 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
-	github.com/aws/aws-sdk-go-v2 v1.26.0 // indirect
-	github.com/aws/aws-sdk-go-v2/config v1.27.9 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.17.9 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
+	github.com/aws/aws-sdk-go-v2 v1.30.5 // indirect
+	github.com/aws/aws-sdk-go-v2/config v1.27.33 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.17.32 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
 	github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 // indirect
 	github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.20.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.28.5 // indirect
-	github.com/aws/smithy-go v1.20.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 // indirect
+	github.com/aws/smithy-go v1.20.4 // indirect
 	github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver v3.5.1+incompatible // indirect
-	github.com/buildkite/agent/v3 v3.62.0 // indirect
-	github.com/buildkite/go-pipeline v0.3.2 // indirect
-	github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251 // indirect
+	github.com/buildkite/agent/v3 v3.81.0 // indirect
+	github.com/buildkite/go-pipeline v0.13.1 // indirect
+	github.com/buildkite/interpolate v0.1.3 // indirect
+	github.com/buildkite/roko v1.2.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect
 	github.com/clbanning/mxj/v2 v2.7.0 // indirect
@@ -107,7 +111,7 @@
 	github.com/cockroachdb/apd/v3 v3.2.1 // indirect
 	github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
-	github.com/coreos/go-oidc/v3 v3.10.0 // indirect
+	github.com/coreos/go-oidc/v3 v3.11.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
 	github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
@@ -119,6 +123,7 @@
 	github.com/docker/cli v27.1.1+incompatible // indirect
 	github.com/docker/distribution v2.8.3+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.8.2 // indirect
+	github.com/dustin/go-humanize v1.0.1 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/emicklei/proto v1.12.1 // indirect
 	github.com/evanphx/json-patch v5.7.0+incompatible // indirect
@@ -129,7 +134,7 @@
 	github.com/go-chi/chi v4.1.2+incompatible // indirect
 	github.com/go-ini/ini v1.67.0 // indirect
 	github.com/go-jose/go-jose/v3 v3.0.3 // indirect
-	github.com/go-jose/go-jose/v4 v4.0.2 // indirect
+	github.com/go-jose/go-jose/v4 v4.0.4 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/analysis v0.23.0 // indirect
 	github.com/go-openapi/errors v0.22.0 // indirect
@@ -150,18 +155,19 @@
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/certificate-transparency-go v1.1.8 // indirect
+	github.com/google/certificate-transparency-go v1.2.1 // indirect
 	github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
 	github.com/google/go-github/v55 v55.0.0 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/google/s2a-go v0.1.7 // indirect
+	github.com/google/s2a-go v0.1.8 // indirect
 	github.com/google/uuid v1.6.0 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.3 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
 	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
 	github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
+	github.com/in-toto/attestation v1.1.0 // indirect
 	github.com/in-toto/in-toto-golang v0.9.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
@@ -169,7 +175,7 @@
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/klauspost/compress v1.17.9 // indirect
-	github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 // indirect
+	github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
@@ -183,23 +189,23 @@
 	github.com/moby/term v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
-	github.com/mozillazg/docker-credential-acr-helper v0.3.0 // indirect
-	github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de // indirect
+	github.com/mozillazg/docker-credential-acr-helper v0.4.0 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
-	github.com/oleiade/reflections v1.0.1 // indirect
-	github.com/open-policy-agent/opa v0.61.0 // indirect
+	github.com/oleiade/reflections v1.1.0 // indirect
+	github.com/open-policy-agent/opa v0.68.0 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/pborman/uuid v1.2.1 // indirect
-	github.com/pelletier/go-toml/v2 v2.1.0 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/common v0.55.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf // indirect
 	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
+	github.com/rogpeppe/go-internal v1.12.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/sagikazarmark/locafero v0.4.0 // indirect
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
@@ -207,10 +213,12 @@
 	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
 	github.com/segmentio/ksuid v1.0.4 // indirect
 	github.com/shibumi/go-pathspec v1.3.0 // indirect
-	github.com/sigstore/fulcio v1.4.5 // indirect
+	github.com/sigstore/fulcio v1.6.3 // indirect
+	github.com/sigstore/protobuf-specs v0.3.2 // indirect
 	github.com/sigstore/rekor v1.3.6 // indirect
-	github.com/sigstore/sigstore v1.8.4 // indirect
-	github.com/sigstore/timestamp-authority v1.2.1 // indirect
+	github.com/sigstore/sigstore v1.8.9 // indirect
+	github.com/sigstore/sigstore-go v0.6.1 // indirect
+	github.com/sigstore/timestamp-authority v1.2.2 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
@@ -218,18 +226,19 @@
 	github.com/spf13/cast v1.6.0 // indirect
 	github.com/spf13/cobra v1.8.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/spf13/viper v1.18.2 // indirect
-	github.com/spiffe/go-spiffe/v2 v2.1.7 // indirect
+	github.com/spf13/viper v1.19.0 // indirect
+	github.com/spiffe/go-spiffe/v2 v2.3.0 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
 	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/thales-e-security/pool v0.0.2 // indirect
 	github.com/theupdateframework/go-tuf v0.7.0 // indirect
+	github.com/theupdateframework/go-tuf/v2 v2.0.1 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/tjfoc/gmsm v1.4.1 // indirect
 	github.com/transparency-dev/merkle v0.0.2 // indirect
 	github.com/vbatts/tar-split v0.11.5 // indirect
-	github.com/xanzy/go-gitlab v0.96.0 // indirect
+	github.com/xanzy/go-gitlab v0.109.0 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
@@ -237,25 +246,26 @@
 	github.com/zeebo/errs v1.3.0 // indirect
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
-	go.opentelemetry.io/otel v1.24.0 // indirect
-	go.opentelemetry.io/otel/metric v1.24.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.24.0 // indirect
-	go.opentelemetry.io/otel/trace v1.24.0 // indirect
-	go.step.sm/crypto v0.44.2 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
+	go.opentelemetry.io/otel v1.29.0 // indirect
+	go.opentelemetry.io/otel/metric v1.29.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.29.0 // indirect
+	go.opentelemetry.io/otel/trace v1.29.0 // indirect
+	go.step.sm/crypto v0.51.2 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
 	golang.org/x/crypto v0.27.0 // indirect
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
-	golang.org/x/oauth2 v0.21.0 // indirect
+	golang.org/x/oauth2 v0.23.0 // indirect
 	golang.org/x/sys v0.25.0 // indirect
 	golang.org/x/term v0.24.0 // indirect
 	golang.org/x/text v0.18.0 // indirect
-	golang.org/x/time v0.5.0 // indirect
+	golang.org/x/time v0.6.0 // indirect
 	golang.org/x/tools v0.24.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
-	google.golang.org/api v0.172.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect
+	google.golang.org/api v0.196.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
@@ -264,7 +274,7 @@
 	k8s.io/apiextensions-apiserver v0.29.5 // indirect
 	k8s.io/component-base v0.29.5 // indirect
 	k8s.io/kube-openapi v0.0.0-20240103051144-eec4567ac022 // indirect
-	k8s.io/utils v0.0.0-20240310230437-4693a0247e57 // indirect
+	k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect
 	sigs.k8s.io/gateway-api v1.0.0 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
diff --git a/go.sum b/go.sum
index f1cf1a8821..fd40dad059 100644
--- a/go.sum
+++ b/go.sum
@@ -1,29 +1,36 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM=
-cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
-cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
-cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
-cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
-cloud.google.com/go/kms v1.15.8 h1:szIeDCowID8th2i8XE4uRev5PMxQFqW+JjwYxL9h6xs=
-cloud.google.com/go/kms v1.15.8/go.mod h1:WoUHcDjD9pluCg7pNds131awnH429QGvRM3N/4MyoVs=
-cuelabs.dev/go/oci/ociregistry v0.0.0-20231103182354-93e78c079a13 h1:zkiIe8AxZ/kDjqQN+mDKc5BxoVJOqioSdqApjc+eB1I=
-cuelabs.dev/go/oci/ociregistry v0.0.0-20231103182354-93e78c079a13/go.mod h1:XGKYSMtsJWfqQYPwq51ZygxAPqpEUj/9bdg16iDPTAA=
-cuelang.org/go v0.7.0 h1:gMztinxuKfJwMIxtboFsNc6s8AxwJGgsJV+3CuLffHI=
-cuelang.org/go v0.7.0/go.mod h1:ix+3dM/bSpdG9xg6qpCgnJnpeLtciZu+O/rDbywoMII=
+cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ=
+cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc=
+cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U=
+cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk=
+cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
+cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
+cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
+cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8=
+cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q=
+cloud.google.com/go/kms v1.19.0 h1:x0OVJDl6UH1BSX4THKlMfdcFWoE4ruh90ZHuilZekrU=
+cloud.google.com/go/kms v1.19.0/go.mod h1:e4imokuPJUc17Trz2s6lEXFDt8bgDmvpVynH39bdrHM=
+cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI=
+cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts=
+cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2 h1:BnG6pr9TTr6CYlrJznYUDj6V7xldD1W+1iXPum0wT/w=
+cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2/go.mod h1:pK23AUVXuNzzTpfMCA06sxZGeVQ/75FdVtW249de9Uo=
+cuelang.org/go v0.9.2 h1:pfNiry2PdRBr02G/aKm5k2vhzmqbAOoaB4WurmEbWvs=
+cuelang.org/go v0.9.2/go.mod h1:qpAYsLOf7gTM1YdEg6cxh553uZ4q9ZDWlPbtZr9q1Wk=
 filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
 filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
 github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg=
 github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM=
-github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 h1:8+4G8JaejP8Xa6W46PzJEwisNgBXMvFcz78N6zG/ARw=
-github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0/go.mod h1:GgeIE+1be8Ivm7Sh4RgwI42aTtC9qrcj+Y9Y6CjJhJs=
+github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 h1:kcnfY4vljxXliXDBrA9K9lwF8IoEZ4Up6Eg9kWTIm28=
+github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0/go.mod h1:tlqp9mUGbsP+0z3Q+c0Q5MgSdq/OMwQhm5bffR3Q3ss=
 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 h1:DRiANoJTiW6obBQe3SqZizkuV1PEgfiiGivmVocDy64=
 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0/go.mod h1:qLIye2hwb/ZouqhpSD9Zn3SJipvpEnz1Ywl3VUk9Y0s=
 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80=
@@ -110,52 +117,52 @@ github.com/alibabacloud-go/tea-xml v1.1.2/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCE
 github.com/alibabacloud-go/tea-xml v1.1.3 h1:7LYnm+JbOq2B+T/B0fHC4Ies4/FofC4zHzYtqw7dgt0=
 github.com/alibabacloud-go/tea-xml v1.1.3/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8=
 github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw=
-github.com/aliyun/credentials-go v1.3.1 h1:uq/0v7kWrxmoLGpqjx7vtQ/s03f0zR//0br/xWDTE28=
-github.com/aliyun/credentials-go v1.3.1/go.mod h1:8jKYhQuDawt8x2+fusqa1Y6mPxemTsBEN04dgcAcYz0=
+github.com/aliyun/credentials-go v1.3.2 h1:L4WppI9rctC8PdlMgyTkF8bBsy9pyKQEzBD1bHMRl+g=
+github.com/aliyun/credentials-go v1.3.2/go.mod h1:tlpz4uys4Rn7Ik4/piGRrTbXy2uLKvePgQJJduE+Y5c=
 github.com/aquasecurity/libbpfgo v0.7.0-libbpf-1.4 h1:rQ94U12Xlz2tncE8Rxnw3vpp/9hgUIEu3/Lv0/XQM0Q=
 github.com/aquasecurity/libbpfgo v0.7.0-libbpf-1.4/go.mod h1:iI7QCIZ3kXG0MR+FHsDZck6cYs1y1HyZP3sMObBg0sk=
 github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
 github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-sdk-go v1.51.6 h1:Ld36dn9r7P9IjU8WZSaswQ8Y/XUCRpewim5980DwYiU=
-github.com/aws/aws-sdk-go v1.51.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
+github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
 github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM=
-github.com/aws/aws-sdk-go-v2 v1.26.0 h1:/Ce4OCiM3EkpW7Y+xUnfAFpchU78K7/Ug01sZni9PgA=
-github.com/aws/aws-sdk-go-v2 v1.26.0/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I=
-github.com/aws/aws-sdk-go-v2/config v1.27.9 h1:gRx/NwpNEFSk+yQlgmk1bmxxvQ5TyJ76CWXs9XScTqg=
-github.com/aws/aws-sdk-go-v2/config v1.27.9/go.mod h1:dK1FQfpwpql83kbD873E9vz4FyAxuJtR22wzoXn3qq0=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.9 h1:N8s0/7yW+h8qR8WaRlPQeJ6czVMNQVNtNdUqf6cItao=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.9/go.mod h1:446YhIdmSV0Jf/SLafGZalQo+xr2iw7/fzXGDPTU1yQ=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0 h1:af5YzcLf80tv4Em4jWVD75lpnOHSBkPUZxZfGkrI3HI=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0/go.mod h1:nQ3how7DMnFMWiU1SpECohgC82fpn4cKZ875NDMmwtA=
+github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g=
+github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0=
+github.com/aws/aws-sdk-go-v2/config v1.27.33 h1:Nof9o/MsmH4oa0s2q9a0k7tMz5x/Yj5k06lDODWz3BU=
+github.com/aws/aws-sdk-go-v2/config v1.27.33/go.mod h1:kEqdYzRb8dd8Sy2pOdEbExTTF5v7ozEXX0McgPE7xks=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.32 h1:7Cxhp/BnT2RcGy4VisJ9miUPecY+lyE9I8JvcZofn9I=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.32/go.mod h1:P5/QMF3/DCHbXGEGkdbilXHsyTBX5D3HSwcrSc9p20I=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 h1:pfQ2sqNpMVK6xz2RbqLEL0GH87JOwSxPV2rzm8Zsb74=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13/go.mod h1:NG7RXPUlqfsCLLFfi0+IpKN4sCB9D9fw/qTaSB+xRoU=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4 h1:0ScVK/4qZ8CIW0k8jOeFVsyS/sAiXpYxRBLolMkuLQM=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4/go.mod h1:84KyjNZdHC6QZW08nfHI6yZgPd+qRgaWcYsyLUo3QY8=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE=
 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4 h1:sHmMWWX5E7guWEFQ9SVo6A3S4xpPrWnd77a6y4WM6PU=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4/go.mod h1:WjpDrhWisWOIoS9n3nk67A3Ll1vfULJ9Kq6h29HTD48=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
 github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 h1:y6LX9GUoEA3mO0qpFl1ZQHj1rFyPWVphlzebiSt2tKE=
 github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2/go.mod h1:Q0LcmaN/Qr8+4aSBrdrXXePqoX0eOuYpJLbYpilmWnA=
 github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 h1:PpbXaecV3sLAS6rjQiaKw4/jyq3Z8gNzmoJupHAoBp0=
 github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2/go.mod h1:fUHpGXr4DrXkEDpGAjClPsviWf+Bszeb0daKE0blxv8=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6 h1:b+E7zIUHMmcB4Dckjpkapoy47W6C9QBv/zoUP+Hn8Kc=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6/go.mod h1:S2fNV0rxrP78NhPbCZeQgY8H9jdDMeGtwcfZIRxzBqU=
-github.com/aws/aws-sdk-go-v2/service/kms v1.30.0 h1:yS0JkEdV6h9JOo8sy2JSpjX+i7vsKifU8SIeHrqiDhU=
-github.com/aws/aws-sdk-go-v2/service/kms v1.30.0/go.mod h1:+I8VUUSVD4p5ISQtzpgSva4I8cJ4SQ4b1dcBcof7O+g=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.3 h1:mnbuWHOcM70/OFUlZZ5rcdfA8PflGXXiefU/O+1S3+8=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.3/go.mod h1:5HFu51Elk+4oRBZVxmHrSds5jFXmFj8C3w7DVF2gnrs=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3 h1:uLq0BKatTmDzWa/Nu4WO0M1AaQDaPpwTKAeByEc6WFM=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3/go.mod h1:b+qdhjnxj8GSR6t5YfphOffeoQSQ1KmpoVVuBn+PWxs=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.5 h1:J/PpTf/hllOjx8Xu9DMflff3FajfLxqM5+tepvVXmxg=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.5/go.mod h1:0ih0Z83YDH/QeQ6Ori2yGE2XvWYv/Xm+cZc01LC6oK0=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 h1:rfprUlsdzgl7ZL2KlXiUAoJnI/VxfHCvDFr2QDFj6u4=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19/go.mod h1:SCWkEdRq8/7EK60NcvvQ6NXKuTcchAD4ROAsC37VEZE=
+github.com/aws/aws-sdk-go-v2/service/kms v1.35.7 h1:v0D1LeMkA/X+JHAZWERrr+sUGOt8KrCZKnJA6KszkcE=
+github.com/aws/aws-sdk-go-v2/service/kms v1.35.7/go.mod h1:K9lwD0Rsx9+NSaJKsdAdlDK4b2G4KKOEve9PzHxPoMI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 h1:pIaGg+08llrP7Q5aiz9ICWbY8cqhTkyy+0SHvfzQpTc=
+github.com/aws/aws-sdk-go-v2/service/sso v1.22.7/go.mod h1:eEygMHnTKH/3kNp9Jr1n3PdejuSNcgwLe1dWgQtO0VQ=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 h1:/Cfdu0XV3mONYKaOt1Gr0k1KvQzkzPyiKUdlWJqy+J4=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7/go.mod h1:bCbAxKDqNvkHxRaIMnyVPXPo+OaPRwvmgzMxbz1VKSA=
+github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 h1:NKTa1eqZYw8tiHSRGpP0VtTdub/8KNk8sDkNPFaOKDE=
+github.com/aws/aws-sdk-go-v2/service/sts v1.30.7/go.mod h1:NXi1dIAGteSaRLqYgarlhP/Ij0cFT+qmCwiJqWh/U5o=
 github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
-github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw=
-github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
+github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4=
+github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
 github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 h1:SoFYaT9UyGkR0+nogNyD/Lj+bsixB+SNuAS4ABlEs6M=
 github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8/go.mod h1:2JF49jcDOrLStIXN/j/K1EKRq8a8R2qRnlZA6/o/c7c=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -164,19 +171,21 @@ github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdn
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
-github.com/buildkite/agent/v3 v3.62.0 h1:yvzSjI8Lgifw883I8m9u8/L/Thxt4cLFd5aWPn3gg70=
-github.com/buildkite/agent/v3 v3.62.0/go.mod h1:jN6SokGXrVNNIpI0BGQ+j5aWeI3gin8F+3zwA5Q6gqM=
-github.com/buildkite/go-pipeline v0.3.2 h1:SW4EaXNwfjow7xDRPGgX0Rcx+dPj5C1kV9LKCLjWGtM=
-github.com/buildkite/go-pipeline v0.3.2/go.mod h1:iY5jzs3Afc8yHg6KDUcu3EJVkfaUkd9x/v/OH98qyUA=
-github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251 h1:k6UDF1uPYOs0iy1HPeotNa155qXRWrzKnqAaGXHLZCE=
-github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251/go.mod h1:gbPR1gPu9dB96mucYIR7T3B7p/78hRVSOuzIWLHK2Y4=
+github.com/buildkite/agent/v3 v3.81.0 h1:JVfkng2XnsXesFXwiFwLJFkuzVu4zvoJCvedfoIXD6E=
+github.com/buildkite/agent/v3 v3.81.0/go.mod h1:edJeyycODRxaFvpT22rDGwaQ5oa4eB8GjtbjgX5VpFw=
+github.com/buildkite/go-pipeline v0.13.1 h1:Y9p8pQIwPtauVwNrcmTDH6+XK7jE1nLuvWVaK8oymA8=
+github.com/buildkite/go-pipeline v0.13.1/go.mod h1:2HHqlSFTYgHFhzedJu0LhLs9n5c9XkYnHiQFVN5HE4U=
+github.com/buildkite/interpolate v0.1.3 h1:OFEhqji1rNTRg0u9DsSodg63sjJQEb1uWbENq9fUOBM=
+github.com/buildkite/interpolate v0.1.3/go.mod h1:UNVe6A+UfiBNKbhAySrBbZFZFxQ+DXr9nWen6WVt/A8=
+github.com/buildkite/roko v1.2.0 h1:hbNURz//dQqNl6Eo9awjQOVOZwSDJ8VEbBDxSfT9rGQ=
+github.com/buildkite/roko v1.2.0/go.mod h1:23R9e6nHxgedznkwwfmqZ6+0VJZJZ2Sg/uVcp2cP46I=
 github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
 github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA=
 github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q=
 github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
 github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cert-manager/cert-manager v1.14.5 h1:uuM1O2g2S80nxiH3eW2cZYMGiL2zmDFVdAzg8sibWuc=
 github.com/cert-manager/cert-manager v1.14.5/go.mod h1:fmr/cU5jiLxWj69CroDggSOa49RljUK+dU583TaQUXM=
@@ -207,14 +216,14 @@ github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G
 github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
 github.com/containers/common v0.60.2 h1:utcwp2YkO8c0mNlwRxsxfOiqfj157FRrBjxgjR6f+7o=
 github.com/containers/common v0.60.2/go.mod h1:I0upBi1qJX3QmzGbUOBN1LVP6RvkKhd3qQpZbQT+Q54=
-github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=
-github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac=
+github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
+github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
 github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=
-github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/creack/pty v1.1.19 h1:tUN6H7LWqNx4hQVxomd0CVsDwaDr9gaRQaI4GpSmrsA=
+github.com/creack/pty v1.1.19/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM=
 github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
 github.com/cyphar/filepath-securejoin v0.3.1 h1:1V7cHiaW+C+39wEfpH6XlLBQo3j/PciWFrgfCLS8XrE=
@@ -266,8 +275,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI=
-github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4=
+github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI=
+github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
 github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
 github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -282,8 +291,8 @@ github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
 github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
 github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
 github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
-github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
-github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
+github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E=
+github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
 github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -315,14 +324,16 @@ github.com/go-piv/piv-go v1.11.0 h1:5vAaCdRTFSIW4PeqMbnsDlUZ7odMYWnHBDGdmtU/Zhg=
 github.com/go-piv/piv-go v1.11.0/go.mod h1:NZ2zmjVkfFaL/CF8cVQ/pXdXtuj110zEKGdJM6fJZZM=
 github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
 github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
-github.com/go-rod/rod v0.116.0 h1:ypRryjTys3EnqHskJ/TdgodFMvXV0EHvmy4bSkKZgHM=
-github.com/go-rod/rod v0.116.0/go.mod h1:aiedSEFg5DwG/fnNbUOTPMTTWX3MRj6vIs/a684Mthw=
+github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA=
+github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg=
+github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
-github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
+github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
+github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
 github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA=
 github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
@@ -362,8 +373,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/certificate-transparency-go v1.1.8 h1:LGYKkgZF7satzgTak9R4yzfJXEeYVAjV6/EAEJOf1to=
-github.com/google/certificate-transparency-go v1.1.8/go.mod h1:bV/o8r0TBKRf1X//iiiSgWrvII4d7/8OiA+3vG26gI8=
+github.com/google/certificate-transparency-go v1.2.1 h1:4iW/NwzqOqYEEoCBEFP+jPbBXbLqMpq3CifMyOnDUME=
+github.com/google/certificate-transparency-go v1.2.1/go.mod h1:bvn/ytAccv+I6+DGkqpvSsEdiVGramgaSC6RD3tEmeE=
 github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pSrTMoa9+EiY7igmkM=
 github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
 github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
@@ -392,8 +403,8 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
 github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
-github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
-github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
+github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
+github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
 github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w=
 github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM=
 github.com/google/trillian v1.6.0 h1:jMBeDBIkINFvS2n6oV5maDqfRlxREAc6CW9QYWQ0qT4=
@@ -402,17 +413,17 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= -github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= +github.com/googleapis/enterprise-certificate-proxy v0.3.3 h1:QRje2j5GZimBzlbhGA2V2QlGNgL8G6e+wGo/+/2bWI0= +github.com/googleapis/enterprise-certificate-proxy v0.3.3/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -431,16 +442,21 @@ github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-sockaddr v1.0.6 h1:RSG8rKU28VTUTvEKghe5gIhIQpv8evvNpnDEyqO4u9I= github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/vault/api v1.12.2 h1:7YkCTE5Ni90TcmYHDBExdt4WGJxhpzaHqR6uGbQb/rE= -github.com/hashicorp/vault/api v1.12.2/go.mod h1:LSGf1NGT1BnvFFnKVtnvcaLBM2Lz+gJdpL6HUYed8KE= +github.com/hashicorp/vault/api v1.14.0 h1:Ah3CFLixD5jmjusOgm8grfN9M0d+Y8fVR2SW0K6pJLU= +github.com/hashicorp/vault/api v1.14.0/go.mod h1:pV9YLxBGSz+cItFDd8Ii4G17waWOQ32zVjMWHe/cOqk= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod 
h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/in-toto/attestation v1.1.0 h1:oRWzfmZPDSctChD0VaQV7MJrywKOzyNrtpENQFq//2Q= +github.com/in-toto/attestation v1.1.0/go.mod h1:DB59ytd3z7cIHgXxwpSX2SABrU6WJUKg/grpdgHVgVs= github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -474,8 +490,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 h1:aiPrFdHDCCvigNBCkOWj2lv9Bx5xDp210OANZEoiP0I= -github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0/go.mod h1:srVwm2N3DC/tWqQ+igZXDrmKlNRN8X/dmJ1wEZrv760= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= @@ -515,10 +531,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mogensen/kubernetes-split-yaml v0.4.0 h1:6Qz3Bgl2NoQFXsSLgna8mbwOB+P4W7mkX5reURM3Bt4= github.com/mogensen/kubernetes-split-yaml v0.4.0/go.mod h1:oAOVhvl6iw8kf0+5USxSfVInVfMkva4HVu2AbRZkn74= -github.com/mozillazg/docker-credential-acr-helper v0.3.0 h1:DVWFZ3/O8BP6Ue3iS/Olw+G07u1hCq1EOVCDZZjCIBI= -github.com/mozillazg/docker-credential-acr-helper v0.3.0/go.mod h1:cZlu3tof523ujmLuiNUb6JsjtHcNA70u1jitrrdnuyA= -github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de h1:D5x39vF5KCwKQaw+OC9ZPiLVHXz3UFw2+psEX+gYcto= -github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de/go.mod h1:kJun4WP5gFuHZgRjZUWWuH1DTxCtxbHDOIJsudS8jzY= +github.com/mozillazg/docker-credential-acr-helper v0.4.0 h1:Uoh3Z9CcpEDnLiozDx+D7oDgRq7X+R296vAqAumnOcw= +github.com/mozillazg/docker-credential-acr-helper v0.4.0/go.mod h1:2kiicb3OlPytmlNC9XGkLvVC+f0qTiJw3f/mhmeeQBg= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= @@ -530,8 +544,8 @@ github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod 
h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM= -github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60= +github.com/oleiade/reflections v1.1.0 h1:D+I/UsXQB4esMathlt0kkZRJZdUDmhv5zGi/HOwYTWo= +github.com/oleiade/reflections v1.1.0/go.mod h1:mCxx0QseeVCHs5Um5HhJeCKVC7AwS8kO67tky4rdisA= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= @@ -546,8 +560,8 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= -github.com/open-policy-agent/opa v0.61.0 h1:nhncQ2CAYtQTV/SMBhDDPsCpCQsUW+zO/1j+T5V7oZg= -github.com/open-policy-agent/opa v0.61.0/go.mod h1:7OUuzJnsS9yHf8lw0ApfcbrnaRG1EkN3J2fuuqi4G/E= +github.com/open-policy-agent/opa v0.68.0 h1:Jl3U2vXRjwk7JrHmS19U3HZO5qxQRinQbJ2eCJYSqJQ= +github.com/open-policy-agent/opa v0.68.0/go.mod h1:5E5SvaPwTpwt2WM177I9Z3eT7qUpmOGjk1ZdHs+TZ4w= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -566,8 +580,8 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pjbgf/go-apparmor v0.1.2 h1:FvMwkThr/XjL3PLAmzpW+p+OcaUWWi92hRi9uc7BdQg= github.com/pjbgf/go-apparmor v0.1.2/go.mod h1:k4hKHV2RPt9Z1zvg9Y6nMjTgUyIMoKNdCwNsl2qURds= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= @@ -615,26 +629,32 @@ github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbm github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= github.com/shibumi/go-pathspec v1.3.0/go.mod 
h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= -github.com/sigstore/cosign/v2 v2.2.3 h1:WX7yawI+EXu9h7S5bZsfYCbB9XW6Jc43ctKy/NoOSiA= -github.com/sigstore/cosign/v2 v2.2.3/go.mod h1:WpMn4MBt0cI23GdHsePwO4NxhX1FOz1ITGB3ALUjFaI= -github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc= -github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8= +github.com/sigstore/cosign/v2 v2.4.1 h1:b8UXEfJFks3hmTwyxrRNrn6racpmccUycBHxDMkEPvU= +github.com/sigstore/cosign/v2 v2.4.1/go.mod h1:GvzjBeUKigI+XYnsoVQDmMAsMMc6engxztRSuxE+x9I= +github.com/sigstore/fulcio v1.6.3 h1:Mvm/bP6ELHgazqZehL8TANS1maAkRoM23CRAdkM4xQI= +github.com/sigstore/fulcio v1.6.3/go.mod h1:5SDgLn7BOUVLKe1DwOEX3wkWFu5qEmhUlWm+SFf0GH8= +github.com/sigstore/protobuf-specs v0.3.2 h1:nCVARCN+fHjlNCk3ThNXwrZRqIommIeNKWwQvORuRQo= +github.com/sigstore/protobuf-specs v0.3.2/go.mod h1:RZ0uOdJR4OB3tLQeAyWoJFbNCBFrPQdcokntde4zRBA= github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= -github.com/sigstore/sigstore v1.8.4 h1:g4ICNpiENFnWxjmBzBDWUn62rNFeny/P77HUC8da32w= -github.com/sigstore/sigstore v1.8.4/go.mod h1:1jIKtkTFEeISen7en+ZPWdDHazqhxco/+v9CNjc7oNg= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3 h1:LTfPadUAo+PDRUbbdqbeSl2OuoFQwUFTnJ4stu+nwWw= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3/go.mod h1:QV/Lxlxm0POyhfyBtIbTWxNeF18clMlkkyL9mu45y18= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3 h1:xgbPRCr2npmmsuVVteJqi/ERw9+I13Wou7kq0Yk4D8g= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3/go.mod h1:G4+I83FILPX6MtnoaUdmv/bRGEVtR3JdLeJa/kXdk/0= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.3 h1:vDl2fqPT0h3D/k6NZPlqnKFd1tz3335wm39qjvpZNJc= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.3/go.mod h1:9uOJXbXEXj+M6QjMKH5PaL5WDMu43rHfbIMgXzA8eKI= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3 h1:h9G8j+Ds21zqqulDbA/R/ft64oQQIyp8S7wJYABYSlg= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3/go.mod h1:zgCeHOuqF6k7A7TTEvftcA9V3FRzB7mrPtHOhXAQBnc= -github.com/sigstore/timestamp-authority v1.2.1 h1:j9RmqSAdvKgSofeltPO4x7d+1M3AXaROBzUJ+AA7L5Q= -github.com/sigstore/timestamp-authority v1.2.1/go.mod h1:Ce+vWWEf0QaKLY2u6mpwEJbmYXEVeOfUk4fQ69kE6ck= +github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk= +github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w= +github.com/sigstore/sigstore-go v0.6.1 h1:tGkkv1oDIER+QYU5MrjqlttQOVDWfSkmYwMqkJhB/cg= +github.com/sigstore/sigstore-go v0.6.1/go.mod h1:Xe5GHmUeACRFbomUWzVkf/xYCn8xVifb9DgqJrV2dIw= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.8 h1:2zHmUvaYCwV6LVeTo+OAkTm8ykOGzA9uFlAjwDPAUWM= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.8/go.mod h1:OEhheBplZinUsm7W9BupafztVZV3ldkAxEHbpAeC0Pk= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.8 h1:RKk4Z+qMaLORUdT7zntwMqKiYAej1VQlCswg0S7xNSY= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.8/go.mod h1:dMJdlBWKHMu2xf0wIKpbo7+QfG+RzVkBB3nHP8EMM5o= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.8 h1:89Xtxj8oqZt3UlSpCP4wApFvnQ2Z/dgowW5QOVhQigI= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.8/go.mod h1:Wa4xn/H3pU/yW/6tHiMXTpObBtBSGC5q29KYFEPKN6o= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.8 h1:Zte3Oogkd8m+nu2oK3yHtGmN++TZWh2Lm6q2iSprT1M= 
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.8/go.mod h1:j00crVw6ki4/WViXflw0zWgNALrAzZT+GbIK8v7Xlz4= +github.com/sigstore/timestamp-authority v1.2.2 h1:X4qyutnCQqJ0apMewFyx+3t7Tws00JQ/JonBiu3QvLE= +github.com/sigstore/timestamp-authority v1.2.2/go.mod h1:nEah4Eq4wpliDjlY342rXclGSO7Kb9hoRrl9tqLW13A= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= @@ -654,17 +674,17 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= -github.com/spiffe/go-spiffe/v2 v2.1.7 h1:VUkM1yIyg/x8X7u1uXqSRVRCdMdfRIEdFBzpqoeASGk= -github.com/spiffe/go-spiffe/v2 v2.1.7/go.mod h1:QJDGdhXllxjxvd5B+2XnhhXB/+rC8gr+lNrtOryiWeE= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/spiffe/go-spiffe/v2 v2.3.0 h1:g2jYNb/PDMB8I7mBGL2Zuq/Ur6hUhoroxGQFyD6tTj8= +github.com/spiffe/go-spiffe/v2 v2.3.0/go.mod h1:Oxsaio7DBgSNqhAO9i/9tLClaVlfRok7zvJnTV8ZyIY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= @@ -686,6 +706,8 @@ github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gt github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= +github.com/theupdateframework/go-tuf/v2 v2.0.1 h1:11p9tXpq10KQEujxjcIjDSivMKCMLguls7erXHZnxJQ= +github.com/theupdateframework/go-tuf/v2 v2.0.1/go.mod h1:baB22nBHeHBCeuGZcIlctNq4P61PcOdyARlplg5xmLA= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= @@ -697,8 +719,8 @@ 
github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8= github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/xanzy/go-gitlab v0.96.0 h1:LGkZ+wSNMRtHIBaYE4Hq3dZVjprwHv3Y1+rhKU3WETs= -github.com/xanzy/go-gitlab v0.96.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI= +github.com/xanzy/go-gitlab v0.109.0 h1:RcRme5w8VpLXTSTTMZdVoQWY37qTJWg+gwdQl4aAttE= +github.com/xanzy/go-gitlab v0.109.0/go.mod h1:wKNKh3GkYDMOsGmnfuX+ITCmDuSDWFO0G+C4AygL9RY= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= @@ -711,12 +733,12 @@ github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= -github.com/ysmood/got v0.34.1 h1:IrV2uWLs45VXNvZqhJ6g2nIhY+pgIG1CUoOcqfXFl1s= -github.com/ysmood/got v0.34.1/go.mod h1:yddyjq/PmAf08RMLSwDjPyCvHvYed+WjHnQxpH851LM= +github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= +github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= -github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak= -github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= +github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= +github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -729,26 +751,26 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= -go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.step.sm/crypto v0.44.2 h1:t3p3uQ7raP2jp2ha9P6xkQF85TJZh+87xmjSLaib+jk= -go.step.sm/crypto v0.44.2/go.mod h1:x1439EnFhadzhkuaGX7sz03LEMQ+jV4gRamf5LCZJQQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.step.sm/crypto v0.51.2 h1:5EiCGIMg7IvQTGmJrwRosbXeprtT80OhoS/PJarg60o= +go.step.sm/crypto v0.51.2/go.mod h1:QK7czLjN2k+uqVp5CHXxJbhc70kVRSP+0CQF3zsR5M0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -768,6 +790,7 @@ golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2Uz golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= @@ -804,15 +827,15 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -821,6 +844,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -852,6 +876,7 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -862,6 +887,7 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= @@ -875,11 +901,12 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -902,19 +929,19 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk= -google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis= +google.golang.org/api v0.196.0 h1:k/RafYqebaIJBO3+SMnfEGtFVlvp5vSgqTUF54UN/zg= +google.golang.org/api v0.196.0/go.mod h1:g9IL21uGkYgvQ5BZg6BAtoGJQIm8r6EgaAbpNey5wBE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 
h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s= -google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7/go.mod h1:/3XmxOjePkvmKrHuBy4zNFw7IzxJXtAgdpXi8Ll990U= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -979,8 +1006,8 @@ k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240103051144-eec4567ac022 h1:avRdiaB03v88Mfvum2S3BBwkNuTlmuar4LlfO9Hajko= k8s.io/kube-openapi v0.0.0-20240103051144-eec4567ac022/go.mod h1:sIV51WBTkZrlGOJMCDZDA1IaPBUDTulPpD4y7oe038k= -k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= -k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.4.0 h1:i+Wt5oCaMHu99guBD0yuBjdLvX7Lz8ukPbwXdR7uBMs= oras.land/oras-go/v2 v2.4.0/go.mod h1:osvtg0/ClRq1KkydMAEu/IxFieyjItcsQ4ut4PPF+f8= sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk= @@ -991,8 +1018,8 @@ sigs.k8s.io/gateway-api v1.0.0 h1:iPTStSv41+d9p0xFydll6d7f7MOBGuqXM6p2/zVYMAs= sigs.k8s.io/gateway-api v1.0.0/go.mod h1:4cUgr0Lnp5FZ0Cdq8FdRwCvpiWws7LVhLHGIudLlf4c= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/release-utils v0.8.1 h1:qSA9p3vZzO6RAq7zvzupCZjR29+n3NK9DSJPe9bSf7w= -sigs.k8s.io/release-utils v0.8.1/go.mod h1:vrQ3eR1VmudgX4OUwr4pUZEkYLRms9bdbv06mr3kchQ= +sigs.k8s.io/release-utils v0.8.4 h1:4QVr3UgbyY/d9p74LBhg0njSVQofUsAZqYOzVZBhdBw= +sigs.k8s.io/release-utils v0.8.4/go.mod h1:m1bHfscTemQp+z+pLCZnkXih9n0+WukIUU70n6nFnU0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= 
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md
new file mode 100644
index 0000000000..5584c350b0
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/CHANGES.md
@@ -0,0 +1,261 @@
+# Changelog
+
+## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03)
+
+
+### Bug Fixes
+
+* **auth:** Choose quota project envvar over file when both present ([#10807](https://github.com/googleapis/google-cloud-go/issues/10807)) ([2d8dd77](https://github.com/googleapis/google-cloud-go/commit/2d8dd7700eff92d4b95027be55e26e1e7aa79181)), refs [#10804](https://github.com/googleapis/google-cloud-go/issues/10804)
+
+## [0.9.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.1...auth/v0.9.2) (2024-08-30)
+
+
+### Bug Fixes
+
+* **auth:** Handle non-Transport DefaultTransport ([#10733](https://github.com/googleapis/google-cloud-go/issues/10733)) ([98d91dc](https://github.com/googleapis/google-cloud-go/commit/98d91dc8316b247498fab41ab35e57a0446fe556)), refs [#10742](https://github.com/googleapis/google-cloud-go/issues/10742)
+* **auth:** Make sure quota option takes precedence over env/file ([#10797](https://github.com/googleapis/google-cloud-go/issues/10797)) ([f1b050d](https://github.com/googleapis/google-cloud-go/commit/f1b050d56d804b245cab048c2980d32b0eaceb4e)), refs [#10795](https://github.com/googleapis/google-cloud-go/issues/10795)
+
+
+### Documentation
+
+* **auth:** Fix Go doc comment link ([#10751](https://github.com/googleapis/google-cloud-go/issues/10751)) ([015acfa](https://github.com/googleapis/google-cloud-go/commit/015acfab4d172650928bb1119bc2cd6307b9a437))
+
+## [0.9.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.0...auth/v0.9.1) (2024-08-22)
+
+
+### Bug Fixes
+
+* **auth:** Setting expireEarly to default when the value is 0 ([#10732](https://github.com/googleapis/google-cloud-go/issues/10732)) ([5e67869](https://github.com/googleapis/google-cloud-go/commit/5e67869a31e9e8ecb4eeebd2cfa11a761c3b1948))
+
+## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.1...auth/v0.9.0) (2024-08-16)
+
+
+### Features
+
+* **auth:** Auth library can talk to S2A over mTLS ([#10634](https://github.com/googleapis/google-cloud-go/issues/10634)) ([5250a13](https://github.com/googleapis/google-cloud-go/commit/5250a13ec95b8d4eefbe0158f82857ff2189cb45))
+
+## [0.8.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.0...auth/v0.8.1) (2024-08-13)
+
+
+### Bug Fixes
+
+* **auth:** Make default client creation more lenient ([#10669](https://github.com/googleapis/google-cloud-go/issues/10669)) ([1afb9ee](https://github.com/googleapis/google-cloud-go/commit/1afb9ee1ee9de9810722800018133304a0ca34d1)), refs [#10638](https://github.com/googleapis/google-cloud-go/issues/10638)
+
+## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.3...auth/v0.8.0) (2024-08-07)
+
+
+### Features
+
+* **auth:** Adds support for X509 workload identity federation ([#10373](https://github.com/googleapis/google-cloud-go/issues/10373)) ([5d07505](https://github.com/googleapis/google-cloud-go/commit/5d075056cbe27bb1da4072a26070c41f8999eb9b))
+
+## [0.7.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.2...auth/v0.7.3) (2024-08-01)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+* **auth:** Disable automatic universe domain check for MDS ([#10620](https://github.com/googleapis/google-cloud-go/issues/10620)) ([7cea5ed](https://github.com/googleapis/google-cloud-go/commit/7cea5edd5a0c1e6bca558696f5607879141910e8))
+* **auth:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+
+## [0.7.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.1...auth/v0.7.2) (2024-07-22)
+
+
+### Bug Fixes
+
+* **auth:** Use default client for universe metadata lookup ([#10551](https://github.com/googleapis/google-cloud-go/issues/10551)) ([d9046fd](https://github.com/googleapis/google-cloud-go/commit/d9046fdd1435d1ce48f374806c1def4cb5ac6cd3)), refs [#10544](https://github.com/googleapis/google-cloud-go/issues/10544)
+
+## [0.7.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.0...auth/v0.7.1) (2024-07-10)
+
+
+### Bug Fixes
+
+* **auth:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
+
+## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.1...auth/v0.7.0) (2024-07-09)
+
+
+### Features
+
+* **auth:** Add workload X509 cert provider as a default cert provider ([#10479](https://github.com/googleapis/google-cloud-go/issues/10479)) ([c51ee6c](https://github.com/googleapis/google-cloud-go/commit/c51ee6cf65ce05b4d501083e49d468c75ac1ea63))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
+* **auth:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
+* **auth:** Check len of slices, not non-nil ([#10483](https://github.com/googleapis/google-cloud-go/issues/10483)) ([0a966a1](https://github.com/googleapis/google-cloud-go/commit/0a966a183e5f0e811977216d736d875b7233e942))
+
+## [0.6.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.0...auth/v0.6.1) (2024-07-01)
+
+
+### Bug Fixes
+
+* **auth:** Support gRPC API keys ([#10460](https://github.com/googleapis/google-cloud-go/issues/10460)) ([daa6646](https://github.com/googleapis/google-cloud-go/commit/daa6646d2af5d7fb5b30489f4934c7db89868c7c))
+* **auth:** Update http and grpc transports to support token exchange over mTLS ([#10397](https://github.com/googleapis/google-cloud-go/issues/10397)) ([c6dfdcf](https://github.com/googleapis/google-cloud-go/commit/c6dfdcf893c3f971eba15026c12db0a960ae81f2))
+
+## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.2...auth/v0.6.0) (2024-06-25)
+
+
+### Features
+
+* **auth:** Add non-blocking token refresh for compute MDS ([#10263](https://github.com/googleapis/google-cloud-go/issues/10263)) ([9ac350d](https://github.com/googleapis/google-cloud-go/commit/9ac350da11a49b8e2174d3fc5b1a5070fec78b4e))
+
+
+### Bug Fixes
+
+* **auth:** Return error if envvar detected file returns an error ([#10431](https://github.com/googleapis/google-cloud-go/issues/10431)) ([e52b9a7](https://github.com/googleapis/google-cloud-go/commit/e52b9a7c45468827f5d220ab00965191faeb9d05))
+
+## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.1...auth/v0.5.2) (2024-06-24)
+
+
+### Bug Fixes
+
+* **auth:** Fetch initial token when CachedTokenProviderOptions.DisableAutoRefresh is true ([#10415](https://github.com/googleapis/google-cloud-go/issues/10415)) ([3266763](https://github.com/googleapis/google-cloud-go/commit/32667635ca2efad05cd8c087c004ca07d7406913)), refs [#10414](https://github.com/googleapis/google-cloud-go/issues/10414)
+
+## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.0...auth/v0.5.1) (2024-05-31)
+
+
+### Bug Fixes
+
+* **auth:** Pass through client to 2LO and 3LO flows ([#10290](https://github.com/googleapis/google-cloud-go/issues/10290)) ([685784e](https://github.com/googleapis/google-cloud-go/commit/685784ea84358c15e9214bdecb307d37aa3b6d2f))
+
+## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.2...auth/v0.5.0) (2024-05-28)
+
+
+### Features
+
+* **auth:** Adds X509 workload certificate provider ([#10233](https://github.com/googleapis/google-cloud-go/issues/10233)) ([17a9db7](https://github.com/googleapis/google-cloud-go/commit/17a9db73af35e3d1a7a25ac4fd1377a103de6150))
+
+## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.1...auth/v0.4.2) (2024-05-16)
+
+
+### Bug Fixes
+
+* **auth:** Enable client certificates by default only for GDU ([#10151](https://github.com/googleapis/google-cloud-go/issues/10151)) ([7c52978](https://github.com/googleapis/google-cloud-go/commit/7c529786275a39b7e00525f7d5e7be0d963e9e15))
+* **auth:** Handle non-Transport DefaultTransport ([#10162](https://github.com/googleapis/google-cloud-go/issues/10162)) ([fa3bfdb](https://github.com/googleapis/google-cloud-go/commit/fa3bfdb23aaa45b34394a8b61e753b3587506782)), refs [#10159](https://github.com/googleapis/google-cloud-go/issues/10159)
+* **auth:** Have refresh time match docs ([#10147](https://github.com/googleapis/google-cloud-go/issues/10147)) ([bcb5568](https://github.com/googleapis/google-cloud-go/commit/bcb5568c07a54dd3d2e869d15f502b0741a609e8))
+* **auth:** Update compute token fetching error with named prefix ([#10180](https://github.com/googleapis/google-cloud-go/issues/10180)) ([4573504](https://github.com/googleapis/google-cloud-go/commit/4573504828d2928bebedc875d87650ba227829ea))
+
+## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.0...auth/v0.4.1) (2024-05-09)
+
+
+### Bug Fixes
+
+* **auth:** Don't try to detect default creds if opts configured ([#10143](https://github.com/googleapis/google-cloud-go/issues/10143)) ([804632e](https://github.com/googleapis/google-cloud-go/commit/804632e7c5b0b85ff522f7951114485e256eb5bc))
+
+## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.3.0...auth/v0.4.0) (2024-05-07)
+
+
+### Features
+
+* **auth:** Enable client certificates by default ([#10102](https://github.com/googleapis/google-cloud-go/issues/10102)) ([9013e52](https://github.com/googleapis/google-cloud-go/commit/9013e5200a6ec0f178ed91acb255481ffb073a2c))
+
+
+### Bug Fixes
+
+* **auth:** Get s2a logic up to date ([#10093](https://github.com/googleapis/google-cloud-go/issues/10093)) ([4fe9ae4](https://github.com/googleapis/google-cloud-go/commit/4fe9ae4b7101af2a5221d6d6b2e77b479305bb06))
+
+## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.2...auth/v0.3.0) (2024-04-23)
+
+
+### Features
+
+* **auth/httptransport:** Add ability to customize transport ([#10023](https://github.com/googleapis/google-cloud-go/issues/10023)) ([72c7f6b](https://github.com/googleapis/google-cloud-go/commit/72c7f6bbec3136cc7a62788fc7186bc33ef6c3b3)), refs [#9812](https://github.com/googleapis/google-cloud-go/issues/9812) [#9814](https://github.com/googleapis/google-cloud-go/issues/9814)
+
+
+### Bug Fixes
+
+* **auth/credentials:** Error on bad file name if explicitly set ([#10018](https://github.com/googleapis/google-cloud-go/issues/10018)) ([55beaa9](https://github.com/googleapis/google-cloud-go/commit/55beaa993aaf052d8be39766afc6777c3c2a0bdd)), refs [#9809](https://github.com/googleapis/google-cloud-go/issues/9809)
+
+## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.1...auth/v0.2.2) (2024-04-19)
+
+
+### Bug Fixes
+
+* **auth:** Add internal opt to skip validation on transports ([#9999](https://github.com/googleapis/google-cloud-go/issues/9999)) ([9e20ef8](https://github.com/googleapis/google-cloud-go/commit/9e20ef89f6287d6bd03b8697d5898dc43b4a77cf)), refs [#9823](https://github.com/googleapis/google-cloud-go/issues/9823)
+* **auth:** Set secure flag for gRPC conn pools ([#10002](https://github.com/googleapis/google-cloud-go/issues/10002)) ([14e3956](https://github.com/googleapis/google-cloud-go/commit/14e3956dfd736399731b5ee8d9b178ae085cf7ba)), refs [#9833](https://github.com/googleapis/google-cloud-go/issues/9833)
+
+## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.0...auth/v0.2.1) (2024-04-18)
+
+
+### Bug Fixes
+
+* **auth:** Default gRPC token type to Bearer if not set ([#9800](https://github.com/googleapis/google-cloud-go/issues/9800)) ([5284066](https://github.com/googleapis/google-cloud-go/commit/5284066670b6fe65d79089cfe0199c9660f87fc7))
+
+## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.1...auth/v0.2.0) (2024-04-15)
+
+### Breaking Changes
+
+The commits below introduced a few large breaking changes since the last
+release of the module.
+
+1. The `Credentials` type has been moved to the root of the module as it is
+   becoming the core abstraction for the whole module.
+2. Because of that change, many functions that previously returned a
+   `TokenProvider` now return `Credentials`. Similarly, these functions have
+   been renamed to be more specific.
+3. Most places that used to take an optional `TokenProvider` now accept
+   `Credentials`. You can make a `Credentials` from a `TokenProvider` using the
+   constructor found in the `auth` package.
+4. The `detect` package has been renamed to `credentials`. With this change some
+   function signatures were also updated for better readability.
+5. Derivative auth flows like `impersonate` and `downscope` have been moved to
+   be under the new `credentials` package.
+
+Although these changes are disruptive, we think they are best for the
+long-term health of the module. We do not expect more large breaking changes
+like these in future revisions, even before 1.0.0. This will be the first
+version of the auth library that our client libraries start to use and depend
+on.
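To make the 0.2.0 migration concrete, here is a minimal sketch of the post-rename surface, assuming the `credentials.DetectDefault` / `*auth.Credentials` API described in the notes above; the scope string and printed field are illustrative only:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/auth/credentials"
)

func main() {
	ctx := context.Background()

	// Detection now lives in the credentials package (formerly detect)
	// and returns *auth.Credentials rather than a bare TokenProvider.
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatalf("detect: %v", err)
	}

	// Credentials is the core abstraction; a token can be fetched from it.
	tok, err := creds.Token(ctx)
	if err != nil {
		log.Fatalf("token: %v", err)
	}
	fmt.Println("token type:", tok.Type)
}
```

Code that still holds a `TokenProvider` can be adapted incrementally via the constructor mentioned in item 3 (e.g. `auth.NewCredentials(&auth.CredentialsOptions{TokenProvider: tp})`) rather than being rewritten all at once.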
+
+### Features
+
+* **auth/credentials/externalaccount:** Add default TokenURL ([#9700](https://github.com/googleapis/google-cloud-go/issues/9700)) ([81830e6](https://github.com/googleapis/google-cloud-go/commit/81830e6848ceefd055aa4d08f933d1154455a0f6))
+* **auth:** Add downscope.Options.UniverseDomain ([#9634](https://github.com/googleapis/google-cloud-go/issues/9634)) ([52cf7d7](https://github.com/googleapis/google-cloud-go/commit/52cf7d780853594291c4e34302d618299d1f5a1d))
+* **auth:** Add universe domain to grpctransport and httptransport ([#9663](https://github.com/googleapis/google-cloud-go/issues/9663)) ([67d353b](https://github.com/googleapis/google-cloud-go/commit/67d353beefe3b607c08c891876fbd95ab89e5fe3)), refs [#9670](https://github.com/googleapis/google-cloud-go/issues/9670)
+* **auth:** Add UniverseDomain to DetectOptions ([#9536](https://github.com/googleapis/google-cloud-go/issues/9536)) ([3618d3f](https://github.com/googleapis/google-cloud-go/commit/3618d3f7061615c0e189f376c75abc201203b501))
+* **auth:** Make package externalaccount public ([#9633](https://github.com/googleapis/google-cloud-go/issues/9633)) ([a0978d8](https://github.com/googleapis/google-cloud-go/commit/a0978d8e96968399940ebd7d092539772bf9caac))
+* **auth:** Move credentials to base auth package ([#9590](https://github.com/googleapis/google-cloud-go/issues/9590)) ([1a04baf](https://github.com/googleapis/google-cloud-go/commit/1a04bafa83c27342b9308d785645e1e5423ea10d))
+* **auth:** Refactor public sigs to use Credentials ([#9603](https://github.com/googleapis/google-cloud-go/issues/9603)) ([69cb240](https://github.com/googleapis/google-cloud-go/commit/69cb240c530b1f7173a9af2555c19e9a1beb56c5))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
+* **auth:** Fix uint32 conversion ([9221c7f](https://github.com/googleapis/google-cloud-go/commit/9221c7fa12cef9d5fb7ddc92f41f1d6204971c7b))
+* **auth:** Port sts expires fix ([#9618](https://github.com/googleapis/google-cloud-go/issues/9618)) ([7bec97b](https://github.com/googleapis/google-cloud-go/commit/7bec97b2f51ed3ac4f9b88bf100d301da3f5d1bd))
+* **auth:** Read universe_domain from all credentials files ([#9632](https://github.com/googleapis/google-cloud-go/issues/9632)) ([16efbb5](https://github.com/googleapis/google-cloud-go/commit/16efbb52e39ea4a319e5ee1e95c0e0305b6d9824))
+* **auth:** Remove content-type header from idms get requests ([#9508](https://github.com/googleapis/google-cloud-go/issues/9508)) ([8589f41](https://github.com/googleapis/google-cloud-go/commit/8589f41599d265d7c3d46a3d86c9fab2329cbdd9))
+* **auth:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
+
+## [0.1.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.0...auth/v0.1.1) (2024-03-10)
+
+
+### Bug Fixes
+
+* **auth/impersonate:** Properly send default detect params ([#9529](https://github.com/googleapis/google-cloud-go/issues/9529)) ([5b6b8be](https://github.com/googleapis/google-cloud-go/commit/5b6b8bef577f82707e51f5cc5d258d5bdf90218f)), refs [#9136](https://github.com/googleapis/google-cloud-go/issues/9136)
+* **auth:** Update grpc-go to v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c))
+* **auth:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7))
+
+## 0.1.0 (2023-10-18)
+
+
+### Features
+
+* **auth:** Add base auth package ([#8465](https://github.com/googleapis/google-cloud-go/issues/8465)) ([6a45f26](https://github.com/googleapis/google-cloud-go/commit/6a45f26b809b64edae21f312c18d4205f96b180e))
+* **auth:** Add cert support to httptransport ([#8569](https://github.com/googleapis/google-cloud-go/issues/8569)) ([37e3435](https://github.com/googleapis/google-cloud-go/commit/37e3435f8e98595eafab481bdfcb31a4c56fa993))
+* **auth:** Add Credentials.UniverseDomain() ([#8654](https://github.com/googleapis/google-cloud-go/issues/8654)) ([af0aa1e](https://github.com/googleapis/google-cloud-go/commit/af0aa1ed8015bc8fe0dd87a7549ae029107cbdb8))
+* **auth:** Add detect package ([#8491](https://github.com/googleapis/google-cloud-go/issues/8491)) ([d977419](https://github.com/googleapis/google-cloud-go/commit/d977419a3269f6acc193df77a2136a6eb4b4add7))
+* **auth:** Add downscope package ([#8532](https://github.com/googleapis/google-cloud-go/issues/8532)) ([dda9bff](https://github.com/googleapis/google-cloud-go/commit/dda9bff8ec70e6d104901b4105d13dcaa4e2404c))
+* **auth:** Add grpctransport package ([#8625](https://github.com/googleapis/google-cloud-go/issues/8625)) ([69a8347](https://github.com/googleapis/google-cloud-go/commit/69a83470bdcc7ed10c6c36d1abc3b7cfdb8a0ee5))
+* **auth:** Add httptransport package ([#8567](https://github.com/googleapis/google-cloud-go/issues/8567)) ([6898597](https://github.com/googleapis/google-cloud-go/commit/6898597d2ea95d630fcd00fd15c58c75ea843bff))
+* **auth:** Add idtoken package ([#8580](https://github.com/googleapis/google-cloud-go/issues/8580)) ([a79e693](https://github.com/googleapis/google-cloud-go/commit/a79e693e97e4e3e1c6742099af3dbc58866d88fe))
+* **auth:** Add impersonate package ([#8578](https://github.com/googleapis/google-cloud-go/issues/8578)) ([e29ba0c](https://github.com/googleapis/google-cloud-go/commit/e29ba0cb7bd3888ab9e808087027dc5a32474c04))
+* **auth:** Add support for external accounts in detect ([#8508](https://github.com/googleapis/google-cloud-go/issues/8508)) ([62210d5](https://github.com/googleapis/google-cloud-go/commit/62210d5d3e56e8e9f35db8e6ac0defec19582507))
+* **auth:** Port external account changes ([#8697](https://github.com/googleapis/google-cloud-go/issues/8697)) ([5823db5](https://github.com/googleapis/google-cloud-go/commit/5823db5d633069999b58b9131a7f9cd77e82c899))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
+* **auth:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
diff --git a/vendor/cloud.google.com/go/auth/LICENSE b/vendor/cloud.google.com/go/auth/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md
new file mode 100644
index 0000000000..36de276a07
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/README.md
@@ -0,0 +1,4 @@
+# auth
+
+This module is currently EXPERIMENTAL and under active development. It is not
+yet intended to be used.
diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go
new file mode 100644
index 0000000000..bc37ea85fb
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/auth.go
@@ -0,0 +1,612 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package auth provides utilities for managing Google Cloud credentials,
+// including functionality for creating, caching, and refreshing OAuth2 tokens.
+// It offers customizable options for different OAuth2 flows, such as 2-legged
+// (2LO) and 3-legged (3LO) OAuth, along with support for PKCE and automatic
+// token management.
+package auth
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	"cloud.google.com/go/auth/internal"
+	"cloud.google.com/go/auth/internal/jwt"
+)
+
+const (
+	// Parameter keys for AuthCodeURL method to support PKCE.
+	codeChallengeKey       = "code_challenge"
+	codeChallengeMethodKey = "code_challenge_method"
+
+	// Parameter key for Exchange method to support PKCE.
+	codeVerifierKey = "code_verifier"
+
+	// 3 minutes and 45 seconds before expiration. The shortest MDS cache is 4 minutes,
+	// so we give it 15 seconds to refresh its cache before attempting to refresh a token.
+	defaultExpiryDelta = 225 * time.Second
+
+	universeDomainDefault = "googleapis.com"
+)
+
+// tokenState represents different states for a [Token].
+type tokenState int
+
+const (
+	// fresh indicates that the [Token] is valid. It is not expired or close to
+	// expired, or the token has no expiry.
+	fresh tokenState = iota
+	// stale indicates that the [Token] is close to expired, and should be
+	// refreshed. The token can be used normally.
+	stale
+	// invalid indicates that the [Token] is expired or invalid. The token
+	// cannot be used for a normal operation.
+	invalid
+)
+
+var (
+	defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+	defaultHeader    = &jwt.Header{Algorithm: jwt.HeaderAlgRSA256, Type: jwt.HeaderType}
+
+	// for testing
+	timeNow = time.Now
+)
+
+// TokenProvider specifies an interface for anything that can return a token.
+type TokenProvider interface {
+	// Token returns a Token or an error.
+	// The Token returned must be safe to use
+	// concurrently.
+	// The returned Token must not be modified.
+	// The context provided must be sent along to any requests that are made in
+	// the implementing code.
+	Token(context.Context) (*Token, error)
+}
+
+// Token holds the credential token used to authorize requests. All fields are
+// considered read-only.
+type Token struct {
+	// Value is the token used to authorize requests. It is usually an access
+	// token but may be other types of tokens such as ID tokens in some flows.
+	Value string
+	// Type is the type of token Value is. If uninitialized, it should be
+	// assumed to be a "Bearer" token.
+	Type string
+	// Expiry is the time the token is set to expire.
+	Expiry time.Time
+	// Metadata may include, but is not limited to, the body of the token
+	// response returned by the server.
+	Metadata map[string]interface{} // TODO(codyoss): maybe make a method to flatten metadata to avoid []string for url.Values
+}
+
+// IsValid reports whether a [Token] is non-nil, has a [Token.Value], and has
+// not expired. A token is considered expired if [Token.Expiry] has passed or
+// will pass in the next 225 seconds.
+func (t *Token) IsValid() bool {
+	return t.isValidWithEarlyExpiry(defaultExpiryDelta)
+}
+
+// MetadataString is a convenience method for accessing string values in the
+// token's metadata. It returns an empty string if the metadata is nil or the
+// value for the given key cannot be cast to a string.
+func (t *Token) MetadataString(k string) string {
+	if t.Metadata == nil {
+		return ""
+	}
+	s, ok := t.Metadata[k].(string)
+	if !ok {
+		return ""
+	}
+	return s
+}
+
+func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool {
+	if t.isEmpty() {
+		return false
+	}
+	if t.Expiry.IsZero() {
+		return true
+	}
+	return !t.Expiry.Round(0).Add(-earlyExpiry).Before(timeNow())
+}
+
+func (t *Token) isEmpty() bool {
+	return t == nil || t.Value == ""
+}
+
+// Credentials holds Google credentials, including
+// [Application Default Credentials].
+//
+// [Application Default Credentials]: https://developers.google.com/accounts/docs/application-default-credentials
+type Credentials struct {
+	json           []byte
+	projectID      CredentialsPropertyProvider
+	quotaProjectID CredentialsPropertyProvider
+	// universeDomain is the default service domain for a given Cloud universe.
+	universeDomain CredentialsPropertyProvider
+
+	TokenProvider
+}
+
+// JSON returns the bytes associated with the file used to source
+// credentials if one was used.
+func (c *Credentials) JSON() []byte {
+	return c.json
+}
+
+// ProjectID returns the associated project ID from the underlying file or
+// environment.
+func (c *Credentials) ProjectID(ctx context.Context) (string, error) {
+	if c.projectID == nil {
+		return internal.GetProjectID(c.json, ""), nil
+	}
+	v, err := c.projectID.GetProperty(ctx)
+	if err != nil {
+		return "", err
+	}
+	return internal.GetProjectID(c.json, v), nil
+}
+
+// QuotaProjectID returns the associated quota project ID from the underlying
+// file or environment.
+func (c *Credentials) QuotaProjectID(ctx context.Context) (string, error) {
+	if c.quotaProjectID == nil {
+		return internal.GetQuotaProject(c.json, ""), nil
+	}
+	v, err := c.quotaProjectID.GetProperty(ctx)
+	if err != nil {
+		return "", err
+	}
+	return internal.GetQuotaProject(c.json, v), nil
+}
+
+// UniverseDomain returns the default service domain for a given Cloud universe.
+// The default value is "googleapis.com".
+func (c *Credentials) UniverseDomain(ctx context.Context) (string, error) {
+	if c.universeDomain == nil {
+		return universeDomainDefault, nil
+	}
+	v, err := c.universeDomain.GetProperty(ctx)
+	if err != nil {
+		return "", err
+	}
+	if v == "" {
+		return universeDomainDefault, nil
+	}
+	return v, err
+}
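A minimal sketch of implementing the TokenProvider interface against the API above (editor's illustration, not part of the vendored file; the staticProvider type is hypothetical and suitable only for tests):

package main

import (
	"context"
	"fmt"
	"time"

	"cloud.google.com/go/auth"
)

// staticProvider returns the same token on every call.
type staticProvider struct{ tok *auth.Token }

func (p staticProvider) Token(ctx context.Context) (*auth.Token, error) {
	return p.tok, nil
}

func main() {
	tok := &auth.Token{
		Value:  "fake-access-token",
		Type:   "Bearer",
		Expiry: time.Now().Add(10 * time.Minute),
	}
	var tp auth.TokenProvider = staticProvider{tok: tok}
	got, _ := tp.Token(context.Background())
	// IsValid applies the 225-second early-expiry window described above,
	// so a token expiring in 10 minutes reports true.
	fmt.Println(got.IsValid())
}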
+// CredentialsPropertyProvider provides an implementation to fetch a property
+// value for [Credentials].
+type CredentialsPropertyProvider interface {
+	GetProperty(context.Context) (string, error)
+}
+
+// CredentialsPropertyFunc is a type adapter to allow the use of ordinary
+// functions as a [CredentialsPropertyProvider].
+type CredentialsPropertyFunc func(context.Context) (string, error)
+
+// GetProperty loads the property value for the given context.
+func (p CredentialsPropertyFunc) GetProperty(ctx context.Context) (string, error) {
+	return p(ctx)
+}
+
+// CredentialsOptions are used to configure [Credentials].
+type CredentialsOptions struct {
+	// TokenProvider is a means of sourcing a token for the credentials. Required.
+	TokenProvider TokenProvider
+	// JSON is the raw contents of the credentials file if sourced from a file.
+	JSON []byte
+	// ProjectIDProvider resolves the project ID associated with the
+	// credentials.
+	ProjectIDProvider CredentialsPropertyProvider
+	// QuotaProjectIDProvider resolves the quota project ID associated with the
+	// credentials.
+	QuotaProjectIDProvider CredentialsPropertyProvider
+	// UniverseDomainProvider resolves the universe domain associated with the
+	// credentials.
+	UniverseDomainProvider CredentialsPropertyProvider
+}
+
+// NewCredentials returns new [Credentials] from the provided options. Most
+// users will want to build this object using a function from the
+// [cloud.google.com/go/auth/credentials] package.
+func NewCredentials(opts *CredentialsOptions) *Credentials {
+	creds := &Credentials{
+		TokenProvider:  opts.TokenProvider,
+		json:           opts.JSON,
+		projectID:      opts.ProjectIDProvider,
+		quotaProjectID: opts.QuotaProjectIDProvider,
+		universeDomain: opts.UniverseDomainProvider,
+	}
+
+	return creds
+}
+
+// CachedTokenProviderOptions provides options for configuring a
+// CachedTokenProvider.
+type CachedTokenProviderOptions struct {
+	// DisableAutoRefresh makes the TokenProvider always return the same token,
+	// even if it is expired. The default is false. Optional.
+	DisableAutoRefresh bool
+	// ExpireEarly configures the amount of time before a token expires, that it
+	// should be refreshed. If unset, the default value is 3 minutes and 45
+	// seconds. Optional.
+	ExpireEarly time.Duration
+	// DisableAsyncRefresh configures a synchronous workflow that refreshes
+	// stale tokens while blocking. The default is false. Optional.
+	DisableAsyncRefresh bool
+}
+
+func (ctpo *CachedTokenProviderOptions) autoRefresh() bool {
+	if ctpo == nil {
+		return true
+	}
+	return !ctpo.DisableAutoRefresh
+}
+
+func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration {
+	if ctpo == nil || ctpo.ExpireEarly == 0 {
+		return defaultExpiryDelta
+	}
+	return ctpo.ExpireEarly
+}
+
+func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool {
+	if ctpo == nil {
+		return false
+	}
+	return ctpo.DisableAsyncRefresh
+}
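A sketch of wiring these pieces together with NewCredentials and a CredentialsPropertyFunc (editor's illustration; fakeProvider is hypothetical). Returning an empty universe domain falls back to "googleapis.com", exactly as Credentials.UniverseDomain above specifies:

package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/auth"
)

type fakeProvider struct{}

func (fakeProvider) Token(ctx context.Context) (*auth.Token, error) {
	return &auth.Token{Value: "fake", Type: "Bearer"}, nil
}

func main() {
	creds := auth.NewCredentials(&auth.CredentialsOptions{
		TokenProvider: fakeProvider{},
		// Resolve the universe domain lazily; the empty string defers to
		// the "googleapis.com" default.
		UniverseDomainProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
			return "", nil
		}),
	})
	ud, _ := creds.UniverseDomain(context.Background())
	fmt.Println(ud) // googleapis.com
}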
+// NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned
+// by the underlying provider. By default it will refresh tokens asynchronously
+// (non-blocking mode) within a window that starts 3 minutes and 45 seconds
+// before they expire. The asynchronous (non-blocking) refresh can be changed to
+// a synchronous (blocking) refresh using the
+// CachedTokenProviderOptions.DisableAsyncRefresh option. The time-before-expiry
+// duration can be configured using the CachedTokenProviderOptions.ExpireEarly
+// option.
+func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider {
+	if ctp, ok := tp.(*cachedTokenProvider); ok {
+		return ctp
+	}
+	return &cachedTokenProvider{
+		tp:              tp,
+		autoRefresh:     opts.autoRefresh(),
+		expireEarly:     opts.expireEarly(),
+		blockingRefresh: opts.blockingRefresh(),
+	}
+}
+
+type cachedTokenProvider struct {
+	tp              TokenProvider
+	autoRefresh     bool
+	expireEarly     time.Duration
+	blockingRefresh bool
+
+	mu          sync.Mutex
+	cachedToken *Token
+	// isRefreshRunning ensures that the non-blocking refresh will only be
+	// attempted once, even if multiple callers enter the Token method.
+	isRefreshRunning bool
+	// isRefreshErr ensures that the non-blocking refresh will only be attempted
+	// once per refresh window if an error is encountered.
+	isRefreshErr bool
+}
+
+func (c *cachedTokenProvider) Token(ctx context.Context) (*Token, error) {
+	if c.blockingRefresh {
+		return c.tokenBlocking(ctx)
+	}
+	return c.tokenNonBlocking(ctx)
+}
+
+func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, error) {
+	switch c.tokenState() {
+	case fresh:
+		c.mu.Lock()
+		defer c.mu.Unlock()
+		return c.cachedToken, nil
+	case stale:
+		c.tokenAsync(ctx)
+		// Return the stale token immediately to not block customer requests to Cloud services.
+		c.mu.Lock()
+		defer c.mu.Unlock()
+		return c.cachedToken, nil
+	default: // invalid
+		return c.tokenBlocking(ctx)
+	}
+}
+
+// tokenState reports the token's validity.
+func (c *cachedTokenProvider) tokenState() tokenState {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	t := c.cachedToken
+	if t == nil || t.Value == "" {
+		return invalid
+	} else if t.Expiry.IsZero() {
+		return fresh
+	} else if timeNow().After(t.Expiry.Round(0)) {
+		return invalid
+	} else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) {
+		return stale
+	}
+	return fresh
+}
+
+// tokenAsync uses a bool to ensure that only one non-blocking token refresh
+// happens at a time, even if multiple callers have entered this function
+// concurrently. This avoids creating an arbitrary number of concurrent
+// goroutines. Retries should be attempted and managed within the Token method.
+// If the refresh attempt fails, no further attempts are made until the refresh
+// window expires and the token enters the invalid state, at which point the
+// blocking call to Token should likely return the same error on the main goroutine.
+func (c *cachedTokenProvider) tokenAsync(ctx context.Context) {
+	fn := func() {
+		c.mu.Lock()
+		c.isRefreshRunning = true
+		c.mu.Unlock()
+		t, err := c.tp.Token(ctx)
+		c.mu.Lock()
+		defer c.mu.Unlock()
+		c.isRefreshRunning = false
+		if err != nil {
+			// Discard errors from the non-blocking refresh, but prevent further
+			// attempts.
+			c.isRefreshErr = true
+			return
+		}
+		c.cachedToken = t
+	}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if !c.isRefreshRunning && !c.isRefreshErr {
+		go fn()
+	}
+}
+
+func (c *cachedTokenProvider) tokenBlocking(ctx context.Context) (*Token, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.isRefreshErr = false
+	if c.cachedToken.IsValid() || (!c.autoRefresh && !c.cachedToken.isEmpty()) {
+		return c.cachedToken, nil
+	}
+	t, err := c.tp.Token(ctx)
+	if err != nil {
+		return nil, err
+	}
+	c.cachedToken = t
+	return t, nil
+}
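A sketch of the caching behavior above (editor's illustration; countingProvider is hypothetical). Synchronous refresh is enabled so the example is deterministic; the second call is served from the cache without hitting the underlying provider:

package main

import (
	"context"
	"fmt"
	"time"

	"cloud.google.com/go/auth"
)

// countingProvider records how often the underlying provider is invoked.
type countingProvider struct{ calls int }

func (p *countingProvider) Token(ctx context.Context) (*auth.Token, error) {
	p.calls++
	return &auth.Token{
		Value:  fmt.Sprintf("tok-%d", p.calls),
		Expiry: time.Now().Add(time.Hour),
	}, nil
}

func main() {
	base := &countingProvider{}
	tp := auth.NewCachedTokenProvider(base, &auth.CachedTokenProviderOptions{
		DisableAsyncRefresh: true, // blocking refresh, per the options above
	})
	t1, _ := tp.Token(context.Background())
	t2, _ := tp.Token(context.Background())
	fmt.Println(t1.Value == t2.Value, base.calls) // true 1
}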
+// Error is an error associated with retrieving a [Token]. It can hold useful
+// additional details for debugging.
+type Error struct {
+	// Response is the HTTP response associated with the error. The body will
+	// always be already closed and consumed.
+	Response *http.Response
+	// Body is the HTTP response body.
+	Body []byte
+	// Err is the underlying wrapped error.
+	Err error
+
+	// code returned in the token response
+	code string
+	// description returned in the token response
+	description string
+	// uri returned in the token response
+	uri string
+}
+
+func (e *Error) Error() string {
+	if e.code != "" {
+		s := fmt.Sprintf("auth: %q", e.code)
+		if e.description != "" {
+			s += fmt.Sprintf(" %q", e.description)
+		}
+		if e.uri != "" {
+			s += fmt.Sprintf(" %q", e.uri)
+		}
+		return s
+	}
+	return fmt.Sprintf("auth: cannot fetch token: %v\nResponse: %s", e.Response.StatusCode, e.Body)
+}
+
+// Temporary returns true if the error is considered temporary and may be
+// retried.
+func (e *Error) Temporary() bool {
+	if e.Response == nil {
+		return false
+	}
+	sc := e.Response.StatusCode
+	return sc == http.StatusInternalServerError || sc == http.StatusServiceUnavailable || sc == http.StatusRequestTimeout || sc == http.StatusTooManyRequests
+}
+
+func (e *Error) Unwrap() error {
+	return e.Err
+}
+
+// Style describes how the token endpoint wants to receive the ClientID and
+// ClientSecret.
+type Style int
+
+const (
+	// StyleUnknown means the value has not been initialized. Sending this in
+	// a request will cause the token exchange to fail.
+	StyleUnknown Style = iota
+	// StyleInParams sends client info in the body of a POST request.
+	StyleInParams
+	// StyleInHeader sends client info using the Basic Authorization header.
+	StyleInHeader
+)
+
+// Options2LO holds the configuration settings for doing a 2-legged JWT OAuth2 flow.
+type Options2LO struct {
+	// Email is the OAuth2 client ID. This value is set as the "iss" in the
+	// JWT.
+	Email string
+	// PrivateKey contains the contents of an RSA private key or the
+	// contents of a PEM file that contains a private key. It is used to sign
+	// the JWT created.
+	PrivateKey []byte
+	// TokenURL is the URL the JWT is sent to. Required.
+	TokenURL string
+	// PrivateKeyID is the ID of the key used to sign the JWT. It is used as the
+	// "kid" in the JWT header. Optional.
+	PrivateKeyID string
+	// Subject is the user to impersonate. It is used as the "sub" in the JWT.
+	// Optional.
+	Subject string
+	// Scopes specifies requested permissions for the token. Optional.
+	Scopes []string
+	// Expires specifies the lifetime of the token. Optional.
+	Expires time.Duration
+	// Audience specifies the "aud" in the JWT. Optional.
+	Audience string
+	// PrivateClaims allows specifying any custom claims for the JWT. Optional.
+	PrivateClaims map[string]interface{}
+
+	// Client is the client to be used to make the underlying token requests.
+	// Optional.
+	Client *http.Client
+	// UseIDToken requests that the token returned be an ID token if one is
+	// returned from the server. Optional.
+	UseIDToken bool
+}
+
+func (o *Options2LO) client() *http.Client {
+	if o.Client != nil {
+		return o.Client
+	}
+	return internal.DefaultClient()
+}
+
+func (o *Options2LO) validate() error {
+	if o == nil {
+		return errors.New("auth: options must be provided")
+	}
+	if o.Email == "" {
+		return errors.New("auth: email must be provided")
+	}
+	if len(o.PrivateKey) == 0 {
+		return errors.New("auth: private key must be provided")
+	}
+	if o.TokenURL == "" {
+		return errors.New("auth: token URL must be provided")
+	}
+	return nil
+}
+
+// New2LOTokenProvider returns a [TokenProvider] from the provided options.
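An editor's aside before the implementation that follows: a hypothetical call site for New2LOTokenProvider. The service-account email and key path are placeholders, and the token fetch only succeeds against a real token endpoint with a valid key:

package main

import (
	"context"
	"log"
	"os"

	"cloud.google.com/go/auth"
)

func main() {
	pem, err := os.ReadFile("service-account.pem") // hypothetical key file
	if err != nil {
		log.Fatal(err)
	}
	tp, err := auth.New2LOTokenProvider(&auth.Options2LO{
		Email:      "sa@my-project.iam.gserviceaccount.com", // hypothetical
		PrivateKey: pem,
		TokenURL:   "https://oauth2.googleapis.com/token",
		Scopes:     []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatal(err)
	}
	tok, err := tp.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Println(tok.Type, tok.Expiry)
}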
+func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + return tokenProvider2LO{opts: opts, Client: opts.client()}, nil +} + +type tokenProvider2LO struct { + opts *Options2LO + Client *http.Client +} + +func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { + pk, err := internal.ParseKey(tp.opts.PrivateKey) + if err != nil { + return nil, err + } + claimSet := &jwt.Claims{ + Iss: tp.opts.Email, + Scope: strings.Join(tp.opts.Scopes, " "), + Aud: tp.opts.TokenURL, + AdditionalClaims: tp.opts.PrivateClaims, + Sub: tp.opts.Subject, + } + if t := tp.opts.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + if aud := tp.opts.Audience; aud != "" { + claimSet.Aud = aud + } + h := *defaultHeader + h.KeyID = tp.opts.PrivateKeyID + payload, err := jwt.EncodeJWS(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + req, err := http.NewRequestWithContext(ctx, "POST", tp.opts.TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, body, err := internal.DoRequest(tp.Client, req) + if err != nil { + return nil, fmt.Errorf("auth: cannot fetch token: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { + return nil, &Error{ + Response: resp, + Body: body, + } + } + // tokenRes is the JSON response body. + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("auth: cannot fetch token: %w", err) + } + token := &Token{ + Value: tokenRes.AccessToken, + Type: tokenRes.TokenType, + } + token.Metadata = make(map[string]interface{}) + json.Unmarshal(body, &token.Metadata) // no error checks for optional fields + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jwt.DecodeJWS(v) + if err != nil { + return nil, fmt.Errorf("auth: error decoding JWT token: %w", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + if tp.opts.UseIDToken { + if tokenRes.IDToken == "" { + return nil, fmt.Errorf("auth: response doesn't have JWT token") + } + token.Value = tokenRes.IDToken + } + return token, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go new file mode 100644 index 0000000000..6f70fa353b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/compute.go @@ -0,0 +1,86 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package credentials + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/compute/metadata" +) + +var ( + computeTokenMetadata = map[string]interface{}{ + "auth.google.tokenSource": "compute-metadata", + "auth.google.serviceAccount": "default", + } + computeTokenURI = "instance/service-accounts/default/token" +) + +// computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that +// uses the metadata service to retrieve tokens. +func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { + return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenRefresh, + DisableAsyncRefresh: opts.DisableAsyncRefresh, + }) +} + +// computeProvider fetches tokens from the google cloud metadata service. +type computeProvider struct { + scopes []string +} + +type metadataTokenResp struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` +} + +func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) { + tokenURI, err := url.Parse(computeTokenURI) + if err != nil { + return nil, err + } + if len(cs.scopes) > 0 { + v := url.Values{} + v.Set("scopes", strings.Join(cs.scopes, ",")) + tokenURI.RawQuery = v.Encode() + } + tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String()) + if err != nil { + return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + } + var res metadataTokenResp + if err := json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res); err != nil { + return nil, fmt.Errorf("credentials: invalid token JSON from metadata: %w", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, errors.New("credentials: incomplete token received from metadata") + } + return &auth.Token{ + Value: res.AccessToken, + Type: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + Metadata: computeTokenMetadata, + }, nil + +} diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go new file mode 100644 index 0000000000..010afc37c8 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -0,0 +1,262 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/compute/metadata" +) + +const ( + // jwtTokenURL is Google's OAuth 2.0 token URL to use with the JWT(2LO) flow. + jwtTokenURL = "https://oauth2.googleapis.com/token" + + // Google's OAuth 2.0 default endpoints. 
+ googleAuthURL = "https://accounts.google.com/o/oauth2/auth" + googleTokenURL = "https://oauth2.googleapis.com/token" + + // GoogleMTLSTokenURL is Google's default OAuth2.0 mTLS endpoint. + GoogleMTLSTokenURL = "https://oauth2.mtls.googleapis.com/token" + + // Help on default credentials + adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" +) + +var ( + // for testing + allowOnGCECheck = true +) + +// OnGCE reports whether this process is running in Google Cloud. +func OnGCE() bool { + // TODO(codyoss): once all libs use this auth lib move metadata check here + return allowOnGCECheck && metadata.OnGCE() +} + +// DetectDefault searches for "Application Default Credentials" and returns +// a credential based on the [DetectOptions] provided. +// +// It looks for credentials in the following places, preferring the first +// location found: +// +// - A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS +// environment variable. For workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation +// on how to generate the JSON configuration file for on-prem/non-Google +// cloud platforms. +// - A JSON file in a location known to the gcloud command-line tool. On +// Windows, this is %APPDATA%/gcloud/application_default_credentials.json. On +// other systems, $HOME/.config/gcloud/application_default_credentials.json. +// - On Google Compute Engine, Google App Engine standard second generation +// runtimes, and Google App Engine flexible environment, it fetches +// credentials from the metadata server. +func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { + if err := opts.validate(); err != nil { + return nil, err + } + if len(opts.CredentialsJSON) > 0 { + return readCredentialsFileJSON(opts.CredentialsJSON, opts) + } + if opts.CredentialsFile != "" { + return readCredentialsFile(opts.CredentialsFile, opts) + } + if filename := os.Getenv(credsfile.GoogleAppCredsEnvVar); filename != "" { + creds, err := readCredentialsFile(filename, opts) + if err != nil { + return nil, err + } + return creds, nil + } + + fileName := credsfile.GetWellKnownFileName() + if b, err := os.ReadFile(fileName); err == nil { + return readCredentialsFileJSON(b, opts) + } + + if OnGCE() { + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: computeTokenProvider(opts), + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return metadata.ProjectIDWithContext(ctx) + }), + UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, + }), nil + } + + return nil, fmt.Errorf("credentials: could not find default credentials. See %v for more information", adcSetupURL) +} + +// DetectOptions provides configuration for [DetectDefault]. +type DetectOptions struct { + // Scopes that credentials tokens should have. Example: + // https://www.googleapis.com/auth/cloud-platform. Required if Audience is + // not provided. + Scopes []string + // Audience that credentials tokens should have. Only applicable for 2LO + // flows with service accounts. If specified, scopes should not be provided. + Audience string + // Subject is the user email used for [domain wide delegation](https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority). + // Optional. + Subject string + // EarlyTokenRefresh configures how early before a token expires that it + // should be refreshed. 
Once the token’s time until expiration has entered
+	// this refresh window the token is considered valid but stale. If unset,
+	// the default value is 3 minutes and 45 seconds. Optional.
+	EarlyTokenRefresh time.Duration
+	// DisableAsyncRefresh configures a synchronous workflow that refreshes
+	// stale tokens while blocking. The default is false. Optional.
+	DisableAsyncRefresh bool
+	// AuthHandlerOptions configures an authorization handler and other options
+	// for 3LO flows. It is required, and only used, for client credential
+	// flows.
+	AuthHandlerOptions *auth.AuthorizationHandlerOptions
+	// TokenURL allows setting the token endpoint for user credential flows. If
+	// unset, the default value is https://oauth2.googleapis.com/token.
+	// Optional.
+	TokenURL string
+	// STSAudience is the audience sent when retrieving an STS token.
+	// Currently this is only used for the GDCH auth flow, for which it is
+	// required.
+	STSAudience string
+	// CredentialsFile overrides detection logic and sources a credential file
+	// from the provided filepath. If provided, CredentialsJSON must not be.
+	// Optional.
+	CredentialsFile string
+	// CredentialsJSON overrides detection logic and uses the JSON bytes as the
+	// source for the credential. If provided, CredentialsFile must not be.
+	// Optional.
+	CredentialsJSON []byte
+	// UseSelfSignedJWT directs service account based credentials to create a
+	// self-signed JWT with the private key found in the file, skipping any
+	// network requests that would normally be made. Optional.
+	UseSelfSignedJWT bool
+	// Client configures the underlying client used to make network requests
+	// when fetching tokens. Optional.
+	Client *http.Client
+	// UniverseDomain is the default service domain for a given Cloud universe.
+	// The default value is "googleapis.com". This option is ignored for
+	// authentication flows that do not support universe domain. Optional.
+	UniverseDomain string
+}
+
+func (o *DetectOptions) validate() error {
+	if o == nil {
+		return errors.New("credentials: options must be provided")
+	}
+	if len(o.Scopes) > 0 && o.Audience != "" {
+		return errors.New("credentials: both scopes and audience were provided")
+	}
+	if len(o.CredentialsJSON) > 0 && o.CredentialsFile != "" {
+		return errors.New("credentials: both credentials file and JSON were provided")
+	}
+	return nil
+}
+
+func (o *DetectOptions) tokenURL() string {
+	if o.TokenURL != "" {
+		return o.TokenURL
+	}
+	return googleTokenURL
+}
+
+func (o *DetectOptions) scopes() []string {
+	scopes := make([]string, len(o.Scopes))
+	copy(scopes, o.Scopes)
+	return scopes
+}
+
+func (o *DetectOptions) client() *http.Client {
+	if o.Client != nil {
+		return o.Client
+	}
+	return internal.DefaultClient()
+}
+
+func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) {
+	b, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	return readCredentialsFileJSON(b, opts)
+}
+
+func readCredentialsFileJSON(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
+	// attempt to parse jsonData as a Google Developers Console client_credentials.json.
+ config := clientCredConfigFromJSON(b, opts) + if config != nil { + if config.AuthHandlerOpts == nil { + return nil, errors.New("credentials: auth handler must be specified for this credential filetype") + } + tp, err := auth.New3LOTokenProvider(config) + if err != nil { + return nil, err + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: tp, + JSON: b, + }), nil + } + return fileCredentials(b, opts) +} + +func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO { + var creds credsfile.ClientCredentialsFile + var c *credsfile.Config3LO + if err := json.Unmarshal(b, &creds); err != nil { + return nil + } + switch { + case creds.Web != nil: + c = creds.Web + case creds.Installed != nil: + c = creds.Installed + default: + return nil + } + if len(c.RedirectURIs) < 1 { + return nil + } + var handleOpts *auth.AuthorizationHandlerOptions + if opts.AuthHandlerOptions != nil { + handleOpts = &auth.AuthorizationHandlerOptions{ + Handler: opts.AuthHandlerOptions.Handler, + State: opts.AuthHandlerOptions.State, + PKCEOpts: opts.AuthHandlerOptions.PKCEOpts, + } + } + return &auth.Options3LO{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: opts.scopes(), + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + Client: opts.client(), + EarlyTokenExpiry: opts.EarlyTokenRefresh, + AuthHandlerOpts: handleOpts, + // TODO(codyoss): refactor this out. We need to add in auto-detection + // for this use case. + AuthStyle: auth.StyleInParams, + } +} diff --git a/vendor/cloud.google.com/go/auth/credentials/doc.go b/vendor/cloud.google.com/go/auth/credentials/doc.go new file mode 100644 index 0000000000..1dbb2866b9 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/doc.go @@ -0,0 +1,45 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package credentials provides support for making OAuth2 authorized and +// authenticated HTTP requests to Google APIs. It supports the Web server flow, +// client-side credentials, service accounts, Google Compute Engine service +// accounts, Google App Engine service accounts and workload identity federation +// from non-Google cloud platforms. +// +// A brief overview of the package follows. For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// For more information on using workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. +// +// # Credentials +// +// The [cloud.google.com/go/auth.Credentials] type represents Google +// credentials, including Application Default Credentials. +// +// Use [DetectDefault] to obtain Application Default Credentials. 
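As a concrete illustration of the detection flow this doc comment describes (editor's sketch; it assumes Application Default Credentials are already configured in the environment, e.g. via GOOGLE_APPLICATION_CREDENTIALS or gcloud):

package main

import (
	"context"
	"log"

	"cloud.google.com/go/auth/credentials"
)

func main() {
	// DetectDefault walks the lookup order documented above: env var,
	// gcloud well-known file, then the GCE metadata server.
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatal(err)
	}
	tok, err := creds.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token type:", tok.Type)
}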
+// +// Application Default Credentials support workload identity federation to +// access Google Cloud resources from non-Google Cloud platforms including Amazon +// Web Services (AWS), Microsoft Azure or any identity provider that supports +// OpenID Connect (OIDC). Workload identity federation is recommended for +// non-Google Cloud environments as it avoids the need to download, manage, and +// store service account private keys locally. +// +// # Workforce Identity Federation +// +// For more information on this feature see [cloud.google.com/go/auth/credentials/externalaccount]. +package credentials diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go new file mode 100644 index 0000000000..cf56b025a2 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -0,0 +1,219 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "errors" + "fmt" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials/internal/externalaccount" + "cloud.google.com/go/auth/credentials/internal/externalaccountuser" + "cloud.google.com/go/auth/credentials/internal/gdch" + "cloud.google.com/go/auth/credentials/internal/impersonate" + internalauth "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" +) + +func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { + fileType, err := credsfile.ParseFileType(b) + if err != nil { + return nil, err + } + + var projectID, universeDomain string + var tp auth.TokenProvider + switch fileType { + case credsfile.ServiceAccountKey: + f, err := credsfile.ParseServiceAccount(b) + if err != nil { + return nil, err + } + tp, err = handleServiceAccount(f, opts) + if err != nil { + return nil, err + } + projectID = f.ProjectID + universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + case credsfile.UserCredentialsKey: + f, err := credsfile.ParseUserCredentials(b) + if err != nil { + return nil, err + } + tp, err = handleUserCredential(f, opts) + if err != nil { + return nil, err + } + universeDomain = f.UniverseDomain + case credsfile.ExternalAccountKey: + f, err := credsfile.ParseExternalAccount(b) + if err != nil { + return nil, err + } + tp, err = handleExternalAccount(f, opts) + if err != nil { + return nil, err + } + universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + case credsfile.ExternalAccountAuthorizedUserKey: + f, err := credsfile.ParseExternalAccountAuthorizedUser(b) + if err != nil { + return nil, err + } + tp, err = handleExternalAccountAuthorizedUser(f, opts) + if err != nil { + return nil, err + } + universeDomain = f.UniverseDomain + case credsfile.ImpersonatedServiceAccountKey: + f, err := credsfile.ParseImpersonatedServiceAccount(b) + if err != nil { + return nil, err + } + tp, err = handleImpersonatedServiceAccount(f, opts) + if err != nil { + return nil, err + } + 
universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + case credsfile.GDCHServiceAccountKey: + f, err := credsfile.ParseGDCHServiceAccount(b) + if err != nil { + return nil, err + } + tp, err = handleGDCHServiceAccount(f, opts) + if err != nil { + return nil, err + } + projectID = f.Project + universeDomain = f.UniverseDomain + default: + return nil, fmt.Errorf("credentials: unsupported filetype %q", fileType) + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenRefresh, + }), + JSON: b, + ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), + // TODO(codyoss): only set quota project here if there was a user override + UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain), + }), nil +} + +// resolveUniverseDomain returns optsUniverseDomain if non-empty, in order to +// support configuring universe-specific credentials in code. Auth flows +// unsupported for universe domain should not use this func, but should instead +// simply set the file universe domain on the credentials. +func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string { + if optsUniverseDomain != "" { + return optsUniverseDomain + } + return fileUniverseDomain +} + +func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if opts.UseSelfSignedJWT { + return configureSelfSignedJWT(f, opts) + } + opts2LO := &auth.Options2LO{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: opts.scopes(), + TokenURL: f.TokenURL, + Subject: opts.Subject, + Client: opts.client(), + } + if opts2LO.TokenURL == "" { + opts2LO.TokenURL = jwtTokenURL + } + return auth.New2LOTokenProvider(opts2LO) +} + +func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) (auth.TokenProvider, error) { + opts3LO := &auth.Options3LO{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: opts.scopes(), + AuthURL: googleAuthURL, + TokenURL: opts.tokenURL(), + AuthStyle: auth.StyleInParams, + EarlyTokenExpiry: opts.EarlyTokenRefresh, + RefreshToken: f.RefreshToken, + Client: opts.client(), + } + return auth.New3LOTokenProvider(opts3LO) +} + +func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + externalOpts := &externalaccount.Options{ + Audience: f.Audience, + SubjectTokenType: f.SubjectTokenType, + TokenURL: f.TokenURL, + TokenInfoURL: f.TokenInfoURL, + ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL, + ClientSecret: f.ClientSecret, + ClientID: f.ClientID, + CredentialSource: f.CredentialSource, + QuotaProjectID: f.QuotaProjectID, + Scopes: opts.scopes(), + WorkforcePoolUserProject: f.WorkforcePoolUserProject, + Client: opts.client(), + IsDefaultClient: opts.Client == nil, + } + if f.ServiceAccountImpersonation != nil { + externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds + } + return externalaccount.NewTokenProvider(externalOpts) +} + +func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedUserFile, opts *DetectOptions) (auth.TokenProvider, error) { + externalOpts := &externalaccountuser.Options{ + Audience: f.Audience, + RefreshToken: f.RefreshToken, + TokenURL: f.TokenURL, + TokenInfoURL: f.TokenInfoURL, + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + 
Scopes: opts.scopes(), + Client: opts.client(), + } + return externalaccountuser.NewTokenProvider(externalOpts) +} + +func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if f.ServiceAccountImpersonationURL == "" || f.CredSource == nil { + return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials") + } + + tp, err := fileCredentials(f.CredSource, opts) + if err != nil { + return nil, err + } + return impersonate.NewTokenProvider(&impersonate.Options{ + URL: f.ServiceAccountImpersonationURL, + Scopes: opts.scopes(), + Tp: tp, + Delegates: f.Delegates, + Client: opts.client(), + }) +} + +func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + return gdch.NewTokenProvider(f, &gdch.Options{ + STSAudience: opts.STSAudience, + Client: opts.client(), + }) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go new file mode 100644 index 0000000000..e6f4ff8116 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go @@ -0,0 +1,133 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package idtoken + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "cloud.google.com/go/auth/internal" +) + +type cachingClient struct { + client *http.Client + + // clock optionally specifies a func to return the current time. + // If nil, time.Now is used. 
+ clock func() time.Time + + mu sync.Mutex + certs map[string]*cachedResponse +} + +func newCachingClient(client *http.Client) *cachingClient { + return &cachingClient{ + client: client, + certs: make(map[string]*cachedResponse, 2), + } +} + +type cachedResponse struct { + resp *certResponse + exp time.Time +} + +func (c *cachingClient) getCert(ctx context.Context, url string) (*certResponse, error) { + if response, ok := c.get(url); ok { + return response, nil + } + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + resp, body, err := internal.DoRequest(c.client, req) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("idtoken: unable to retrieve cert, got status code %d", resp.StatusCode) + } + + certResp := &certResponse{} + if err := json.Unmarshal(body, &certResp); err != nil { + return nil, err + + } + c.set(url, certResp, resp.Header) + return certResp, nil +} + +func (c *cachingClient) now() time.Time { + if c.clock != nil { + return c.clock() + } + return time.Now() +} + +func (c *cachingClient) get(url string) (*certResponse, bool) { + c.mu.Lock() + defer c.mu.Unlock() + cachedResp, ok := c.certs[url] + if !ok { + return nil, false + } + if c.now().After(cachedResp.exp) { + return nil, false + } + return cachedResp.resp, true +} + +func (c *cachingClient) set(url string, resp *certResponse, headers http.Header) { + exp := c.calculateExpireTime(headers) + c.mu.Lock() + c.certs[url] = &cachedResponse{resp: resp, exp: exp} + c.mu.Unlock() +} + +// calculateExpireTime will determine the expire time for the cache based on +// HTTP headers. If there is any difficulty reading the headers the fallback is +// to set the cache to expire now. +func (c *cachingClient) calculateExpireTime(headers http.Header) time.Time { + var maxAge int + cc := strings.Split(headers.Get("cache-control"), ",") + for _, v := range cc { + if strings.Contains(v, "max-age") { + ss := strings.Split(v, "=") + if len(ss) < 2 { + return c.now() + } + ma, err := strconv.Atoi(ss[1]) + if err != nil { + return c.now() + } + maxAge = ma + } + } + a := headers.Get("age") + if a == "" { + return c.now().Add(time.Duration(maxAge) * time.Second) + } + age, err := strconv.Atoi(a) + if err != nil { + return c.now() + } + return c.now().Add(time.Duration(maxAge-age) * time.Second) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go new file mode 100644 index 0000000000..dced1ec404 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go @@ -0,0 +1,83 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
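Before the next file begins, a quick standalone check of the Cache-Control/Age arithmetic that calculateExpireTime above implements (editor's sketch; freshFor is a simplified reimplementation for illustration, not the vendored code):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// freshFor mirrors the cache lifetime rule above: max-age from
// Cache-Control minus the Age header, both in seconds.
func freshFor(h http.Header) time.Duration {
	var maxAge, age int
	fmt.Sscanf(h.Get("Cache-Control"), "public, max-age=%d", &maxAge)
	fmt.Sscanf(h.Get("Age"), "%d", &age)
	return time.Duration(maxAge-age) * time.Second
}

func main() {
	h := http.Header{}
	h.Set("Cache-Control", "public, max-age=3600")
	h.Set("Age", "600")
	fmt.Println(freshFor(h)) // 50m0s
}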
+ +package idtoken + +import ( + "context" + "fmt" + "net/url" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/compute/metadata" +) + +const identitySuffix = "instance/service-accounts/default/identity" + +// computeCredentials checks if this code is being run on GCE. If it is, it +// will use the metadata service to build a Credentials that fetches ID +// tokens. +func computeCredentials(opts *Options) (*auth.Credentials, error) { + if opts.CustomClaims != nil { + return nil, fmt.Errorf("idtoken: Options.CustomClaims can't be used with the metadata service, please provide a service account if you would like to use this feature") + } + tp := computeIDTokenProvider{ + audience: opts.Audience, + format: opts.ComputeTokenFormat, + client: *metadata.NewClient(opts.client()), + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ + ExpireEarly: 5 * time.Minute, + }), + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return metadata.ProjectIDWithContext(ctx) + }), + UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, + }), nil +} + +type computeIDTokenProvider struct { + audience string + format ComputeTokenFormat + client metadata.Client +} + +func (c computeIDTokenProvider) Token(ctx context.Context) (*auth.Token, error) { + v := url.Values{} + v.Set("audience", c.audience) + if c.format != ComputeTokenFormatStandard { + v.Set("format", "full") + } + if c.format == ComputeTokenFormatFullWithLicense { + v.Set("licenses", "TRUE") + } + urlSuffix := identitySuffix + "?" + v.Encode() + res, err := c.client.GetWithContext(ctx, urlSuffix) + if err != nil { + return nil, err + } + if res == "" { + return nil, fmt.Errorf("idtoken: invalid empty response from metadata service") + } + return &auth.Token{ + Value: res, + Type: internal.TokenTypeBearer, + // Compute tokens are valid for one hour: + // https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#create-id + Expiry: time.Now().Add(1 * time.Hour), + }, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/file.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/file.go new file mode 100644 index 0000000000..333521c919 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/file.go @@ -0,0 +1,127 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
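+
+// For illustration, a sketch of the metadata identity request that
+// computeIDTokenProvider (in compute.go above) assembles for a hypothetical
+// audience with ComputeTokenFormatFullWithLicense:
+//
+//	v := url.Values{}
+//	v.Set("audience", "https://my-service.example.com")
+//	v.Set("format", "full")
+//	v.Set("licenses", "TRUE")
+//	suffix := identitySuffix + "?" + v.Encode()
+//	// instance/service-accounts/default/identity?audience=https%3A%2F%2Fmy-service.example.com&format=full&licenses=TRUE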
+ +package idtoken + +import ( + "encoding/json" + "fmt" + "path/filepath" + "strings" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/credentials/impersonate" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" +) + +const ( + jwtTokenURL = "https://oauth2.googleapis.com/token" + iamCredAud = "https://iamcredentials.googleapis.com/" +) + +var ( + defaultScopes = []string{ + "https://iamcredentials.googleapis.com/", + "https://www.googleapis.com/auth/cloud-platform", + } +) + +func credsFromBytes(b []byte, opts *Options) (*auth.Credentials, error) { + t, err := credsfile.ParseFileType(b) + if err != nil { + return nil, err + } + switch t { + case credsfile.ServiceAccountKey: + f, err := credsfile.ParseServiceAccount(b) + if err != nil { + return nil, err + } + opts2LO := &auth.Options2LO{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + TokenURL: f.TokenURL, + UseIDToken: true, + } + if opts2LO.TokenURL == "" { + opts2LO.TokenURL = jwtTokenURL + } + + var customClaims map[string]interface{} + if opts != nil { + customClaims = opts.CustomClaims + } + if customClaims == nil { + customClaims = make(map[string]interface{}) + } + customClaims["target_audience"] = opts.Audience + + opts2LO.PrivateClaims = customClaims + tp, err := auth.New2LOTokenProvider(opts2LO) + if err != nil { + return nil, err + } + tp = auth.NewCachedTokenProvider(tp, nil) + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: tp, + JSON: b, + ProjectIDProvider: internal.StaticCredentialsProperty(f.ProjectID), + UniverseDomainProvider: internal.StaticCredentialsProperty(f.UniverseDomain), + }), nil + case credsfile.ImpersonatedServiceAccountKey, credsfile.ExternalAccountKey: + type url struct { + ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + } + var accountURL url + if err := json.Unmarshal(b, &accountURL); err != nil { + return nil, err + } + account := filepath.Base(accountURL.ServiceAccountImpersonationURL) + account = strings.Split(account, ":")[0] + + baseCreds, err := credentials.DetectDefault(&credentials.DetectOptions{ + Scopes: defaultScopes, + CredentialsJSON: b, + Client: opts.client(), + UseSelfSignedJWT: true, + }) + if err != nil { + return nil, err + } + + config := impersonate.IDTokenOptions{ + Audience: opts.Audience, + TargetPrincipal: account, + IncludeEmail: true, + Client: opts.client(), + Credentials: baseCreds, + } + creds, err := impersonate.NewIDTokenCredentials(&config) + if err != nil { + return nil, err + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: creds, + JSON: b, + ProjectIDProvider: auth.CredentialsPropertyFunc(baseCreds.ProjectID), + UniverseDomainProvider: auth.CredentialsPropertyFunc(baseCreds.UniverseDomain), + QuotaProjectIDProvider: auth.CredentialsPropertyFunc(baseCreds.QuotaProjectID), + }), nil + default: + return nil, fmt.Errorf("idtoken: unsupported credentials type: %v", t) + } +} diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go new file mode 100644 index 0000000000..b66c6551e6 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go @@ -0,0 +1,136 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package idtoken provides functionality for generating and validating ID
+// tokens, with configurable options for audience, custom claims, and token
+// formats.
+//
+// For more information on ID tokens, see
+// https://cloud.google.com/docs/authentication/token-types#id.
+package idtoken
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"os"
+
+	"cloud.google.com/go/auth"
+	"cloud.google.com/go/auth/internal"
+	"cloud.google.com/go/auth/internal/credsfile"
+	"cloud.google.com/go/compute/metadata"
+)
+
+// ComputeTokenFormat dictates the token format when requesting an ID token
+// from the compute metadata service.
+type ComputeTokenFormat int
+
+const (
+	// ComputeTokenFormatDefault means the same as [ComputeTokenFormatFull].
+	ComputeTokenFormatDefault ComputeTokenFormat = iota
+	// ComputeTokenFormatStandard means only standard JWT fields will be
+	// included in the token.
+	ComputeTokenFormatStandard
+	// ComputeTokenFormatFull means the token will include claims about the
+	// virtual machine instance and its project.
+	ComputeTokenFormatFull
+	// ComputeTokenFormatFullWithLicense means the same as
+	// [ComputeTokenFormatFull] with the addition of claims about licenses
+	// associated with the instance.
+	ComputeTokenFormatFullWithLicense
+)
+
+// Options for the creation of an ID token with [NewCredentials].
+type Options struct {
+	// Audience is the `aud` field for the token, such as an API endpoint the
+	// token will grant access to. Required.
+	Audience string
+	// ComputeTokenFormat dictates the token format when requesting an ID
+	// token from the compute metadata service. Optional.
+	ComputeTokenFormat ComputeTokenFormat
+	// CustomClaims specifies private non-standard claims for an ID token.
+	// Optional.
+	CustomClaims map[string]interface{}
+
+	// CredentialsFile overrides detection logic and sources a credential file
+	// from the provided filepath. Optional.
+	CredentialsFile string
+	// CredentialsJSON overrides detection logic and uses the JSON bytes as the
+	// source for the credential. Optional.
+	CredentialsJSON []byte
+	// Client configures the underlying client used to make network requests
+	// when fetching tokens. If provided this should be a fully authenticated
+	// client. Optional.
+	Client *http.Client
+}
+
+func (o *Options) client() *http.Client {
+	if o == nil || o.Client == nil {
+		return internal.DefaultClient()
+	}
+	return o.Client
+}
+
+func (o *Options) validate() error {
+	if o == nil {
+		return errors.New("idtoken: opts must be provided")
+	}
+	if o.Audience == "" {
+		return errors.New("idtoken: audience must be specified")
+	}
+	return nil
+}
+
+// NewCredentials creates a [cloud.google.com/go/auth.Credentials] that
+// returns ID tokens configured by the opts provided. The parameter
+// opts.Audience may not be empty.
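+//
+// A minimal usage sketch (the audience URL here is a hypothetical example):
+//
+//	creds, err := NewCredentials(&Options{
+//		Audience: "https://my-service.example.com",
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	tok, err := creds.Token(ctx)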
+func NewCredentials(opts *Options) (*auth.Credentials, error) {
+	if err := opts.validate(); err != nil {
+		return nil, err
+	}
+	if b := opts.jsonBytes(); b != nil {
+		return credsFromBytes(b, opts)
+	}
+	if metadata.OnGCE() {
+		return computeCredentials(opts)
+	}
+	return nil, fmt.Errorf("idtoken: couldn't find any credentials")
+}
+
+func (o *Options) jsonBytes() []byte {
+	if len(o.CredentialsJSON) > 0 {
+		return o.CredentialsJSON
+	}
+	var fnOverride string
+	if o != nil {
+		fnOverride = o.CredentialsFile
+	}
+	filename := credsfile.GetFileNameFromEnv(fnOverride)
+	if filename != "" {
+		b, _ := os.ReadFile(filename)
+		return b
+	}
+	return nil
+}
+
+// Payload represents a decoded payload of an ID token.
+type Payload struct {
+	Issuer   string                 `json:"iss"`
+	Audience string                 `json:"aud"`
+	Expires  int64                  `json:"exp"`
+	IssuedAt int64                  `json:"iat"`
+	Subject  string                 `json:"sub,omitempty"`
+	Claims   map[string]interface{} `json:"-"`
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go
new file mode 100644
index 0000000000..4b17af2021
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go
@@ -0,0 +1,269 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package idtoken
+
+import (
+	"context"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rsa"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"math/big"
+	"net/http"
+	"strings"
+	"time"
+
+	"cloud.google.com/go/auth/internal"
+	"cloud.google.com/go/auth/internal/jwt"
+)
+
+const (
+	es256KeySize      int    = 32
+	googleIAPCertsURL string = "https://www.gstatic.com/iap/verify/public_key-jwk"
+	googleSACertsURL  string = "https://www.googleapis.com/oauth2/v3/certs"
+)
+
+var (
+	defaultValidator = &Validator{client: newCachingClient(internal.DefaultClient())}
+	// now aliases time.Now for testing.
+	now = time.Now
+)
+
+// certResponse represents a list of jwks. It is the format returned from
+// known Google cert endpoints.
+type certResponse struct {
+	Keys []jwk `json:"keys"`
+}
+
+// jwk is a simplified representation of a standard jwk. It only includes the
+// fields used by Google's cert endpoints.
+type jwk struct {
+	Alg string `json:"alg"`
+	Crv string `json:"crv"`
+	Kid string `json:"kid"`
+	Kty string `json:"kty"`
+	Use string `json:"use"`
+	E   string `json:"e"`
+	N   string `json:"n"`
+	X   string `json:"x"`
+	Y   string `json:"y"`
+}
+
+// Validator provides a way to validate Google ID Tokens.
+type Validator struct {
+	client *cachingClient
+}
+
+// ValidatorOptions provides a way to configure a [Validator].
+type ValidatorOptions struct {
+	// Client used to make requests to the certs URL. Optional.
+	Client *http.Client
+}
+
+// NewValidator creates a Validator that uses the options provided to configure
+// the internal http.Client that will be used to make requests to fetch JWKs.
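+//
+// A minimal usage sketch (idToken and the audience value are hypothetical
+// inputs supplied by the caller):
+//
+//	v, err := NewValidator(&ValidatorOptions{})
+//	if err != nil {
+//		// handle the error
+//	}
+//	payload, err := v.Validate(ctx, idToken, "https://my-service.example.com")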
+func NewValidator(opts *ValidatorOptions) (*Validator, error) { + var client *http.Client + if opts != nil && opts.Client != nil { + client = opts.Client + } else { + client = internal.DefaultClient() + } + return &Validator{client: newCachingClient(client)}, nil +} + +// Validate is used to validate the provided idToken with a known Google cert +// URL. If audience is not empty the audience claim of the Token is validated. +// Upon successful validation a parsed token Payload is returned allowing the +// caller to validate any additional claims. +func (v *Validator) Validate(ctx context.Context, idToken string, audience string) (*Payload, error) { + return v.validate(ctx, idToken, audience) +} + +// Validate is used to validate the provided idToken with a known Google cert +// URL. If audience is not empty the audience claim of the Token is validated. +// Upon successful validation a parsed token Payload is returned allowing the +// caller to validate any additional claims. +func Validate(ctx context.Context, idToken string, audience string) (*Payload, error) { + return defaultValidator.validate(ctx, idToken, audience) +} + +// ParsePayload parses the given token and returns its payload. +// +// Warning: This function does not validate the token prior to parsing it. +// +// ParsePayload is primarily meant to be used to inspect a token's payload. This is +// useful when validation fails and the payload needs to be inspected. +// +// Note: A successful Validate() invocation with the same token will return an +// identical payload. +func ParsePayload(idToken string) (*Payload, error) { + _, payload, _, err := parseToken(idToken) + if err != nil { + return nil, err + } + return payload, nil +} + +func (v *Validator) validate(ctx context.Context, idToken string, audience string) (*Payload, error) { + header, payload, sig, err := parseToken(idToken) + if err != nil { + return nil, err + } + + if audience != "" && payload.Audience != audience { + return nil, fmt.Errorf("idtoken: audience provided does not match aud claim in the JWT") + } + + if now().Unix() > payload.Expires { + return nil, fmt.Errorf("idtoken: token expired: now=%v, expires=%v", now().Unix(), payload.Expires) + } + hashedContent := hashHeaderPayload(idToken) + switch header.Algorithm { + case jwt.HeaderAlgRSA256: + if err := v.validateRS256(ctx, header.KeyID, hashedContent, sig); err != nil { + return nil, err + } + case "ES256": + if err := v.validateES256(ctx, header.KeyID, hashedContent, sig); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("idtoken: expected JWT signed with RS256 or ES256 but found %q", header.Algorithm) + } + + return payload, nil +} + +func (v *Validator) validateRS256(ctx context.Context, keyID string, hashedContent []byte, sig []byte) error { + certResp, err := v.client.getCert(ctx, googleSACertsURL) + if err != nil { + return err + } + j, err := findMatchingKey(certResp, keyID) + if err != nil { + return err + } + dn, err := decode(j.N) + if err != nil { + return err + } + de, err := decode(j.E) + if err != nil { + return err + } + + pk := &rsa.PublicKey{ + N: new(big.Int).SetBytes(dn), + E: int(new(big.Int).SetBytes(de).Int64()), + } + return rsa.VerifyPKCS1v15(pk, crypto.SHA256, hashedContent, sig) +} + +func (v *Validator) validateES256(ctx context.Context, keyID string, hashedContent []byte, sig []byte) error { + certResp, err := v.client.getCert(ctx, googleIAPCertsURL) + if err != nil { + return err + } + j, err := findMatchingKey(certResp, keyID) + if err != nil { + return 
err + } + dx, err := decode(j.X) + if err != nil { + return err + } + dy, err := decode(j.Y) + if err != nil { + return err + } + + pk := &ecdsa.PublicKey{ + Curve: elliptic.P256(), + X: new(big.Int).SetBytes(dx), + Y: new(big.Int).SetBytes(dy), + } + r := big.NewInt(0).SetBytes(sig[:es256KeySize]) + s := big.NewInt(0).SetBytes(sig[es256KeySize:]) + if valid := ecdsa.Verify(pk, hashedContent, r, s); !valid { + return fmt.Errorf("idtoken: ES256 signature not valid") + } + return nil +} + +func findMatchingKey(response *certResponse, keyID string) (*jwk, error) { + if response == nil { + return nil, fmt.Errorf("idtoken: cert response is nil") + } + for _, v := range response.Keys { + if v.Kid == keyID { + return &v, nil + } + } + return nil, fmt.Errorf("idtoken: could not find matching cert keyId for the token provided") +} + +func parseToken(idToken string) (*jwt.Header, *Payload, []byte, error) { + segments := strings.Split(idToken, ".") + if len(segments) != 3 { + return nil, nil, nil, fmt.Errorf("idtoken: invalid token, token must have three segments; found %d", len(segments)) + } + // Header + dh, err := decode(segments[0]) + if err != nil { + return nil, nil, nil, fmt.Errorf("idtoken: unable to decode JWT header: %v", err) + } + var header *jwt.Header + err = json.Unmarshal(dh, &header) + if err != nil { + return nil, nil, nil, fmt.Errorf("idtoken: unable to unmarshal JWT header: %v", err) + } + + // Payload + dp, err := decode(segments[1]) + if err != nil { + return nil, nil, nil, fmt.Errorf("idtoken: unable to decode JWT claims: %v", err) + } + var payload *Payload + if err := json.Unmarshal(dp, &payload); err != nil { + return nil, nil, nil, fmt.Errorf("idtoken: unable to unmarshal JWT payload: %v", err) + } + if err := json.Unmarshal(dp, &payload.Claims); err != nil { + return nil, nil, nil, fmt.Errorf("idtoken: unable to unmarshal JWT payload claims: %v", err) + } + + // Signature + signature, err := decode(segments[2]) + if err != nil { + return nil, nil, nil, fmt.Errorf("idtoken: unable to decode JWT signature: %v", err) + } + return header, payload, signature, nil +} + +// hashHeaderPayload gets the SHA256 checksum for verification of the JWT. +func hashHeaderPayload(idtoken string) []byte { + // remove the sig from the token + content := idtoken[:strings.LastIndex(idtoken, ".")] + hashed := sha256.Sum256([]byte(content)) + return hashed[:] +} + +func decode(s string) ([]byte, error) { + return base64.RawURLEncoding.DecodeString(s) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/doc.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/doc.go new file mode 100644 index 0000000000..0dc45f13fb --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/impersonate/doc.go @@ -0,0 +1,45 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package impersonate is used to impersonate Google Credentials. If you need +// to impersonate some credentials to use with a client library see +// [NewCredentials]. 
If instead you would like to create an OpenID
+// Connect ID token using impersonation, see [NewIDTokenCredentials].
+//
+// # Required IAM roles
+//
+// In order to impersonate a service account, the base service account must have
+// the Service Account Token Creator role, roles/iam.serviceAccountTokenCreator,
+// on the service account being impersonated. See
+// https://cloud.google.com/iam/docs/understanding-service-accounts.
+//
+// Optionally, delegates can be used during impersonation if the base service
+// account lacks the token creator role on the target. When using delegates,
+// each service account must be granted roles/iam.serviceAccountTokenCreator
+// on the next service account in the delegation chain.
+//
+// For example, if a base service account of SA1 is trying to impersonate target
+// service account SA2 while using delegate service accounts DSA1 and DSA2,
+// the following must be true:
+//
+//  1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on
+//     DSA1.
+//  2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2.
+//  3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2.
+//
+// If the base credential is an authorized user and not a service account, or if
+// the option WithQuotaProject is set, the target service account must have a
+// role that grants the serviceusage.services.use permission such as
+// roles/serviceusage.serviceUsageConsumer.
+package impersonate
diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go
new file mode 100644
index 0000000000..e51bee7d87
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go
@@ -0,0 +1,187 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package impersonate
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"time"
+
+	"cloud.google.com/go/auth"
+	"cloud.google.com/go/auth/credentials"
+	"cloud.google.com/go/auth/httptransport"
+	"cloud.google.com/go/auth/internal"
+)
+
+// IDTokenOptions for generating an impersonated ID token.
+type IDTokenOptions struct {
+	// Audience is the `aud` field for the token, such as an API endpoint the
+	// token will grant access to. Required.
+	Audience string
+	// TargetPrincipal is the email address of the service account to
+	// impersonate. Required.
+	TargetPrincipal string
+	// IncludeEmail includes the target service account's email in the token.
+	// The resulting token will include both an `email` and `email_verified`
+	// claim. Optional.
+	IncludeEmail bool
+	// Delegates are the ordered service account email addresses in a delegation
+	// chain. Each service account must be granted
+	// roles/iam.serviceAccountTokenCreator on the next service account in the
+	// chain. Optional.
+	Delegates []string
+
+	// Credentials used to fetch the ID token.
+	// If not provided, and a Client is also not provided, base credentials
+	// will be detected from the environment. Optional.
+	Credentials *auth.Credentials
+	// Client configures the underlying client used to make network requests
+	// when fetching tokens. If provided, the client should provide its own
+	// base credentials at call time. Optional.
+	Client *http.Client
+}
+
+func (o *IDTokenOptions) validate() error {
+	if o == nil {
+		return errors.New("impersonate: options must be provided")
+	}
+	if o.Audience == "" {
+		return errors.New("impersonate: audience must be provided")
+	}
+	if o.TargetPrincipal == "" {
+		return errors.New("impersonate: target service account must be provided")
+	}
+	return nil
+}
+
+var (
+	defaultScope = "https://www.googleapis.com/auth/cloud-platform"
+)
+
+// NewIDTokenCredentials creates an impersonated
+// [cloud.google.com/go/auth.Credentials] that returns ID tokens configured
+// with the provided options, using credentials loaded from Application
+// Default Credentials as the base credentials when none are provided in opts.
+// The tokens produced are valid for one hour and are automatically refreshed.
+func NewIDTokenCredentials(opts *IDTokenOptions) (*auth.Credentials, error) {
+	if err := opts.validate(); err != nil {
+		return nil, err
+	}
+	var client *http.Client
+	var creds *auth.Credentials
+	if opts.Client == nil && opts.Credentials == nil {
+		var err error
+		// TODO: test not signed jwt more
+		creds, err = credentials.DetectDefault(&credentials.DetectOptions{
+			Scopes:           []string{defaultScope},
+			UseSelfSignedJWT: true,
+		})
+		if err != nil {
+			return nil, err
+		}
+		client, err = httptransport.NewClient(&httptransport.Options{
+			Credentials: creds,
+		})
+		if err != nil {
+			return nil, err
+		}
+	} else if opts.Client == nil {
+		creds = opts.Credentials
+		client = internal.DefaultClient()
+		if err := httptransport.AddAuthorizationMiddleware(client, opts.Credentials); err != nil {
+			return nil, err
+		}
+	} else {
+		client = opts.Client
+	}
+
+	itp := impersonatedIDTokenProvider{
+		client:          client,
+		targetPrincipal: opts.TargetPrincipal,
+		audience:        opts.Audience,
+		includeEmail:    opts.IncludeEmail,
+	}
+	for _, v := range opts.Delegates {
+		itp.delegates = append(itp.delegates, formatIAMServiceAccountName(v))
+	}
+
+	var udp auth.CredentialsPropertyProvider
+	if creds != nil {
+		udp = auth.CredentialsPropertyFunc(creds.UniverseDomain)
+	}
+	return auth.NewCredentials(&auth.CredentialsOptions{
+		TokenProvider:          auth.NewCachedTokenProvider(itp, nil),
+		UniverseDomainProvider: udp,
+	}), nil
+}
+
+type generateIDTokenRequest struct {
+	Audience     string   `json:"audience"`
+	IncludeEmail bool     `json:"includeEmail"`
+	Delegates    []string `json:"delegates,omitempty"`
+}
+
+type generateIDTokenResponse struct {
+	Token string `json:"token"`
+}
+
+type impersonatedIDTokenProvider struct {
+	client *http.Client
+
+	targetPrincipal string
+	audience        string
+	includeEmail    bool
+	delegates       []string
+}
+
+func (i impersonatedIDTokenProvider) Token(ctx context.Context) (*auth.Token, error) {
+	genIDTokenReq := generateIDTokenRequest{
+		Audience:     i.audience,
+		IncludeEmail: i.includeEmail,
+		Delegates:    i.delegates,
+	}
+	bodyBytes, err := json.Marshal(genIDTokenReq)
+	if err != nil {
+		return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err)
+	}
+
+	url := fmt.Sprintf("%s/v1/%s:generateIdToken", iamCredentialsEndpoint, formatIAMServiceAccountName(i.targetPrincipal))
+	req, err := http.NewRequestWithContext(ctx, "POST", url,
bytes.NewReader(bodyBytes)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to create request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + resp, body, err := internal.DoRequest(i.client, req) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to generate ID token: %w", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var generateIDTokenResp generateIDTokenResponse + if err := json.Unmarshal(body, &generateIDTokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %w", err) + } + return &auth.Token{ + Value: generateIDTokenResp.Token, + // Generated ID tokens are good for one hour. + Expiry: time.Now().Add(1 * time.Hour), + }, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go new file mode 100644 index 0000000000..91b42bc3f7 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go @@ -0,0 +1,266 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/httptransport" + "cloud.google.com/go/auth/internal" +) + +var ( + iamCredentialsEndpoint = "https://iamcredentials.googleapis.com" + oauth2Endpoint = "https://oauth2.googleapis.com" + errMissingTargetPrincipal = errors.New("impersonate: target service account must be provided") + errMissingScopes = errors.New("impersonate: scopes must be provided") + errLifetimeOverMax = errors.New("impersonate: max lifetime is 12 hours") + errUniverseNotSupportedDomainWideDelegation = errors.New("impersonate: service account user is configured for the credential. " + + "Domain-wide delegation is not supported in universes other than googleapis.com") +) + +// TODO(codyoss): plumb through base for this and idtoken + +// NewCredentials returns an impersonated +// [cloud.google.com/go/auth/NewCredentials] configured with the provided options +// and using credentials loaded from Application Default Credentials as the base +// credentials if not provided with the opts. +func NewCredentials(opts *CredentialsOptions) (*auth.Credentials, error) { + if err := opts.validate(); err != nil { + return nil, err + } + + var isStaticToken bool + // Default to the longest acceptable value of one hour as the token will + // be refreshed automatically if not set. + lifetime := 1 * time.Hour + if opts.Lifetime != 0 { + lifetime = opts.Lifetime + // Don't auto-refresh token if a lifetime is configured. 
+ isStaticToken = true + } + + var client *http.Client + var creds *auth.Credentials + if opts.Client == nil && opts.Credentials == nil { + var err error + creds, err = credentials.DetectDefault(&credentials.DetectOptions{ + Scopes: []string{defaultScope}, + UseSelfSignedJWT: true, + }) + if err != nil { + return nil, err + } + client, err = httptransport.NewClient(&httptransport.Options{ + Credentials: creds, + }) + if err != nil { + return nil, err + } + } else if opts.Credentials != nil { + creds = opts.Credentials + client = internal.DefaultClient() + if err := httptransport.AddAuthorizationMiddleware(client, opts.Credentials); err != nil { + return nil, err + } + } else { + client = opts.Client + } + + // If a subject is specified a domain-wide delegation auth-flow is initiated + // to impersonate as the provided subject (user). + if opts.Subject != "" { + if !opts.isUniverseDomainGDU() { + return nil, errUniverseNotSupportedDomainWideDelegation + } + tp, err := user(opts, client, lifetime, isStaticToken) + if err != nil { + return nil, err + } + var udp auth.CredentialsPropertyProvider + if creds != nil { + udp = auth.CredentialsPropertyFunc(creds.UniverseDomain) + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: tp, + UniverseDomainProvider: udp, + }), nil + } + + its := impersonatedTokenProvider{ + client: client, + targetPrincipal: opts.TargetPrincipal, + lifetime: fmt.Sprintf("%.fs", lifetime.Seconds()), + } + for _, v := range opts.Delegates { + its.delegates = append(its.delegates, formatIAMServiceAccountName(v)) + } + its.scopes = make([]string, len(opts.Scopes)) + copy(its.scopes, opts.Scopes) + + var tpo *auth.CachedTokenProviderOptions + if isStaticToken { + tpo = &auth.CachedTokenProviderOptions{ + DisableAutoRefresh: true, + } + } + + var udp auth.CredentialsPropertyProvider + if creds != nil { + udp = auth.CredentialsPropertyFunc(creds.UniverseDomain) + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: auth.NewCachedTokenProvider(its, tpo), + UniverseDomainProvider: udp, + }), nil +} + +// CredentialsOptions for generating an impersonated credential token. +type CredentialsOptions struct { + // TargetPrincipal is the email address of the service account to + // impersonate. Required. + TargetPrincipal string + // Scopes that the impersonated credential should have. Required. + Scopes []string + // Delegates are the service account email addresses in a delegation chain. + // Each service account must be granted roles/iam.serviceAccountTokenCreator + // on the next service account in the chain. Optional. + Delegates []string + // Lifetime is the amount of time until the impersonated token expires. If + // unset the token's lifetime will be one hour and be automatically + // refreshed. If set the token may have a max lifetime of one hour and will + // not be refreshed. Service accounts that have been added to an org policy + // with constraints/iam.allowServiceAccountCredentialLifetimeExtension may + // request a token lifetime of up to 12 hours. Optional. + Lifetime time.Duration + // Subject is the sub field of a JWT. This field should only be set if you + // wish to impersonate as a user. This feature is useful when using domain + // wide delegation. Optional. + Subject string + + // Credentials is the provider of the credentials used to fetch the ID + // token. If not provided, and a Client is also not provided, credentials + // will try to be detected from the environment. Optional. 
+	Credentials *auth.Credentials
+	// Client configures the underlying client used to make network requests
+	// when fetching tokens. If provided, the client should provide its own
+	// credentials at call time. Optional.
+	Client *http.Client
+	// UniverseDomain is the default service domain for a given Cloud universe.
+	// The default value is "googleapis.com". Optional.
+	UniverseDomain string
+}
+
+func (o *CredentialsOptions) validate() error {
+	if o == nil {
+		return errors.New("impersonate: options must be provided")
+	}
+	if o.TargetPrincipal == "" {
+		return errMissingTargetPrincipal
+	}
+	if len(o.Scopes) == 0 {
+		return errMissingScopes
+	}
+	if o.Lifetime.Hours() > 12 {
+		return errLifetimeOverMax
+	}
+	return nil
+}
+
+// getUniverseDomain returns the default service domain for a given Cloud
+// universe. The default value is "googleapis.com".
+func (o *CredentialsOptions) getUniverseDomain() string {
+	if o.UniverseDomain == "" {
+		return internal.DefaultUniverseDomain
+	}
+	return o.UniverseDomain
+}
+
+// isUniverseDomainGDU returns true if the universe domain is the default
+// Google universe.
+func (o *CredentialsOptions) isUniverseDomainGDU() bool {
+	return o.getUniverseDomain() == internal.DefaultUniverseDomain
+}
+
+func formatIAMServiceAccountName(name string) string {
+	return fmt.Sprintf("projects/-/serviceAccounts/%s", name)
+}
+
+type generateAccessTokenRequest struct {
+	Delegates []string `json:"delegates,omitempty"`
+	Lifetime  string   `json:"lifetime,omitempty"`
+	Scope     []string `json:"scope,omitempty"`
+}
+
+type generateAccessTokenResponse struct {
+	AccessToken string `json:"accessToken"`
+	ExpireTime  string `json:"expireTime"`
+}
+
+type impersonatedTokenProvider struct {
+	client *http.Client
+
+	targetPrincipal string
+	lifetime        string
+	scopes          []string
+	delegates       []string
+}
+
+// Token returns an impersonated Token.
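+//
+// For illustration, the request body it marshals for hypothetical inputs
+// (one delegate, the default one-hour lifetime, a single scope) looks like:
+//
+//	generateAccessTokenRequest{
+//		Delegates: []string{"projects/-/serviceAccounts/dsa1@my-project.iam.gserviceaccount.com"},
+//		Lifetime:  "3600s",
+//		Scope:     []string{"https://www.googleapis.com/auth/cloud-platform"},
+//	}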
+func (i impersonatedTokenProvider) Token(ctx context.Context) (*auth.Token, error) {
+	reqBody := generateAccessTokenRequest{
+		Delegates: i.delegates,
+		Lifetime:  i.lifetime,
+		Scope:     i.scopes,
+	}
+	b, err := json.Marshal(reqBody)
+	if err != nil {
+		return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err)
+	}
+	url := fmt.Sprintf("%s/v1/%s:generateAccessToken", iamCredentialsEndpoint, formatIAMServiceAccountName(i.targetPrincipal))
+	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("impersonate: unable to create request: %w", err)
+	}
+	req.Header.Set("Content-Type", "application/json")
+	resp, body, err := internal.DoRequest(i.client, req)
+	if err != nil {
+		return nil, fmt.Errorf("impersonate: unable to generate access token: %w", err)
+	}
+	if c := resp.StatusCode; c < 200 || c > 299 {
+		return nil, fmt.Errorf("impersonate: status code %d: %s", c, body)
+	}
+
+	var accessTokenResp generateAccessTokenResponse
+	if err := json.Unmarshal(body, &accessTokenResp); err != nil {
+		return nil, fmt.Errorf("impersonate: unable to parse response: %w", err)
+	}
+	expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime)
+	if err != nil {
+		return nil, fmt.Errorf("impersonate: unable to parse expiry: %w", err)
+	}
+	return &auth.Token{
+		Value:  accessTokenResp.AccessToken,
+		Expiry: expiry,
+	}, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go
new file mode 100644
index 0000000000..1acaaa922d
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go
@@ -0,0 +1,174 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package impersonate
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"cloud.google.com/go/auth"
+	"cloud.google.com/go/auth/internal"
+)
+
+// user provides an auth flow for domain-wide delegation, setting
+// CredentialsOptions.Subject to be the impersonated user.
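+//
+// The flow, roughly: a JWT whose sub claim is set to the impersonated user is
+// signed via the IAM Credentials signJwt API as the target principal, and the
+// signed JWT is then exchanged for an access token at the oauth2 token
+// endpoint. A sketch of enabling this flow through the public API (all values
+// are hypothetical):
+//
+//	creds, err := NewCredentials(&CredentialsOptions{
+//		TargetPrincipal: "sa@my-project.iam.gserviceaccount.com",
+//		Scopes:          []string{"https://www.googleapis.com/auth/cloud-platform"},
+//		Subject:         "user@my-domain.example",
+//	})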
+func user(opts *CredentialsOptions, client *http.Client, lifetime time.Duration, isStaticToken bool) (auth.TokenProvider, error) { + u := userTokenProvider{ + client: client, + targetPrincipal: opts.TargetPrincipal, + subject: opts.Subject, + lifetime: lifetime, + } + u.delegates = make([]string, len(opts.Delegates)) + for i, v := range opts.Delegates { + u.delegates[i] = formatIAMServiceAccountName(v) + } + u.scopes = make([]string, len(opts.Scopes)) + copy(u.scopes, opts.Scopes) + var tpo *auth.CachedTokenProviderOptions + if isStaticToken { + tpo = &auth.CachedTokenProviderOptions{ + DisableAutoRefresh: true, + } + } + return auth.NewCachedTokenProvider(u, tpo), nil +} + +type claimSet struct { + Iss string `json:"iss"` + Scope string `json:"scope,omitempty"` + Sub string `json:"sub,omitempty"` + Aud string `json:"aud"` + Iat int64 `json:"iat"` + Exp int64 `json:"exp"` +} + +type signJWTRequest struct { + Payload string `json:"payload"` + Delegates []string `json:"delegates,omitempty"` +} + +type signJWTResponse struct { + // KeyID is the key used to sign the JWT. + KeyID string `json:"keyId"` + // SignedJwt contains the automatically generated header; the + // client-supplied payload; and the signature, which is generated using + // the key referenced by the `kid` field in the header. + SignedJWT string `json:"signedJwt"` +} + +type exchangeTokenResponse struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int64 `json:"expires_in"` +} + +type userTokenProvider struct { + client *http.Client + + targetPrincipal string + subject string + scopes []string + lifetime time.Duration + delegates []string +} + +func (u userTokenProvider) Token(ctx context.Context) (*auth.Token, error) { + signedJWT, err := u.signJWT(ctx) + if err != nil { + return nil, err + } + return u.exchangeToken(ctx, signedJWT) +} + +func (u userTokenProvider) signJWT(ctx context.Context) (string, error) { + now := time.Now() + exp := now.Add(u.lifetime) + claims := claimSet{ + Iss: u.targetPrincipal, + Scope: strings.Join(u.scopes, " "), + Sub: u.subject, + Aud: fmt.Sprintf("%s/token", oauth2Endpoint), + Iat: now.Unix(), + Exp: exp.Unix(), + } + payloadBytes, err := json.Marshal(claims) + if err != nil { + return "", fmt.Errorf("impersonate: unable to marshal claims: %w", err) + } + signJWTReq := signJWTRequest{ + Payload: string(payloadBytes), + Delegates: u.delegates, + } + + bodyBytes, err := json.Marshal(signJWTReq) + if err != nil { + return "", fmt.Errorf("impersonate: unable to marshal request: %w", err) + } + reqURL := fmt.Sprintf("%s/v1/%s:signJwt", iamCredentialsEndpoint, formatIAMServiceAccountName(u.targetPrincipal)) + req, err := http.NewRequestWithContext(ctx, "POST", reqURL, bytes.NewReader(bodyBytes)) + if err != nil { + return "", fmt.Errorf("impersonate: unable to create request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + resp, body, err := internal.DoRequest(u.client, req) + if err != nil { + return "", fmt.Errorf("impersonate: unable to sign JWT: %w", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return "", fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var signJWTResp signJWTResponse + if err := json.Unmarshal(body, &signJWTResp); err != nil { + return "", fmt.Errorf("impersonate: unable to parse response: %w", err) + } + return signJWTResp.SignedJWT, nil +} + +func (u userTokenProvider) exchangeToken(ctx context.Context, signedJWT string) (*auth.Token, error) { + v := url.Values{} + 
v.Set("grant_type", "assertion") + v.Set("assertion_type", "http://oauth.net/grant_type/jwt/1.0/bearer") + v.Set("assertion", signedJWT) + req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("%s/token", oauth2Endpoint), strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + resp, body, err := internal.DoRequest(u.client, req) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to exchange token: %w", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var tokenResp exchangeTokenResponse + if err := json.Unmarshal(body, &tokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %w", err) + } + + return &auth.Token{ + Value: tokenResp.AccessToken, + Type: tokenResp.TokenType, + Expiry: time.Now().Add(time.Second * time.Duration(tokenResp.ExpiresIn)), + }, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go new file mode 100644 index 0000000000..a34f6b06f8 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -0,0 +1,522 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path" + "sort" + "strings" + "time" + + "cloud.google.com/go/auth/internal" +) + +var ( + // getenv aliases os.Getenv for testing + getenv = os.Getenv +) + +const ( + // AWS Signature Version 4 signing algorithm identifier. + awsAlgorithm = "AWS4-HMAC-SHA256" + + // The termination string for the AWS credential scope value as defined in + // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html + awsRequestType = "aws4_request" + + // The AWS authorization header name for the security session token if available. + awsSecurityTokenHeader = "x-amz-security-token" + + // The name of the header containing the session token for metadata endpoint calls + awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token" + + awsIMDSv2SessionTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds" + + awsIMDSv2SessionTTL = "300" + + // The AWS authorization header name for the auto-generated date. + awsDateHeader = "x-amz-date" + + defaultRegionalCredentialVerificationURL = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" + + // Supported AWS configuration environment variables. 
+ awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID" + awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION" + awsRegionEnvVar = "AWS_REGION" + awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY" + awsSessionTokenEnvVar = "AWS_SESSION_TOKEN" + + awsTimeFormatLong = "20060102T150405Z" + awsTimeFormatShort = "20060102" + awsProviderType = "aws" +) + +type awsSubjectProvider struct { + EnvironmentID string + RegionURL string + RegionalCredVerificationURL string + CredVerificationURL string + IMDSv2SessionTokenURL string + TargetResource string + requestSigner *awsRequestSigner + region string + securityCredentialsProvider AwsSecurityCredentialsProvider + reqOpts *RequestOptions + + Client *http.Client +} + +func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) { + // Set Defaults + if sp.RegionalCredVerificationURL == "" { + sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL + } + if sp.requestSigner == nil { + headers := make(map[string]string) + if sp.shouldUseMetadataServer() { + awsSessionToken, err := sp.getAWSSessionToken(ctx) + if err != nil { + return "", err + } + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + } + } + + awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + if err != nil { + return "", err + } + if sp.region, err = sp.getRegion(ctx, headers); err != nil { + return "", err + } + sp.requestSigner = &awsRequestSigner{ + RegionName: sp.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + } + + // Generate the signed request to AWS STS GetCallerIdentity API. + // Use the required regional endpoint. Otherwise, the request will fail. + req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) + if err != nil { + return "", err + } + // The full, canonical resource name of the workload identity pool + // provider, with or without the HTTPS prefix. + // Including this header as part of the signature is recommended to + // ensure data integrity. + if sp.TargetResource != "" { + req.Header.Set("x-goog-cloud-target-resource", sp.TargetResource) + } + sp.requestSigner.signRequest(req) + + /* + The GCP STS endpoint expects the headers to be formatted as: + # [ + # {key: 'x-amz-date', value: '...'}, + # {key: 'Authorization', value: '...'}, + # ... + # ] + # And then serialized as: + # quote(json.dumps({ + # url: '...', + # method: 'POST', + # headers: [{key: 'x-amz-date', value: '...'}, ...] 
+ # })) + */ + + awsSignedReq := awsRequest{ + URL: req.URL.String(), + Method: "POST", + } + for headerKey, headerList := range req.Header { + for _, headerValue := range headerList { + awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{ + Key: headerKey, + Value: headerValue, + }) + } + } + sort.Slice(awsSignedReq.Headers, func(i, j int) bool { + headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key) + if headerCompare == 0 { + return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0 + } + return headerCompare < 0 + }) + + result, err := json.Marshal(awsSignedReq) + if err != nil { + return "", err + } + return url.QueryEscape(string(result)), nil +} + +func (sp *awsSubjectProvider) providerType() string { + if sp.securityCredentialsProvider != nil { + return programmaticProviderType + } + return awsProviderType +} + +func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, error) { + if sp.IMDSv2SessionTokenURL == "" { + return "", nil + } + req, err := http.NewRequestWithContext(ctx, "PUT", sp.IMDSv2SessionTokenURL, nil) + if err != nil { + return "", err + } + req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL) + + resp, body, err := internal.DoRequest(sp.Client, req) + if err != nil { + return "", err + } + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body) + } + return string(body), nil +} + +func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]string) (string, error) { + if sp.securityCredentialsProvider != nil { + return sp.securityCredentialsProvider.AwsRegion(ctx, sp.reqOpts) + } + if canRetrieveRegionFromEnvironment() { + if envAwsRegion := getenv(awsRegionEnvVar); envAwsRegion != "" { + return envAwsRegion, nil + } + return getenv(awsDefaultRegionEnvVar), nil + } + + if sp.RegionURL == "" { + return "", errors.New("credentials: unable to determine AWS region") + } + + req, err := http.NewRequestWithContext(ctx, "GET", sp.RegionURL, nil) + if err != nil { + return "", err + } + + for name, value := range headers { + req.Header.Add(name, value) + } + resp, body, err := internal.DoRequest(sp.Client, req) + if err != nil { + return "", err + } + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body) + } + + // This endpoint will return the region in format: us-east-2b. + // Only the us-east-2 part should be used. 
+	bodyLen := len(body)
+	if bodyLen == 0 {
+		return "", nil
+	}
+	return string(body[:bodyLen-1]), nil
+}
+
+func (sp *awsSubjectProvider) getSecurityCredentials(ctx context.Context, headers map[string]string) (result *AwsSecurityCredentials, err error) {
+	if sp.securityCredentialsProvider != nil {
+		return sp.securityCredentialsProvider.AwsSecurityCredentials(ctx, sp.reqOpts)
+	}
+	if canRetrieveSecurityCredentialFromEnvironment() {
+		return &AwsSecurityCredentials{
+			AccessKeyID:     getenv(awsAccessKeyIDEnvVar),
+			SecretAccessKey: getenv(awsSecretAccessKeyEnvVar),
+			SessionToken:    getenv(awsSessionTokenEnvVar),
+		}, nil
+	}
+
+	roleName, err := sp.getMetadataRoleName(ctx, headers)
+	if err != nil {
+		return
+	}
+	credentials, err := sp.getMetadataSecurityCredentials(ctx, roleName, headers)
+	if err != nil {
+		return
+	}
+
+	if credentials.AccessKeyID == "" {
+		return result, errors.New("credentials: missing AccessKeyId credential")
+	}
+	if credentials.SecretAccessKey == "" {
+		return result, errors.New("credentials: missing SecretAccessKey credential")
+	}
+
+	return credentials, nil
+}
+
+func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context, roleName string, headers map[string]string) (*AwsSecurityCredentials, error) {
+	var result *AwsSecurityCredentials
+
+	req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/%s", sp.CredVerificationURL, roleName), nil)
+	if err != nil {
+		return result, err
+	}
+	for name, value := range headers {
+		req.Header.Add(name, value)
+	}
+	resp, body, err := internal.DoRequest(sp.Client, req)
+	if err != nil {
+		return result, err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body)
+	}
+	if err := json.Unmarshal(body, &result); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers map[string]string) (string, error) {
+	if sp.CredVerificationURL == "" {
+		return "", errors.New("credentials: unable to determine the AWS metadata server security credentials endpoint")
+	}
+	req, err := http.NewRequestWithContext(ctx, "GET", sp.CredVerificationURL, nil)
+	if err != nil {
+		return "", err
+	}
+	for name, value := range headers {
+		req.Header.Add(name, value)
+	}
+
+	resp, body, err := internal.DoRequest(sp.Client, req)
+	if err != nil {
+		return "", err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body)
+	}
+	return string(body), nil
+}
+
+// awsRequestSigner is a utility type that signs http requests using an AWS
+// V4 signature.
+type awsRequestSigner struct {
+	RegionName             string
+	AwsSecurityCredentials *AwsSecurityCredentials
+}
+
+// signRequest adds the appropriate headers to an http.Request
+// or returns an error if something prevented this.
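+//
+// A sketch of standalone use within this package (region and credentials are
+// hypothetical):
+//
+//	signer := &awsRequestSigner{
+//		RegionName: "us-east-2",
+//		AwsSecurityCredentials: &AwsSecurityCredentials{
+//			AccessKeyID:     "AKIDEXAMPLE",
+//			SecretAccessKey: "SECRETEXAMPLE",
+//		},
+//	}
+//	req, _ := http.NewRequest("POST", "https://sts.us-east-2.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15", nil)
+//	if err := signer.signRequest(req); err != nil {
+//		// handle the error
+//	}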
+func (rs *awsRequestSigner) signRequest(req *http.Request) error { + // req is assumed non-nil + signedRequest := cloneRequest(req) + timestamp := Now() + signedRequest.Header.Set("host", requestHost(req)) + if rs.AwsSecurityCredentials.SessionToken != "" { + signedRequest.Header.Set(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken) + } + if signedRequest.Header.Get("date") == "" { + signedRequest.Header.Set(awsDateHeader, timestamp.Format(awsTimeFormatLong)) + } + authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp) + if err != nil { + return err + } + signedRequest.Header.Set("Authorization", authorizationCode) + req.Header = signedRequest.Header + return nil +} + +func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) { + canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req) + dateStamp := timestamp.Format(awsTimeFormatShort) + serviceName := "" + + if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 { + serviceName = splitHost[0] + } + credentialScope := strings.Join([]string{dateStamp, rs.RegionName, serviceName, awsRequestType}, "/") + requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData) + if err != nil { + return "", err + } + requestHash, err := getSha256([]byte(requestString)) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash}, "\n") + signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey) + for _, signingInput := range []string{ + dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign, + } { + signingKey, err = getHmacSha256(signingKey, []byte(signingInput)) + if err != nil { + return "", err + } + } + + return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil +} + +func getSha256(input []byte) (string, error) { + hash := sha256.New() + if _, err := hash.Write(input); err != nil { + return "", err + } + return hex.EncodeToString(hash.Sum(nil)), nil +} + +func getHmacSha256(key, input []byte) ([]byte, error) { + hash := hmac.New(sha256.New, key) + if _, err := hash.Write(input); err != nil { + return nil, err + } + return hash.Sum(nil), nil +} + +func cloneRequest(r *http.Request) *http.Request { + r2 := new(http.Request) + *r2 = *r + if r.Header != nil { + r2.Header = make(http.Header, len(r.Header)) + + // Find total number of values. + headerCount := 0 + for _, headerValues := range r.Header { + headerCount += len(headerValues) + } + copiedHeaders := make([]string, headerCount) // shared backing array for headers' values + + for headerKey, headerValues := range r.Header { + headerCount = copy(copiedHeaders, headerValues) + r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount] + copiedHeaders = copiedHeaders[headerCount:] + } + } + return r2 +} + +func canonicalPath(req *http.Request) string { + result := req.URL.EscapedPath() + if result == "" { + return "/" + } + return path.Clean(result) +} + +func canonicalQuery(req *http.Request) string { + queryValues := req.URL.Query() + for queryKey := range queryValues { + sort.Strings(queryValues[queryKey]) + } + return queryValues.Encode() +} + +func canonicalHeaders(req *http.Request) (string, string) { + // Header keys need to be sorted alphabetically. 
+ var headers []string + lowerCaseHeaders := make(http.Header) + for k, v := range req.Header { + k := strings.ToLower(k) + if _, ok := lowerCaseHeaders[k]; ok { + // include additional values + lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...) + } else { + headers = append(headers, k) + lowerCaseHeaders[k] = v + } + } + sort.Strings(headers) + + var fullHeaders bytes.Buffer + for _, header := range headers { + headerValue := strings.Join(lowerCaseHeaders[header], ",") + fullHeaders.WriteString(header) + fullHeaders.WriteRune(':') + fullHeaders.WriteString(headerValue) + fullHeaders.WriteRune('\n') + } + + return strings.Join(headers, ";"), fullHeaders.String() +} + +func requestDataHash(req *http.Request) (string, error) { + var requestData []byte + if req.Body != nil { + requestBody, err := req.GetBody() + if err != nil { + return "", err + } + defer requestBody.Close() + + requestData, err = internal.ReadAll(requestBody) + if err != nil { + return "", err + } + } + + return getSha256(requestData) +} + +func requestHost(req *http.Request) string { + if req.Host != "" { + return req.Host + } + return req.URL.Host +} + +func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) { + dataHash, err := requestDataHash(req) + if err != nil { + return "", err + } + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil +} + +type awsRequestHeader struct { + Key string `json:"key"` + Value string `json:"value"` +} + +type awsRequest struct { + URL string `json:"url"` + Method string `json:"method"` + Headers []awsRequestHeader `json:"headers"` +} + +// The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is +// required. +func canRetrieveRegionFromEnvironment() bool { + return getenv(awsRegionEnvVar) != "" || getenv(awsDefaultRegionEnvVar) != "" +} + +// Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available. +func canRetrieveSecurityCredentialFromEnvironment() bool { + return getenv(awsAccessKeyIDEnvVar) != "" && getenv(awsSecretAccessKeyEnvVar) != "" +} + +func (sp *awsSubjectProvider) shouldUseMetadataServer() bool { + return sp.securityCredentialsProvider == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment()) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go new file mode 100644 index 0000000000..d5765c4749 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go @@ -0,0 +1,284 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
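+
+// This file implements the executable-sourced subject token provider: the
+// subject token is produced by a user-supplied command and may optionally be
+// cached in an output file.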
+ +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/exec" + "regexp" + "strings" + "time" + + "cloud.google.com/go/auth/internal" +) + +const ( + executableSupportedMaxVersion = 1 + executableDefaultTimeout = 30 * time.Second + executableSource = "response" + executableProviderType = "executable" + outputFileSource = "output file" + + allowExecutablesEnvVar = "GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES" + + jwtTokenType = "urn:ietf:params:oauth:token-type:jwt" + idTokenType = "urn:ietf:params:oauth:token-type:id_token" + saml2TokenType = "urn:ietf:params:oauth:token-type:saml2" +) + +var ( + serviceAccountImpersonationRE = regexp.MustCompile(`https://iamcredentials..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken`) +) + +type nonCacheableError struct { + message string +} + +func (nce nonCacheableError) Error() string { + return nce.message +} + +// environment is a contract for testing +type environment interface { + existingEnv() []string + getenv(string) string + run(ctx context.Context, command string, env []string) ([]byte, error) + now() time.Time +} + +type runtimeEnvironment struct{} + +func (r runtimeEnvironment) existingEnv() []string { + return os.Environ() +} +func (r runtimeEnvironment) getenv(key string) string { + return os.Getenv(key) +} +func (r runtimeEnvironment) now() time.Time { + return time.Now().UTC() +} + +func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) { + splitCommand := strings.Fields(command) + cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...) + cmd.Env = env + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + if ctx.Err() == context.DeadlineExceeded { + return nil, context.DeadlineExceeded + } + if exitError, ok := err.(*exec.ExitError); ok { + return nil, exitCodeError(exitError) + } + return nil, executableError(err) + } + + bytesStdout := bytes.TrimSpace(stdout.Bytes()) + if len(bytesStdout) > 0 { + return bytesStdout, nil + } + return bytes.TrimSpace(stderr.Bytes()), nil +} + +type executableSubjectProvider struct { + Command string + Timeout time.Duration + OutputFile string + client *http.Client + opts *Options + env environment +} + +type executableResponse struct { + Version int `json:"version,omitempty"` + Success *bool `json:"success,omitempty"` + TokenType string `json:"token_type,omitempty"` + ExpirationTime int64 `json:"expiration_time,omitempty"` + IDToken string `json:"id_token,omitempty"` + SamlResponse string `json:"saml_response,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (sp *executableSubjectProvider) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) { + var result executableResponse + if err := json.Unmarshal(response, &result); err != nil { + return "", jsonParsingError(source, string(response)) + } + // Validate + if result.Version == 0 { + return "", missingFieldError(source, "version") + } + if result.Success == nil { + return "", missingFieldError(source, "success") + } + if !*result.Success { + if result.Code == "" || result.Message == "" { + return "", malformedFailureError() + } + return "", userDefinedError(result.Code, result.Message) + } + if result.Version > executableSupportedMaxVersion || result.Version < 0 { + return "", unsupportedVersionError(source, result.Version) + } + if result.ExpirationTime == 0 && 
sp.OutputFile != "" { + return "", missingFieldError(source, "expiration_time") + } + if result.TokenType == "" { + return "", missingFieldError(source, "token_type") + } + if result.ExpirationTime != 0 && result.ExpirationTime < now { + return "", tokenExpiredError() + } + + switch result.TokenType { + case jwtTokenType, idTokenType: + if result.IDToken == "" { + return "", missingFieldError(source, "id_token") + } + return result.IDToken, nil + case saml2TokenType: + if result.SamlResponse == "" { + return "", missingFieldError(source, "saml_response") + } + return result.SamlResponse, nil + default: + return "", tokenTypeError(source) + } +} + +func (sp *executableSubjectProvider) subjectToken(ctx context.Context) (string, error) { + if token, err := sp.getTokenFromOutputFile(); token != "" || err != nil { + return token, err + } + return sp.getTokenFromExecutableCommand(ctx) +} + +func (sp *executableSubjectProvider) providerType() string { + return executableProviderType +} + +func (sp *executableSubjectProvider) getTokenFromOutputFile() (token string, err error) { + if sp.OutputFile == "" { + // This ExecutableCredentialSource doesn't use an OutputFile. + return "", nil + } + + file, err := os.Open(sp.OutputFile) + if err != nil { + // No OutputFile found. Hasn't been created yet, so skip it. + return "", nil + } + defer file.Close() + + data, err := internal.ReadAll(file) + if err != nil || len(data) == 0 { + // Cachefile exists, but no data found. Get new credential. + return "", nil + } + + token, err = sp.parseSubjectTokenFromSource(data, outputFileSource, sp.env.now().Unix()) + if err != nil { + if _, ok := err.(nonCacheableError); ok { + // If the cached token is expired we need a new token, + // and if the cache contains a failure, we need to try again. + return "", nil + } + + // There was an error in the cached token, and the developer should be aware of it. + return "", err + } + // Token parsing succeeded. Use found token. + return token, nil +} + +func (sp *executableSubjectProvider) executableEnvironment() []string { + result := sp.env.existingEnv() + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", sp.opts.Audience)) + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", sp.opts.SubjectTokenType)) + result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0") + if sp.opts.ServiceAccountImpersonationURL != "" { + matches := serviceAccountImpersonationRE.FindStringSubmatch(sp.opts.ServiceAccountImpersonationURL) + if matches != nil { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1])) + } + } + if sp.OutputFile != "" { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", sp.OutputFile)) + } + return result +} + +func (sp *executableSubjectProvider) getTokenFromExecutableCommand(ctx context.Context) (string, error) { + // For security reasons, we need our consumers to set this environment variable to allow executables to be run. 
+ if sp.env.getenv(allowExecutablesEnvVar) != "1" { + return "", errors.New("credentials: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") + } + + ctx, cancel := context.WithDeadline(ctx, sp.env.now().Add(sp.Timeout)) + defer cancel() + + output, err := sp.env.run(ctx, sp.Command, sp.executableEnvironment()) + if err != nil { + return "", err + } + return sp.parseSubjectTokenFromSource(output, executableSource, sp.env.now().Unix()) +} + +func missingFieldError(source, field string) error { + return fmt.Errorf("credentials: %q missing %q field", source, field) +} + +func jsonParsingError(source, data string) error { + return fmt.Errorf("credentials: unable to parse %q: %v", source, data) +} + +func malformedFailureError() error { + return nonCacheableError{"credentials: response must include `error` and `message` fields when unsuccessful"} +} + +func userDefinedError(code, message string) error { + return nonCacheableError{fmt.Sprintf("credentials: response contains unsuccessful response: (%v) %v", code, message)} +} + +func unsupportedVersionError(source string, version int) error { + return fmt.Errorf("credentials: %v contains unsupported version: %v", source, version) +} + +func tokenExpiredError() error { + return nonCacheableError{"credentials: the token returned by the executable is expired"} +} + +func tokenTypeError(source string) error { + return fmt.Errorf("credentials: %v contains unsupported token type", source) +} + +func exitCodeError(err *exec.ExitError) error { + return fmt.Errorf("credentials: executable command failed with exit code %v: %w", err.ExitCode(), err) +} + +func executableError(err error) error { + return fmt.Errorf("credentials: executable command failed: %w", err) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go new file mode 100644 index 0000000000..112186a9e6 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go @@ -0,0 +1,407 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
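+
+// This file wires the configured credential source to a subject token
+// provider and exchanges the resulting subject token for a Google access
+// token via the STS endpoint, optionally followed by service account
+// impersonation.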
+
+package externalaccount
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"cloud.google.com/go/auth"
+	"cloud.google.com/go/auth/credentials/internal/impersonate"
+	"cloud.google.com/go/auth/credentials/internal/stsexchange"
+	"cloud.google.com/go/auth/internal/credsfile"
+)
+
+const (
+	timeoutMinimum = 5 * time.Second
+	timeoutMaximum = 120 * time.Second
+
+	universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+	defaultTokenURL           = "https://sts.UNIVERSE_DOMAIN/v1/token"
+	defaultUniverseDomain     = "googleapis.com"
+)
+
+var (
+	// Now aliases time.Now for testing
+	Now = func() time.Time {
+		return time.Now().UTC()
+	}
+	validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`)
+)
+
+// Options stores the configuration for fetching tokens with external credentials.
+type Options struct {
+	// Audience is the Secure Token Service (STS) audience which contains the resource name for the workload
+	// identity pool or the workforce pool and the provider identifier in that pool.
+	Audience string
+	// SubjectTokenType is the STS token type based on the OAuth 2.0 token exchange spec,
+	// e.g. `urn:ietf:params:oauth:token-type:jwt`.
+	SubjectTokenType string
+	// TokenURL is the STS token exchange endpoint.
+	TokenURL string
+	// TokenInfoURL is the token_info endpoint used to retrieve account-related
+	// information (user attributes such as the account identifier, e.g. email,
+	// username, uid). This is needed for gCloud session account identification.
+	TokenInfoURL string
+	// ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only
+	// required for workload identity pools when APIs to be accessed have not integrated with UberMint.
+	ServiceAccountImpersonationURL string
+	// ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation
+	// token will be valid for.
+	ServiceAccountImpersonationLifetimeSeconds int
+	// ClientSecret is currently only required if token_info endpoint also
+	// needs to be called with the generated GCP access token. When provided, STS will be
+	// called with additional basic authentication using client_id as username and client_secret as password.
+	ClientSecret string
+	// ClientID is only required in conjunction with ClientSecret, as described above.
+	ClientID string
+	// CredentialSource contains the necessary information to retrieve the token itself, as well
+	// as some environmental information.
+	CredentialSource *credsfile.CredentialSource
+	// QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries
+	// will set the x-goog-user-project which overrides the project associated with the credentials.
+	QuotaProjectID string
+	// Scopes contains the desired scopes for the returned access token.
+	Scopes []string
+	// WorkforcePoolUserProject should be set when it is a workforce pool and
+	// not a workload identity pool. The underlying principal must still have
+	// serviceusage.services.use IAM permission to use the project for
+	// billing/quota. Optional.
+	WorkforcePoolUserProject string
+	// UniverseDomain is the default service domain for a given Cloud universe.
+	// This value will be used in the default STS token URL. The default value
+	// is "googleapis.com". It will not be used if TokenURL is set. Optional.
+	UniverseDomain string
+	// SubjectTokenProvider is an optional token provider for OIDC/SAML
+	// credentials. One of SubjectTokenProvider, AwsSecurityCredentialsProvider
+	// or CredentialSource must be provided. Optional.
+	SubjectTokenProvider SubjectTokenProvider
+	// AwsSecurityCredentialsProvider is an AWS Security Credential provider
+	// for AWS credentials. One of SubjectTokenProvider,
+	// AwsSecurityCredentialsProvider or CredentialSource must be provided. Optional.
+	AwsSecurityCredentialsProvider AwsSecurityCredentialsProvider
+	// Client for token request.
+	Client *http.Client
+	// IsDefaultClient marks whether the client passed in is a default client that can be overridden.
+	// This is important for X509 credentials which should create a new client if the default was used
+	// but should respect a client explicitly passed in by the user.
+	IsDefaultClient bool
+}
+
+// SubjectTokenProvider can be used to supply a subject token to exchange for a
+// GCP access token.
+type SubjectTokenProvider interface {
+	// SubjectToken should return a valid subject token or an error.
+	// The external account token provider does not cache the returned subject
+	// token, so caching logic should be implemented in the provider to prevent
+	// multiple requests for the same subject token.
+	SubjectToken(ctx context.Context, opts *RequestOptions) (string, error)
+}
+
+// RequestOptions contains information about the requested subject token or AWS
+// security credentials from the Google external account credential.
+type RequestOptions struct {
+	// Audience is the requested audience for the external account credential.
+	Audience string
+	// SubjectTokenType is the requested subject token type for the external
+	// account credential. Expected values include:
+	// "urn:ietf:params:oauth:token-type:jwt"
+	// "urn:ietf:params:oauth:token-type:id_token"
+	// "urn:ietf:params:oauth:token-type:saml2"
+	// "urn:ietf:params:aws:token-type:aws4_request"
+	SubjectTokenType string
+}
+
+// AwsSecurityCredentialsProvider can be used to supply AwsSecurityCredentials
+// and an AWS Region to exchange for a GCP access token.
+type AwsSecurityCredentialsProvider interface {
+	// AwsRegion should return the AWS region or an error.
+	AwsRegion(ctx context.Context, opts *RequestOptions) (string, error)
+	// AwsSecurityCredentials should return a valid set of
+	// AwsSecurityCredentials or an error. The external account token provider
+	// does not cache the returned security credentials, so caching logic should
+	// be implemented in the provider to prevent multiple requests for the
+	// same security credentials.
+	AwsSecurityCredentials(ctx context.Context, opts *RequestOptions) (*AwsSecurityCredentials, error)
+}
+
+// AwsSecurityCredentials models AWS security credentials.
+type AwsSecurityCredentials struct {
+	// AccessKeyID is the AWS Access Key ID - Required.
+	AccessKeyID string `json:"AccessKeyID"`
+	// SecretAccessKey is the AWS Secret Access Key - Required.
+	SecretAccessKey string `json:"SecretAccessKey"`
+	// SessionToken is the AWS Session token. This should be provided for
+	// temporary AWS security credentials - Optional.
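+	// (Note the JSON key is "Token", matching the field name in AWS
+	// credential payloads.)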
+ SessionToken string `json:"Token"` +} + +func (o *Options) validate() error { + if o.Audience == "" { + return fmt.Errorf("externalaccount: Audience must be set") + } + if o.SubjectTokenType == "" { + return fmt.Errorf("externalaccount: Subject token type must be set") + } + if o.WorkforcePoolUserProject != "" { + if valid := validWorkforceAudiencePattern.MatchString(o.Audience); !valid { + return fmt.Errorf("externalaccount: workforce_pool_user_project should not be set for non-workforce pool credentials") + } + } + count := 0 + if o.CredentialSource != nil { + count++ + } + if o.SubjectTokenProvider != nil { + count++ + } + if o.AwsSecurityCredentialsProvider != nil { + count++ + } + if count == 0 { + return fmt.Errorf("externalaccount: one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set") + } + if count > 1 { + return fmt.Errorf("externalaccount: only one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set") + } + return nil +} + +// client returns the http client that should be used for the token exchange. If a non-default client +// is provided, then the client configured in the options will always be returned. If a default client +// is provided and the options are configured for X509 credentials, a new client will be created. +func (o *Options) client() (*http.Client, error) { + // If a client was provided and no override certificate config location was provided, use the provided client. + if o.CredentialSource == nil || o.CredentialSource.Certificate == nil || (!o.IsDefaultClient && o.CredentialSource.Certificate.CertificateConfigLocation == "") { + return o.Client, nil + } + + // If a new client should be created, validate and use the certificate source to create a new mTLS client. + cert := o.CredentialSource.Certificate + if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" { + return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true") + } + if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" { + return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true") + } + return createX509Client(cert.CertificateConfigLocation) +} + +// resolveTokenURL sets the default STS token endpoint with the configured +// universe domain. +func (o *Options) resolveTokenURL() { + if o.TokenURL != "" { + return + } else if o.UniverseDomain != "" { + o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, o.UniverseDomain, 1) + } else { + o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1) + } +} + +// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] +// configured with the provided options. 
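+//
+// A minimal configuration sketch (placeholder values, not a working setup):
+//
+//	tp, err := NewTokenProvider(&Options{
+//		Audience:         "//iam.googleapis.com/projects/123/locations/global/workloadIdentityPools/pool/providers/provider",
+//		SubjectTokenType: "urn:ietf:params:oauth:token-type:jwt",
+//		CredentialSource: &credsfile.CredentialSource{File: "/path/to/token"},
+//		Client:           http.DefaultClient,
+//	})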
+func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + opts.resolveTokenURL() + stp, err := newSubjectTokenProvider(opts) + if err != nil { + return nil, err + } + + client, err := opts.client() + if err != nil { + return nil, err + } + + tp := &tokenProvider{ + client: client, + opts: opts, + stp: stp, + } + + if opts.ServiceAccountImpersonationURL == "" { + return auth.NewCachedTokenProvider(tp, nil), nil + } + + scopes := make([]string, len(opts.Scopes)) + copy(scopes, opts.Scopes) + // needed for impersonation + tp.opts.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} + imp, err := impersonate.NewTokenProvider(&impersonate.Options{ + Client: client, + URL: opts.ServiceAccountImpersonationURL, + Scopes: scopes, + Tp: auth.NewCachedTokenProvider(tp, nil), + TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds, + }) + if err != nil { + return nil, err + } + return auth.NewCachedTokenProvider(imp, nil), nil +} + +type subjectTokenProvider interface { + subjectToken(ctx context.Context) (string, error) + providerType() string +} + +// tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens. +type tokenProvider struct { + client *http.Client + opts *Options + stp subjectTokenProvider +} + +func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { + subjectToken, err := tp.stp.subjectToken(ctx) + if err != nil { + return nil, err + } + + stsRequest := &stsexchange.TokenRequest{ + GrantType: stsexchange.GrantType, + Audience: tp.opts.Audience, + Scope: tp.opts.Scopes, + RequestedTokenType: stsexchange.TokenType, + SubjectToken: subjectToken, + SubjectTokenType: tp.opts.SubjectTokenType, + } + header := make(http.Header) + header.Set("Content-Type", "application/x-www-form-urlencoded") + header.Add("x-goog-api-client", getGoogHeaderValue(tp.opts, tp.stp)) + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: auth.StyleInHeader, + ClientID: tp.opts.ClientID, + ClientSecret: tp.opts.ClientSecret, + } + var options map[string]interface{} + // Do not pass workforce_pool_user_project when client authentication is used. + // The client ID is sufficient for determining the user project. + if tp.opts.WorkforcePoolUserProject != "" && tp.opts.ClientID == "" { + options = map[string]interface{}{ + "userProject": tp.opts.WorkforcePoolUserProject, + } + } + stsResp, err := stsexchange.ExchangeToken(ctx, &stsexchange.Options{ + Client: tp.client, + Endpoint: tp.opts.TokenURL, + Request: stsRequest, + Authentication: clientAuth, + Headers: header, + ExtraOpts: options, + }) + if err != nil { + return nil, err + } + + tok := &auth.Token{ + Value: stsResp.AccessToken, + Type: stsResp.TokenType, + } + // The RFC8693 doesn't define the explicit 0 of "expires_in" field behavior. 
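+	// Treat a missing, zero, or negative value as invalid rather than as a
+	// token that never expires.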
+ if stsResp.ExpiresIn <= 0 { + return nil, fmt.Errorf("credentials: got invalid expiry from security token service") + } + tok.Expiry = Now().Add(time.Duration(stsResp.ExpiresIn) * time.Second) + return tok, nil +} + +// newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a +// subjectTokenProvider +func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { + reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType} + if o.AwsSecurityCredentialsProvider != nil { + return &awsSubjectProvider{ + securityCredentialsProvider: o.AwsSecurityCredentialsProvider, + TargetResource: o.Audience, + reqOpts: reqOpts, + }, nil + } else if o.SubjectTokenProvider != nil { + return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil + } else if len(o.CredentialSource.EnvironmentID) > 3 && o.CredentialSource.EnvironmentID[:3] == "aws" { + if awsVersion, err := strconv.Atoi(o.CredentialSource.EnvironmentID[3:]); err == nil { + if awsVersion != 1 { + return nil, fmt.Errorf("credentials: aws version '%d' is not supported in the current build", awsVersion) + } + + awsProvider := &awsSubjectProvider{ + EnvironmentID: o.CredentialSource.EnvironmentID, + RegionURL: o.CredentialSource.RegionURL, + RegionalCredVerificationURL: o.CredentialSource.RegionalCredVerificationURL, + CredVerificationURL: o.CredentialSource.URL, + TargetResource: o.Audience, + Client: o.Client, + } + if o.CredentialSource.IMDSv2SessionTokenURL != "" { + awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL + } + + return awsProvider, nil + } + } else if o.CredentialSource.File != "" { + return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil + } else if o.CredentialSource.URL != "" { + return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format: o.CredentialSource.Format, Client: o.Client}, nil + } else if o.CredentialSource.Executable != nil { + ec := o.CredentialSource.Executable + if ec.Command == "" { + return nil, errors.New("credentials: missing `command` field — executable command must be provided") + } + + execProvider := &executableSubjectProvider{} + execProvider.Command = ec.Command + if ec.TimeoutMillis == 0 { + execProvider.Timeout = executableDefaultTimeout + } else { + execProvider.Timeout = time.Duration(ec.TimeoutMillis) * time.Millisecond + if execProvider.Timeout < timeoutMinimum || execProvider.Timeout > timeoutMaximum { + return nil, fmt.Errorf("credentials: invalid `timeout_millis` field — executable timeout must be between %v and %v seconds", timeoutMinimum.Seconds(), timeoutMaximum.Seconds()) + } + } + execProvider.OutputFile = ec.OutputFile + execProvider.client = o.Client + execProvider.opts = o + execProvider.env = runtimeEnvironment{} + return execProvider, nil + } else if o.CredentialSource.Certificate != nil { + cert := o.CredentialSource.Certificate + if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" { + return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true") + } + if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" { + return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true") + } + return &x509Provider{}, nil + } + return nil, errors.New("credentials: unable to 
parse credential source") +} + +func getGoogHeaderValue(conf *Options, p subjectTokenProvider) string { + return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", + goVersion(), + "unknown", + p.providerType(), + conf.ServiceAccountImpersonationURL != "", + conf.ServiceAccountImpersonationLifetimeSeconds != 0) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go new file mode 100644 index 0000000000..8186939fe1 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go @@ -0,0 +1,78 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" +) + +const ( + fileProviderType = "file" +) + +type fileSubjectProvider struct { + File string + Format *credsfile.Format +} + +func (sp *fileSubjectProvider) subjectToken(context.Context) (string, error) { + tokenFile, err := os.Open(sp.File) + if err != nil { + return "", fmt.Errorf("credentials: failed to open credential file %q: %w", sp.File, err) + } + defer tokenFile.Close() + tokenBytes, err := internal.ReadAll(tokenFile) + if err != nil { + return "", fmt.Errorf("credentials: failed to read credential file: %w", err) + } + tokenBytes = bytes.TrimSpace(tokenBytes) + + if sp.Format == nil { + return string(tokenBytes), nil + } + switch sp.Format.Type { + case fileTypeJSON: + jsonData := make(map[string]interface{}) + err = json.Unmarshal(tokenBytes, &jsonData) + if err != nil { + return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err) + } + val, ok := jsonData[sp.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("credentials: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("credentials: improperly formatted subject token") + } + return token, nil + case fileTypeText: + return string(tokenBytes), nil + default: + return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type) + } +} + +func (sp *fileSubjectProvider) providerType() string { + return fileProviderType +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go new file mode 100644 index 0000000000..8e4b4379b4 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go @@ -0,0 +1,74 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "runtime" + "strings" + "unicode" +) + +var ( + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. + if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return versionUnknown +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go similarity index 54% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go rename to vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go index 9b802db272..be3c87351f 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go @@ -1,10 +1,10 @@ -// Copyright The OpenTelemetry Authors +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,9 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.24.0 -// version of the OpenTelemetry semantic conventions. 
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package externalaccount + +import "context" + +type programmaticProvider struct { + opts *RequestOptions + stp SubjectTokenProvider +} + +func (pp *programmaticProvider) providerType() string { + return programmaticProviderType +} + +func (pp *programmaticProvider) subjectToken(ctx context.Context) (string, error) { + return pp.stp.SubjectToken(ctx, pp.opts) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go new file mode 100644 index 0000000000..0a020599e0 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go @@ -0,0 +1,88 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" +) + +const ( + fileTypeText = "text" + fileTypeJSON = "json" + urlProviderType = "url" + programmaticProviderType = "programmatic" + x509ProviderType = "x509" +) + +type urlSubjectProvider struct { + URL string + Headers map[string]string + Format *credsfile.Format + Client *http.Client +} + +func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) { + req, err := http.NewRequestWithContext(ctx, "GET", sp.URL, nil) + if err != nil { + return "", fmt.Errorf("credentials: HTTP request for URL-sourced credential failed: %w", err) + } + + for key, val := range sp.Headers { + req.Header.Add(key, val) + } + resp, body, err := internal.DoRequest(sp.Client, req) + if err != nil { + return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { + return "", fmt.Errorf("credentials: status code %d: %s", c, body) + } + + if sp.Format == nil { + return string(body), nil + } + switch sp.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(body, &jsonData) + if err != nil { + return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err) + } + val, ok := jsonData[sp.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("credentials: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("credentials: improperly formatted subject token") + } + return token, nil + case fileTypeText: + return string(body), nil + default: + return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type) + } +} + +func (sp *urlSubjectProvider) providerType() string { + return urlProviderType +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go 
b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go new file mode 100644 index 0000000000..115df5881f --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go @@ -0,0 +1,63 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "context" + "crypto/tls" + "net/http" + "time" + + "cloud.google.com/go/auth/internal/transport/cert" +) + +// x509Provider implements the subjectTokenProvider type for +// x509 workload identity credentials. Because x509 credentials +// rely on an mTLS connection to represent the 3rd party identity +// rather than a subject token, this provider will always return +// an empty string when a subject token is requested by the external account +// token provider. +type x509Provider struct { +} + +func (xp *x509Provider) providerType() string { + return x509ProviderType +} + +func (xp *x509Provider) subjectToken(ctx context.Context) (string, error) { + return "", nil +} + +// createX509Client creates a new client that is configured with mTLS, using the +// certificate configuration specified in the credential source. +func createX509Client(certificateConfigLocation string) (*http.Client, error) { + certProvider, err := cert.NewWorkloadX509CertProvider(certificateConfigLocation) + if err != nil { + return nil, err + } + trans := http.DefaultTransport.(*http.Transport).Clone() + + trans.TLSClientConfig = &tls.Config{ + GetClientCertificate: certProvider, + } + + // Create a client with default settings plus the X509 workload cert and key. + client := &http.Client{ + Transport: trans, + Timeout: 30 * time.Second, + } + + return client, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go new file mode 100644 index 0000000000..0d78854798 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go @@ -0,0 +1,110 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
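+
+// This file implements the refresh-token flow for external account
+// authorized user (workforce identity) credentials.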
+
+package externalaccountuser
+
+import (
+	"context"
+	"errors"
+	"net/http"
+	"time"
+
+	"cloud.google.com/go/auth"
+	"cloud.google.com/go/auth/credentials/internal/stsexchange"
+	"cloud.google.com/go/auth/internal"
+)
+
+// Options stores the configuration for fetching tokens with external authorized
+// user credentials.
+type Options struct {
+	// Audience is the Secure Token Service (STS) audience which contains the
+	// resource name for the workforce pool and the provider identifier in that
+	// pool.
+	Audience string
+	// RefreshToken is the OAuth 2.0 refresh token.
+	RefreshToken string
+	// TokenURL is the STS token exchange endpoint for refresh.
+	TokenURL string
+	// TokenInfoURL is the STS endpoint URL for token introspection. Optional.
+	TokenInfoURL string
+	// ClientID is only required in conjunction with ClientSecret, as described
+	// below.
+	ClientID string
+	// ClientSecret is currently only required if token_info endpoint also needs
+	// to be called with the generated cloud access token. When provided, STS
+	// will be called with additional basic authentication using client_id as
+	// username and client_secret as password.
+	ClientSecret string
+	// Scopes contains the desired scopes for the returned access token.
+	Scopes []string
+
+	// Client for token request.
+	Client *http.Client
+}
+
+func (c *Options) validate() bool {
+	return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != ""
+}
+
+// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider]
+// configured with the provided options.
+func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
+	if !opts.validate() {
+		return nil, errors.New("credentials: invalid external_account_authorized_user configuration")
+	}
+
+	tp := &tokenProvider{
+		o: opts,
+	}
+	return auth.NewCachedTokenProvider(tp, nil), nil
+}
+
+type tokenProvider struct {
+	o *Options
+}
+
+func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
+	opts := tp.o
+
+	clientAuth := stsexchange.ClientAuthentication{
+		AuthStyle:    auth.StyleInHeader,
+		ClientID:     opts.ClientID,
+		ClientSecret: opts.ClientSecret,
+	}
+	headers := make(http.Header)
+	headers.Set("Content-Type", "application/x-www-form-urlencoded")
+	stsResponse, err := stsexchange.RefreshAccessToken(ctx, &stsexchange.Options{
+		Client:         opts.Client,
+		Endpoint:       opts.TokenURL,
+		RefreshToken:   opts.RefreshToken,
+		Authentication: clientAuth,
+		Headers:        headers,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if stsResponse.ExpiresIn < 0 {
+		return nil, errors.New("credentials: invalid expiry from security token service")
+	}
+
+	// Updating the refresh token here is safe: Token is guarded by the
+	// wrapping CachedTokenProvider.
+	if stsResponse.RefreshToken != "" {
+		opts.RefreshToken = stsResponse.RefreshToken
+	}
+	return &auth.Token{
+		Value:  stsResponse.AccessToken,
+		Expiry: time.Now().UTC().Add(time.Duration(stsResponse.ExpiresIn) * time.Second),
+		Type:   internal.TokenTypeBearer,
+	}, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
new file mode 100644
index 0000000000..720045d3b0
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
@@ -0,0 +1,184 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gdch + +import ( + "context" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/auth/internal/jwt" +) + +const ( + // GrantType is the grant type for the token request. + GrantType = "urn:ietf:params:oauth:token-type:token-exchange" + requestTokenType = "urn:ietf:params:oauth:token-type:access_token" + subjectTokenType = "urn:k8s:params:oauth:token-type:serviceaccount" +) + +var ( + gdchSupportFormatVersions map[string]bool = map[string]bool{ + "1": true, + } +) + +// Options for [NewTokenProvider]. +type Options struct { + STSAudience string + Client *http.Client +} + +// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a +// GDCH cred file. +func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.TokenProvider, error) { + if !gdchSupportFormatVersions[f.FormatVersion] { + return nil, fmt.Errorf("credentials: unsupported gdch_service_account format %q", f.FormatVersion) + } + if o.STSAudience == "" { + return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows") + } + pk, err := internal.ParseKey([]byte(f.PrivateKey)) + if err != nil { + return nil, err + } + certPool, err := loadCertPool(f.CertPath) + if err != nil { + return nil, err + } + + tp := gdchProvider{ + serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name), + tokenURL: f.TokenURL, + aud: o.STSAudience, + pk: pk, + pkID: f.PrivateKeyID, + certPool: certPool, + client: o.Client, + } + return tp, nil +} + +func loadCertPool(path string) (*x509.CertPool, error) { + pool := x509.NewCertPool() + pem, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read certificate: %w", err) + } + pool.AppendCertsFromPEM(pem) + return pool, nil +} + +type gdchProvider struct { + serviceIdentity string + tokenURL string + aud string + pk *rsa.PrivateKey + pkID string + certPool *x509.CertPool + + client *http.Client +} + +func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { + addCertToTransport(g.client, g.certPool) + iat := time.Now() + exp := iat.Add(time.Hour) + claims := jwt.Claims{ + Iss: g.serviceIdentity, + Sub: g.serviceIdentity, + Aud: g.tokenURL, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + h := jwt.Header{ + Algorithm: jwt.HeaderAlgRSA256, + Type: jwt.HeaderType, + KeyID: string(g.pkID), + } + payload, err := jwt.EncodeJWS(&h, &claims, g.pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", GrantType) + v.Set("audience", g.aud) + v.Set("requested_token_type", requestTokenType) + v.Set("subject_token", payload) + v.Set("subject_token_type", subjectTokenType) + + req, err := http.NewRequestWithContext(ctx, "POST", g.tokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, body, err := 
internal.DoRequest(g.client, req)
+	if err != nil {
+		return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
+	}
+	if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
+		return nil, &auth.Error{
+			Response: resp,
+			Body:     body,
+		}
+	}
+
+	var tokenRes struct {
+		AccessToken string `json:"access_token"`
+		TokenType   string `json:"token_type"`
+		ExpiresIn   int64  `json:"expires_in"` // relative seconds from now
+	}
+	if err := json.Unmarshal(body, &tokenRes); err != nil {
+		return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
+	}
+	token := &auth.Token{
+		Value: tokenRes.AccessToken,
+		Type:  tokenRes.TokenType,
+	}
+	raw := make(map[string]interface{})
+	json.Unmarshal(body, &raw) // no error checks for optional fields
+	token.Metadata = raw
+
+	if secs := tokenRes.ExpiresIn; secs > 0 {
+		token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
+	}
+	return token, nil
+}
+
+// addCertToTransport makes a best-effort attempt at adding the cert info to
+// the client. It keeps all configured transport settings if the underlying
+// transport is an http.Transport; otherwise it replaces the client's
+// transport with a clone of the default transport plus the certs.
+func addCertToTransport(hc *http.Client, certPool *x509.CertPool) {
+	trans, ok := hc.Transport.(*http.Transport)
+	if !ok {
+		trans = http.DefaultTransport.(*http.Transport).Clone()
+		// Install the cloned transport on the client; without this the
+		// configured cert pool would be silently dropped.
+		hc.Transport = trans
+	}
+	trans.TLSClientConfig = &tls.Config{
+		RootCAs: certPool,
+	}
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
new file mode 100644
index 0000000000..ed53afa519
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
@@ -0,0 +1,146 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package impersonate
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"time"
+
+	"cloud.google.com/go/auth"
+	"cloud.google.com/go/auth/internal"
+)
+
+const (
+	defaultTokenLifetime = "3600s"
+	authHeaderKey        = "Authorization"
+)
+
+// generateAccessTokenReq is used for service account impersonation.
+type generateAccessTokenReq struct {
+	Delegates []string `json:"delegates,omitempty"`
+	Lifetime  string   `json:"lifetime,omitempty"`
+	Scope     []string `json:"scope,omitempty"`
+}
+
+type impersonateTokenResponse struct {
+	AccessToken string `json:"accessToken"`
+	ExpireTime  string `json:"expireTime"`
+}
+
+// NewTokenProvider uses a source credential, stored in Tp, to request an access token to the provided URL.
+// Scopes can be defined when the access token is requested.
+func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
+	if err := opts.validate(); err != nil {
+		return nil, err
+	}
+	return opts, nil
+}
+
+// Options for [NewTokenProvider].
+type Options struct {
+	// Tp is the source credential used to generate a token on the
+	// impersonated service account.
Required. + Tp auth.TokenProvider + + // URL is the endpoint to call to generate a token + // on behalf of the service account. Required. + URL string + // Scopes that the impersonated credential should have. Required. + Scopes []string + // Delegates are the service account email addresses in a delegation chain. + // Each service account must be granted roles/iam.serviceAccountTokenCreator + // on the next service account in the chain. Optional. + Delegates []string + // TokenLifetimeSeconds is the number of seconds the impersonation token will + // be valid for. Defaults to 1 hour if unset. Optional. + TokenLifetimeSeconds int + // Client configures the underlying client used to make network requests + // when fetching tokens. Required. + Client *http.Client +} + +func (o *Options) validate() error { + if o.Tp == nil { + return errors.New("credentials: missing required 'source_credentials' field in impersonated credentials") + } + if o.URL == "" { + return errors.New("credentials: missing required 'service_account_impersonation_url' field in impersonated credentials") + } + return nil +} + +// Token performs the exchange to get a temporary service account token to allow access to GCP. +func (o *Options) Token(ctx context.Context) (*auth.Token, error) { + lifetime := defaultTokenLifetime + if o.TokenLifetimeSeconds != 0 { + lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds) + } + reqBody := generateAccessTokenReq{ + Lifetime: lifetime, + Scope: o.Scopes, + Delegates: o.Delegates, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("credentials: unable to marshal request: %w", err) + } + req, err := http.NewRequestWithContext(ctx, "POST", o.URL, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("credentials: unable to create impersonation request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + if err := setAuthHeader(ctx, o.Tp, req); err != nil { + return nil, err + } + resp, body, err := internal.DoRequest(o.Client, req) + if err != nil { + return nil, fmt.Errorf("credentials: unable to generate access token: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { + return nil, fmt.Errorf("credentials: status code %d: %s", c, body) + } + + var accessTokenResp impersonateTokenResponse + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("credentials: unable to parse response: %w", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("credentials: unable to parse expiry: %w", err) + } + return &auth.Token{ + Value: accessTokenResp.AccessToken, + Expiry: expiry, + Type: internal.TokenTypeBearer, + }, nil +} + +func setAuthHeader(ctx context.Context, tp auth.TokenProvider, r *http.Request) error { + t, err := tp.Token(ctx) + if err != nil { + return err + } + typ := t.Type + if typ == "" { + typ = internal.TokenTypeBearer + } + r.Header.Set(authHeaderKey, typ+" "+t.Value) + return nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go new file mode 100644 index 0000000000..768a9dafc1 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go @@ -0,0 +1,161 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with 
the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stsexchange + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" +) + +const ( + // GrantType for a sts exchange. + GrantType = "urn:ietf:params:oauth:grant-type:token-exchange" + // TokenType for a sts exchange. + TokenType = "urn:ietf:params:oauth:token-type:access_token" + + jwtTokenType = "urn:ietf:params:oauth:token-type:jwt" +) + +// Options stores the configuration for making an sts exchange request. +type Options struct { + Client *http.Client + Endpoint string + Request *TokenRequest + Authentication ClientAuthentication + Headers http.Header + // ExtraOpts are optional fields marshalled into the `options` field of the + // request body. + ExtraOpts map[string]interface{} + RefreshToken string +} + +// RefreshAccessToken performs the token exchange using a refresh token flow. +func RefreshAccessToken(ctx context.Context, opts *Options) (*TokenResponse, error) { + data := url.Values{} + data.Set("grant_type", "refresh_token") + data.Set("refresh_token", opts.RefreshToken) + return doRequest(ctx, opts, data) +} + +// ExchangeToken performs an oauth2 token exchange with the provided endpoint. +func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) { + data := url.Values{} + data.Set("audience", opts.Request.Audience) + data.Set("grant_type", GrantType) + data.Set("requested_token_type", TokenType) + data.Set("subject_token_type", opts.Request.SubjectTokenType) + data.Set("subject_token", opts.Request.SubjectToken) + data.Set("scope", strings.Join(opts.Request.Scope, " ")) + if opts.ExtraOpts != nil { + opts, err := json.Marshal(opts.ExtraOpts) + if err != nil { + return nil, fmt.Errorf("credentials: failed to marshal additional options: %w", err) + } + data.Set("options", string(opts)) + } + return doRequest(ctx, opts, data) +} + +func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) { + opts.Authentication.InjectAuthentication(data, opts.Headers) + encodedData := data.Encode() + + req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData)) + if err != nil { + return nil, fmt.Errorf("credentials: failed to properly build http request: %w", err) + + } + for key, list := range opts.Headers { + for _, val := range list { + req.Header.Add(key, val) + } + } + req.Header.Set("Content-Length", strconv.Itoa(len(encodedData))) + + resp, body, err := internal.DoRequest(opts.Client, req) + if err != nil { + return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { + return nil, fmt.Errorf("credentials: status code %d: %s", c, body) + } + var stsResp TokenResponse + if err := json.Unmarshal(body, &stsResp); err != nil { + return nil, fmt.Errorf("credentials: failed to unmarshal response body from Secure Token Server: %w", err) + } + + return &stsResp, nil +} + +// TokenRequest contains fields necessary to 
make an oauth2 token +// exchange. +type TokenRequest struct { + ActingParty struct { + ActorToken string + ActorTokenType string + } + GrantType string + Resource string + Audience string + Scope []string + RequestedTokenType string + SubjectToken string + SubjectTokenType string +} + +// TokenResponse is used to decode the remote server response during +// an oauth2 token exchange. +type TokenResponse struct { + AccessToken string `json:"access_token"` + IssuedTokenType string `json:"issued_token_type"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + Scope string `json:"scope"` + RefreshToken string `json:"refresh_token"` +} + +// ClientAuthentication represents an OAuth client ID and secret and the +// mechanism for passing these credentials as stated in rfc6749#2.3.1. +type ClientAuthentication struct { + AuthStyle auth.Style + ClientID string + ClientSecret string +} + +// InjectAuthentication is used to add authentication to a Secure Token Service +// exchange request. It modifies either the passed url.Values or http.Header +// depending on the desired authentication format. +func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { + if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { + return + } + switch c.AuthStyle { + case auth.StyleInHeader: + plainHeader := c.ClientID + ":" + c.ClientSecret + headers.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(plainHeader))) + default: + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + } +} diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go new file mode 100644 index 0000000000..b62a8ae4d5 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -0,0 +1,81 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "context" + "crypto/rsa" + "fmt" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/auth/internal/jwt" +) + +var ( + // for testing + now func() time.Time = time.Now +) + +// configureSelfSignedJWT uses the private key in the service account to create +// a JWT without making a network call. 
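// NOTE (illustrative, not from the upstream sources): the self-signed flow
// implemented below yields a Bearer token that is itself a JWT, signed locally
// with the service account's RSA key; the claims of such a token look like
//
//	{
//	  "iss": "sa@my-project.iam.gserviceaccount.com",  // hypothetical email
//	  "sub": "sa@my-project.iam.gserviceaccount.com",
//	  "aud": "https://storage.googleapis.com/",        // or a "scope" claim
//	  "iat": 1700000000,
//	  "exp": 1700003600                                // one hour later
//	}
//
// No token-endpoint round trip occurs; the API verifies the signature against
// the service account's registered public key.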
+func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + pk, err := internal.ParseKey([]byte(f.PrivateKey)) + if err != nil { + return nil, fmt.Errorf("credentials: could not parse key: %w", err) + } + return &selfSignedTokenProvider{ + email: f.ClientEmail, + audience: opts.Audience, + scopes: opts.scopes(), + pk: pk, + pkID: f.PrivateKeyID, + }, nil +} + +type selfSignedTokenProvider struct { + email string + audience string + scopes []string + pk *rsa.PrivateKey + pkID string +} + +func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) { + iat := now() + exp := iat.Add(time.Hour) + scope := strings.Join(tp.scopes, " ") + c := &jwt.Claims{ + Iss: tp.email, + Sub: tp.email, + Aud: tp.audience, + Scope: scope, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + h := &jwt.Header{ + Algorithm: jwt.HeaderAlgRSA256, + Type: jwt.HeaderType, + KeyID: string(tp.pkID), + } + msg, err := jwt.EncodeJWS(h, c, tp.pk) + if err != nil { + return nil, fmt.Errorf("credentials: could not encode JWT: %w", err) + } + return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil +} diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go new file mode 100644 index 0000000000..30fedf9562 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -0,0 +1,226 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package httptransport provides functionality for managing HTTP client +// connections to Google Cloud services. +package httptransport + +import ( + "crypto/tls" + "errors" + "fmt" + "net/http" + + "cloud.google.com/go/auth" + detect "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/transport" +) + +// ClientCertProvider is a function that returns a TLS client certificate to be +// used when opening TLS connections. It follows the same semantics as +// [crypto/tls.Config.GetClientCertificate]. +type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// Options used to configure a [net/http.Client] from [NewClient]. +type Options struct { + // DisableTelemetry disables default telemetry (OpenTelemetry). An example + // reason to do so would be to bind custom telemetry that overrides the + // defaults. + DisableTelemetry bool + // DisableAuthentication specifies that no authentication should be used. It + // is suitable only for testing and for accessing public resources, like + // public Google Cloud Storage buckets. + DisableAuthentication bool + // Headers are extra HTTP headers that will be appended to every outgoing + // request. + Headers http.Header + // BaseRoundTripper overrides the base transport used for serving requests. + // If specified ClientCertProvider is ignored. 
+ BaseRoundTripper http.RoundTripper + // Endpoint overrides the default endpoint to be used for a service. + Endpoint string + // APIKey specifies an API key to be used as the basis for authentication. + // If set, DetectOpts are ignored. + APIKey string + // Credentials used to add Authorization header to all requests. If set, + // DetectOpts are ignored. + Credentials *auth.Credentials + // ClientCertProvider is a function that returns a TLS client certificate to + // be used when opening TLS connections. It follows the same semantics as + // crypto/tls.Config.GetClientCertificate. + ClientCertProvider ClientCertProvider + // DetectOpts configures settings for detecting Application Default + // Credentials. + DetectOpts *detect.DetectOptions + // UniverseDomain is the default service domain for a given Cloud universe. + // The default value is "googleapis.com". This is the universe domain + // configured for the client, which will be compared to the universe domain + // that is separately configured for the credentials. + UniverseDomain string + + // InternalOptions are NOT meant to be set directly by consumers of this + // package, they should only be set by generated client code. + InternalOptions *InternalOptions +} + +func (o *Options) validate() error { + if o == nil { + return errors.New("httptransport: opts required to be non-nil") + } + if o.InternalOptions != nil && o.InternalOptions.SkipValidation { + return nil + } + hasCreds := o.APIKey != "" || + o.Credentials != nil || + (o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) || + (o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "") + if o.DisableAuthentication && hasCreds { + return errors.New("httptransport: DisableAuthentication is incompatible with options that set or detect credentials") + } + return nil +} + +// client returns the client a user set for the detect options or nil if one was +// not set. +func (o *Options) client() *http.Client { + if o.DetectOpts != nil && o.DetectOpts.Client != nil { + return o.DetectOpts.Client + } + return nil +} + +func (o *Options) resolveDetectOptions() *detect.DetectOptions { + io := o.InternalOptions + // soft-clone these so we are not updating a ref the user holds and may reuse + do := transport.CloneDetectOptions(o.DetectOpts) + + // If scoped JWTs are enabled or the user provided an audience, allow + // self-signed JWT. + if (io != nil && io.EnableJWTWithScope) || do.Audience != "" { + do.UseSelfSignedJWT = true + } + // Only default scopes if user did not also set an audience. + if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 { + do.Scopes = make([]string, len(io.DefaultScopes)) + copy(do.Scopes, io.DefaultScopes) + } + if len(do.Scopes) == 0 && do.Audience == "" && io != nil { + do.Audience = o.InternalOptions.DefaultAudience + } + if o.ClientCertProvider != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: o.ClientCertProvider, + } + do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) + do.TokenURL = detect.GoogleMTLSTokenURL + } + return do +} + +// InternalOptions are only meant to be set by generated client code. These are +// not meant to be set directly by consumers of this package. Configuration in +// this type is considered EXPERIMENTAL and may be removed at any time in the +// future without warning. +type InternalOptions struct { + // EnableJWTWithScope specifies if scope can be used with self-signed JWT.
+ EnableJWTWithScope bool + // DefaultAudience specifies a default audience to be used as the audience + // field ("aud") for the JWT token authentication. + DefaultAudience string + // DefaultEndpointTemplate combined with UniverseDomain specifies the + // default endpoint. + DefaultEndpointTemplate string + // DefaultMTLSEndpoint specifies the default mTLS endpoint. + DefaultMTLSEndpoint string + // DefaultScopes specifies the default OAuth2 scopes to be used for a + // service. + DefaultScopes []string + // SkipValidation bypasses validation on Options. It should only be used + // internally for clients that need more control over their transport. + SkipValidation bool +} + +// AddAuthorizationMiddleware adds a middleware to the provided client's +// transport that sets the Authorization header with the value produced by the +// provided [cloud.google.com/go/auth.Credentials]. An error is returned only +// if client or creds is nil. +func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error { + if client == nil || creds == nil { + return fmt.Errorf("httptransport: client and creds must not be nil") + } + base := client.Transport + if base == nil { + if dt, ok := http.DefaultTransport.(*http.Transport); ok { + base = dt.Clone() + } else { + // Directly reuse the DefaultTransport if the application has + // replaced it with an implementation of RoundTripper other than + // http.Transport. + base = http.DefaultTransport + } + } + client.Transport = &authTransport{ + creds: creds, + base: base, + // TODO(quartzmo): Somehow set clientUniverseDomain from impersonate calls. + } + return nil +} + +// NewClient returns a [net/http.Client] that can be used to communicate with a +// Google Cloud service, configured with the provided [Options]. It +// automatically appends Authorization headers to all outgoing requests. +func NewClient(opts *Options) (*http.Client, error) { + if err := opts.validate(); err != nil { + return nil, err + } + + tOpts := &transport.Options{ + Endpoint: opts.Endpoint, + ClientCertProvider: opts.ClientCertProvider, + Client: opts.client(), + UniverseDomain: opts.UniverseDomain, + } + if io := opts.InternalOptions; io != nil { + tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate + tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint + } + clientCertProvider, dialTLSContext, err := transport.GetHTTPTransportConfig(tOpts) + if err != nil { + return nil, err + } + baseRoundTripper := opts.BaseRoundTripper + if baseRoundTripper == nil { + baseRoundTripper = defaultBaseTransport(clientCertProvider, dialTLSContext) + } + // Ensure the token exchange transport uses the same ClientCertProvider as the API transport. + opts.ClientCertProvider = clientCertProvider + trans, err := newTransport(baseRoundTripper, opts) + if err != nil { + return nil, err + } + return &http.Client{ + Transport: trans, + }, nil +} + +// SetAuthHeader uses the provided token to set the Authorization header on a +// request. If the token.Type is empty, the type is assumed to be Bearer.
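// NOTE (illustrative, not from the upstream sources): a hedged usage sketch
// for the NewClient function above, assuming Application Default Credentials
// are present in the environment; the bucket-list URL and project are
// placeholders:
//
//	client, err := httptransport.NewClient(&httptransport.Options{
//	    DetectOpts: &credentials.DetectOptions{
//	        Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
//	    },
//	})
//	if err != nil {
//	    // handle error
//	}
//	resp, err := client.Get("https://storage.googleapis.com/storage/v1/b?project=my-project")
//
// Every request sent through the returned client carries an Authorization
// header minted from the detected credentials.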
+func SetAuthHeader(token *auth.Token, req *http.Request) { + typ := token.Type + if typ == "" { + typ = internal.TokenTypeBearer + } + req.Header.Set("Authorization", typ+" "+token.Value) +} diff --git a/vendor/cloud.google.com/go/auth/httptransport/trace.go b/vendor/cloud.google.com/go/auth/httptransport/trace.go new file mode 100644 index 0000000000..467c477c04 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/httptransport/trace.go @@ -0,0 +1,93 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package httptransport + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "net/http" + "strconv" + "strings" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +const ( + httpHeaderMaxSize = 200 + cloudTraceHeader = `X-Cloud-Trace-Context` +) + +// asserts the httpFormat fulfills this foreign interface +var _ propagation.HTTPFormat = (*httpFormat)(nil) + +// httpFormat implements propagation.httpFormat to propagate +// traces in HTTP headers for Google Cloud Platform and Cloud Trace. +type httpFormat struct{} + +// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests. +func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + h := req.Header.Get(cloudTraceHeader) + // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. + // Return if the header is empty or missing, or if the header is unreasonably + // large, to avoid making unnecessary copies of a large string. + if h == "" || len(h) > httpHeaderMaxSize { + return trace.SpanContext{}, false + } + + // Parse the trace id field. + slash := strings.Index(h, `/`) + if slash == -1 { + return trace.SpanContext{}, false + } + tid, h := h[:slash], h[slash+1:] + + buf, err := hex.DecodeString(tid) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.TraceID[:], buf) + + // Parse the span id field. + spanstr := h + semicolon := strings.Index(h, `;`) + if semicolon != -1 { + spanstr, h = h[:semicolon], h[semicolon+1:] + } + sid, err := strconv.ParseUint(spanstr, 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + binary.BigEndian.PutUint64(sc.SpanID[:], sid) + + // Parse the options field, options field is optional. + if !strings.HasPrefix(h, "o=") { + return sc, true + } + o, err := strconv.ParseUint(h[2:], 10, 32) + if err != nil { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(o) + return sc, true +} + +// SpanContextToRequest modifies the given request to include a Cloud Trace header. 
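// NOTE (informational): the wire format parsed above and emitted below is
//
//	X-Cloud-Trace-Context: TRACE_ID/SPAN_ID;o=OPTIONS
//
// where TRACE_ID is 32 hex characters, SPAN_ID is a decimal uint64, and the
// optional o= field carries the trace options bitmask (o=1 means sampled),
// e.g. with made-up values:
//
//	X-Cloud-Trace-Context: 105445aa7843bc8bf206b12000100000/123456789;o=1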
+func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + sid := binary.BigEndian.Uint64(sc.SpanID[:]) + header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) + req.Header.Set(cloudTraceHeader, header) +} diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go new file mode 100644 index 0000000000..274bb01254 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -0,0 +1,220 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package httptransport + +import ( + "context" + "crypto/tls" + "net" + "net/http" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/transport" + "cloud.google.com/go/auth/internal/transport/cert" + "go.opencensus.io/plugin/ochttp" + "golang.org/x/net/http2" +) + +const ( + quotaProjectHeaderKey = "X-goog-user-project" +) + +func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) { + var headers = opts.Headers + ht := &headerTransport{ + base: base, + headers: headers, + } + var trans http.RoundTripper = ht + trans = addOCTransport(trans, opts) + switch { + case opts.DisableAuthentication: + // Do nothing. + case opts.APIKey != "": + qp := internal.GetQuotaProject(nil, opts.Headers.Get(quotaProjectHeaderKey)) + if qp != "" { + if headers == nil { + headers = make(map[string][]string, 1) + } + headers.Set(quotaProjectHeaderKey, qp) + } + trans = &apiKeyTransport{ + Transport: trans, + Key: opts.APIKey, + } + default: + var creds *auth.Credentials + if opts.Credentials != nil { + creds = opts.Credentials + } else { + var err error + creds, err = credentials.DetectDefault(opts.resolveDetectOptions()) + if err != nil { + return nil, err + } + } + qp, err := creds.QuotaProjectID(context.Background()) + if err != nil { + return nil, err + } + if qp != "" { + if headers == nil { + headers = make(map[string][]string, 1) + } + // Don't overwrite user specified quota + if v := headers.Get(quotaProjectHeaderKey); v == "" { + headers.Set(quotaProjectHeaderKey, qp) + } + } + creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil) + trans = &authTransport{ + base: trans, + creds: creds, + clientUniverseDomain: opts.UniverseDomain, + } + } + return trans, nil +} + +// defaultBaseTransport returns the base HTTP transport, taking most defaults +// from http.DefaultTransport. +// If a TLS certificate source is available, TLSClientConfig is set as well.
+func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { + defaultTransport, ok := http.DefaultTransport.(*http.Transport) + if !ok { + defaultTransport = transport.BaseTransport() + } + trans := defaultTransport.Clone() + trans.MaxIdleConnsPerHost = 100 + + if clientCertSource != nil { + trans.TLSClientConfig = &tls.Config{ + GetClientCertificate: clientCertSource, + } + } + if dialTLSContext != nil { + // If DialTLSContext is set, TLSClientConfig will be ignored + trans.DialTLSContext = dialTLSContext + } + + // Configures the ReadIdleTimeout HTTP/2 option for the + // transport. This allows broken idle connections to be pruned more quickly, + // preventing the client from attempting to re-use connections that will no + // longer work. + http2Trans, err := http2.ConfigureTransports(trans) + if err == nil { + http2Trans.ReadIdleTimeout = time.Second * 31 + } + + return trans +} + +type apiKeyTransport struct { + // Key is the API Key to set on requests. + Key string + // Transport is the underlying HTTP transport. + // If nil, http.DefaultTransport is used. + Transport http.RoundTripper +} + +func (t *apiKeyTransport) RoundTrip(req *http.Request) (*http.Response, error) { + newReq := *req + args := newReq.URL.Query() + args.Set("key", t.Key) + newReq.URL.RawQuery = args.Encode() + return t.Transport.RoundTrip(&newReq) +} + +type headerTransport struct { + headers http.Header + base http.RoundTripper +} + +func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base + newReq := *req + newReq.Header = make(http.Header) + for k, vv := range req.Header { + newReq.Header[k] = vv + } + + for k, v := range t.headers { + newReq.Header[k] = v + } + + return rt.RoundTrip(&newReq) +} + +func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { + if opts.DisableTelemetry { + return trans + } + return &ochttp.Transport{ + Base: trans, + Propagation: &httpFormat{}, + } +} + +type authTransport struct { + creds *auth.Credentials + base http.RoundTripper + clientUniverseDomain string +} + +// getClientUniverseDomain returns the universe domain configured for the client. +// The default value is "googleapis.com". +func (t *authTransport) getClientUniverseDomain() string { + if t.clientUniverseDomain == "" { + return internal.DefaultUniverseDomain + } + return t.clientUniverseDomain +} + +// RoundTrip authorizes and authenticates the request with an +// access token from Transport's Source. Per the RoundTripper contract we must +// not modify the initial request, so we clone it, and we must close the body +// on any errors that happen during our token logic.
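// NOTE (illustrative, not from the upstream sources): a minimal RoundTripper
// obeying the same clone-don't-mutate contract the implementation below
// follows; the type name and token value are hypothetical:
//
//	type bearerTransport struct {
//	    base  http.RoundTripper
//	    token string
//	}
//
//	func (t *bearerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
//	    req2 := req.Clone(req.Context()) // never mutate the caller's request
//	    req2.Header.Set("Authorization", "Bearer "+t.token)
//	    return t.base.RoundTrip(req2)
//	}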
+func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) { + reqBodyClosed := false + if req.Body != nil { + defer func() { + if !reqBodyClosed { + req.Body.Close() + } + }() + } + token, err := t.creds.Token(req.Context()) + if err != nil { + return nil, err + } + if token.MetadataString("auth.google.tokenSource") != "compute-metadata" { + credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context()) + if err != nil { + return nil, err + } + if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { + return nil, err + } + } + req2 := req.Clone(req.Context()) + SetAuthHeader(token, req2) + reqBodyClosed = true + return t.base.RoundTrip(req2) +} diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go b/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go new file mode 100644 index 0000000000..9cd4bed61b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go @@ -0,0 +1,107 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package credsfile is meant to hide implementation details from the public +// surface of the detect package. It should not import any other packages in +// this module. It is located under the main internal package so other +// sub-packages can use these parsed types as well. +package credsfile + +import ( + "os" + "os/user" + "path/filepath" + "runtime" +) + +const ( + // GoogleAppCredsEnvVar is the environment variable for setting the + // application default credentials. + GoogleAppCredsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS" + userCredsFilename = "application_default_credentials.json" +) + +// CredentialType represents different credential filetypes Google credentials +// can be. +type CredentialType int + +const ( + // UnknownCredType is an unidentified file type. + UnknownCredType CredentialType = iota + // UserCredentialsKey represents a user creds file type. + UserCredentialsKey + // ServiceAccountKey represents a service account file type. + ServiceAccountKey + // ImpersonatedServiceAccountKey represents an impersonated service account + // file type. + ImpersonatedServiceAccountKey + // ExternalAccountKey represents an external account file type. + ExternalAccountKey + // GDCHServiceAccountKey represents a GDCH file type. + GDCHServiceAccountKey + // ExternalAccountAuthorizedUserKey represents an external account authorized + // user file type. + ExternalAccountAuthorizedUserKey +) + +// parseCredentialType returns the associated filetype based on the parsed +// typeString provided.
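// NOTE (illustrative, not from the upstream sources): given a credentials
// file that begins, say,
//
//	{"type": "external_account", "audience": "...", ...}
//
// only the "type" discriminator is inspected and mapped to the matching
// CredentialType constant by the function below; unrecognized strings yield
// UnknownCredType rather than an error.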
+func parseCredentialType(typeString string) CredentialType { + switch typeString { + case "service_account": + return ServiceAccountKey + case "authorized_user": + return UserCredentialsKey + case "impersonated_service_account": + return ImpersonatedServiceAccountKey + case "external_account": + return ExternalAccountKey + case "external_account_authorized_user": + return ExternalAccountAuthorizedUserKey + case "gdch_service_account": + return GDCHServiceAccountKey + default: + return UnknownCredType + } +} + +// GetFileNameFromEnv returns the override if provided or detects a filename +// from the environment. +func GetFileNameFromEnv(override string) string { + if override != "" { + return override + } + return os.Getenv(GoogleAppCredsEnvVar) +} + +// GetWellKnownFileName tries to locate the filepath for the user credential +// file based on the environment. +func GetWellKnownFileName() string { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", userCredsFilename) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", userCredsFilename) +} + +// guessUnixHomeDir defaults to checking for HOME, but not all unix systems have +// this set, so we have a fallback. +func guessUnixHomeDir() string { + if v := os.Getenv("HOME"); v != "" { + return v + } + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go new file mode 100644 index 0000000000..3be6e5bbb4 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go @@ -0,0 +1,157 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credsfile + +import ( + "encoding/json" +) + +// Config3LO is the internals of a client creds file. +type Config3LO struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` +} + +// ClientCredentialsFile representation. +type ClientCredentialsFile struct { + Web *Config3LO `json:"web"` + Installed *Config3LO `json:"installed"` + UniverseDomain string `json:"universe_domain"` +} + +// ServiceAccountFile representation. +type ServiceAccountFile struct { + Type string `json:"type"` + ProjectID string `json:"project_id"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + ClientEmail string `json:"client_email"` + ClientID string `json:"client_id"` + AuthURL string `json:"auth_uri"` + TokenURL string `json:"token_uri"` + UniverseDomain string `json:"universe_domain"` +} + +// UserCredentialsFile representation.
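// NOTE (informational): resolution of the ADC user credentials file
// implemented above, with illustrative paths:
//
//	GOOGLE_APPLICATION_CREDENTIALS=/tmp/creds.json -> /tmp/creds.json
//	Windows -> %APPDATA%\gcloud\application_default_credentials.json
//	Unix    -> $HOME/.config/gcloud/application_default_credentials.json
//	           (with an os/user lookup when HOME is unset)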
+type UserCredentialsFile struct { + Type string `json:"type"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + QuotaProjectID string `json:"quota_project_id"` + RefreshToken string `json:"refresh_token"` + UniverseDomain string `json:"universe_domain"` +} + +// ExternalAccountFile representation. +type ExternalAccountFile struct { + Type string `json:"type"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + Audience string `json:"audience"` + SubjectTokenType string `json:"subject_token_type"` + ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + TokenURL string `json:"token_url"` + CredentialSource *CredentialSource `json:"credential_source,omitempty"` + TokenInfoURL string `json:"token_info_url"` + ServiceAccountImpersonation *ServiceAccountImpersonationInfo `json:"service_account_impersonation,omitempty"` + QuotaProjectID string `json:"quota_project_id"` + WorkforcePoolUserProject string `json:"workforce_pool_user_project"` + UniverseDomain string `json:"universe_domain"` +} + +// ExternalAccountAuthorizedUserFile representation. +type ExternalAccountAuthorizedUserFile struct { + Type string `json:"type"` + Audience string `json:"audience"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RefreshToken string `json:"refresh_token"` + TokenURL string `json:"token_url"` + TokenInfoURL string `json:"token_info_url"` + RevokeURL string `json:"revoke_url"` + QuotaProjectID string `json:"quota_project_id"` + UniverseDomain string `json:"universe_domain"` +} + +// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. +// +// One field amongst File, URL, Certificate, and Executable should be filled, depending on the kind of credential in question. +// The EnvironmentID should start with AWS if being used for an AWS credential. +type CredentialSource struct { + File string `json:"file"` + URL string `json:"url"` + Headers map[string]string `json:"headers"` + Executable *ExecutableConfig `json:"executable,omitempty"` + Certificate *CertificateConfig `json:"certificate"` + EnvironmentID string `json:"environment_id"` // TODO: Make type for this + RegionURL string `json:"region_url"` + RegionalCredVerificationURL string `json:"regional_cred_verification_url"` + CredVerificationURL string `json:"cred_verification_url"` + IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` + Format *Format `json:"format,omitempty"` +} + +// Format describes the format of a [CredentialSource]. +type Format struct { + // Type is either "text" or "json". When not provided "text" type is assumed. + Type string `json:"type"` + // SubjectTokenFieldName is only required for JSON format. This would be "access_token" for azure. + SubjectTokenFieldName string `json:"subject_token_field_name"` +} + +// ExecutableConfig represents the command to run for an executable +// [CredentialSource]. +type ExecutableConfig struct { + Command string `json:"command"` + TimeoutMillis int `json:"timeout_millis"` + OutputFile string `json:"output_file"` +} + +// CertificateConfig represents the options used to set up X509 based workload +// [CredentialSource] +type CertificateConfig struct { + UseDefaultCertificateConfig bool `json:"use_default_certificate_config"` + CertificateConfigLocation string `json:"certificate_config_location"` +} + +// ServiceAccountImpersonationInfo has impersonation configuration. 
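// NOTE (illustrative, not from the upstream sources): a hedged sketch of a
// file-sourced external_account document matching the structs above; every
// identifier below is a placeholder:
//
//	{
//	  "type": "external_account",
//	  "audience": "//iam.googleapis.com/projects/123/locations/global/workloadIdentityPools/my-pool/providers/my-provider",
//	  "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
//	  "token_url": "https://sts.googleapis.com/v1/token",
//	  "credential_source": {
//	    "file": "/var/run/secrets/oidc/token",
//	    "format": {"type": "text"}
//	  }
//	}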
+type ServiceAccountImpersonationInfo struct { + TokenLifetimeSeconds int `json:"token_lifetime_seconds"` +} + +// ImpersonatedServiceAccountFile representation. +type ImpersonatedServiceAccountFile struct { + Type string `json:"type"` + ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + Delegates []string `json:"delegates"` + CredSource json.RawMessage `json:"source_credentials"` + UniverseDomain string `json:"universe_domain"` +} + +// GDCHServiceAccountFile represents the Google Distributed Cloud Hosted (GDCH) service identity file. +type GDCHServiceAccountFile struct { + Type string `json:"type"` + FormatVersion string `json:"format_version"` + Project string `json:"project"` + Name string `json:"name"` + CertPath string `json:"ca_cert_path"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + TokenURL string `json:"token_uri"` + UniverseDomain string `json:"universe_domain"` +} diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go b/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go new file mode 100644 index 0000000000..a02b9f5df7 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go @@ -0,0 +1,98 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credsfile + +import ( + "encoding/json" +) + +// ParseServiceAccount parses bytes into a [ServiceAccountFile]. +func ParseServiceAccount(b []byte) (*ServiceAccountFile, error) { + var f *ServiceAccountFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseClientCredentials parses bytes into a +// [credsfile.ClientCredentialsFile]. +func ParseClientCredentials(b []byte) (*ClientCredentialsFile, error) { + var f *ClientCredentialsFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseUserCredentials parses bytes into a [UserCredentialsFile]. +func ParseUserCredentials(b []byte) (*UserCredentialsFile, error) { + var f *UserCredentialsFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseExternalAccount parses bytes into a [ExternalAccountFile]. +func ParseExternalAccount(b []byte) (*ExternalAccountFile, error) { + var f *ExternalAccountFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseExternalAccountAuthorizedUser parses bytes into a +// [ExternalAccountAuthorizedUserFile]. +func ParseExternalAccountAuthorizedUser(b []byte) (*ExternalAccountAuthorizedUserFile, error) { + var f *ExternalAccountAuthorizedUserFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseImpersonatedServiceAccount parses bytes into a +// [ImpersonatedServiceAccountFile]. 
+func ParseImpersonatedServiceAccount(b []byte) (*ImpersonatedServiceAccountFile, error) { + var f *ImpersonatedServiceAccountFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseGDCHServiceAccount parses bytes into a [GDCHServiceAccountFile]. +func ParseGDCHServiceAccount(b []byte) (*GDCHServiceAccountFile, error) { + var f *GDCHServiceAccountFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +type fileTypeChecker struct { + Type string `json:"type"` +} + +// ParseFileType determines the [CredentialType] based on bytes provided. +func ParseFileType(b []byte) (CredentialType, error) { + var f fileTypeChecker + if err := json.Unmarshal(b, &f); err != nil { + return 0, err + } + return parseCredentialType(f.Type), nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go new file mode 100644 index 0000000000..4308345eda --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -0,0 +1,213 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "context" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "net/http" + "os" + "sync" + "time" + + "cloud.google.com/go/compute/metadata" +) + +const ( + // TokenTypeBearer is the auth header prefix for bearer tokens. + TokenTypeBearer = "Bearer" + + // QuotaProjectEnvVar is the environment variable for setting the quota + // project. + QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" + projectEnvVar = "GOOGLE_CLOUD_PROJECT" + maxBodySize = 1 << 20 + + // DefaultUniverseDomain is the default value for universe domain. + // Universe domain is the default service domain for a given Cloud universe. + DefaultUniverseDomain = "googleapis.com" +) + +type clonableTransport interface { + Clone() *http.Transport +} + +// DefaultClient returns an [http.Client] with some defaults set. If +// the current [http.DefaultTransport] is a [clonableTransport], as +// is the case for an [*http.Transport], the clone will be used. +// Otherwise the [http.DefaultTransport] is used directly. +func DefaultClient() *http.Client { + if transport, ok := http.DefaultTransport.(clonableTransport); ok { + return &http.Client{ + Transport: transport.Clone(), + Timeout: 30 * time.Second, + } + } + + return &http.Client{ + Transport: http.DefaultTransport, + Timeout: 30 * time.Second, + } +} + +// ParseKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the private key +// from the PEM container before conversion. It only supports PEM +// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + return parsed, nil +} + +// GetQuotaProject retrieves quota project with precedence being: override, +// environment variable, creds json file. +func GetQuotaProject(b []byte, override string) string { + if override != "" { + return override + } + if env := os.Getenv(QuotaProjectEnvVar); env != "" { + return env + } + if b == nil { + return "" + } + var v struct { + QuotaProject string `json:"quota_project_id"` + } + if err := json.Unmarshal(b, &v); err != nil { + return "" + } + return v.QuotaProject +} + +// GetProjectID retrieves project with precedence being: override, +// environment variable, creds json file. +func GetProjectID(b []byte, override string) string { + if override != "" { + return override + } + if env := os.Getenv(projectEnvVar); env != "" { + return env + } + if b == nil { + return "" + } + var v struct { + ProjectID string `json:"project_id"` // standard service account key + Project string `json:"project"` // gdch key + } + if err := json.Unmarshal(b, &v); err != nil { + return "" + } + if v.ProjectID != "" { + return v.ProjectID + } + return v.Project +} + +// DoRequest executes the provided req with the client. It reads the response +// body, closes it, and returns it. +func DoRequest(client *http.Client, req *http.Request) (*http.Response, []byte, error) { + resp, err := client.Do(req) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + body, err := ReadAll(io.LimitReader(resp.Body, maxBodySize)) + if err != nil { + return nil, nil, err + } + return resp, body, nil +} + +// ReadAll consumes the whole reader and safely reads the content of its body +// with some overflow protection. +func ReadAll(r io.Reader) ([]byte, error) { + return io.ReadAll(io.LimitReader(r, maxBodySize)) +} + +// StaticCredentialsProperty is a helper for creating static credentials +// properties. +func StaticCredentialsProperty(s string) StaticProperty { + return StaticProperty(s) +} + +// StaticProperty always returns the value of the underlying string. +type StaticProperty string + +// GetProperty returns the property value for the given context. +func (p StaticProperty) GetProperty(context.Context) (string, error) { + return string(p), nil +} + +// ComputeUniverseDomainProvider fetches the credentials universe domain from +// the google cloud metadata service. +type ComputeUniverseDomainProvider struct { + universeDomainOnce sync.Once + universeDomain string + universeDomainErr error +} + +// GetProperty fetches the credentials universe domain from the google cloud +// metadata service. +func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) { + c.universeDomainOnce.Do(func() { + c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx) + }) + if c.universeDomainErr != nil { + return "", c.universeDomainErr + } + return c.universeDomain, nil +} + +// httpGetMetadataUniverseDomain is a package var for unit test substitution.
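// NOTE (informational): precedence implemented by GetQuotaProject above, with
// illustrative values:
//
//	override argument set                   -> the override wins
//	GOOGLE_CLOUD_QUOTA_PROJECT=env-proj     -> "env-proj"
//	b = {"quota_project_id": "file-proj"}   -> "file-proj"
//	none of the above                       -> ""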
+var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + return metadata.GetWithContext(ctx, "universe/universe_domain") +} + +func getMetadataUniverseDomain(ctx context.Context) (string, error) { + universeDomain, err := httpGetMetadataUniverseDomain(ctx) + if err == nil { + return universeDomain, nil + } + if _, ok := err.(metadata.NotDefinedError); ok { + // http.StatusNotFound (404) + return DefaultUniverseDomain, nil + } + return "", err +} diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go new file mode 100644 index 0000000000..dc28b3c3bb --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go @@ -0,0 +1,171 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jwt + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +const ( + // HeaderAlgRSA256 is the RS256 [Header.Algorithm]. + HeaderAlgRSA256 = "RS256" + // HeaderAlgES256 is the ES256 [Header.Algorithm]. + HeaderAlgES256 = "ES256" + // HeaderType is the standard [Header.Type]. + HeaderType = "JWT" +) + +// Header represents a JWT header. +type Header struct { + Algorithm string `json:"alg"` + Type string `json:"typ"` + KeyID string `json:"kid"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Claims represents the claims set of a JWT. +type Claims struct { + // Iss is the issuer JWT claim. + Iss string `json:"iss"` + // Scope is the scope JWT claim. + Scope string `json:"scope,omitempty"` + // Exp is the expiry JWT claim. If unset, default is in one hour from now. + Exp int64 `json:"exp"` + // Iat is the subject issued at claim. If unset, default is now. + Iat int64 `json:"iat"` + // Aud is the audience JWT claim. Optional. + Aud string `json:"aud"` + // Sub is the subject JWT claim. Optional. + Sub string `json:"sub,omitempty"` + // AdditionalClaims contains any additional non-standard JWT claims. Optional. + AdditionalClaims map[string]interface{} `json:"-"` +} + +func (c *Claims) encode() (string, error) { + // Compensate for skew + now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jwt: invalid Exp = %d; must be later than Iat = %d", c.Exp, c.Iat) + } + + b, err := json.Marshal(c) + if err != nil { + return "", err + } + + if len(c.AdditionalClaims) == 0 { + return base64.RawURLEncoding.EncodeToString(b), nil + } + + // Marshal private claim set and then append it to b. 
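// NOTE (illustrative): the splice performed below merges two JSON objects,
// e.g. {"iss":"a","exp":1} + {"uid":"x"} -> {"iss":"a","exp":1,"uid":"x"},
// by swapping b's closing '}' for ',' and appending prv without its
// opening '{'.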
+ prv, err := json.Marshal(c.AdditionalClaims) + if err != nil { + return "", fmt.Errorf("invalid map of additional claims %v: %w", c.AdditionalClaims, err) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// EncodeJWS encodes the data using the provided key as a JSON web signature. +func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + claims, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, claims) + h := sha256.New() + h.Write([]byte(ss)) + sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// DecodeJWS decodes a claim set from a JWS payload. +func DecodeJWS(payload string) (*Claims, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + return nil, errors.New("invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &Claims{} + if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c); err != nil { + return nil, err + } + if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(&c.AdditionalClaims); err != nil { + return nil, err + } + return c, err +} + +// VerifyJWS tests whether the provided JWT token's signature was produced by +// the private key associated with the provided public key. +func VerifyJWS(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jwt: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." + parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString) +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go new file mode 100644 index 0000000000..26e037c1a3 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -0,0 +1,356 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
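NOTE (illustrative, not part of the upstream diff): a self-contained sketch of the RS256 scheme that EncodeJWS and VerifyJWS above implement: sign base64url(header) + "." + base64url(claims) with PKCS#1 v1.5 over SHA-256, then verify with the public key. All header and claim values are made up.

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// Sign, mirroring EncodeJWS.
	head := base64.RawURLEncoding.EncodeToString([]byte(`{"alg":"RS256","typ":"JWT"}`))
	claims := base64.RawURLEncoding.EncodeToString([]byte(`{"iss":"me","exp":1700003600}`))
	signed := head + "." + claims
	digest := sha256.Sum256([]byte(signed))
	sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, digest[:])
	if err != nil {
		panic(err)
	}
	token := signed + "." + base64.RawURLEncoding.EncodeToString(sig)

	// Verify, mirroring VerifyJWS.
	parts := strings.Split(token, ".")
	rawSig, err := base64.RawURLEncoding.DecodeString(parts[2])
	if err != nil {
		panic(err)
	}
	d2 := sha256.Sum256([]byte(parts[0] + "." + parts[1]))
	fmt.Println(rsa.VerifyPKCS1v15(&key.PublicKey, crypto.SHA256, d2[:], rawSig) == nil) // true
}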
+ +package transport + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "log" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/transport/cert" + "github.com/google/s2a-go" + "github.com/google/s2a-go/fallback" + "google.golang.org/grpc/credentials" +) + +const ( + mTLSModeAlways = "always" + mTLSModeNever = "never" + mTLSModeAuto = "auto" + + // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false. + googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A" + googleAPIUseCertSource = "GOOGLE_API_USE_CLIENT_CERTIFICATE" + googleAPIUseMTLS = "GOOGLE_API_USE_MTLS_ENDPOINT" + googleAPIUseMTLSOld = "GOOGLE_API_USE_MTLS" + + universeDomainPlaceholder = "UNIVERSE_DOMAIN" + + mtlsMDSRoot = "/run/google-mds-mtls/root.crt" + mtlsMDSKey = "/run/google-mds-mtls/client.key" +) + +var ( + errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com") +) + +// Options duplicates information from the individual transport packages in +// order to avoid cyclic deps. It correlates 1:1 with fields on +// httptransport.Options and grpctransport.Options. +type Options struct { + Endpoint string + DefaultMTLSEndpoint string + DefaultEndpointTemplate string + ClientCertProvider cert.Provider + Client *http.Client + UniverseDomain string + EnableDirectPath bool + EnableDirectPathXds bool +} + +// getUniverseDomain returns the default service domain for a given Cloud +// universe. +func (o *Options) getUniverseDomain() string { + if o.UniverseDomain == "" { + return internal.DefaultUniverseDomain + } + return o.UniverseDomain +} + +// isUniverseDomainGDU returns true if the universe domain is the default Google +// universe. +func (o *Options) isUniverseDomainGDU() bool { + return o.getUniverseDomain() == internal.DefaultUniverseDomain +} + +// defaultEndpoint returns the DefaultEndpointTemplate merged with the +// universe domain if the DefaultEndpointTemplate is set, otherwise returns an +// empty string. +func (o *Options) defaultEndpoint() string { + if o.DefaultEndpointTemplate == "" { + return "" + } + return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1) +} + +// mergedEndpoint merges a user-provided Endpoint of format host[:port] with the +// default endpoint. +func (o *Options) mergedEndpoint() (string, error) { + defaultEndpoint := o.defaultEndpoint() + u, err := url.Parse(fixScheme(defaultEndpoint)) + if err != nil { + return "", err + } + return strings.Replace(defaultEndpoint, u.Host, o.Endpoint, 1), nil +} + +func fixScheme(baseURL string) string { + if !strings.Contains(baseURL, "://") { + baseURL = "https://" + baseURL + } + return baseURL +} + +// GetGRPCTransportCredsAndEndpoint returns an instance of +// [google.golang.org/grpc/credentials.TransportCredentials], and the +// corresponding endpoint to use for the gRPC client.
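// NOTE (informational): endpoint resolution implemented above, with an
// illustrative template:
//
//	DefaultEndpointTemplate = "https://storage.UNIVERSE_DOMAIN/"
//	UniverseDomain unset           -> "https://storage.googleapis.com/"
//	UniverseDomain = "example.edu" -> "https://storage.example.edu/"
//	Endpoint = "myhost:8000"       -> template host replaced, giving
//	                                  "https://myhost:8000/"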
+func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCredentials, string, error) { + config, err := getTransportConfig(opts) + if err != nil { + return nil, "", err + } + + defaultTransportCreds := credentials.NewTLS(&tls.Config{ + GetClientCertificate: config.clientCertSource, + }) + + var s2aAddr string + var transportCredsForS2A credentials.TransportCredentials + + if config.mtlsS2AAddress != "" { + s2aAddr = config.mtlsS2AAddress + transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) + if err != nil { + log.Printf("Loading MTLS MDS credentials failed: %v", err) + return defaultTransportCreds, config.endpoint, nil + } + } else if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return defaultTransportCreds, config.endpoint, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. + if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackClientHandshakeFunc: fallbackHandshake, + } + } + + s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ + S2AAddress: s2aAddr, + TransportCreds: transportCredsForS2A, + FallbackOpts: fallbackOpts, + }) + if err != nil { + // Use default if we cannot initialize S2A client transport credentials. + return defaultTransportCreds, config.endpoint, nil + } + return s2aTransportCreds, config.s2aMTLSEndpoint, nil +} + +// GetHTTPTransportConfig returns a client certificate source and a function for +// dialing MTLS with S2A. +func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, string, string) (net.Conn, error), error) { + config, err := getTransportConfig(opts) + if err != nil { + return nil, nil, err + } + + var s2aAddr string + var transportCredsForS2A credentials.TransportCredentials + + if config.mtlsS2AAddress != "" { + s2aAddr = config.mtlsS2AAddress + transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) + if err != nil { + log.Printf("Loading MTLS MDS credentials failed: %v", err) + return config.clientCertSource, nil, nil + } + } else if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return config.clientCertSource, nil, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. + if fallbackURL, err := url.Parse(config.endpoint); err == nil { + if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackDialer: &s2a.FallbackDialer{ + Dialer: fallbackDialer, + ServerAddr: fallbackServerAddr, + }, + } + } + } + + dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ + S2AAddress: s2aAddr, + TransportCreds: transportCredsForS2A, + FallbackOpts: fallbackOpts, + }) + return nil, dialTLSContextFunc, nil +} + +func loadMTLSMDSTransportCreds(mtlsMDSRootFile, mtlsMDSKeyFile string) (credentials.TransportCredentials, error) { + rootPEM, err := os.ReadFile(mtlsMDSRootFile) + if err != nil { + return nil, err + } + caCertPool := x509.NewCertPool() + ok := caCertPool.AppendCertsFromPEM(rootPEM) + if !ok { + return nil, errors.New("failed to load MTLS MDS root certificate") + } + // The mTLS MDS credentials are formatted as the concatenation of a PEM-encoded certificate chain + // followed by a PEM-encoded private key. 
For this reason, the same concatenated file is passed to the + // tls.LoadX509KeyPair call as both the certificate chain and private key arguments. + cert, err := tls.LoadX509KeyPair(mtlsMDSKeyFile, mtlsMDSKeyFile) + if err != nil { + return nil, err + } + tlsConfig := tls.Config{ + RootCAs: caCertPool, + Certificates: []tls.Certificate{cert}, + MinVersion: tls.VersionTLS13, + } + return credentials.NewTLS(&tlsConfig), nil +} + +func getTransportConfig(opts *Options) (*transportConfig, error) { + clientCertSource, err := GetClientCertificateProvider(opts) + if err != nil { + return nil, err + } + endpoint, err := getEndpoint(opts, clientCertSource) + if err != nil { + return nil, err + } + defaultTransportConfig := transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + } + + if !shouldUseS2A(clientCertSource, opts) { + return &defaultTransportConfig, nil + } + if !opts.isUniverseDomainGDU() { + return nil, errUniverseNotSupportedMTLS + } + + s2aAddress := GetS2AAddress() + mtlsS2AAddress := GetMTLSS2AAddress() + if s2aAddress == "" && mtlsS2AAddress == "" { + return &defaultTransportConfig, nil + } + return &transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + s2aAddress: s2aAddress, + mtlsS2AAddress: mtlsS2AAddress, + s2aMTLSEndpoint: opts.DefaultMTLSEndpoint, + }, nil +} + +// GetClientCertificateProvider returns a default client certificate source, if +// not provided by the user. +// +// A nil default source can be returned if the source does not exist. Any exceptions +// encountered while initializing the default source will be reported as a client +// error (e.g. a corrupt metadata file). +func GetClientCertificateProvider(opts *Options) (cert.Provider, error) { + if !isClientCertificateEnabled(opts) { + return nil, nil + } else if opts.ClientCertProvider != nil { + return opts.ClientCertProvider, nil + } + return cert.DefaultProvider() +} + +// isClientCertificateEnabled returns true by default in the GDU universe domain, unless explicitly overridden by env var +func isClientCertificateEnabled(opts *Options) bool { + if value, ok := os.LookupEnv(googleAPIUseCertSource); ok { + // error as false is OK + b, _ := strconv.ParseBool(value) + return b + } + return opts.isUniverseDomainGDU() +} + +type transportConfig struct { + // The client certificate source. + clientCertSource cert.Provider + // The corresponding endpoint to use based on client certificate source. + endpoint string + // The plaintext S2A address if it can be used, otherwise an empty string. + s2aAddress string + // The MTLS S2A address if it can be used, otherwise an empty string. + mtlsS2AAddress string + // The MTLS endpoint to use with S2A. + s2aMTLSEndpoint string +} + +// getEndpoint returns the endpoint for the service, taking into account the +// user-provided endpoint override "settings.Endpoint". +// +// If no endpoint override is specified, we will either return the default endpoint or +// the default mTLS endpoint if a client certificate is available. +// +// You can override the default endpoint choice (mtls vs. regular) by setting the +// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// +// If the endpoint override is an address (host:port) rather than full base +// URL (ex. https://...), then the user-provided address will be merged into +// the default endpoint.
For example, WithEndpoint("myhost:8000") and
+// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return "https://myhost:8000/bar/baz".
+func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) {
+	if opts.Endpoint == "" {
+		mtlsMode := getMTLSMode()
+		if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) {
+			if !opts.isUniverseDomainGDU() {
+				return "", errUniverseNotSupportedMTLS
+			}
+			return opts.DefaultMTLSEndpoint, nil
+		}
+		return opts.defaultEndpoint(), nil
+	}
+	if strings.Contains(opts.Endpoint, "://") {
+		// User passed in a full URL path, use it verbatim.
+		return opts.Endpoint, nil
+	}
+	if opts.defaultEndpoint() == "" {
+		// If DefaultEndpointTemplate is not configured,
+		// use the user provided endpoint verbatim. This allows a naked
+		// "host[:port]" URL to be used with GRPC Direct Path.
+		return opts.Endpoint, nil
+	}
+
+	// Assume user-provided endpoint is host[:port], merge it with the default endpoint.
+	return opts.mergedEndpoint()
+}
+
+func getMTLSMode() string {
+	mode := os.Getenv(googleAPIUseMTLS)
+	if mode == "" {
+		mode = os.Getenv(googleAPIUseMTLSOld) // Deprecated.
+	}
+	if mode == "" {
+		return mTLSModeAuto
+	}
+	return strings.ToLower(mode)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go
new file mode 100644
index 0000000000..5cedc50f1e
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go
@@ -0,0 +1,65 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cert
+
+import (
+	"crypto/tls"
+	"errors"
+	"sync"
+)
+
+// defaultCertData holds all the variables pertaining to
+// the default certificate provider created by [DefaultProvider].
+//
+// A singleton model is used to allow the provider to be reused
+// by the transport layer. As mentioned in [DefaultProvider], a nil
+// provider and a nil error may be returned to indicate that a default
+// provider could not be found, which will skip extra TLS config in the
+// transport layer.
+type defaultCertData struct {
+	once     sync.Once
+	provider Provider
+	err      error
+}
+
+var (
+	defaultCert defaultCertData
+)
+
+// Provider is a function that can be passed into crypto/tls.Config.GetClientCertificate.
+type Provider func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
+
+// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable.
+var errSourceUnavailable = errors.New("certificate source is unavailable")
+
+// DefaultProvider returns a certificate source that first tries the workload
+// X.509 certificate provider. If that is not available, it falls back to the
+// EnterpriseCertificateProxy provider, and then to the legacy SecureConnect
+// provider.
+//
+// If none of these sources is available (due to missing configurations), a nil
+// Provider and a nil error are returned to indicate that a default certificate
+// source is unavailable.
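+//
+// As a rough usage sketch (illustrative only; callers must handle the
+// nil-provider, nil-error case themselves):
+//
+//	provider, err := DefaultProvider()
+//	if err != nil {
+//		// an existing source failed to initialize (ex. corrupt metadata file)
+//	}
+//	if provider != nil {
+//		cfg := &tls.Config{GetClientCertificate: provider}
+//		_ = cfg // wire cfg into an mTLS-capable transport
+//	}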
+func DefaultProvider() (Provider, error) {
+	defaultCert.once.Do(func() {
+		defaultCert.provider, defaultCert.err = NewWorkloadX509CertProvider("")
+		if errors.Is(defaultCert.err, errSourceUnavailable) {
+			defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("")
+			if errors.Is(defaultCert.err, errSourceUnavailable) {
+				defaultCert.provider, defaultCert.err = NewSecureConnectProvider("")
+				if errors.Is(defaultCert.err, errSourceUnavailable) {
+					defaultCert.provider, defaultCert.err = nil, nil
+				}
+			}
+		}
+	})
+	return defaultCert.provider, defaultCert.err
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
new file mode 100644
index 0000000000..3665159161
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
@@ -0,0 +1,56 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cert
+
+import (
+	"crypto/tls"
+	"errors"
+
+	"github.com/googleapis/enterprise-certificate-proxy/client"
+)
+
+type ecpSource struct {
+	key *client.Key
+}
+
+// NewEnterpriseCertificateProxyProvider creates a certificate source
+// using the Enterprise Certificate Proxy client, which delegates
+// certificate-related operations to an OS-specific "signer binary"
+// that communicates with the native keystore (ex. keychain on macOS).
+//
+// The configFilePath points to a config file containing relevant parameters
+// such as the certificate issuer and the location of the signer binary.
+// If configFilePath is empty, the client will attempt to load the config from
+// a well-known gcloud location.
+func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) {
+	key, err := client.Cred(configFilePath)
+	if err != nil {
+		if errors.Is(err, client.ErrCredUnavailable) {
+			return nil, errSourceUnavailable
+		}
+		return nil, err
+	}
+
+	return (&ecpSource{
+		key: key,
+	}).getClientCertificate, nil
+}
+
+func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+	var cert tls.Certificate
+	cert.PrivateKey = s.key
+	cert.Certificate = s.key.CertificateChain()
+	return &cert, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
new file mode 100644
index 0000000000..738cb21618
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
@@ -0,0 +1,124 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "os/user" + "path/filepath" + "sync" + "time" +) + +const ( + metadataPath = ".secureConnect" + metadataFile = "context_aware_metadata.json" +) + +type secureConnectSource struct { + metadata secureConnectMetadata + + // Cache the cert to avoid executing helper command repeatedly. + cachedCertMutex sync.Mutex + cachedCert *tls.Certificate +} + +type secureConnectMetadata struct { + Cmd []string `json:"cert_provider_command"` +} + +// NewSecureConnectProvider creates a certificate source using +// the Secure Connect Helper and its associated metadata file. +// +// The configFilePath points to the location of the context aware metadata file. +// If configFilePath is empty, use the default context aware metadata location. +func NewSecureConnectProvider(configFilePath string) (Provider, error) { + if configFilePath == "" { + user, err := user.Current() + if err != nil { + // Error locating the default config means Secure Connect is not supported. + return nil, errSourceUnavailable + } + configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) + } + + file, err := os.ReadFile(configFilePath) + if err != nil { + // Config file missing means Secure Connect is not supported. + // There are non-os.ErrNotExist errors that may be returned. + // (e.g. if the home directory is /dev/null, *nix systems will + // return ENOTDIR instead of ENOENT) + return nil, errSourceUnavailable + } + + var metadata secureConnectMetadata + if err := json.Unmarshal(file, &metadata); err != nil { + return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err) + } + if err := validateMetadata(metadata); err != nil { + return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err) + } + return (&secureConnectSource{ + metadata: metadata, + }).getClientCertificate, nil +} + +func validateMetadata(metadata secureConnectMetadata) error { + if len(metadata.Cmd) == 0 { + return errors.New("empty cert_provider_command") + } + return nil +} + +func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + s.cachedCertMutex.Lock() + defer s.cachedCertMutex.Unlock() + if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) { + return s.cachedCert, nil + } + // Expand OS environment variables in the cert provider command such as "$HOME". + for i := 0; i < len(s.metadata.Cmd); i++ { + s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i]) + } + command := s.metadata.Cmd + data, err := exec.Command(command[0], command[1:]...).Output() + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(data, data) + if err != nil { + return nil, err + } + s.cachedCert = &cert + return &cert, nil +} + +// isCertificateExpired returns true if the given cert is expired or invalid. 
+func isCertificateExpired(cert *tls.Certificate) bool { + if len(cert.Certificate) == 0 { + return true + } + parsed, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return true + } + return time.Now().After(parsed.NotAfter) +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go new file mode 100644 index 0000000000..e8675bf824 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -0,0 +1,117 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "encoding/json" + "errors" + "io" + "os" + + "github.com/googleapis/enterprise-certificate-proxy/client/util" +) + +type certConfigs struct { + Workload *workloadSource `json:"workload"` +} + +type workloadSource struct { + CertPath string `json:"cert_path"` + KeyPath string `json:"key_path"` +} + +type certificateConfig struct { + CertConfigs certConfigs `json:"cert_configs"` +} + +// NewWorkloadX509CertProvider creates a certificate source +// that reads a certificate and private key file from the local file system. +// This is intended to be used for workload identity federation. +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate and key file paths. +// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location. +func NewWorkloadX509CertProvider(configFilePath string) (Provider, error) { + if configFilePath == "" { + envFilePath := util.GetConfigFilePathFromEnv() + if envFilePath != "" { + configFilePath = envFilePath + } else { + configFilePath = util.GetDefaultConfigFilePath() + } + } + + certFile, keyFile, err := getCertAndKeyFiles(configFilePath) + if err != nil { + return nil, err + } + + source := &workloadSource{ + CertPath: certFile, + KeyPath: keyFile, + } + return source.getClientCertificate, nil +} + +// getClientCertificate attempts to load the certificate and key from the files specified in the +// certificate config. +func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + cert, err := tls.LoadX509KeyPair(s.CertPath, s.KeyPath) + if err != nil { + return nil, err + } + return &cert, nil +} + +// getCertAndKeyFiles attempts to read the provided config file and return the certificate and private +// key file paths. 
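+//
+// Going by the JSON tags on the types above, a minimal certificate config
+// file is expected to look roughly like this (paths are illustrative
+// placeholders, not defaults):
+//
+//	{
+//	  "cert_configs": {
+//	    "workload": {
+//	      "cert_path": "/path/to/workload/cert.pem",
+//	      "key_path": "/path/to/workload/key.pem"
+//	    }
+//	  }
+//	}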
+func getCertAndKeyFiles(configFilePath string) (string, string, error) {
+	jsonFile, err := os.Open(configFilePath)
+	if err != nil {
+		if errors.Is(err, os.ErrNotExist) {
+			return "", "", errSourceUnavailable
+		}
+		return "", "", err
+	}
+	// Close the config file once it has been read.
+	defer jsonFile.Close()
+
+	byteValue, err := io.ReadAll(jsonFile)
+	if err != nil {
+		return "", "", err
+	}
+
+	var config certificateConfig
+	if err := json.Unmarshal(byteValue, &config); err != nil {
+		return "", "", err
+	}
+
+	if config.CertConfigs.Workload == nil {
+		return "", "", errSourceUnavailable
+	}
+
+	certFile := config.CertConfigs.Workload.CertPath
+	keyFile := config.CertConfigs.Workload.KeyPath
+
+	if certFile == "" {
+		return "", "", errors.New("certificate configuration is missing the certificate file location")
+	}
+
+	if keyFile == "" {
+		return "", "", errors.New("certificate configuration is missing the key file location")
+	}
+
+	return certFile, keyFile, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
new file mode 100644
index 0000000000..37894bfcd0
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
@@ -0,0 +1,134 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"log"
+	"os"
+	"strconv"
+	"sync"
+
+	"cloud.google.com/go/auth/internal/transport/cert"
+	"cloud.google.com/go/compute/metadata"
+)
+
+const (
+	configEndpointSuffix = "instance/platform-security/auto-mtls-configuration"
+)
+
+var (
+	mtlsConfiguration *mtlsConfig
+
+	mtlsOnce sync.Once
+)
+
+// GetS2AAddress returns the S2A address to be reached via plaintext connection.
+// Returns an empty string if not set or invalid.
+func GetS2AAddress() string {
+	getMetadataMTLSAutoConfig()
+	if !mtlsConfiguration.valid() {
+		return ""
+	}
+	return mtlsConfiguration.S2A.PlaintextAddress
+}
+
+// GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection.
+// Returns an empty string if not set or invalid.
+func GetMTLSS2AAddress() string {
+	getMetadataMTLSAutoConfig()
+	if !mtlsConfiguration.valid() {
+		return ""
+	}
+	return mtlsConfiguration.S2A.MTLSAddress
+}
+
+// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs.
+type mtlsConfig struct {
+	S2A *s2aAddresses `json:"s2a"`
+}
+
+func (c *mtlsConfig) valid() bool {
+	return c != nil && c.S2A != nil
+}
+
+// s2aAddresses contains the plaintext and/or MTLS S2A addresses.
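+//
+// Judging by the JSON tags below, the MDS auto-mTLS configuration parsed by
+// queryConfig has roughly this shape (addresses are illustrative placeholders):
+//
+//	{"s2a": {"plaintext_address": "host:port", "mtls_address": "host:port"}}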
+type s2aAddresses struct {
+	// PlaintextAddress is the plaintext address to reach S2A.
+	PlaintextAddress string `json:"plaintext_address"`
+	// MTLSAddress is the MTLS address to reach S2A.
+	MTLSAddress string `json:"mtls_address"`
+}
+
+func getMetadataMTLSAutoConfig() {
+	var err error
+	mtlsOnce.Do(func() {
+		mtlsConfiguration, err = queryConfig()
+		if err != nil {
+			log.Printf("Getting MTLS config failed: %v", err)
+		}
+	})
+}
+
+var httpGetMetadataMTLSConfig = func() (string, error) {
+	return metadata.GetWithContext(context.Background(), configEndpointSuffix)
+}
+
+func queryConfig() (*mtlsConfig, error) {
+	resp, err := httpGetMetadataMTLSConfig()
+	if err != nil {
+		return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err)
+	}
+	var config mtlsConfig
+	err = json.Unmarshal([]byte(resp), &config)
+	if err != nil {
+		return nil, fmt.Errorf("unmarshalling MTLS config from MDS endpoint failed: %w", err)
+	}
+	if config.S2A == nil {
+		return nil, fmt.Errorf("returned MTLS config from MDS endpoint is invalid: %v", config)
+	}
+	return &config, nil
+}
+
+func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool {
+	// If client cert is found, use that over S2A.
+	if clientCertSource != nil {
+		return false
+	}
+	// If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A.
+	if !isGoogleS2AEnabled() {
+		return false
+	}
+	// If DefaultMTLSEndpoint is not set or there is an endpoint override, skip S2A.
+	if opts.DefaultMTLSEndpoint == "" || opts.Endpoint != "" {
+		return false
+	}
+	// If a custom HTTP client is provided, skip S2A.
+	if opts.Client != nil {
+		return false
+	}
+	// If directPath is enabled, skip S2A.
+	return !opts.EnableDirectPath && !opts.EnableDirectPathXds
+}
+
+func isGoogleS2AEnabled() bool {
+	b, err := strconv.ParseBool(os.Getenv(googleAPIUseS2AEnv))
+	if err != nil {
+		return false
+	}
+	return b
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go
new file mode 100644
index 0000000000..cc586ec5b1
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go
@@ -0,0 +1,105 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport provides internal helpers for the two transport packages
+// (grpctransport and httptransport).
+package transport
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net"
+	"net/http"
+	"time"
+
+	"cloud.google.com/go/auth/credentials"
+)
+
+// CloneDetectOptions clones a user-set detect option into some new memory that
+// we can internally manipulate before sending onto the detect package.
+func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOptions {
+	if oldDo == nil {
+		// it is valid for users not to set this, but we will need to
+		// default some options for them in this case, so return some
+		// initialized memory to work with.
+		return &credentials.DetectOptions{}
+	}
+	newDo := &credentials.DetectOptions{
+		// Simple types
+		Audience:          oldDo.Audience,
+		Subject:           oldDo.Subject,
+		EarlyTokenRefresh: oldDo.EarlyTokenRefresh,
+		TokenURL:          oldDo.TokenURL,
+		STSAudience:       oldDo.STSAudience,
+		CredentialsFile:   oldDo.CredentialsFile,
+		UseSelfSignedJWT:  oldDo.UseSelfSignedJWT,
+		UniverseDomain:    oldDo.UniverseDomain,
+
+		// These fields are pointer types that we just want to use exactly
+		// as the user set them; copy the ref
+		Client:             oldDo.Client,
+		AuthHandlerOptions: oldDo.AuthHandlerOptions,
+	}
+
+	// Smartly size this memory and copy below.
+	if len(oldDo.CredentialsJSON) > 0 {
+		newDo.CredentialsJSON = make([]byte, len(oldDo.CredentialsJSON))
+		copy(newDo.CredentialsJSON, oldDo.CredentialsJSON)
+	}
+	if len(oldDo.Scopes) > 0 {
+		newDo.Scopes = make([]string, len(oldDo.Scopes))
+		copy(newDo.Scopes, oldDo.Scopes)
+	}
+
+	return newDo
+}
+
+// ValidateUniverseDomain verifies that the universe domain configured for the
+// client matches the universe domain configured for the credentials.
+func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain string) error {
+	if clientUniverseDomain != credentialsUniverseDomain {
+		return fmt.Errorf(
+			"the configured universe domain (%q) does not match the universe "+
+				"domain found in the credentials (%q). If you haven't configured "+
+				"the universe domain explicitly, \"googleapis.com\" is the default",
+			clientUniverseDomain,
+			credentialsUniverseDomain)
+	}
+	return nil
+}
+
+// DefaultHTTPClientWithTLS constructs an HTTP client using the provided tlsConfig, to support mTLS.
+func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client {
+	trans := BaseTransport()
+	trans.TLSClientConfig = tlsConfig
+	return &http.Client{Transport: trans}
+}
+
+// BaseTransport returns a default [http.Transport] which can be used if
+// [http.DefaultTransport] has been overwritten.
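+//
+// A minimal usage sketch (the TLS config here is an illustrative assumption,
+// not a package default):
+//
+//	tr := BaseTransport()
+//	tr.TLSClientConfig = &tls.Config{MinVersion: tls.VersionTLS12}
+//	client := &http.Client{Transport: tr}
+//	_ = client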
+func BaseTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md new file mode 100644 index 0000000000..7faf6e0c98 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -0,0 +1,54 @@ +# Changelog + +## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.2...auth/oauth2adapt/v0.2.3) (2024-07-10) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.1...auth/oauth2adapt/v0.2.2) (2024-04-23) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4)) + +## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.0...auth/oauth2adapt/v0.2.1) (2024-04-18) + + +### Bug Fixes + +* **auth/oauth2adapt:** Adapt Token Types to be translated ([#9801](https://github.com/googleapis/google-cloud-go/issues/9801)) ([70f4115](https://github.com/googleapis/google-cloud-go/commit/70f411555ebbf2b71e6d425cc8d2030644c6b438)), refs [#9800](https://github.com/googleapis/google-cloud-go/issues/9800) + +## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.1.0...auth/oauth2adapt/v0.2.0) (2024-04-16) + + +### Features + +* **auth/oauth2adapt:** Add helpers for working with credentials types ([#9694](https://github.com/googleapis/google-cloud-go/issues/9694)) ([cf33b55](https://github.com/googleapis/google-cloud-go/commit/cf33b5514423a2ac5c2a323a1cd99aac34fd4233)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + +## 0.1.0 (2023-10-19) + + +### Features + +* **auth/oauth2adapt:** Adds a new module to translate types ([#8595](https://github.com/googleapis/google-cloud-go/issues/8595)) ([6933c5a](https://github.com/googleapis/google-cloud-go/commit/6933c5a0c1fc8e58cbfff8bbca439d671b94672f)) +* **auth/oauth2adapt:** Fixup deps for release ([#8747](https://github.com/googleapis/google-cloud-go/issues/8747)) ([749d243](https://github.com/googleapis/google-cloud-go/commit/749d243862b025a6487a4d2d339219889b4cfe70)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE b/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ 
b/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
new file mode 100644
index 0000000000..9835ac571c
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
@@ -0,0 +1,164 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package oauth2adapt helps convert types used in [cloud.google.com/go/auth]
+// and [golang.org/x/oauth2].
+package oauth2adapt
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+
+	"cloud.google.com/go/auth"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+)
+
+// TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource]
+// into a [cloud.google.com/go/auth.TokenProvider].
+func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider {
+	return &tokenProviderAdapter{ts: ts}
+}
+
+type tokenProviderAdapter struct {
+	ts oauth2.TokenSource
+}
+
+// Token fulfills the [cloud.google.com/go/auth.TokenProvider] interface. It
+// is a light wrapper around the underlying TokenSource.
+func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) {
+	tok, err := tp.ts.Token()
+	if err != nil {
+		var err2 *oauth2.RetrieveError
+		if ok := errors.As(err, &err2); ok {
+			return nil, AuthErrorFromRetrieveError(err2)
+		}
+		return nil, err
+	}
+	return &auth.Token{
+		Value:  tok.AccessToken,
+		Type:   tok.Type(),
+		Expiry: tok.Expiry,
+	}, nil
+}
+
+// TokenSourceFromTokenProvider converts any
+// [cloud.google.com/go/auth.TokenProvider] into a
+// [golang.org/x/oauth2.TokenSource].
+func TokenSourceFromTokenProvider(tp auth.TokenProvider) oauth2.TokenSource {
+	return &tokenSourceAdapter{tp: tp}
+}
+
+type tokenSourceAdapter struct {
+	tp auth.TokenProvider
+}
+
+// Token fulfills the [golang.org/x/oauth2.TokenSource] interface. It
+// is a light wrapper around the underlying TokenProvider.
+func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) {
+	tok, err := ts.tp.Token(context.Background())
+	if err != nil {
+		var err2 *auth.Error
+		if ok := errors.As(err, &err2); ok {
+			return nil, AddRetrieveErrorToAuthError(err2)
+		}
+		return nil, err
+	}
+	return &oauth2.Token{
+		AccessToken: tok.Value,
+		TokenType:   tok.Type,
+		Expiry:      tok.Expiry,
+	}, nil
+}
+
+// AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials]
+// to a [cloud.google.com/go/auth.Credentials].
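+//
+// A hedged example of bridging the old and new credential types (error
+// handling elided; ctx and the scope string are illustrative assumptions):
+//
+//	oldCreds, _ := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform")
+//	newCreds := AuthCredentialsFromOauth2Credentials(oldCreds)
+//	_ = newCreds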
+func AuthCredentialsFromOauth2Credentials(creds *google.Credentials) *auth.Credentials { + if creds == nil { + return nil + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: TokenProviderFromTokenSource(creds.TokenSource), + JSON: creds.JSON, + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return creds.ProjectID, nil + }), + UniverseDomainProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return creds.GetUniverseDomain() + }), + }) +} + +// Oauth2CredentialsFromAuthCredentials converts a [cloud.google.com/go/auth.Credentials] +// to a [golang.org/x/oauth2/google.Credentials]. +func Oauth2CredentialsFromAuthCredentials(creds *auth.Credentials) *google.Credentials { + if creds == nil { + return nil + } + // Throw away errors as old credentials are not request aware. Also, no + // network requests are currently happening for this use case. + projectID, _ := creds.ProjectID(context.Background()) + + return &google.Credentials{ + TokenSource: TokenSourceFromTokenProvider(creds.TokenProvider), + ProjectID: projectID, + JSON: creds.JSON(), + UniverseDomainProvider: func() (string, error) { + return creds.UniverseDomain(context.Background()) + }, + } +} + +type oauth2Error struct { + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` +} + +// AddRetrieveErrorToAuthError returns the same error provided and adds a +// [golang.org/x/oauth2.RetrieveError] to the error chain by setting the `Err` field on the +// [cloud.google.com/go/auth.Error]. +func AddRetrieveErrorToAuthError(err *auth.Error) *auth.Error { + if err == nil { + return nil + } + e := &oauth2.RetrieveError{ + Response: err.Response, + Body: err.Body, + } + err.Err = e + if len(err.Body) > 0 { + var oErr oauth2Error + // ignore the error as it only fills in extra details + json.Unmarshal(err.Body, &oErr) + e.ErrorCode = oErr.ErrorCode + e.ErrorDescription = oErr.ErrorDescription + e.ErrorURI = oErr.ErrorURI + } + return err +} + +// AuthErrorFromRetrieveError returns an [cloud.google.com/go/auth.Error] that +// wraps the provided [golang.org/x/oauth2.RetrieveError]. +func AuthErrorFromRetrieveError(err *oauth2.RetrieveError) *auth.Error { + if err == nil { + return nil + } + return &auth.Error{ + Response: err.Response, + Body: err.Body, + Err: err, + } +} diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go new file mode 100644 index 0000000000..97a57f4694 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/threelegged.go @@ -0,0 +1,368 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package auth
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"mime"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"cloud.google.com/go/auth/internal"
+)
+
+// AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for
+// OAuth consent at the specified auth code URL and returns an auth code and
+// state upon approval.
+type AuthorizationHandler func(authCodeURL string) (code string, state string, err error)
+
+// Options3LO are the options for doing a 3-legged OAuth2 flow.
+type Options3LO struct {
+	// ClientID is the application's ID.
+	ClientID string
+	// ClientSecret is the application's secret. Not required if AuthHandlerOpts
+	// is set.
+	ClientSecret string
+	// AuthURL is the URL for authenticating.
+	AuthURL string
+	// TokenURL is the URL for retrieving a token.
+	TokenURL string
+	// AuthStyle describes how the client information is sent in the token request.
+	AuthStyle Style
+	// RefreshToken is the token used to refresh the credential. Not required
+	// if AuthHandlerOpts is set.
+	RefreshToken string
+	// RedirectURL is the URL to redirect users to. Optional.
+	RedirectURL string
+	// Scopes specifies requested permissions for the Token. Optional.
+	Scopes []string
+
+	// URLParams are the set of values to apply to the token exchange. Optional.
+	URLParams url.Values
+	// Client is the client to be used to make the underlying token requests.
+	// Optional.
+	Client *http.Client
+	// EarlyTokenExpiry is the time before the token expires that it should be
+	// refreshed. If not set the default value is 3 minutes and 45 seconds.
+	// Optional.
+	EarlyTokenExpiry time.Duration
+
+	// AuthHandlerOpts provides a set of options for doing a
+	// 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional.
+	AuthHandlerOpts *AuthorizationHandlerOptions
+}
+
+func (o *Options3LO) validate() error {
+	if o == nil {
+		return errors.New("auth: options must be provided")
+	}
+	if o.ClientID == "" {
+		return errors.New("auth: client ID must be provided")
+	}
+	if o.AuthHandlerOpts == nil && o.ClientSecret == "" {
+		return errors.New("auth: client secret must be provided")
+	}
+	if o.AuthURL == "" {
+		return errors.New("auth: auth URL must be provided")
+	}
+	if o.TokenURL == "" {
+		return errors.New("auth: token URL must be provided")
+	}
+	if o.AuthStyle == StyleUnknown {
+		return errors.New("auth: auth style must be provided")
+	}
+	if o.AuthHandlerOpts == nil && o.RefreshToken == "" {
+		return errors.New("auth: refresh token must be provided")
+	}
+	return nil
+}
+
+// PKCEOptions holds parameters to support PKCE.
+type PKCEOptions struct {
+	// Challenge is the un-padded, base64-url-encoded string of the hashed code verifier.
+	Challenge string
+	// ChallengeMethod is the method used to derive the challenge (ex. S256).
+	ChallengeMethod string
+	// Verifier is the original, un-hashed secret.
+	Verifier string
+}
+
+type tokenJSON struct {
+	AccessToken      string `json:"access_token"`
+	TokenType        string `json:"token_type"`
+	RefreshToken     string `json:"refresh_token"`
+	ExpiresIn        int    `json:"expires_in"`
+	// error fields
+	ErrorCode        string `json:"error"`
+	ErrorDescription string `json:"error_description"`
+	ErrorURI         string `json:"error_uri"`
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+	if v := e.ExpiresIn; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	return
+}
+
+func (o *Options3LO) client() *http.Client {
+	if o.Client != nil {
+		return o.Client
+	}
+	return internal.DefaultClient()
+}
+
+// authCodeURL returns a URL that points to an OAuth2 consent page.
+func (o *Options3LO) authCodeURL(state string, values url.Values) string {
+	var buf bytes.Buffer
+	buf.WriteString(o.AuthURL)
+	v := url.Values{
+		"response_type": {"code"},
+		"client_id":     {o.ClientID},
+	}
+	if o.RedirectURL != "" {
+		v.Set("redirect_uri", o.RedirectURL)
+	}
+	if len(o.Scopes) > 0 {
+		v.Set("scope", strings.Join(o.Scopes, " "))
+	}
+	if state != "" {
+		v.Set("state", state)
+	}
+	if o.AuthHandlerOpts != nil {
+		if o.AuthHandlerOpts.PKCEOpts != nil &&
+			o.AuthHandlerOpts.PKCEOpts.Challenge != "" {
+			v.Set(codeChallengeKey, o.AuthHandlerOpts.PKCEOpts.Challenge)
+		}
+		if o.AuthHandlerOpts.PKCEOpts != nil &&
+			o.AuthHandlerOpts.PKCEOpts.ChallengeMethod != "" {
+			v.Set(codeChallengeMethodKey, o.AuthHandlerOpts.PKCEOpts.ChallengeMethod)
+		}
+	}
+	for k := range values {
+		v.Set(k, values.Get(k))
+	}
+	if strings.Contains(o.AuthURL, "?") {
+		buf.WriteByte('&')
+	} else {
+		buf.WriteByte('?')
+	}
+	buf.WriteString(v.Encode())
+	return buf.String()
+}
+
+// New3LOTokenProvider returns a [TokenProvider] based on the 3-legged OAuth2
+// configuration. The returned TokenProvider caches and auto-refreshes tokens
+// by default.
+func New3LOTokenProvider(opts *Options3LO) (TokenProvider, error) {
+	if err := opts.validate(); err != nil {
+		return nil, err
+	}
+	if opts.AuthHandlerOpts != nil {
+		return new3LOTokenProviderWithAuthHandler(opts), nil
+	}
+	return NewCachedTokenProvider(&tokenProvider3LO{opts: opts, refreshToken: opts.RefreshToken, client: opts.client()}, &CachedTokenProviderOptions{
+		ExpireEarly: opts.EarlyTokenExpiry,
+	}), nil
+}
+
+// AuthorizationHandlerOptions provides a set of options to specify for doing a
+// 3-legged OAuth2 flow with a custom [AuthorizationHandler].
+type AuthorizationHandlerOptions struct {
+	// Handler specifies the handler used for the authorization
+	// part of the flow.
+	Handler AuthorizationHandler
+	// State is used to verify that the "state" is identical in the request and
+	// response before exchanging the auth code for OAuth2 token.
+	State string
+	// PKCEOpts allows setting configurations for PKCE. Optional.
+	PKCEOpts *PKCEOptions
+}
+
+func new3LOTokenProviderWithAuthHandler(opts *Options3LO) TokenProvider {
+	return NewCachedTokenProvider(&tokenProviderWithHandler{opts: opts, state: opts.AuthHandlerOpts.State}, &CachedTokenProviderOptions{
+		ExpireEarly: opts.EarlyTokenExpiry,
+	})
+}
+
+// exchange handles the final exchange portion of the 3LO flow. Returns a Token,
+// refreshToken, and error.
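+//
+// For orientation, a sketch of how the pieces above fit together (all values
+// are placeholders, not working credentials; ctx is assumed in scope):
+//
+//	tp, err := New3LOTokenProvider(&Options3LO{
+//		ClientID:     "client-id",
+//		ClientSecret: "client-secret",
+//		AuthURL:      "https://example.com/auth",
+//		TokenURL:     "https://example.com/token",
+//		AuthStyle:    StyleInParams,
+//		RefreshToken: "refresh-token",
+//	})
+//	if err != nil {
+//		// handle err
+//	}
+//	tok, err := tp.Token(ctx)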
+func (o *Options3LO) exchange(ctx context.Context, code string) (*Token, string, error) { + // Build request + v := url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + } + if o.RedirectURL != "" { + v.Set("redirect_uri", o.RedirectURL) + } + if o.AuthHandlerOpts != nil && + o.AuthHandlerOpts.PKCEOpts != nil && + o.AuthHandlerOpts.PKCEOpts.Verifier != "" { + v.Set(codeVerifierKey, o.AuthHandlerOpts.PKCEOpts.Verifier) + } + for k := range o.URLParams { + v.Set(k, o.URLParams.Get(k)) + } + return fetchToken(ctx, o, v) +} + +// This struct is not safe for concurrent access alone, but the way it is used +// in this package by wrapping it with a cachedTokenProvider makes it so. +type tokenProvider3LO struct { + opts *Options3LO + client *http.Client + refreshToken string +} + +func (tp *tokenProvider3LO) Token(ctx context.Context) (*Token, error) { + if tp.refreshToken == "" { + return nil, errors.New("auth: token expired and refresh token is not set") + } + v := url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tp.refreshToken}, + } + for k := range tp.opts.URLParams { + v.Set(k, tp.opts.URLParams.Get(k)) + } + + tk, rt, err := fetchToken(ctx, tp.opts, v) + if err != nil { + return nil, err + } + if tp.refreshToken != rt && rt != "" { + tp.refreshToken = rt + } + return tk, err +} + +type tokenProviderWithHandler struct { + opts *Options3LO + state string +} + +func (tp tokenProviderWithHandler) Token(ctx context.Context) (*Token, error) { + url := tp.opts.authCodeURL(tp.state, nil) + code, state, err := tp.opts.AuthHandlerOpts.Handler(url) + if err != nil { + return nil, err + } + if state != tp.state { + return nil, errors.New("auth: state mismatch in 3-legged-OAuth flow") + } + tok, _, err := tp.opts.exchange(ctx, code) + return tok, err +} + +// fetchToken returns a Token, refresh token, and/or an error. 
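+//
+// For reference, a typical successful JSON response parsed into tokenJSON has
+// this shape (values are placeholders):
+//
+//	{"access_token": "ya29....", "token_type": "Bearer", "refresh_token": "1//...", "expires_in": 3599}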
+func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, string, error) { + var refreshToken string + if o.AuthStyle == StyleInParams { + if o.ClientID != "" { + v.Set("client_id", o.ClientID) + } + if o.ClientSecret != "" { + v.Set("client_secret", o.ClientSecret) + } + } + req, err := http.NewRequestWithContext(ctx, "POST", o.TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, refreshToken, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if o.AuthStyle == StyleInHeader { + req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret)) + } + + // Make request + resp, body, err := internal.DoRequest(o.client(), req) + if err != nil { + return nil, refreshToken, err + } + failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299 + tokError := &Error{ + Response: resp, + Body: body, + } + + var token *Token + // errors ignored because of default switch on content + content, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string + vals, err := url.ParseQuery(string(body)) + if err != nil { + if failureStatus { + return nil, refreshToken, tokError + } + return nil, refreshToken, fmt.Errorf("auth: cannot parse response: %w", err) + } + tokError.code = vals.Get("error") + tokError.description = vals.Get("error_description") + tokError.uri = vals.Get("error_uri") + token = &Token{ + Value: vals.Get("access_token"), + Type: vals.Get("token_type"), + Metadata: make(map[string]interface{}, len(vals)), + } + for k, v := range vals { + token.Metadata[k] = v + } + refreshToken = vals.Get("refresh_token") + e := vals.Get("expires_in") + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + if failureStatus { + return nil, refreshToken, tokError + } + return nil, refreshToken, fmt.Errorf("auth: cannot parse json: %w", err) + } + tokError.code = tj.ErrorCode + tokError.description = tj.ErrorDescription + tokError.uri = tj.ErrorURI + token = &Token{ + Value: tj.AccessToken, + Type: tj.TokenType, + Expiry: tj.expiry(), + Metadata: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Metadata) // optional field, skip err check + refreshToken = tj.RefreshToken + } + // according to spec, servers should respond status 400 in error case + // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 + // but some unorthodox servers respond 200 in error case + if failureStatus || tokError.code != "" { + return nil, refreshToken, tokError + } + if token.Value == "" { + return nil, refreshToken, errors.New("auth: server response missing access_token") + } + return token, refreshToken, nil +} diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 967e060747..9594e1e279 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,24 @@ # Changes +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) + + +### Features + +* **compute/metadata:** Add sys check for windows OnGCE ([#10521](https://github.com/googleapis/google-cloud-go/issues/10521)) 
([3b9a830](https://github.com/googleapis/google-cloud-go/commit/3b9a83063960d2a2ac20beb47cc15818a68bd302))
+
+## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.3.0...compute/metadata/v0.4.0) (2024-07-01)
+
+
+### Features
+
+* **compute/metadata:** Add context for all functions/methods ([#10370](https://github.com/googleapis/google-cloud-go/issues/10370)) ([66b8efe](https://github.com/googleapis/google-cloud-go/commit/66b8efe7ad877e052b2987bb4475477e38c67bb3))
+
+
+### Documentation
+
+* **compute/metadata:** Update OnGCE description ([#10408](https://github.com/googleapis/google-cloud-go/issues/10408)) ([6a46dca](https://github.com/googleapis/google-cloud-go/commit/6a46dca4eae4f88ec6f88822e01e5bf8aeca787f))
+
 ## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15)
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
index f67e3c7eea..345080b729 100644
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -28,7 +28,6 @@ import (
 	"net/http"
 	"net/url"
 	"os"
-	"runtime"
 	"strings"
 	"sync"
 	"time"
@@ -88,16 +87,16 @@ func (suffix NotDefinedError) Error() string {
 	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
 }
 
-func (c *cachedValue) get(cl *Client) (v string, err error) {
+func (c *cachedValue) get(ctx context.Context, cl *Client) (v string, err error) {
 	defer c.mu.Unlock()
 	c.mu.Lock()
 	if c.v != "" {
 		return c.v, nil
 	}
 	if c.trim {
-		v, err = cl.getTrimmed(context.Background(), c.k)
+		v, err = cl.getTrimmed(ctx, c.k)
 	} else {
-		v, err = cl.GetWithContext(context.Background(), c.k)
+		v, err = cl.GetWithContext(ctx, c.k)
 	}
 	if err == nil {
 		c.v = v
@@ -110,7 +109,9 @@ var (
 	onGCE bool
 )
 
-// OnGCE reports whether this process is running on Google Compute Engine.
+// OnGCE reports whether this process is running on Google Compute Platforms.
+// NOTE: A true return from `OnGCE` does not guarantee that the metadata server
+// is accessible from this process or that all of the metadata is defined.
 func OnGCE() bool {
 	onGCEOnce.Do(initOnGCE)
 	return onGCE
@@ -188,21 +189,9 @@ func testOnGCE() bool {
 	return <-resc
 }
 
-// systemInfoSuggestsGCE reports whether the local system (without
-// doing network requests) suggests that we're running on GCE. If this
-// returns true, testOnGCE tries a bit harder to reach its metadata
-// server.
-func systemInfoSuggestsGCE() bool {
-	if runtime.GOOS != "linux" {
-		// We don't have any non-Linux clues available, at least yet.
-		return false
-	}
-	slurp, _ := os.ReadFile("/sys/class/dmi/id/product_name")
-	name := strings.TrimSpace(string(slurp))
-	return name == "Google" || name == "Google Compute Engine"
-}
-
 // Subscribe calls Client.SubscribeWithContext on the default client.
+//
+// Deprecated: Please use the context-aware variant [SubscribeWithContext].
 func Subscribe(suffix string, fn func(v string, ok bool) error) error {
 	return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) })
 }
@@ -225,55 +214,188 @@ func GetWithContext(ctx context.Context, suffix string) (string, error) {
 }
 
 // ProjectID returns the current instance's project ID string.
-func ProjectID() (string, error) { return defaultClient.ProjectID() }
+//
+// Deprecated: Please use the context-aware variant [ProjectIDWithContext].
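+//
+// Migration to the context-aware variant is mechanical, e.g.:
+//
+//	// before: id, err := metadata.ProjectID()
+//	id, err := metadata.ProjectIDWithContext(ctx)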
+func ProjectID() (string, error) {
+	return defaultClient.ProjectIDWithContext(context.Background())
+}
+
+// ProjectIDWithContext returns the current instance's project ID string.
+func ProjectIDWithContext(ctx context.Context) (string, error) {
+	return defaultClient.ProjectIDWithContext(ctx)
+}
 
 // NumericProjectID returns the current instance's numeric project ID.
-func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
+//
+// Deprecated: Please use the context aware variant [NumericProjectIDWithContext].
+func NumericProjectID() (string, error) {
+	return defaultClient.NumericProjectIDWithContext(context.Background())
+}
+
+// NumericProjectIDWithContext returns the current instance's numeric project ID.
+func NumericProjectIDWithContext(ctx context.Context) (string, error) {
+	return defaultClient.NumericProjectIDWithContext(ctx)
+}
 
 // InternalIP returns the instance's primary internal IP address.
-func InternalIP() (string, error) { return defaultClient.InternalIP() }
+//
+// Deprecated: Please use the context aware variant [InternalIPWithContext].
+func InternalIP() (string, error) {
+	return defaultClient.InternalIPWithContext(context.Background())
+}
+
+// InternalIPWithContext returns the instance's primary internal IP address.
+func InternalIPWithContext(ctx context.Context) (string, error) {
+	return defaultClient.InternalIPWithContext(ctx)
+}
 
 // ExternalIP returns the instance's primary external (public) IP address.
-func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
+//
+// Deprecated: Please use the context aware variant [ExternalIPWithContext].
+func ExternalIP() (string, error) {
+	return defaultClient.ExternalIPWithContext(context.Background())
+}
 
-// Email calls Client.Email on the default client.
-func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }
+// ExternalIPWithContext returns the instance's primary external (public) IP address.
+func ExternalIPWithContext(ctx context.Context) (string, error) {
+	return defaultClient.ExternalIPWithContext(ctx)
+}
+
+// Email calls Client.EmailWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [EmailWithContext].
+func Email(serviceAccount string) (string, error) {
+	return defaultClient.EmailWithContext(context.Background(), serviceAccount)
+}
+
+// EmailWithContext calls Client.EmailWithContext on the default client.
+func EmailWithContext(ctx context.Context, serviceAccount string) (string, error) {
+	return defaultClient.EmailWithContext(ctx, serviceAccount)
+}
 
 // Hostname returns the instance's hostname. This will be of the form
 // "<instanceID>.c.<projID>.internal".
-func Hostname() (string, error) { return defaultClient.Hostname() }
+//
+// Deprecated: Please use the context aware variant [HostnameWithContext].
+func Hostname() (string, error) {
+	return defaultClient.HostnameWithContext(context.Background())
+}
+
+// HostnameWithContext returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func HostnameWithContext(ctx context.Context) (string, error) {
+	return defaultClient.HostnameWithContext(ctx)
+}
 
 // InstanceTags returns the list of user-defined instance tags,
 // assigned when initially creating a GCE instance.
-func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
+//
+// Deprecated: Please use the context aware variant [InstanceTagsWithContext].
+func InstanceTags() ([]string, error) {
+	return defaultClient.InstanceTagsWithContext(context.Background())
+}
+
+// InstanceTagsWithContext returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTagsWithContext(ctx context.Context) ([]string, error) {
+	return defaultClient.InstanceTagsWithContext(ctx)
+}
 
 // InstanceID returns the current VM's numeric instance ID.
-func InstanceID() (string, error) { return defaultClient.InstanceID() }
+//
+// Deprecated: Please use the context aware variant [InstanceIDWithContext].
+func InstanceID() (string, error) {
+	return defaultClient.InstanceIDWithContext(context.Background())
+}
+
+// InstanceIDWithContext returns the current VM's numeric instance ID.
+func InstanceIDWithContext(ctx context.Context) (string, error) {
+	return defaultClient.InstanceIDWithContext(ctx)
+}
 
 // InstanceName returns the current VM's instance ID string.
-func InstanceName() (string, error) { return defaultClient.InstanceName() }
+//
+// Deprecated: Please use the context aware variant [InstanceNameWithContext].
+func InstanceName() (string, error) {
+	return defaultClient.InstanceNameWithContext(context.Background())
+}
+
+// InstanceNameWithContext returns the current VM's instance ID string.
+func InstanceNameWithContext(ctx context.Context) (string, error) {
+	return defaultClient.InstanceNameWithContext(ctx)
+}
 
 // Zone returns the current VM's zone, such as "us-central1-b".
-func Zone() (string, error) { return defaultClient.Zone() }
+//
+// Deprecated: Please use the context aware variant [ZoneWithContext].
+func Zone() (string, error) {
+	return defaultClient.ZoneWithContext(context.Background())
+}
 
-// InstanceAttributes calls Client.InstanceAttributes on the default client.
-func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
+// ZoneWithContext returns the current VM's zone, such as "us-central1-b".
+func ZoneWithContext(ctx context.Context) (string, error) {
+	return defaultClient.ZoneWithContext(ctx)
+}
 
-// ProjectAttributes calls Client.ProjectAttributes on the default client.
-func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
+// InstanceAttributes calls Client.InstanceAttributesWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [InstanceAttributesWithContext].
+func InstanceAttributes() ([]string, error) {
+	return defaultClient.InstanceAttributesWithContext(context.Background())
+}
+
+// InstanceAttributesWithContext calls Client.InstanceAttributesWithContext on the default client.
+func InstanceAttributesWithContext(ctx context.Context) ([]string, error) {
+	return defaultClient.InstanceAttributesWithContext(ctx)
+}
 
-// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
+// ProjectAttributes calls Client.ProjectAttributesWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [ProjectAttributesWithContext].
+func ProjectAttributes() ([]string, error) {
+	return defaultClient.ProjectAttributesWithContext(context.Background())
+}
+
+// ProjectAttributesWithContext calls Client.ProjectAttributesWithContext on the default client.
+func ProjectAttributesWithContext(ctx context.Context) ([]string, error) {
+	return defaultClient.ProjectAttributesWithContext(ctx)
+}
+
+// InstanceAttributeValue calls Client.InstanceAttributeValueWithContext on the default client.
+// +// Deprecated: Please use the context aware variant [InstanceAttributeValueWithContext]. func InstanceAttributeValue(attr string) (string, error) { - return defaultClient.InstanceAttributeValue(attr) + return defaultClient.InstanceAttributeValueWithContext(context.Background(), attr) } -// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +// InstanceAttributeValueWithContext calls Client.InstanceAttributeValueWithContext on the default client. +func InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.InstanceAttributeValueWithContext(ctx, attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValueWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ProjectAttributeValueWithContext]. func ProjectAttributeValue(attr string) (string, error) { - return defaultClient.ProjectAttributeValue(attr) + return defaultClient.ProjectAttributeValueWithContext(context.Background(), attr) } -// Scopes calls Client.Scopes on the default client. -func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } +// ProjectAttributeValueWithContext calls Client.ProjectAttributeValueWithContext on the default client. +func ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.ProjectAttributeValueWithContext(ctx, attr) +} + +// Scopes calls Client.ScopesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ScopesWithContext]. +func Scopes(serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(context.Background(), serviceAccount) +} + +// ScopesWithContext calls Client.ScopesWithContext on the default client. +func ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(ctx, serviceAccount) +} func strsContains(ss []string, s string) bool { for _, v := range ss { @@ -296,7 +418,6 @@ func NewClient(c *http.Client) *Client { if c == nil { return defaultClient } - return &Client{hc: c} } @@ -381,6 +502,10 @@ func (c *Client) Get(suffix string) (string, error) { // // If the requested metadata is not defined, the returned error will // be of type NotDefinedError. +// +// NOTE: Without an extra deadline in the context this call can take in the +// worst case, with internal backoff retries, up to 15 seconds (e.g. when server +// is responding slowly). Pass context with additional timeouts when needed. func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) { val, _, err := c.getETag(ctx, suffix) return val, err @@ -392,8 +517,8 @@ func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err e return } -func (c *Client) lines(suffix string) ([]string, error) { - j, err := c.GetWithContext(context.Background(), suffix) +func (c *Client) lines(ctx context.Context, suffix string) ([]string, error) { + j, err := c.GetWithContext(ctx, suffix) if err != nil { return nil, err } @@ -405,45 +530,104 @@ func (c *Client) lines(suffix string) ([]string, error) { } // ProjectID returns the current instance's project ID string. -func (c *Client) ProjectID() (string, error) { return projID.get(c) } +// +// Deprecated: Please use the context aware variant [Client.ProjectIDWithContext]. 
+func (c *Client) ProjectID() (string, error) { return c.ProjectIDWithContext(context.Background()) } + +// ProjectIDWithContext returns the current instance's project ID string. +func (c *Client) ProjectIDWithContext(ctx context.Context) (string, error) { return projID.get(ctx, c) } // NumericProjectID returns the current instance's numeric project ID. -func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } +// +// Deprecated: Please use the context aware variant [Client.NumericProjectIDWithContext]. +func (c *Client) NumericProjectID() (string, error) { + return c.NumericProjectIDWithContext(context.Background()) +} + +// NumericProjectIDWithContext returns the current instance's numeric project ID. +func (c *Client) NumericProjectIDWithContext(ctx context.Context) (string, error) { + return projNum.get(ctx, c) +} // InstanceID returns the current VM's numeric instance ID. -func (c *Client) InstanceID() (string, error) { return instID.get(c) } +// +// Deprecated: Please use the context aware variant [Client.InstanceIDWithContext]. +func (c *Client) InstanceID() (string, error) { + return c.InstanceIDWithContext(context.Background()) +} + +// InstanceIDWithContext returns the current VM's numeric instance ID. +func (c *Client) InstanceIDWithContext(ctx context.Context) (string, error) { + return instID.get(ctx, c) +} // InternalIP returns the instance's primary internal IP address. +// +// Deprecated: Please use the context aware variant [Client.InternalIPWithContext]. func (c *Client) InternalIP() (string, error) { - return c.getTrimmed(context.Background(), "instance/network-interfaces/0/ip") + return c.InternalIPWithContext(context.Background()) +} + +// InternalIPWithContext returns the instance's primary internal IP address. +func (c *Client) InternalIPWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/network-interfaces/0/ip") } // Email returns the email address associated with the service account. -// The account may be empty or the string "default" to use the instance's -// main account. +// +// Deprecated: Please use the context aware variant [Client.EmailWithContext]. func (c *Client) Email(serviceAccount string) (string, error) { + return c.EmailWithContext(context.Background(), serviceAccount) +} + +// EmailWithContext returns the email address associated with the service account. +// The serviceAccount parameter default value (empty string or "default" value) +// will use the instance's main account. +func (c *Client) EmailWithContext(ctx context.Context, serviceAccount string) (string, error) { if serviceAccount == "" { serviceAccount = "default" } - return c.getTrimmed(context.Background(), "instance/service-accounts/"+serviceAccount+"/email") + return c.getTrimmed(ctx, "instance/service-accounts/"+serviceAccount+"/email") } // ExternalIP returns the instance's primary external (public) IP address. +// +// Deprecated: Please use the context aware variant [Client.ExternalIPWithContext]. func (c *Client) ExternalIP() (string, error) { - return c.getTrimmed(context.Background(), "instance/network-interfaces/0/access-configs/0/external-ip") + return c.ExternalIPWithContext(context.Background()) +} + +// ExternalIPWithContext returns the instance's primary external (public) IP address. +func (c *Client) ExternalIPWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/network-interfaces/0/access-configs/0/external-ip") } // Hostname returns the instance's hostname. 
This will be of the form
// "<instanceID>.c.<projID>.internal".
+//
+// Deprecated: Please use the context aware variant [Client.HostnameWithContext].
 func (c *Client) Hostname() (string, error) {
-	return c.getTrimmed(context.Background(), "instance/hostname")
+	return c.HostnameWithContext(context.Background())
 }
 
-// InstanceTags returns the list of user-defined instance tags,
-// assigned when initially creating a GCE instance.
+// HostnameWithContext returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func (c *Client) HostnameWithContext(ctx context.Context) (string, error) {
+	return c.getTrimmed(ctx, "instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags.
+//
+// Deprecated: Please use the context aware variant [Client.InstanceTagsWithContext].
 func (c *Client) InstanceTags() ([]string, error) {
+	return c.InstanceTagsWithContext(context.Background())
+}
+
+// InstanceTagsWithContext returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func (c *Client) InstanceTagsWithContext(ctx context.Context) ([]string, error) {
 	var s []string
-	j, err := c.GetWithContext(context.Background(), "instance/tags")
+	j, err := c.GetWithContext(ctx, "instance/tags")
 	if err != nil {
 		return nil, err
 	}
@@ -454,13 +638,27 @@ func (c *Client) InstanceTags() ([]string, error) {
 }
 
 // InstanceName returns the current VM's instance ID string.
+//
+// Deprecated: Please use the context aware variant [Client.InstanceNameWithContext].
 func (c *Client) InstanceName() (string, error) {
-	return c.getTrimmed(context.Background(), "instance/name")
+	return c.InstanceNameWithContext(context.Background())
+}
+
+// InstanceNameWithContext returns the current VM's instance ID string.
+func (c *Client) InstanceNameWithContext(ctx context.Context) (string, error) {
+	return c.getTrimmed(ctx, "instance/name")
 }
 
 // Zone returns the current VM's zone, such as "us-central1-b".
+//
+// Deprecated: Please use the context aware variant [Client.ZoneWithContext].
 func (c *Client) Zone() (string, error) {
-	zone, err := c.getTrimmed(context.Background(), "instance/zone")
+	return c.ZoneWithContext(context.Background())
+}
+
+// ZoneWithContext returns the current VM's zone, such as "us-central1-b".
+func (c *Client) ZoneWithContext(ctx context.Context) (string, error) {
+	zone, err := c.getTrimmed(ctx, "instance/zone")
+	// zone is of the form "projects/<projNum>/zones/<zoneName>".
 	if err != nil {
 		return "", err
 	}
@@ -471,12 +669,34 @@ func (c *Client) Zone() (string, error) {
 // InstanceAttributes returns the list of user-defined attributes,
 // assigned when initially creating a GCE VM instance. The value of an
 // attribute can be obtained with InstanceAttributeValue.
-func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
+//
+// Deprecated: Please use the context aware variant [Client.InstanceAttributesWithContext].
+func (c *Client) InstanceAttributes() ([]string, error) {
+	return c.InstanceAttributesWithContext(context.Background())
+}
+
+// InstanceAttributesWithContext returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func (c *Client) InstanceAttributesWithContext(ctx context.Context) ([]string, error) {
+	return c.lines(ctx, "instance/attributes/")
+}
 
 // ProjectAttributes returns the list of user-defined attributes
 // applying to the project as a whole, not just this VM.
The value of // an attribute can be obtained with ProjectAttributeValue. -func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } +// +// Deprecated: Please use the context aware variant [Client.ProjectAttributesWithContext]. +func (c *Client) ProjectAttributes() ([]string, error) { + return c.ProjectAttributesWithContext(context.Background()) +} + +// ProjectAttributesWithContext returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributesWithContext(ctx context.Context) ([]string, error) { + return c.lines(ctx, "project/attributes/") +} // InstanceAttributeValue returns the value of the provided VM // instance attribute. @@ -486,8 +706,22 @@ func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project // // InstanceAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. +// +// Deprecated: Please use the context aware variant [Client.InstanceAttributeValueWithContext]. func (c *Client) InstanceAttributeValue(attr string) (string, error) { - return c.GetWithContext(context.Background(), "instance/attributes/"+attr) + return c.InstanceAttributeValueWithContext(context.Background(), attr) +} + +// InstanceAttributeValueWithContext returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return c.GetWithContext(ctx, "instance/attributes/"+attr) } // ProjectAttributeValue returns the value of the provided @@ -498,18 +732,41 @@ func (c *Client) InstanceAttributeValue(attr string) (string, error) { // // ProjectAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. +// +// Deprecated: Please use the context aware variant [Client.ProjectAttributeValueWithContext]. func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.GetWithContext(context.Background(), "project/attributes/"+attr) + return c.ProjectAttributeValueWithContext(context.Background(), attr) +} + +// ProjectAttributeValueWithContext returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return c.GetWithContext(ctx, "project/attributes/"+attr) } // Scopes returns the service account scopes for the given account. // The account may be empty or the string "default" to use the instance's // main account. +// +// Deprecated: Please use the context aware variant [Client.ScopesWithContext]. func (c *Client) Scopes(serviceAccount string) ([]string, error) { + return c.ScopesWithContext(context.Background(), serviceAccount) +} + +// ScopesWithContext returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. 
+func (c *Client) ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { if serviceAccount == "" { serviceAccount = "default" } - return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") + return c.lines(ctx, "instance/service-accounts/"+serviceAccount+"/scopes") } // Subscribe subscribes to a value from the metadata service. diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck.go b/vendor/cloud.google.com/go/compute/metadata/syscheck.go new file mode 100644 index 0000000000..e0704fa647 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck.go @@ -0,0 +1,26 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows && !linux + +package metadata + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + // We don't currently have checks for other GOOS + return false +} diff --git a/vendor/cuelang.org/go/cue/parser/fuzz.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go similarity index 60% rename from vendor/cuelang.org/go/cue/parser/fuzz.go rename to vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go index eceb1ae848..74689acbbb 100644 --- a/vendor/cuelang.org/go/cue/parser/fuzz.go +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go @@ -1,10 +1,10 @@ -// Copyright 2019 CUE Authors +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,14 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build gofuzz +//go:build linux -package parser +package metadata -func Fuzz(b []byte) int { - _, err := ParseFile("go-fuzz", b) - if err != nil { - return 0 - } - return 1 +import ( + "os" + "strings" +) + +func systemInfoSuggestsGCE() bool { + b, _ := os.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(b)) + return name == "Google" || name == "Google Compute Engine" } diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go new file mode 100644 index 0000000000..c0ce627872 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go @@ -0,0 +1,38 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows + +package metadata + +import ( + "strings" + + "golang.org/x/sys/windows/registry" +) + +func systemInfoSuggestsGCE() bool { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\HardwareConfig\Current`, registry.QUERY_VALUE) + if err != nil { + return false + } + defer k.Close() + + s, _, err := k.GetStringValue("SystemProductName") + if err != nil { + return false + } + s = strings.TrimSpace(s) + return strings.HasPrefix(s, "Google") +} diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/README.md b/vendor/cuelabs.dev/go/oci/ociregistry/README.md index 2ef7faf1dd..d6d7c1836a 100644 --- a/vendor/cuelabs.dev/go/oci/ociregistry/README.md +++ b/vendor/cuelabs.dev/go/oci/ociregistry/README.md @@ -10,9 +10,9 @@ and an HTTP server that implements the [OCI registry protocol](https://github.co The server currently passes the [conformance tests](https://pkg.go.dev/github.com/opencontainers/distribution-spec/conformance). -That said, it is in total flux at the moment! Do not use it as a dependency, as the API is changing hourly. - -The aim, however, is to provide an ergonomic interface for defining and layering +The aim is to provide an ergonomic interface for defining and layering OCI registry implementations. +Although the API is fairly stable, it's still in v0 currently, so incompatible changes can't be ruled out. + The code was originally derived from the [go-containerregistry](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/registry) registry, but has considerably diverged since then. diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/error.go b/vendor/cuelabs.dev/go/oci/ociregistry/error.go index 09368e01ca..f41c90d367 100644 --- a/vendor/cuelabs.dev/go/oci/ociregistry/error.go +++ b/vendor/cuelabs.dev/go/oci/ociregistry/error.go @@ -14,12 +14,110 @@ package ociregistry +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "strconv" + "strings" + "unicode" +) + +var errorStatuses = map[string]int{ + ErrBlobUnknown.Code(): http.StatusNotFound, + ErrBlobUploadInvalid.Code(): http.StatusRequestedRangeNotSatisfiable, + ErrBlobUploadUnknown.Code(): http.StatusNotFound, + ErrDigestInvalid.Code(): http.StatusBadRequest, + ErrManifestBlobUnknown.Code(): http.StatusNotFound, + ErrManifestInvalid.Code(): http.StatusBadRequest, + ErrManifestUnknown.Code(): http.StatusNotFound, + ErrNameInvalid.Code(): http.StatusBadRequest, + ErrNameUnknown.Code(): http.StatusNotFound, + ErrSizeInvalid.Code(): http.StatusBadRequest, + ErrUnauthorized.Code(): http.StatusUnauthorized, + ErrDenied.Code(): http.StatusForbidden, + ErrUnsupported.Code(): http.StatusBadRequest, + ErrTooManyRequests.Code(): http.StatusTooManyRequests, + ErrRangeInvalid.Code(): http.StatusRequestedRangeNotSatisfiable, +} + +// WireErrors is the JSON format used for error responses in +// the OCI HTTP API. It should always contain at least one +// error. +type WireErrors struct { + Errors []WireError `json:"errors"` +} + +// Unwrap allows [errors.Is] and [errors.As] to +// see the errors inside e. +func (e *WireErrors) Unwrap() []error { + // TODO we could do this only once. 
+ errs := make([]error, len(e.Errors)) + for i := range e.Errors { + errs[i] = &e.Errors[i] + } + return errs +} + +func (e *WireErrors) Error() string { + var buf strings.Builder + buf.WriteString(e.Errors[0].Error()) + for i := range e.Errors[1:] { + buf.WriteString("; ") + buf.WriteString(e.Errors[i+1].Error()) + } + return buf.String() +} + +// WireError holds a single error in an OCI HTTP response. +type WireError struct { + Code_ string `json:"code"` + Message string `json:"message,omitempty"` + // Detail_ holds the JSON detail for the message. + // It's assumed to be valid JSON if non-empty. + Detail_ json.RawMessage `json:"detail,omitempty"` +} + +// Is makes it possible for users to write `if errors.Is(err, ociregistry.ErrBlobUnknown)` +// even when the error hasn't exactly wrapped that error. +func (e *WireError) Is(err error) bool { + var rerr Error + return errors.As(err, &rerr) && rerr.Code() == e.Code() +} + +// Error implements the [error] interface. +func (e *WireError) Error() string { + buf := make([]byte, 0, 128) + buf = appendErrorCodePrefix(buf, e.Code_) + + if e.Message != "" { + buf = append(buf, ": "...) + buf = append(buf, e.Message...) + } + // TODO: it would be nice to have some way to surface the detail + // in a message, but it's awkward to do so here because we don't + // really want the detail to be duplicated in the "message" + // and "detail" fields. + return string(buf) +} + +// Code implements [Error.Code]. +func (e *WireError) Code() string { + return e.Code_ +} + +// Detail implements [Error.Detail]. +func (e *WireError) Detail() json.RawMessage { + return e.Detail_ +} + // NewError returns a new error with the given code, message and detail. -func NewError(msg string, code string, detail any) Error { - return ®istryError{ - code: code, - message: msg, - detail: detail, +func NewError(msg string, code string, detail json.RawMessage) Error { + return &WireError{ + Code_: code, + Message: msg, + Detail_: detail, } } @@ -31,12 +129,181 @@ type Error interface { // error.Error provides the error message. error - // Code returns the error code. See + // Code returns the error code. Code() string - // Detail returns any detail to be associated with the error; it should - // be JSON-marshable. - Detail() any + // Detail returns any detail associated with the error, + // or nil if there is none. + // The caller should not mutate the returned slice. + Detail() json.RawMessage +} + +// HTTPError is optionally implemented by an error when +// the error has originated from an HTTP request +// or might be returned from one. +type HTTPError interface { + error + + // StatusCode returns the HTTP status code of the response. + StatusCode() int + + // Response holds the HTTP response that caused the HTTPError to + // be created. It will return nil if the error was not created + // as a result of an HTTP response. + // + // The caller should not read the response body or otherwise + // change the response (mutation of errors is a Bad Thing). + // + // Use the ResponseBody method to obtain the body of the + // response if needed. + Response() *http.Response + + // ResponseBody returns the contents of the response body. It + // will return nil if the error was not created as a result of + // an HTTP response. + // + // The caller should not change or append to the returned data. + ResponseBody() []byte +} + +// NewHTTPError returns an error that wraps err to make an [HTTPError] +// that represents the given status code, response and response body. 
+// Both response and body may be nil. +// +// A shallow copy is made of the response. +func NewHTTPError(err error, statusCode int, response *http.Response, body []byte) HTTPError { + herr := &httpError{ + underlying: err, + statusCode: statusCode, + } + if response != nil { + herr.response = ref(*response) + herr.response.Body = nil + herr.body = body + } + return herr +} + +type httpError struct { + underlying error + statusCode int + response *http.Response + body []byte +} + +// Unwrap implements the [errors] Unwrap interface. +func (e *httpError) Unwrap() error { + return e.underlying +} + +// Is makes it possible for users to write `if errors.Is(err, ociregistry.ErrRangeInvalid)` +// even when the error hasn't exactly wrapped that error. +func (e *httpError) Is(err error) bool { + switch e.statusCode { + case http.StatusRequestedRangeNotSatisfiable: + return err == ErrRangeInvalid + } + return false +} + +// Error implements [error.Error]. +func (e *httpError) Error() string { + buf := make([]byte, 0, 128) + buf = appendHTTPStatusPrefix(buf, e.statusCode) + if e.underlying != nil { + buf = append(buf, ": "...) + buf = append(buf, e.underlying.Error()...) + } + // TODO if underlying is nil, include some portion of e.body in the message? + return string(buf) +} + +// StatusCode implements [HTTPError.StatusCode]. +func (e *httpError) StatusCode() int { + return e.statusCode +} + +// Response implements [HTTPError.Response]. +func (e *httpError) Response() *http.Response { + return e.response +} + +// ResponseBody implements [HTTPError.ResponseBody]. +func (e *httpError) ResponseBody() []byte { + return e.body +} + +// MarshalError marshals the given error as JSON according +// to the OCI distribution specification. It also returns +// the associated HTTP status code, or [http.StatusInternalServerError] +// if no specific code can be found. +// +// If err is or wraps [Error], that code will be used for the "code" +// field in the marshaled error. +// +// If err wraps [HTTPError] and no HTTP status code is known +// for the error code, [HTTPError.StatusCode] will be used. +func MarshalError(err error) (errorBody []byte, httpStatus int) { + var e WireError + // TODO perhaps we should iterate through all the + // errors instead of just choosing one. + // See https://github.com/golang/go/issues/66455 + var ociErr Error + if errors.As(err, &ociErr) { + e.Code_ = ociErr.Code() + e.Detail_ = ociErr.Detail() + } + if e.Code_ == "" { + // This is contrary to spec, but it's what the Docker registry + // does, so it can't be too bad. + e.Code_ = "UNKNOWN" + } + // Use the HTTP status code from the error only when there isn't + // one implied from the error code. This means that the HTTP status + // is always consistent with the error code, but still allows a registry + // to choose custom HTTP status codes for other codes. + httpStatus = http.StatusInternalServerError + if status, ok := errorStatuses[e.Code_]; ok { + httpStatus = status + } else { + var httpErr HTTPError + if errors.As(err, &httpErr) { + httpStatus = httpErr.StatusCode() + } + } + // Prevent the message from containing a redundant + // error code prefix by stripping it before sending over the + // wire. This won't always work, but is enough to prevent + // adjacent stuttering of code prefixes when a client + // creates a WireError from an error response. 
+ e.Message = trimErrorCodePrefix(err, httpStatus, e.Code_) + data, err := json.Marshal(WireErrors{ + Errors: []WireError{e}, + }) + if err != nil { + panic(fmt.Errorf("cannot marshal error: %v", err)) + } + return data, httpStatus +} + +// trimErrorCodePrefix returns err's string +// with any prefix codes added by [HTTPError] +// or [WireError] removed. +func trimErrorCodePrefix(err error, httpStatus int, errorCode string) string { + msg := err.Error() + buf := make([]byte, 0, 128) + if httpStatus != 0 { + buf = appendHTTPStatusPrefix(buf, httpStatus) + buf = append(buf, ": "...) + msg = strings.TrimPrefix(msg, string(buf)) + } + if errorCode != "" { + buf = buf[:0] + buf = appendErrorCodePrefix(buf, errorCode) + buf = append(buf, ": "...) + msg = strings.TrimPrefix(msg, string(buf)) + } + return msg } // The following values represent the known error codes. @@ -66,20 +333,27 @@ var ( ErrRangeInvalid = NewError("invalid content range", "RANGE_INVALID", nil) ) -type registryError struct { - code string - message string - detail any +func appendHTTPStatusPrefix(buf []byte, statusCode int) []byte { + buf = strconv.AppendInt(buf, int64(statusCode), 10) + buf = append(buf, ' ') + buf = append(buf, http.StatusText(statusCode)...) + return buf } -func (e *registryError) Code() string { - return e.code -} - -func (e *registryError) Error() string { - return e.message +func appendErrorCodePrefix(buf []byte, code string) []byte { + if code == "" { + return append(buf, "(no code)"...) + } + for _, r := range code { + if r == '_' { + buf = append(buf, ' ') + } else { + buf = append(buf, string(unicode.ToLower(r))...) + } + } + return buf } -func (e *registryError) Detail() any { - return e.detail +func ref[T any](x T) *T { + return &x } diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/func.go b/vendor/cuelabs.dev/go/oci/ociregistry/func.go index e916ac8277..e3d44c52cb 100644 --- a/vendor/cuelabs.dev/go/oci/ociregistry/func.go +++ b/vendor/cuelabs.dev/go/oci/ociregistry/func.go @@ -20,6 +20,8 @@ import ( "io" ) +var _ Interface = (*Funcs)(nil) + // Funcs implements Interface by calling its member functions: there's one field // for every corresponding method of [Interface]. // @@ -52,9 +54,9 @@ type Funcs struct { DeleteBlob_ func(ctx context.Context, repo string, digest Digest) error DeleteManifest_ func(ctx context.Context, repo string, digest Digest) error DeleteTag_ func(ctx context.Context, repo string, name string) error - Repositories_ func(ctx context.Context) Iter[string] - Tags_ func(ctx context.Context, repo string) Iter[string] - Referrers_ func(ctx context.Context, repo string, digest Digest, artifactType string) Iter[Descriptor] + Repositories_ func(ctx context.Context, startAfter string) Seq[string] + Tags_ func(ctx context.Context, repo string, startAfter string) Seq[string] + Referrers_ func(ctx context.Context, repo string, digest Digest, artifactType string) Seq[Descriptor] } // This blesses Funcs as the canonical Interface implementation. 
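The hunk below migrates the remaining `Funcs` methods from `Iter[T]` to the new `Seq[T]` shape, a push-style sequence (a function invoked with a yield callback) that lines up with Go 1.23's `iter.Seq2[T, error]`. As a rough sketch of how the new signatures fit together, here is a read-only stub built on `Funcs`; the `package main` wrapper and the repository names are illustrative only, not part of this patch:

```go
package main

import (
	"context"
	"fmt"

	"cuelabs.dev/go/oci/ociregistry"
)

func main() {
	// Only Repositories_ is set; Funcs answers every other method
	// with an "operation unsupported" error (or error sequence).
	f := &ociregistry.Funcs{
		Repositories_: func(ctx context.Context, startAfter string) ociregistry.Seq[string] {
			// Static data for the sketch. A real implementation must
			// return names in lexical order and honor startAfter.
			return ociregistry.SliceSeq([]string{"alpha/app", "beta/app"})
		},
	}
	repos, err := ociregistry.All(f.Repositories(context.Background(), ""))
	fmt.Println(repos, err) // [alpha/app beta/app] <nil>
}
```

`SliceSeq`, `ErrorSeq` and `All` come from this package's reworked iter.go, which appears further below.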
@@ -172,23 +174,23 @@ func (f *Funcs) DeleteTag(ctx context.Context, repo string, name string) error { return f.newError(ctx, "DeleteTag", repo) } -func (f *Funcs) Repositories(ctx context.Context) Iter[string] { +func (f *Funcs) Repositories(ctx context.Context, startAfter string) Seq[string] { if f != nil && f.Repositories_ != nil { - return f.Repositories_(ctx) + return f.Repositories_(ctx, startAfter) } - return ErrorIter[string](f.newError(ctx, "Repositories", "")) + return ErrorSeq[string](f.newError(ctx, "Repositories", "")) } -func (f *Funcs) Tags(ctx context.Context, repo string) Iter[string] { +func (f *Funcs) Tags(ctx context.Context, repo string, startAfter string) Seq[string] { if f != nil && f.Tags_ != nil { - return f.Tags_(ctx, repo) + return f.Tags_(ctx, repo, startAfter) } - return ErrorIter[string](f.newError(ctx, "Tags", repo)) + return ErrorSeq[string](f.newError(ctx, "Tags", repo)) } -func (f *Funcs) Referrers(ctx context.Context, repo string, digest Digest, artifactType string) Iter[Descriptor] { +func (f *Funcs) Referrers(ctx context.Context, repo string, digest Digest, artifactType string) Seq[Descriptor] { if f != nil && f.Referrers_ != nil { return f.Referrers_(ctx, repo, digest, artifactType) } - return ErrorIter[Descriptor](f.newError(ctx, "Referrers", repo)) + return ErrorSeq[Descriptor](f.newError(ctx, "Referrers", repo)) } diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/interface.go b/vendor/cuelabs.dev/go/oci/ociregistry/interface.go index 1df3fd9e1d..f110e81702 100644 --- a/vendor/cuelabs.dev/go/oci/ociregistry/interface.go +++ b/vendor/cuelabs.dev/go/oci/ociregistry/interface.go @@ -210,20 +210,24 @@ type Deleter interface { // Lister defines registry operations that enumerate objects within the registry. // TODO support resumption from a given point. type Lister interface { - // Repositories returns an iterator that can be used to iterate over all the repositories - // in the registry. - Repositories(ctx context.Context) Iter[string] - - // Tags returns an iterator that can be used to iterate over all the tags - // in the given repository. - Tags(ctx context.Context, repo string) Iter[string] + // Repositories returns an iterator that can be used to iterate + // over all the repositories in the registry in lexical order. + // If startAfter is non-empty, the iteration starts lexically + // after, but not including, that repository. + Repositories(ctx context.Context, startAfter string) Seq[string] + + // Tags returns an iterator that can be used to iterate over all + // the tags in the given repository in lexical order. If + // startAfter is non-empty, the tags start lexically after, but + // not including that tag. + Tags(ctx context.Context, repo string, startAfter string) Seq[string] // Referrers returns an iterator that can be used to iterate over all // the manifests that have the given digest as their Subject. // If artifactType is non-zero, the results will be restricted to // only manifests with that type. // TODO is it possible to ask for multiple artifact types? - Referrers(ctx context.Context, repo string, digest Digest, artifactType string) Iter[Descriptor] + Referrers(ctx context.Context, repo string, digest Digest, artifactType string) Seq[Descriptor] } // BlobWriter provides a handle for uploading a blob to a registry. 
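With the `startAfter` parameters in place, listings become resumable without a separate pagination token: a caller can restart an enumeration from the last item it has seen. A hypothetical helper along those lines, assuming nothing beyond the `Lister` interface above and the package's `All` convenience function (the `example` package and the `tagsAfter` name are invented for illustration):

```go
package example

import (
	"context"

	"cuelabs.dev/go/oci/ociregistry"
)

// tagsAfter collects the tags of repo that sort lexically after last,
// resuming a previous listing rather than starting over. Passing
// last == "" lists every tag, which matches the behavior of the
// previous Tags(ctx, repo) signature.
func tagsAfter(ctx context.Context, r ociregistry.Lister, repo, last string) ([]string, error) {
	return ociregistry.All(r.Tags(ctx, repo, last))
}
```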
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/internal/ocirequest/create.go b/vendor/cuelabs.dev/go/oci/ociregistry/internal/ocirequest/create.go new file mode 100644 index 0000000000..3a1e9b4dd1 --- /dev/null +++ b/vendor/cuelabs.dev/go/oci/ociregistry/internal/ocirequest/create.go @@ -0,0 +1,111 @@ +// Copyright 2023 CUE Labs AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocirequest + +import ( + "encoding/base64" + "fmt" + "net/url" +) + +func (req *Request) Construct() (method string, ustr string, err error) { + method, ustr = req.construct() + u, err := url.Parse(ustr) + if err != nil { + return "", "", fmt.Errorf("invalid OCI request: %v", err) + } + if _, err := Parse(method, u); err != nil { + return "", "", fmt.Errorf("invalid OCI request: %v", err) + } + return method, ustr, nil +} + +func (req *Request) MustConstruct() (method string, ustr string) { + method, ustr, err := req.Construct() + if err != nil { + panic(err) + } + return method, ustr +} + +func (req *Request) construct() (method string, url string) { + switch req.Kind { + case ReqPing: + return "GET", "/v2/" + case ReqBlobGet: + return "GET", "/v2/" + req.Repo + "/blobs/" + req.Digest + case ReqBlobHead: + return "HEAD", "/v2/" + req.Repo + "/blobs/" + req.Digest + case ReqBlobDelete: + return "DELETE", "/v2/" + req.Repo + "/blobs/" + req.Digest + case ReqBlobStartUpload: + return "POST", "/v2/" + req.Repo + "/blobs/uploads/" + case ReqBlobUploadBlob: + return "POST", "/v2/" + req.Repo + "/blobs/uploads/?digest=" + req.Digest + case ReqBlobMount: + return "POST", "/v2/" + req.Repo + "/blobs/uploads/?mount=" + req.Digest + "&from=" + req.FromRepo + case ReqBlobUploadInfo: + // Note: this is specific to the ociserver implementation. + return "GET", req.uploadPath() + case ReqBlobUploadChunk: + // Note: this is specific to the ociserver implementation. + return "PATCH", req.uploadPath() + case ReqBlobCompleteUpload: + // Note: this is specific to the ociserver implementation. + // TODO this is bogus when the upload ID contains query parameters. 
+ return "PUT", req.uploadPath() + "?digest=" + req.Digest + case ReqManifestGet: + return "GET", "/v2/" + req.Repo + "/manifests/" + req.tagOrDigest() + case ReqManifestHead: + return "HEAD", "/v2/" + req.Repo + "/manifests/" + req.tagOrDigest() + case ReqManifestPut: + return "PUT", "/v2/" + req.Repo + "/manifests/" + req.tagOrDigest() + case ReqManifestDelete: + return "DELETE", "/v2/" + req.Repo + "/manifests/" + req.tagOrDigest() + case ReqTagsList: + return "GET", "/v2/" + req.Repo + "/tags/list" + req.listParams() + case ReqReferrersList: + return "GET", "/v2/" + req.Repo + "/referrers/" + req.Digest + case ReqCatalogList: + return "GET", "/v2/_catalog" + req.listParams() + default: + panic("invalid request kind") + } +} + +func (req *Request) uploadPath() string { + return "/v2/" + req.Repo + "/blobs/uploads/" + base64.RawURLEncoding.EncodeToString([]byte(req.UploadID)) +} + +func (req *Request) listParams() string { + q := make(url.Values) + if req.ListN >= 0 { + q.Set("n", fmt.Sprint(req.ListN)) + } + if req.ListLast != "" { + q.Set("last", req.ListLast) + } + if len(q) > 0 { + return "?" + q.Encode() + } + return "" +} + +func (req *Request) tagOrDigest() string { + if req.Tag != "" { + return req.Tag + } + return req.Digest +} diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/internal/ocirequest/request.go b/vendor/cuelabs.dev/go/oci/ociregistry/internal/ocirequest/request.go new file mode 100644 index 0000000000..1bb19b5817 --- /dev/null +++ b/vendor/cuelabs.dev/go/oci/ociregistry/internal/ocirequest/request.go @@ -0,0 +1,445 @@ +// Copyright 2023 CUE Labs AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocirequest + +import ( + "encoding/base64" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + "unicode/utf8" + + "cuelabs.dev/go/oci/ociregistry" +) + +// ParseError represents an error that can happen when parsing. +// The Err field holds one of the possible error values below. +type ParseError struct { + Err error +} + +func (e *ParseError) Error() string { + return e.Err.Error() +} + +func (e *ParseError) Unwrap() error { + return e.Err +} + +var ( + ErrNotFound = errors.New("page not found") + ErrBadlyFormedDigest = errors.New("badly formed digest") + ErrMethodNotAllowed = errors.New("method not allowed") + ErrBadRequest = errors.New("bad request") +) + +type Request struct { + Kind Kind + + // Repo holds the repository name. Valid for all request kinds + // except ReqCatalogList and ReqPing. + Repo string + + // Digest holds the digest being used in the request. + // Valid for: + // ReqBlobMount + // ReqBlobUploadBlob + // ReqBlobGet + // ReqBlobHead + // ReqBlobDelete + // ReqBlobCompleteUpload + // ReqReferrersList + // + // Valid for these manifest requests when they're referring to a digest + // rather than a tag: + // ReqManifestGet + // ReqManifestHead + // ReqManifestPut + // ReqManifestDelete + Digest string + + // Tag holds the tag being used in the request. 
Valid for
+	// these manifest requests when they're referring to a tag:
+	//	ReqManifestGet
+	//	ReqManifestHead
+	//	ReqManifestPut
+	//	ReqManifestDelete
+	Tag string
+
+	// FromRepo holds the repository name to mount from
+	// for ReqBlobMount.
+	FromRepo string
+
+	// UploadID holds the upload identifier as used for
+	// chunked uploads.
+	// Valid for:
+	//	ReqBlobUploadInfo
+	//	ReqBlobUploadChunk
+	UploadID string
+
+	// ListN holds the maximum count for listing.
+	// It's -1 to specify that all items should be returned.
+	//
+	// Valid for:
+	//	ReqTagsList
+	//	ReqCatalogList
+	//	ReqReferrersList
+	ListN int
+
+	// ListLast holds the item to start just after
+	// when listing.
+	//
+	// Valid for:
+	//	ReqTagsList
+	//	ReqCatalogList
+	//	ReqReferrersList
+	ListLast string
+}
+
+type Kind int
+
+const (
+	// end-1	GET	/v2/	200	404/401
+	ReqPing = Kind(iota)
+
+	// Blob-related endpoints
+
+	// end-2	GET	/v2/<name>/blobs/<digest>	200	404
+	ReqBlobGet
+
+	// end-2	HEAD	/v2/<name>/blobs/<digest>	200	404
+	ReqBlobHead
+
+	// end-10	DELETE	/v2/<name>/blobs/<digest>	202	404/405
+	ReqBlobDelete
+
+	// end-4a	POST	/v2/<name>/blobs/uploads/	202	404
+	ReqBlobStartUpload
+
+	// end-4b	POST	/v2/<name>/blobs/uploads/?digest=<digest>	201/202	404/400
+	ReqBlobUploadBlob
+
+	// end-11	POST	/v2/<name>/blobs/uploads/?mount=<digest>&from=<other_name>	201	404
+	ReqBlobMount
+
+	// end-13	GET	/v2/<name>/blobs/uploads/<reference>	204	404
+	// NOTE: despite being described in the distribution spec, this
+	// isn't really part of the OCI spec.
+	ReqBlobUploadInfo
+
+	// end-5	PATCH	/v2/<name>/blobs/uploads/<reference>	202	404/416
+	// NOTE: despite being described in the distribution spec, this
+	// isn't really part of the OCI spec.
+	ReqBlobUploadChunk
+
+	// end-6	PUT	/v2/<name>/blobs/uploads/<reference>?digest=<digest>	201	404/400
+	// NOTE: despite being described in the distribution spec, this
+	// isn't really part of the OCI spec.
+	ReqBlobCompleteUpload
+
+	// Manifest-related endpoints
+
+	// end-3	GET	/v2/<name>/manifests/<reference>	200	404
+	ReqManifestGet
+
+	// end-3	HEAD	/v2/<name>/manifests/<reference>	200	404
+	ReqManifestHead
+
+	// end-7	PUT	/v2/<name>/manifests/<reference>	201	404
+	ReqManifestPut
+
+	// end-9	DELETE	/v2/<name>/manifests/<reference>	202	404/400/405
+	ReqManifestDelete
+
+	// Tag-related endpoints
+
+	// end-8a	GET	/v2/<name>/tags/list	200	404
+	// end-8b	GET	/v2/<name>/tags/list?n=<integer>&last=<integer>	200	404
+	ReqTagsList
+
+	// Referrer-related endpoints
+
+	// end-12a	GET	/v2/<name>/referrers/<digest>	200	404/400
+	ReqReferrersList
+
+	// Catalog endpoints (out-of-spec)
+	//	GET	/v2/_catalog
+	ReqCatalogList
+)
+
+// Parse parses the given HTTP method and URL as an OCI registry request.
+// It understands the endpoints described in the [distribution spec].
+//
+// If it returns an error, it will be of type *ParseError.
+// +// [distribution spec]: https://github.com/opencontainers/distribution-spec/blob/main/spec.md#endpoints +func Parse(method string, u *url.URL) (*Request, error) { + req, err := parse(method, u) + if err != nil { + return nil, &ParseError{err} + } + return req, nil +} + +func parse(method string, u *url.URL) (*Request, error) { + path := u.Path + urlq, err := url.ParseQuery(u.RawQuery) + if err != nil { + return nil, err + } + + var rreq Request + if path == "/v2" || path == "/v2/" { + rreq.Kind = ReqPing + return &rreq, nil + } + path, ok := strings.CutPrefix(path, "/v2/") + if !ok { + return nil, ociregistry.NewError("unknown URL path", ociregistry.ErrNameUnknown.Code(), nil) + } + if path == "_catalog" { + if method != "GET" { + return nil, ErrMethodNotAllowed + } + rreq.Kind = ReqCatalogList + setListQueryParams(&rreq, urlq) + return &rreq, nil + } + uploadPath, ok := strings.CutSuffix(path, "/blobs/uploads/") + if !ok { + uploadPath, ok = strings.CutSuffix(path, "/blobs/uploads") + } + if ok { + rreq.Repo = uploadPath + if !ociregistry.IsValidRepoName(rreq.Repo) { + return nil, ociregistry.ErrNameInvalid + } + if method != "POST" { + return nil, ErrMethodNotAllowed + } + if d := urlq.Get("mount"); d != "" { + // end-11 + rreq.Digest = d + if !ociregistry.IsValidDigest(rreq.Digest) { + return nil, ociregistry.ErrDigestInvalid + } + rreq.FromRepo = urlq.Get("from") + if rreq.FromRepo == "" { + // There's no "from" argument so fall back to + // a regular chunked upload. + rreq.Kind = ReqBlobStartUpload + // TODO does the "mount" query argument actually take effect in some way? + rreq.Digest = "" + return &rreq, nil + } + if !ociregistry.IsValidRepoName(rreq.FromRepo) { + return nil, ociregistry.ErrNameInvalid + } + rreq.Kind = ReqBlobMount + return &rreq, nil + } + if d := urlq.Get("digest"); d != "" { + // end-4b + rreq.Digest = d + if !ociregistry.IsValidDigest(d) { + return nil, ErrBadlyFormedDigest + } + rreq.Kind = ReqBlobUploadBlob + return &rreq, nil + } + // end-4a + rreq.Kind = ReqBlobStartUpload + return &rreq, nil + } + path, last, ok := cutLast(path, "/") + if !ok { + return nil, ErrNotFound + } + path, lastButOne, ok := cutLast(path, "/") + if !ok { + return nil, ErrNotFound + } + switch lastButOne { + case "blobs": + rreq.Repo = path + if !ociregistry.IsValidDigest(last) { + return nil, ErrBadlyFormedDigest + } + if !ociregistry.IsValidRepoName(rreq.Repo) { + return nil, ociregistry.ErrNameInvalid + } + rreq.Digest = last + switch method { + case "GET": + rreq.Kind = ReqBlobGet + case "HEAD": + rreq.Kind = ReqBlobHead + case "DELETE": + rreq.Kind = ReqBlobDelete + default: + return nil, ErrMethodNotAllowed + } + return &rreq, nil + case "uploads": + // Note: this section is all specific to ociserver and + // isn't part of the OCI registry spec. 
+ repo, ok := strings.CutSuffix(path, "/blobs") + if !ok { + return nil, ErrNotFound + } + rreq.Repo = repo + if !ociregistry.IsValidRepoName(rreq.Repo) { + return nil, ociregistry.ErrNameInvalid + } + uploadID64 := last + if uploadID64 == "" { + return nil, ErrNotFound + } + uploadID, err := base64.RawURLEncoding.DecodeString(uploadID64) + if err != nil { + return nil, fmt.Errorf("invalid upload ID %q (cannot decode)", uploadID64) + } + if !utf8.Valid(uploadID) { + return nil, fmt.Errorf("upload ID %q decoded to invalid utf8", uploadID64) + } + rreq.UploadID = string(uploadID) + + switch method { + case "GET": + rreq.Kind = ReqBlobUploadInfo + case "PATCH": + rreq.Kind = ReqBlobUploadChunk + case "PUT": + rreq.Kind = ReqBlobCompleteUpload + rreq.Digest = urlq.Get("digest") + if !ociregistry.IsValidDigest(rreq.Digest) { + return nil, ErrBadlyFormedDigest + } + default: + return nil, ErrMethodNotAllowed + } + return &rreq, nil + case "manifests": + rreq.Repo = path + if !ociregistry.IsValidRepoName(rreq.Repo) { + return nil, ociregistry.ErrNameInvalid + } + switch { + case ociregistry.IsValidDigest(last): + rreq.Digest = last + case ociregistry.IsValidTag(last): + rreq.Tag = last + default: + return nil, ErrNotFound + } + switch method { + case "GET": + rreq.Kind = ReqManifestGet + case "HEAD": + rreq.Kind = ReqManifestHead + case "PUT": + rreq.Kind = ReqManifestPut + case "DELETE": + rreq.Kind = ReqManifestDelete + default: + return nil, ErrMethodNotAllowed + } + return &rreq, nil + + case "tags": + if last != "list" { + return nil, ErrNotFound + } + if err := setListQueryParams(&rreq, urlq); err != nil { + return nil, err + } + if method != "GET" { + return nil, ErrMethodNotAllowed + } + rreq.Repo = path + if !ociregistry.IsValidRepoName(rreq.Repo) { + return nil, ociregistry.ErrNameInvalid + } + rreq.Kind = ReqTagsList + return &rreq, nil + case "referrers": + if !ociregistry.IsValidDigest(last) { + return nil, ErrBadlyFormedDigest + } + if method != "GET" { + return nil, ErrMethodNotAllowed + } + rreq.Repo = path + if !ociregistry.IsValidRepoName(rreq.Repo) { + return nil, ociregistry.ErrNameInvalid + } + // TODO is there any kind of pagination for referrers? + // We'll set ListN to be future-proof. + rreq.ListN = -1 + rreq.Digest = last + rreq.Kind = ReqReferrersList + return &rreq, nil + } + return nil, ErrNotFound +} + +func setListQueryParams(rreq *Request, urlq url.Values) error { + rreq.ListN = -1 + if nstr := urlq.Get("n"); nstr != "" { + n, err := strconv.Atoi(nstr) + if err != nil { + return fmt.Errorf("n is not a valid integer: %w", ErrBadRequest) + } + rreq.ListN = n + } + rreq.ListLast = urlq.Get("last") + return nil +} + +func cutLast(s, sep string) (before, after string, found bool) { + if i := strings.LastIndex(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return "", s, false +} + +// ParseRange extracts the start and end offsets from a Content-Range string. +// The resulting start is inclusive and the end exclusive, to match Go convention, +// whereas Content-Range is inclusive on both ends. +func ParseRange(s string) (start, end int64, ok bool) { + p0s, p1s, ok := strings.Cut(s, "-") + if !ok { + return 0, 0, false + } + p0, err0 := strconv.ParseInt(p0s, 10, 64) + p1, err1 := strconv.ParseInt(p1s, 10, 64) + if p1 > 0 { + p1++ + } + return p0, p1, err0 == nil && err1 == nil +} + +// RangeString formats a pair of start and end offsets in the Content-Range form. 
+// The input start is inclusive and the end exclusive, to match Go convention, +// whereas Content-Range is inclusive on both ends. +func RangeString(start, end int64) string { + end-- + if end < 0 { + end = 0 + } + return fmt.Sprintf("%d-%d", start, end) +} diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/iter.go b/vendor/cuelabs.dev/go/oci/ociregistry/iter.go index 43e43763e6..a60d06a39b 100644 --- a/vendor/cuelabs.dev/go/oci/ociregistry/iter.go +++ b/vendor/cuelabs.dev/go/oci/ociregistry/iter.go @@ -14,65 +14,42 @@ package ociregistry -type Iter[T any] interface { - Close() - Next() (T, bool) - Error() error -} +// TODO(go1.23) when we can depend on Go 1.23, this should be: +// type Seq[T any] = iter.Seq2[T, error] + +// Seq defines the type of an iterator sequence returned from +// the iterator functions. In general, a non-nil +// error means that the item is the last in the sequence. +type Seq[T any] func(yield func(T, error) bool) -func All[T any](it Iter[T]) ([]T, error) { +func All[T any](it Seq[T]) (_ []T, _err error) { xs := []T{} - for { - x, ok := it.Next() - if !ok { - return xs, it.Error() + // TODO(go1.23) for x, err := range it + it(func(x T, err error) bool { + if err != nil { + _err = err + return false } xs = append(xs, x) - } -} - -type sliceIter[T any] struct { - i int - xs []T + return true + }) + return xs, _err } -func SliceIter[T any](xs []T) Iter[T] { - return &sliceIter[T]{ - xs: xs, - } -} - -func (it *sliceIter[T]) Close() {} - -func (it *sliceIter[T]) Next() (T, bool) { - if it.i >= len(it.xs) { - return *new(T), false +func SliceSeq[T any](xs []T) Seq[T] { + return func(yield func(T, error) bool) { + for _, x := range xs { + if !yield(x, nil) { + return + } + } } - x := it.xs[it.i] - it.i++ - return x, true } -func (it *sliceIter[T]) Error() error { - return nil -} - -// ErrorIter returns an iterator that has no +// ErrorSeq returns an iterator that has no // items and always returns the given error. -func ErrorIter[T any](err error) Iter[T] { - return errorIter[T]{err} -} - -type errorIter[T any] struct { - err error -} - -func (it errorIter[T]) Close() {} - -func (it errorIter[T]) Next() (T, bool) { - return *new(T), false -} - -func (it errorIter[T]) Error() error { - return it.err +func ErrorSeq[T any](err error) Seq[T] { + return func(yield func(T, error) bool) { + yield(*new(T), err) + } } diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/auth.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/auth.go new file mode 100644 index 0000000000..955356c278 --- /dev/null +++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/auth.go @@ -0,0 +1,519 @@ +package ociauth + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "slices" + "strings" + "sync" + "time" + + "cuelabs.dev/go/oci/ociregistry" +) + +// TODO decide on a good value for this. +const oauthClientID = "cuelabs-ociauth" + +var ErrNoAuth = fmt.Errorf("no authorization token available to add to request") + +// stdTransport implements [http.RoundTripper] by acquiring authorization tokens +// using the flows implemented +// by the usual docker clients. Note that this is _not_ documented as +// part of any official OCI spec. +// +// See https://distribution.github.io/distribution/spec/auth/token/ for an overview. +type stdTransport struct { + config Config + transport http.RoundTripper + mu sync.Mutex + registries map[string]*registry +} + +type StdTransportParams struct { + // Config represents the underlying configuration file information. 
+	// It is consulted for authorization information on the hosts
+	// to which the HTTP requests are made.
+	Config Config
+
+	// Transport is used to make the underlying HTTP requests.
+	// If it's nil, [http.DefaultTransport] will be used.
+	Transport http.RoundTripper
+}
+
+// NewStdTransport returns an [http.RoundTripper] implementation that
+// acquires authorization tokens using the flows implemented by the
+// usual docker clients. Note that this is _not_ documented as part of
+// any official OCI spec.
+//
+// See https://distribution.github.io/distribution/spec/auth/token/ for an overview.
+//
+// The RoundTrip method acquires authorization before invoking the
+// request. It may invoke the request more than once, and can
+// use [http.Request.GetBody] to reset the request body if it gets
+// consumed.
+//
+// It ensures that the authorization token used will have at least the
+// capability to execute operations in the required scope associated
+// with the request context (see [ContextWithRequestInfo]). Any other
+// auth scope inside the context (see [ContextWithScope]) may also be
+// taken into account when acquiring new tokens.
+func NewStdTransport(p StdTransportParams) http.RoundTripper {
+	if p.Config == nil {
+		p.Config = emptyConfig{}
+	}
+	if p.Transport == nil {
+		p.Transport = http.DefaultTransport
+	}
+	return &stdTransport{
+		config:     p.Config,
+		transport:  p.Transport,
+		registries: make(map[string]*registry),
+	}
+}
+
+// registry holds currently known auth information for a registry.
+type registry struct {
+	host      string
+	transport http.RoundTripper
+	config    Config
+	initOnce  sync.Once
+	initErr   error
+
+	// mu guards the fields that follow it.
+	mu sync.Mutex
+
+	// wwwAuthenticate holds the Www-Authenticate header from
+	// the most recent 401 response. If there was a 401 response
+	// that didn't hold such a header, this will still be non-nil
+	// but hold a zero authHeader.
+	wwwAuthenticate *authHeader
+
+	accessTokens []*scopedToken
+	refreshToken string
+	basic        *userPass
+}
+
+type scopedToken struct {
+	// scope holds the scope that the token is good for.
+	scope Scope
+	// token holds the actual access token.
+	token string
+	// expires holds when the token expires.
+	expires time.Time
+}
+
+type userPass struct {
+	username string
+	password string
+}
+
+var forever = time.Date(99999, time.January, 1, 0, 0, 0, 0, time.UTC)
+
+// RoundTrip implements [http.RoundTripper.RoundTrip].
+func (a *stdTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	// From the [http.RoundTripper] docs:
+	//	RoundTrip should not modify the request, except for
+	//	consuming and closing the Request's Body.
+	req = req.Clone(req.Context())
+
+	// From the [http.RoundTripper] docs:
+	//	RoundTrip must always close the body, including on errors, [...]
+	needBodyClose := true
+	defer func() {
+		if needBodyClose && req.Body != nil {
+			req.Body.Close()
+		}
+	}()
+
+	a.mu.Lock()
+	r := a.registries[req.URL.Host]
+	if r == nil {
+		r = &registry{
+			host:      req.URL.Host,
+			config:    a.config,
+			transport: a.transport,
+		}
+		a.registries[r.host] = r
+	}
+	a.mu.Unlock()
+	if err := r.init(); err != nil {
+		return nil, err
+	}
+
+	ctx := req.Context()
+	requiredScope := RequestInfoFromContext(ctx).RequiredScope
+	wantScope := ScopeFromContext(ctx)
+
+	if err := r.setAuthorization(ctx, req, requiredScope, wantScope); err != nil {
+		return nil, err
+	}
+	resp, err := r.transport.RoundTrip(req)
+
+	// The underlying transport should now have closed the request body
+	// so we don't have to.
+	needBodyClose = false
+	if err != nil {
+		return nil, err
+	}
+	if resp.StatusCode != http.StatusUnauthorized {
+		return resp, nil
+	}
+	challenge := challengeFromResponse(resp)
+	if challenge == nil {
+		return resp, nil
+	}
+	authAdded, tokenAcquired, err := r.setAuthorizationFromChallenge(ctx, req, challenge, requiredScope, wantScope)
+	if err != nil {
+		resp.Body.Close()
+		return nil, err
+	}
+	if !authAdded {
+		// Couldn't acquire any more authorization than we had initially.
+		return resp, nil
+	}
+	resp.Body.Close()
+	// Rewind the request body if needed and possible.
+	if req.GetBody != nil {
+		req.Body, err = req.GetBody()
+		if err != nil {
+			return nil, err
+		}
+	}
+	resp, err = r.transport.RoundTrip(req)
+	if err != nil {
+		return nil, err
+	}
+	if resp.StatusCode != http.StatusUnauthorized || !tokenAcquired {
+		return resp, nil
+	}
+	// The server has responded with Unauthorized (401) even though we've just
+	// provided a token that it gave us. Treat it as Forbidden (403) instead.
+	// TODO include the original body/error as part of the message or message detail?
+	resp.Body.Close()
+	data, err := json.Marshal(&ociregistry.WireErrors{
+		Errors: []ociregistry.WireError{{
+			Code_:   ociregistry.ErrDenied.Code(),
+			Message: "unauthorized response with freshly acquired auth token",
+		}},
+	})
+	if err != nil {
+		return nil, fmt.Errorf("cannot marshal response body: %v", err)
+	}
+	resp.Header.Set("Content-Type", "application/json")
+	resp.ContentLength = int64(len(data))
+	resp.Body = io.NopCloser(bytes.NewReader(data))
+	resp.StatusCode = http.StatusForbidden
+	resp.Status = http.StatusText(resp.StatusCode)
+	return resp, nil
+}
+
+// setAuthorization sets up authorization on the given request using any
+// auth information currently available.
+func (r *registry) setAuthorization(ctx context.Context, req *http.Request, requiredScope, wantScope Scope) error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	// Remove tokens that have expired or will expire soon so that
+	// the caller doesn't start using a token only for it to expire while it's
+	// making the request.
+	r.deleteExpiredTokens(time.Now().UTC().Add(time.Second))
+
+	if accessToken := r.accessTokenForScope(requiredScope); accessToken != nil {
+		// We have a potentially valid access token. Use it.
+		req.Header.Set("Authorization", "Bearer "+accessToken.token)
+		return nil
+	}
+	if r.wwwAuthenticate == nil {
+		// We haven't seen a 401 response yet. Avoid putting any
+		// basic authorization in the request, because that can mean that
+		// the server sends a 401 response without a Www-Authenticate
+		// header.
+		return nil
+	}
+	if r.refreshToken != "" && r.wwwAuthenticate.scheme == "bearer" {
+		// We've got a refresh token that we can use to try to
+		// acquire an access token and we've seen a Www-Authenticate response
+		// that tells us how we can use it.
+
+		// TODO we're holding the lock (r.mu) here, which is precluding
+		// acquiring several tokens concurrently. We should relax the lock
+		// to allow that.
+
+		accessToken, err := r.acquireAccessToken(ctx, requiredScope, wantScope)
+		if err != nil {
+			// Avoid using %w to wrap the error because we don't want the
+			// caller of RoundTrip (usually ociclient) to assume that the
+			// error applies to the target server rather than the token server.
+			return fmt.Errorf("cannot acquire access token: %v", err)
+		}
+		req.Header.Set("Authorization", "Bearer "+accessToken)
+		return nil
+	}
+	if r.wwwAuthenticate.scheme != "bearer" && r.basic != nil {
+		req.SetBasicAuth(r.basic.username, r.basic.password)
+		return nil
+	}
+	return nil
+}
+
+func (r *registry) setAuthorizationFromChallenge(ctx context.Context, req *http.Request, challenge *authHeader, requiredScope, wantScope Scope) (authAdded, tokenAcquired bool, _ error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.wwwAuthenticate = challenge
+
+	switch {
+	case r.wwwAuthenticate.scheme == "bearer":
+		scope := ParseScope(r.wwwAuthenticate.params["scope"])
+		accessToken, err := r.acquireAccessToken(ctx, scope, wantScope.Union(requiredScope))
+		if err != nil {
+			return false, false, err
+		}
+		req.Header.Set("Authorization", "Bearer "+accessToken)
+		return true, true, nil
+	case r.basic != nil:
+		req.SetBasicAuth(r.basic.username, r.basic.password)
+		return true, false, nil
+	}
+	return false, false, nil
+}
+
+// init initializes the registry instance by acquiring auth information from
+// the Config, if available. As this might be slow (invoking EntryForRegistry
+// can end up invoking slow external commands), we ensure that it's only
+// done once.
+// TODO it's possible that this could take a very long time, during which
+// the outer context is cancelled, but we'll ignore that. We probably shouldn't.
+func (r *registry) init() error {
+	inner := func() error {
+		info, err := r.config.EntryForRegistry(r.host)
+		if err != nil {
+			return fmt.Errorf("cannot acquire auth info for registry %q: %v", r.host, err)
+		}
+		r.refreshToken = info.RefreshToken
+		if info.AccessToken != "" {
+			r.accessTokens = append(r.accessTokens, &scopedToken{
+				scope:   UnlimitedScope(),
+				token:   info.AccessToken,
+				expires: forever,
+			})
+		}
+		if info.Username != "" && info.Password != "" {
+			r.basic = &userPass{
+				username: info.Username,
+				password: info.Password,
+			}
+		}
+		return nil
+	}
+	r.initOnce.Do(func() {
+		r.initErr = inner()
+	})
+	return r.initErr
+}
+
+// acquireAccessToken tries to acquire an access token for authorizing a request.
+// The requiredScope parameter indicates the scope that's definitely
+// required. Because some servers are apparently picky about getting
+// exactly the same scope in the auth request that was returned in the
+// challenge, its original string representation is preserved.
+// The wantScope parameter indicates what scope might be required in
+// the future.
+//
+// This method assumes that there has been a previous 401 response with
+// a Www-Authenticate: Bearer... header.
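+//
+// On the refresh-token path, the resulting token request is a POST to
+// the challenge's realm, a sketch of which (hypothetical values) is:
+//
+//	POST https://auth.example.com/token
+//	Content-Type: application/x-www-form-urlencoded
+//
+//	grant_type=refresh_token&refresh_token=<token>&client_id=cuelabs-ociauth&service=registry.example.com&scope=repository:foo/bar:pull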
+func (r *registry) acquireAccessToken(ctx context.Context, requiredScope, wantScope Scope) (string, error) {
+	scope := requiredScope.Union(wantScope)
+	tok, err := r.acquireToken(ctx, scope)
+	if err != nil {
+		var herr ociregistry.HTTPError
+		if !errors.As(err, &herr) || herr.StatusCode() != http.StatusUnauthorized {
+			return "", err
+		}
+		// The documentation says this:
+		//
+		//	If the client only has a subset of the requested
+		//	access it _must not be considered an error_ as it is
+		//	not the responsibility of the token server to
+		//	indicate authorization errors as part of this
+		//	workflow.
+		//
+		// However it's apparently not uncommon for servers to reject
+		// such requests anyway, so if we've got an unauthorized error
+		// and wantScope goes beyond requiredScope, it may be because
+		// the server is rejecting the request.
+		scope = requiredScope
+		tok, err = r.acquireToken(ctx, scope)
+		if err != nil {
+			return "", err
+		}
+		// TODO mark the registry as picky about tokens so we don't
+		// attempt twice every time?
+	}
+	if tok.RefreshToken != "" {
+		r.refreshToken = tok.RefreshToken
+	}
+	accessToken := tok.Token
+	if accessToken == "" {
+		accessToken = tok.AccessToken
+	}
+	if accessToken == "" {
+		return "", fmt.Errorf("no access token found in auth server response")
+	}
+	var expires time.Time
+	now := time.Now().UTC()
+	if tok.ExpiresIn == 0 {
+		expires = now.Add(60 * time.Second) // TODO link to where this is mentioned
+	} else {
+		expires = now.Add(time.Duration(tok.ExpiresIn) * time.Second)
+	}
+	r.accessTokens = append(r.accessTokens, &scopedToken{
+		scope:   scope,
+		token:   accessToken,
+		expires: expires,
+	})
+	// TODO persist the access token to save round trips when doing
+	// the authorization flow in a newly run executable.
+	return accessToken, nil
+}
+
+func (r *registry) acquireToken(ctx context.Context, scope Scope) (*wireToken, error) {
+	realm := r.wwwAuthenticate.params["realm"]
+	if realm == "" {
+		return nil, fmt.Errorf("malformed Www-Authenticate header (missing realm)")
+	}
+	if r.refreshToken != "" {
+		v := url.Values{}
+		v.Set("scope", scope.String())
+		if service := r.wwwAuthenticate.params["service"]; service != "" {
+			v.Set("service", service)
+		}
+		v.Set("client_id", oauthClientID)
+		v.Set("grant_type", "refresh_token")
+		v.Set("refresh_token", r.refreshToken)
+		req, err := http.NewRequestWithContext(ctx, "POST", realm, strings.NewReader(v.Encode()))
+		if err != nil {
+			return nil, fmt.Errorf("cannot form HTTP request to %q: %v", realm, err)
+		}
+		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+		tok, err := r.doTokenRequest(req)
+		if err == nil {
+			return tok, nil
+		}
+		var herr ociregistry.HTTPError
+		if !errors.As(err, &herr) || herr.StatusCode() != http.StatusNotFound {
+			return tok, err
+		}
+		// The POST request to the token endpoint returned 404.
+		// Not all token servers implement the OAuth2 POST flow, so
+		// fall back to the HTTP GET method with basic auth, which
+		// token servers are generally expected to support.
+		// TODO where in the token documentation is this documented?
+	}
+	u, err := url.Parse(realm)
+	if err != nil {
+		return nil, fmt.Errorf("malformed Www-Authenticate header (malformed realm %q): %v", realm, err)
+	}
+	v := u.Query()
+	// TODO where is it documented that we should send multiple scope
+	// attributes rather than a single space-separated attribute as
+	// the POST method does?
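+	// For example, a scope holding pull on foo/bar and push on x/y
+	// (hypothetical repositories) is sent as two separate query
+	// parameters:
+	//
+	//	scope=repository:foo/bar:pull&scope=repository:x/y:push
+	//
+	// (URL-encoded), rather than as one space-separated value.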
+ v["scope"] = strings.Split(scope.String(), " ") + if service := r.wwwAuthenticate.params["service"]; service != "" { + // TODO the containerregistry code sets this even if it's empty. + // Is that better? + v.Set("service", service) + } + u.RawQuery = v.Encode() + req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return nil, err + } + // TODO if there's an unlimited-scope access token, the original code + // will use it as Bearer authorization at this point. If + // that's valid, why are we even acquiring another token? + if r.basic != nil { + req.SetBasicAuth(r.basic.username, r.basic.password) + } + return r.doTokenRequest(req) +} + +// wireToken describes the JSON encoding used in the response to a token +// acquisition method. The comments are taken from the [token docs] +// and made available here for ease of reference. +// +// [token docs]: https://distribution.github.io/distribution/spec/auth/token/#token-response-fields +type wireToken struct { + // Token holds an opaque Bearer token that clients should supply + // to subsequent requests in the Authorization header. + // AccessToken is provided for compatibility with OAuth 2.0: it's equivalent to Token. + // At least one of these fields must be specified, but both may also appear (for compatibility with older clients). + // When both are specified, they should be equivalent; if they differ the client's choice is undefined. + Token string `json:"token"` + AccessToken string `json:"access_token,omitempty"` + + // Refresh token optionally holds a token which can be used to + // get additional access tokens for the same subject with different scopes. + // This token should be kept secure by the client and only sent + // to the authorization server which issues bearer tokens. This + // field will only be set when `offline_token=true` is provided + // in the request. + RefreshToken string `json:"refresh_token"` + + // ExpiresIn holds the duration in seconds since the token was + // issued that it will remain valid. When omitted, this defaults + // to 60 seconds. For compatibility with older clients, a token + // should never be returned with less than 60 seconds to live. + ExpiresIn int `json:"expires_in"` +} + +func (r *registry) doTokenRequest(req *http.Request) (*wireToken, error) { + client := &http.Client{ + Transport: r.transport, + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + data, bodyErr := io.ReadAll(resp.Body) + if resp.StatusCode != http.StatusOK { + return nil, ociregistry.NewHTTPError(nil, resp.StatusCode, resp, data) + } + if bodyErr != nil { + return nil, fmt.Errorf("error reading response body: %v", err) + } + var tok wireToken + if err := json.Unmarshal(data, &tok); err != nil { + return nil, fmt.Errorf("malformed JSON token in response: %v", err) + } + return &tok, nil +} + +// deleteExpiredTokens removes all tokens from r that expire after the given +// time. +// TODO ask the store to remove expired tokens? +func (r *registry) deleteExpiredTokens(now time.Time) { + r.accessTokens = slices.DeleteFunc(r.accessTokens, func(tok *scopedToken) bool { + return now.After(tok.expires) + }) +} + +func (r *registry) accessTokenForScope(scope Scope) *scopedToken { + for _, tok := range r.accessTokens { + if tok.scope.Contains(scope) { + // TODO prefer tokens with less scope? 
+			return tok
+		}
+	}
+	return nil
+}
+
+type emptyConfig struct{}
+
+func (emptyConfig) EntryForRegistry(host string) (ConfigEntry, error) {
+	return ConfigEntry{}, nil
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/authfile.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/authfile.go
new file mode 100644
index 0000000000..d9c6b684da
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/authfile.go
@@ -0,0 +1,358 @@
+package ociauth
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"sort"
+	"strings"
+)
+
+// Config represents access to system level (e.g. config-file or command-execution based)
+// configuration information.
+//
+// It's OK to call EntryForRegistry concurrently.
+type Config interface {
+	// EntryForRegistry returns auth information for the given host.
+	// If there's no information available, it should return the zero ConfigEntry
+	// and nil.
+	EntryForRegistry(host string) (ConfigEntry, error)
+}
+
+// ConfigEntry holds auth information for a registry.
+// It mirrors the information obtainable from the .docker/config.json
+// file and from the docker credential helper protocol.
+type ConfigEntry struct {
+	// RefreshToken holds a token that can be used to obtain an access token.
+	RefreshToken string
+	// AccessToken holds a bearer token to be sent to a registry.
+	AccessToken string
+	// Username holds the username for use with basic auth.
+	Username string
+	// Password holds the password for use with Username.
+	Password string
+}
+
+// ConfigFile holds auth information for OCI registries as read from a configuration file.
+// It implements [Config].
+type ConfigFile struct {
+	data   configData
+	runner HelperRunner
+}
+
+var ErrHelperNotFound = errors.New("helper not found")
+
+// HelperRunner is the function used to execute auth "helper"
+// commands. It's passed the helper name as specified in the configuration file,
+// without the "docker-credential-" prefix.
+//
+// If the credentials are not found, it should return the zero ConfigEntry
+// and no error.
+//
+// If the helper doesn't exist, it should return an [ErrHelperNotFound] error.
+type HelperRunner = func(helperName string, serverURL string) (ConfigEntry, error)
+
+// configData holds the part of ~/.docker/config.json that pertains to auth.
+type configData struct {
+	Auths       map[string]authConfig `json:"auths"`
+	CredsStore  string                `json:"credsStore,omitempty"`
+	CredHelpers map[string]string     `json:"credHelpers,omitempty"`
+}
+
+// authConfig contains authorization information for connecting to a Registry.
+type authConfig struct {
+	// derivedFrom records the entries from which this one was derived.
+	// If this is empty, the entry was explicitly present.
+	derivedFrom []string
+
+	Username string `json:"username,omitempty"`
+	Password string `json:"password,omitempty"`
+	// Auth is an alternative way of specifying username and password
+	// (in base64(username:password) form).
+	Auth string `json:"auth,omitempty"`
+
+	// IdentityToken is used to authenticate the user and get
+	// an access token for the registry.
+	IdentityToken string `json:"identitytoken,omitempty"`
+
+	// RegistryToken is a bearer token to be sent to a registry.
+	RegistryToken string `json:"registrytoken,omitempty"`
+}
+
+// LoadWithEnv is like [Load] but takes environment variables in the form
+// returned by [os.Environ] instead of calling [os.Getenv]. If env
+// is nil, the current process's environment will be used.
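+//
+// A minimal usage sketch (error handling elided):
+//
+//	cfg, _ := ociauth.Load(nil)
+//	client := &http.Client{
+//		Transport: ociauth.NewStdTransport(ociauth.StdTransportParams{
+//			Config: cfg,
+//		}),
+//	}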
+func LoadWithEnv(runner HelperRunner, env []string) (*ConfigFile, error) {
+	if runner == nil {
+		runner = ExecHelperWithEnv(env)
+	}
+	getenv := os.Getenv
+	if env != nil {
+		getenv = getenvFunc(env)
+	}
+	for _, f := range configFileLocations {
+		filename := f(getenv)
+		if filename == "" {
+			continue
+		}
+		data, err := os.ReadFile(filename)
+		if err != nil {
+			if os.IsNotExist(err) {
+				continue
+			}
+			return nil, err
+		}
+		f, err := decodeConfigFile(data)
+		if err != nil {
+			return nil, fmt.Errorf("invalid config file %q: %v", filename, err)
+		}
+		return &ConfigFile{
+			data:   f,
+			runner: runner,
+		}, nil
+	}
+	return &ConfigFile{
+		runner: runner,
+	}, nil
+}
+
+// Load loads the auth configuration from the first location it can find.
+// It uses runner to run any external helper commands; if runner
+// is nil, [ExecHelper] will be used.
+//
+// In order it tries:
+//   - $DOCKER_CONFIG/config.json
+//   - ~/.docker/config.json
+//   - $XDG_RUNTIME_DIR/containers/auth.json
+func Load(runner HelperRunner) (*ConfigFile, error) {
+	return LoadWithEnv(runner, nil)
+}
+
+func getenvFunc(env []string) func(string) string {
+	return func(key string) string {
+		for i := len(env) - 1; i >= 0; i-- {
+			if e := env[i]; len(e) >= len(key)+1 && e[len(key)] == '=' && e[:len(key)] == key {
+				return e[len(key)+1:]
+			}
+		}
+		return ""
+	}
+}
+
+var configFileLocations = []func(func(string) string) string{
+	func(getenv func(string) string) string {
+		if d := getenv("DOCKER_CONFIG"); d != "" {
+			return filepath.Join(d, "config.json")
+		}
+		return ""
+	},
+	func(getenv func(string) string) string {
+		if home := userHomeDir(getenv); home != "" {
+			return filepath.Join(home, ".docker", "config.json")
+		}
+		return ""
+	},
+	// If neither of the above locations was found, look for Podman's auth at
+	// $XDG_RUNTIME_DIR/containers/auth.json and attempt to load it as a
+	// Docker config.
+	func(getenv func(string) string) string {
+		if d := getenv("XDG_RUNTIME_DIR"); d != "" {
+			return filepath.Join(d, "containers", "auth.json")
+		}
+		return ""
+	},
+}
+
+// userHomeDir returns the current user's home directory.
+// The logic in this is directly derived from the logic in
+// [os.UserHomeDir] as of go 1.22.0.
+//
+// It's defined as a variable so it can be patched in tests.
+var userHomeDir = func(getenv func(string) string) string {
+	env := "HOME"
+	switch runtime.GOOS {
+	case "windows":
+		env = "USERPROFILE"
+	case "plan9":
+		env = "home"
+	}
+	if v := getenv(env); v != "" {
+		return v
+	}
+	// On some geese the home directory is not always defined.
+	switch runtime.GOOS {
+	case "android":
+		return "/sdcard"
+	case "ios":
+		return "/"
+	}
+	return ""
+}
+
+// EntryForRegistry implements [Config.EntryForRegistry].
+// If no entry is found, it returns the zero [ConfigEntry] and a nil error.
+func (c *ConfigFile) EntryForRegistry(registryHostname string) (ConfigEntry, error) {
+	helper, ok := c.data.CredHelpers[registryHostname]
+	explicit := true
+	if !ok {
+		helper = c.data.CredsStore
+		explicit = false
+	}
+	if helper != "" {
+		entry, err := c.runner(helper, registryHostname)
+		if err == nil || explicit || !errors.Is(err, ErrHelperNotFound) {
+			return entry, err
+		}
+		// The helper command isn't found and it's a fallback default.
+		// Don't treat that as an error, because it's common for
+		// a helper default to be set up without the helper actually
+		// existing. See https://github.com/cue-lang/cue/issues/2934.
+ } + auth := c.data.Auths[registryHostname] + if auth.IdentityToken != "" && auth.Username != "" { + return ConfigEntry{}, fmt.Errorf("ambiguous auth credentials") + } + if len(auth.derivedFrom) > 1 { + return ConfigEntry{}, fmt.Errorf("more than one auths entry for %q (%s)", registryHostname, strings.Join(auth.derivedFrom, ", ")) + } + + return ConfigEntry{ + RefreshToken: auth.IdentityToken, + AccessToken: auth.RegistryToken, + Username: auth.Username, + Password: auth.Password, + }, nil +} + +func decodeConfigFile(data []byte) (configData, error) { + var f configData + if err := json.Unmarshal(data, &f); err != nil { + return configData{}, fmt.Errorf("decode failed: %v", err) + } + for addr, ac := range f.Auths { + if ac.Auth != "" { + var err error + ac.Username, ac.Password, err = decodeAuth(ac.Auth) + if err != nil { + return configData{}, fmt.Errorf("cannot decode auth field for %q: %v", addr, err) + } + } + f.Auths[addr] = ac + if !strings.Contains(addr, "//") { + continue + } + // It looks like it might be a URL, so follow the original logic + // and extract the host name for later lookup. Explicit + // entries override implicit, and if several entries map to + // the same host, we record that so we can return an error + // later if that host is looked up (this avoids the nondeterministic + // behavior found in the original code when this happens). + addr1 := urlHost(addr) + if addr1 == addr { + continue + } + if ac1, ok := f.Auths[addr1]; ok { + if len(ac1.derivedFrom) == 0 { + // Don't override an explicit entry. + continue + } + ac = ac1 + } + ac.derivedFrom = append(ac.derivedFrom, addr) + sort.Strings(ac.derivedFrom) + f.Auths[addr1] = ac + } + return f, nil +} + +// urlHost returns the host part of a registry URL. +// Mimics [github.com/docker/docker/registry.ConvertToHostname] +// to keep the logic the same as that. +func urlHost(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.TrimPrefix(url, "http://") + } else if strings.HasPrefix(url, "https://") { + stripped = strings.TrimPrefix(url, "https://") + } + + hostName, _, _ := strings.Cut(stripped, "/") + return hostName +} + +// decodeAuth decodes a base64 encoded string and returns username and password +func decodeAuth(authStr string) (string, string, error) { + s, err := base64.StdEncoding.DecodeString(authStr) + if err != nil { + return "", "", fmt.Errorf("invalid base64-encoded string") + } + username, password, ok := strings.Cut(string(s), ":") + if !ok || username == "" { + return "", "", errors.New("no username found") + } + // The zero-byte-trimming logic here mimics the logic in the + // docker CLI configfile package. + return username, strings.Trim(password, "\x00"), nil +} + +// ExecHelper executes an external program to get the credentials from a native store. +// It implements [HelperRunner]. +func ExecHelper(helperName string, serverURL string) (ConfigEntry, error) { + return ExecHelperWithEnv(nil)(helperName, serverURL) +} + +// ExecHelperWithEnv returns a [HelperRunner] that behaves like [ExecHelper] +// except that, if env is non-nil, it will be used as the set of environment +// variables to pass to the executed helper command. If env is nil, +// the current process's environment will be used. 
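+//
+// The helper protocol implemented below runs docker-credential-<name>
+// with the "get" argument, writes the server URL to its stdin, and
+// expects JSON of this shape (hypothetical values) on stdout:
+//
+//	{"Username": "someuser", "Secret": "somepassword"}
+//
+// An empty Username indicates that Secret holds a refresh token
+// rather than a password.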
+func ExecHelperWithEnv(env []string) HelperRunner {
+	return func(helperName string, serverURL string) (ConfigEntry, error) {
+		var out bytes.Buffer
+		cmd := exec.Command("docker-credential-"+helperName, "get")
+		// TODO this doesn't produce a decent error message for
+		// other helpers such as gcloud that print errors to stderr.
+		cmd.Stdin = strings.NewReader(serverURL)
+		cmd.Stdout = &out
+		cmd.Stderr = &out
+		cmd.Env = env
+		if err := cmd.Run(); err != nil {
+			if !errors.As(err, new(*exec.ExitError)) {
+				if errors.Is(err, exec.ErrNotFound) {
+					return ConfigEntry{}, fmt.Errorf("%w: %v", ErrHelperNotFound, err)
+				}
+				return ConfigEntry{}, fmt.Errorf("cannot run auth helper: %v", err)
+			}
+			t := strings.TrimSpace(out.String())
+			if t == "credentials not found in native keychain" {
+				return ConfigEntry{}, nil
+			}
+			return ConfigEntry{}, fmt.Errorf("error getting credentials: %s", t)
+		}
+
+		// helperCredentials defines the JSON encoding of the data printed
+		// by credentials helper programs.
+		type helperCredentials struct {
+			Username string
+			Secret   string
+		}
+		var creds helperCredentials
+		if err := json.Unmarshal(out.Bytes(), &creds); err != nil {
+			return ConfigEntry{}, err
+		}
+		if creds.Username == "" {
+			return ConfigEntry{
+				RefreshToken: creds.Secret,
+			}, nil
+		}
+		return ConfigEntry{
+			Password: creds.Secret,
+			Username: creds.Username,
+		}, nil
+	}
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/challenge.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/challenge.go
new file mode 100644
index 0000000000..8ca1a3016b
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/challenge.go
@@ -0,0 +1,167 @@
+package ociauth
+
+import (
+	"net/http"
+	"strings"
+)
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// authHeader holds the parsed contents of a Www-Authenticate HTTP header.
+type authHeader struct {
+	scheme string
+	params map[string]string
+}
+
+func challengeFromResponse(resp *http.Response) *authHeader {
+	var h *authHeader
+	for _, chalStr := range resp.Header["Www-Authenticate"] {
+		h1 := parseWWWAuthenticate(chalStr)
+		if h1 == nil {
+			continue
+		}
+		if h1.scheme != "basic" && h1.scheme != "bearer" {
+			continue
+		}
+		if h == nil {
+			h = h1
+		} else if h1.scheme == "basic" && h.scheme == "bearer" {
+			// We prefer basic auth to bearer auth.
+			h = h1
+		}
+	}
+	return h
+}
+
+// parseWWWAuthenticate parses the contents of a Www-Authenticate HTTP header.
+// It returns nil if the parsing fails.
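+//
+// For example, a challenge such as (hypothetical values):
+//
+//	Bearer realm="https://auth.example.com/token",service="registry.example.com"
+//
+// parses to scheme "bearer" with "realm" and "service" entries
+// in params.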
+func parseWWWAuthenticate(header string) *authHeader {
+	var h authHeader
+	h.params = make(map[string]string)
+
+	scheme, s := expectToken(header)
+	if scheme == "" {
+		return nil
+	}
+	h.scheme = strings.ToLower(scheme)
+	s = skipSpace(s)
+	for len(s) > 0 {
+		var pkey, pvalue string
+		pkey, s = expectToken(skipSpace(s))
+		if pkey == "" {
+			return nil
+		}
+		if !strings.HasPrefix(s, "=") {
+			return nil
+		}
+		pvalue, s = expectTokenOrQuoted(s[1:])
+		if pvalue == "" {
+			return nil
+		}
+		h.params[strings.ToLower(pkey)] = pvalue
+		s = skipSpace(s)
+		if !strings.HasPrefix(s, ",") {
+			break
+		}
+		s = s[1:]
+	}
+	if len(s) > 0 {
+		return nil
+	}
+	return &h
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/context.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/context.go
new file mode 100644
index 0000000000..cd691d61a6
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/context.go
@@ -0,0 +1,53 @@
+package ociauth
+
+import (
+	"context"
+)
+
+type scopeKey struct{}
+
+// ContextWithScope returns ctx annotated with the given
+// scope. When the ociauth transport receives a request with a scope in the context,
+// it will treat it as "desired authorization scope"; new authorization tokens
+// will be acquired with that scope as well as any scope required by
+// the operation.
+func ContextWithScope(ctx context.Context, s Scope) context.Context {
+	return context.WithValue(ctx, scopeKey{}, s)
+}
+
+// ScopeFromContext returns any scope associated with the context
+// by [ContextWithScope].
+func ScopeFromContext(ctx context.Context) Scope {
+	s, _ := ctx.Value(scopeKey{}).(Scope)
+	return s
+}
+
+type requestInfoKey struct{}
+
+// RequestInfo provides information about the OCI request that
+// is currently being made. It is expected to be attached to an HTTP
+// request context. The [ociclient] package will add this to all
+// requests that it makes.
+type RequestInfo struct {
+	// RequiredScope holds the authorization scope that's required
+	// by the request. The ociauth logic will reuse any available
+	// auth token that has this scope. When acquiring a new token,
+	// it will add any scope found in [ScopeFromContext] too.
+	RequiredScope Scope
+}
+
+// ContextWithRequestInfo returns ctx annotated with the given
+// request information. When ociclient receives a request with
+// this attached, it will respect info.RequiredScope to determine
+// what auth tokens to reuse. When it acquires a new token,
+// it will ask for the union of info.RequiredScope and [ScopeFromContext].
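+//
+// A sketch of manual use with a hypothetical repository name
+// (ociclient does this automatically for its own requests):
+//
+//	info := ociauth.RequestInfo{
+//		RequiredScope: ociauth.NewScope(ociauth.ResourceScope{
+//			ResourceType: ociauth.TypeRepository,
+//			Resource:     "foo/bar",
+//			Action:       ociauth.ActionPull,
+//		}),
+//	}
+//	req = req.WithContext(ociauth.ContextWithRequestInfo(req.Context(), info))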
+func ContextWithRequestInfo(ctx context.Context, info RequestInfo) context.Context { + return context.WithValue(ctx, requestInfoKey{}, info) +} + +// RequestInfoFromContext returns any request information associated with the context +// by [ContextWithRequestInfo]. +func RequestInfoFromContext(ctx context.Context) RequestInfo { + info, _ := ctx.Value(requestInfoKey{}).(RequestInfo) + return info +} diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/scope.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/scope.go new file mode 100644 index 0000000000..6b37cd5894 --- /dev/null +++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/scope.go @@ -0,0 +1,492 @@ +package ociauth + +import ( + "math/bits" + "slices" + "strings" +) + +// knownAction represents an action that we know about +// and use a more efficient internal representation for. +type knownAction byte + +const ( + unknownAction knownAction = iota + // Note: ordered by lexical string representation. + pullAction + pushAction + numActions +) + +const ( + // Known resource types. + TypeRepository = "repository" + TypeRegistry = "registry" + + // Known action types. + ActionPull = "pull" + ActionPush = "push" +) + +func (a knownAction) String() string { + switch a { + case pullAction: + return ActionPull + case pushAction: + return ActionPush + default: + return "unknown" + } +} + +// CatalogScope defines the resource scope used to allow +// listing all the items in a registry. +var CatalogScope = ResourceScope{ + ResourceType: TypeRegistry, + Resource: "catalog", + Action: "*", +} + +// ResourceScope defines a component of an authorization scope +// associated with a single resource and action only. +// See [Scope] for a way of combining multiple ResourceScopes +// into a single value. +type ResourceScope struct { + // ResourceType holds the type of resource the scope refers to. + // Known values for this include TypeRegistry and TypeRepository. + // When a scope does not conform to the standard resourceType:resource:actions + // syntax, ResourceType will hold the entire scope. + ResourceType string + + // Resource names the resource the scope pertains to. + // For resource type TypeRepository, this will be the name of the repository. + Resource string + + // Action names an action that can be performed on the resource. + // This is usually ActionPush or ActionPull. + Action string +} + +func (rs1 ResourceScope) Equal(rs2 ResourceScope) bool { + return rs1.Compare(rs2) == 0 +} + +// Compare returns -1, 0 or 1 depending on whether +// rs1 compares less than, equal, or greater than, rs2. +// +// In most to least precedence, the fields are compared in the order +// ResourceType, Resource, Action. +func (rs1 ResourceScope) Compare(rs2 ResourceScope) int { + if c := strings.Compare(rs1.ResourceType, rs2.ResourceType); c != 0 { + return c + } + if c := strings.Compare(rs1.Resource, rs2.Resource); c != 0 { + return c + } + return strings.Compare(rs1.Action, rs2.Action) +} + +func (rs ResourceScope) isKnown() bool { + switch rs.ResourceType { + case TypeRepository: + return parseKnownAction(rs.Action) != unknownAction + case TypeRegistry: + return rs == CatalogScope + } + return false +} + +// Scope holds a set of [ResourceScope] values. The zero value +// represents the empty set. +type Scope struct { + // original holds the original string from which + // this Scope was parsed. This maintains the string + // representation unchanged as far as possible. 
+ original string + + // unlimited holds whether this scope is considered to include all + // other scopes. + unlimited bool + + // repositories holds all the repositories that the scope + // refers to. An empty repository name implies a CatalogScope + // entry. The elements of this are maintained in sorted order. + repositories []string + + // actions holds an element for each element in repositories + // defining the set of allowed actions for that repository + // as a bitmask of 1< 0 && s.repositories[i-1] == rs.Resource { + s.actions[i-1] |= actionMask + } else { + s.repositories = append(s.repositories, rs.Resource) + s.actions = append(s.actions, actionMask) + } + } + slices.SortFunc(s.others, ResourceScope.Compare) + s.others = slices.Compact(s.others) + return s +} + +// Len returns the number of ResourceScopes in the scope set. +// It panics if the scope is unlimited. +func (s Scope) Len() int { + if s.IsUnlimited() { + panic("Len called on unlimited scope") + } + n := len(s.others) + for _, b := range s.actions { + n += bits.OnesCount8(b) + } + return n +} + +// UnlimitedScope returns a scope that contains all other +// scopes. This is not representable in the docker scope syntax, +// but it's useful to represent the scope of tokens that can +// be used for arbitrary access. +func UnlimitedScope() Scope { + return Scope{ + unlimited: true, + } +} + +// IsUnlimited reports whether s is unlimited in scope. +func (s Scope) IsUnlimited() bool { + return s.unlimited +} + +// IsEmpty reports whether the scope holds the empty set. +func (s Scope) IsEmpty() bool { + return len(s.repositories) == 0 && + len(s.others) == 0 && + !s.unlimited +} + +// Iter returns an iterator over all the individual scopes that are +// part of s. The items will be produced according to [Scope.Compare] +// ordering. +// +// The unlimited scope does not yield any scopes. +func (s Scope) Iter() func(yield func(ResourceScope) bool) { + return func(yield0 func(ResourceScope) bool) { + if s.unlimited { + return + } + others := s.others + yield := func(scope ResourceScope) bool { + // Yield any scopes from others that are ready to + // be produced, thus preserving ordering of all + // values in the iterator. + for len(others) > 0 && others[0].Compare(scope) < 0 { + if !yield0(others[0]) { + return false + } + others = others[1:] + } + return yield0(scope) + } + for i, repo := range s.repositories { + if repo == "" { + if !yield(CatalogScope) { + return + } + continue + } + acts := s.actions[i] + for k := knownAction(0); k < numActions; k++ { + if acts&(1< 0 { + buf.WriteByte(' ') + } + buf.WriteString(s.ResourceType) + if s.Resource != "" || s.Action != "" { + buf.WriteByte(':') + buf.WriteString(s.Resource) + buf.WriteByte(':') + buf.WriteString(s.Action) + } + return true + }) + return buf.String() +} + +func parseKnownAction(s string) knownAction { + switch s { + case ActionPull: + return pullAction + case ActionPush: + return pushAction + default: + return unknownAction + } +} diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/client.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/client.go new file mode 100644 index 0000000000..bc78b328f6 --- /dev/null +++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/client.go @@ -0,0 +1,422 @@ +// Copyright 2023 CUE Labs AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ociclient provides an implementation of ociregistry.Interface that
+// uses HTTP to talk to the remote registry.
+package ociclient
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"hash"
+	"io"
+	"log"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"sync/atomic"
+
+	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+
+	"cuelabs.dev/go/oci/ociregistry"
+	"cuelabs.dev/go/oci/ociregistry/internal/ocirequest"
+	"cuelabs.dev/go/oci/ociregistry/ociauth"
+)
+
+// debug enables logging.
+// TODO this should be configurable in the API.
+const debug = false
+
+type Options struct {
+	// DebugID is used to prefix any log messages printed by the client.
+	DebugID string
+
+	// Transport is used to make HTTP requests. The context passed
+	// to its RoundTrip method will have an appropriate
+	// [ociauth.RequestInfo] value added, suitable for consumption
+	// by the transport created by [ociauth.NewStdTransport]. If
+	// Transport is nil, [http.DefaultTransport] will be used.
+	Transport http.RoundTripper
+
+	// Insecure specifies whether an http scheme will be used to
+	// address the host instead of https.
+	Insecure bool
+
+	// ListPageSize configures the maximum number of results
+	// requested when making list requests. If it's <= zero, it
+	// defaults to DefaultListPageSize.
+	ListPageSize int
+}
+
+// See https://github.com/google/go-containerregistry/issues/1091
+// for an early report of the issue alluded to below.
+
+// DefaultListPageSize holds the default number of results
+// to request when using the list endpoints.
+// It's not more than 1000 because AWS ECR complains
+// if it's more than that.
+const DefaultListPageSize = 1000
+
+var debugID int32
+
+// New returns a registry implementation that uses the OCI
+// HTTP API. A nil opts parameter is equivalent to a pointer
+// to zero Options.
+//
+// The host specifies the host name to talk to; it may
+// optionally be a host:port pair.
+func New(host string, opts0 *Options) (ociregistry.Interface, error) {
+	var opts Options
+	if opts0 != nil {
+		opts = *opts0
+	}
+	if opts.DebugID == "" {
+		opts.DebugID = fmt.Sprintf("id%d", atomic.AddInt32(&debugID, 1))
+	}
+	if opts.Transport == nil {
+		opts.Transport = http.DefaultTransport
+	}
+	// Check that it's a valid host by forming a URL from it and checking that it matches.
+ u, err := url.Parse("https://" + host + "/path") + if err != nil { + return nil, fmt.Errorf("invalid host %q", host) + } + if u.Host != host { + return nil, fmt.Errorf("invalid host %q (does not correctly form a host part of a URL)", host) + } + if opts.Insecure { + u.Scheme = "http" + } + if opts.ListPageSize == 0 { + opts.ListPageSize = DefaultListPageSize + } + return &client{ + httpHost: host, + httpScheme: u.Scheme, + httpClient: &http.Client{ + Transport: opts.Transport, + }, + debugID: opts.DebugID, + listPageSize: opts.ListPageSize, + }, nil +} + +type client struct { + *ociregistry.Funcs + httpScheme string + httpHost string + httpClient *http.Client + debugID string + listPageSize int +} + +type descriptorRequired byte + +const ( + requireSize descriptorRequired = 1 << iota + requireDigest +) + +// descriptorFromResponse tries to form a descriptor from an HTTP response, +// filling in the Digest field using knownDigest if it's not present. +// +// Note: this implies that the Digest field will be empty if there is no +// digest in the response and knownDigest is empty. +func descriptorFromResponse(resp *http.Response, knownDigest digest.Digest, require descriptorRequired) (ociregistry.Descriptor, error) { + contentType := resp.Header.Get("Content-Type") + if contentType == "" { + contentType = "application/octet-stream" + } + size := int64(0) + if (require & requireSize) != 0 { + if resp.StatusCode == http.StatusPartialContent { + contentRange := resp.Header.Get("Content-Range") + if contentRange == "" { + return ociregistry.Descriptor{}, fmt.Errorf("no Content-Range in partial content response") + } + i := strings.LastIndex(contentRange, "/") + if i == -1 { + return ociregistry.Descriptor{}, fmt.Errorf("malformed Content-Range %q", contentRange) + } + contentSize, err := strconv.ParseInt(contentRange[i+1:], 10, 64) + if err != nil { + return ociregistry.Descriptor{}, fmt.Errorf("malformed Content-Range %q", contentRange) + } + size = contentSize + } else { + if resp.ContentLength < 0 { + return ociregistry.Descriptor{}, fmt.Errorf("unknown content length") + } + size = resp.ContentLength + } + } + digest := digest.Digest(resp.Header.Get("Docker-Content-Digest")) + if digest != "" { + if !ociregistry.IsValidDigest(string(digest)) { + return ociregistry.Descriptor{}, fmt.Errorf("bad digest %q found in response", digest) + } + } else { + digest = knownDigest + } + if (require&requireDigest) != 0 && digest == "" { + return ociregistry.Descriptor{}, fmt.Errorf("no digest found in response") + } + return ociregistry.Descriptor{ + Digest: digest, + MediaType: contentType, + Size: size, + }, nil +} + +func newBlobReader(r io.ReadCloser, desc ociregistry.Descriptor) *blobReader { + return &blobReader{ + r: r, + digester: desc.Digest.Algorithm().Hash(), + desc: desc, + verify: true, + } +} + +func newBlobReaderUnverified(r io.ReadCloser, desc ociregistry.Descriptor) *blobReader { + br := newBlobReader(r, desc) + br.verify = false + return br +} + +type blobReader struct { + r io.ReadCloser + n int64 + digester hash.Hash + desc ociregistry.Descriptor + verify bool +} + +func (r *blobReader) Descriptor() ociregistry.Descriptor { + return r.desc +} + +func (r *blobReader) Read(buf []byte) (int, error) { + n, err := r.r.Read(buf) + r.n += int64(n) + r.digester.Write(buf[:n]) + if err == nil { + if r.n > r.desc.Size { + // Fail early when the blob is too big; we can do that even + // when we're not verifying for other use cases. 
+ return n, fmt.Errorf("blob size exceeds content length %d: %w", r.desc.Size, ociregistry.ErrSizeInvalid) + } + return n, nil + } + if err != io.EOF { + return n, err + } + if !r.verify { + return n, io.EOF + } + if r.n != r.desc.Size { + return n, fmt.Errorf("blob size mismatch (%d/%d): %w", r.n, r.desc.Size, ociregistry.ErrSizeInvalid) + } + gotDigest := digest.NewDigest(r.desc.Digest.Algorithm(), r.digester) + if gotDigest != r.desc.Digest { + return n, fmt.Errorf("digest mismatch when reading blob") + } + return n, io.EOF +} + +func (r *blobReader) Close() error { + return r.r.Close() +} + +// TODO make this list configurable. +var knownManifestMediaTypes = []string{ + ocispec.MediaTypeImageManifest, + ocispec.MediaTypeImageIndex, + "application/vnd.oci.artifact.manifest.v1+json", // deprecated. + "application/vnd.docker.distribution.manifest.v1+json", + "application/vnd.docker.distribution.manifest.v2+json", + "application/vnd.docker.distribution.manifest.list.v2+json", + // Technically this wildcard should be sufficient, but it isn't + // recognized by some registries. + "*/*", +} + +// doRequest performs the given OCI request, sending it with the given body (which may be nil). +func (c *client) doRequest(ctx context.Context, rreq *ocirequest.Request, okStatuses ...int) (*http.Response, error) { + req, err := newRequest(ctx, rreq, nil) + if err != nil { + return nil, err + } + if rreq.Kind == ocirequest.ReqManifestGet || rreq.Kind == ocirequest.ReqManifestHead { + // When getting manifests, some servers won't return + // the content unless there's an Accept header, so + // add all the manifest kinds that we know about. + req.Header["Accept"] = knownManifestMediaTypes + } + resp, err := c.do(req, okStatuses...) + if err != nil { + return nil, err + } + if resp.StatusCode/100 == 2 { + return resp, nil + } + defer resp.Body.Close() + return nil, makeError(resp) +} + +func (c *client) do(req *http.Request, okStatuses ...int) (*http.Response, error) { + if req.URL.Scheme == "" { + req.URL.Scheme = c.httpScheme + } + if req.URL.Host == "" { + req.URL.Host = c.httpHost + } + if req.Body != nil { + // Ensure that the body isn't consumed until the + // server has responded that it will receive it. + // This means that we can retry requests even when we've + // got a consume-once-only io.Reader, such as + // when pushing blobs. 
+ req.Header.Set("Expect", "100-continue") + } + var buf bytes.Buffer + if debug { + fmt.Fprintf(&buf, "client.Do: %s %s {{\n", req.Method, req.URL) + fmt.Fprintf(&buf, "\tBODY: %#v\n", req.Body) + for k, v := range req.Header { + fmt.Fprintf(&buf, "\t%s: %q\n", k, v) + } + c.logf("%s", buf.Bytes()) + } + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("cannot do HTTP request: %w", err) + } + if debug { + buf.Reset() + fmt.Fprintf(&buf, "} -> %s {\n", resp.Status) + for k, v := range resp.Header { + fmt.Fprintf(&buf, "\t%s: %q\n", k, v) + } + data, _ := io.ReadAll(resp.Body) + if len(data) > 0 { + fmt.Fprintf(&buf, "\tBODY: %q\n", data) + } + fmt.Fprintf(&buf, "}}\n") + resp.Body.Close() + resp.Body = io.NopCloser(bytes.NewReader(data)) + c.logf("%s", buf.Bytes()) + } + if len(okStatuses) == 0 && resp.StatusCode == http.StatusOK { + return resp, nil + } + for _, status := range okStatuses { + if resp.StatusCode == status { + return resp, nil + } + } + defer resp.Body.Close() + if !isOKStatus(resp.StatusCode) { + return nil, makeError(resp) + } + return nil, unexpectedStatusError(resp.StatusCode) +} + +func (c *client) logf(f string, a ...any) { + log.Printf("ociclient %s: %s", c.debugID, fmt.Sprintf(f, a...)) +} + +func locationFromResponse(resp *http.Response) (*url.URL, error) { + location := resp.Header.Get("Location") + if location == "" { + return nil, fmt.Errorf("no Location found in response") + } + u, err := url.Parse(location) + if err != nil { + return nil, fmt.Errorf("invalid Location URL found in response") + } + return resp.Request.URL.ResolveReference(u), nil +} + +func isOKStatus(code int) bool { + return code/100 == 2 +} + +func closeOnError(err *error, r io.Closer) { + if *err != nil { + r.Close() + } +} + +func unexpectedStatusError(code int) error { + return fmt.Errorf("unexpected HTTP response code %d", code) +} + +func scopeForRequest(r *ocirequest.Request) ociauth.Scope { + switch r.Kind { + case ocirequest.ReqPing: + return ociauth.Scope{} + case ocirequest.ReqBlobGet, + ocirequest.ReqBlobHead, + ocirequest.ReqManifestGet, + ocirequest.ReqManifestHead, + ocirequest.ReqTagsList, + ocirequest.ReqReferrersList: + return ociauth.NewScope(ociauth.ResourceScope{ + ResourceType: ociauth.TypeRepository, + Resource: r.Repo, + Action: ociauth.ActionPull, + }) + case ocirequest.ReqBlobDelete, + ocirequest.ReqBlobStartUpload, + ocirequest.ReqBlobUploadBlob, + ocirequest.ReqBlobUploadInfo, + ocirequest.ReqBlobUploadChunk, + ocirequest.ReqBlobCompleteUpload, + ocirequest.ReqManifestPut, + ocirequest.ReqManifestDelete: + return ociauth.NewScope(ociauth.ResourceScope{ + ResourceType: ociauth.TypeRepository, + Resource: r.Repo, + Action: ociauth.ActionPush, + }) + case ocirequest.ReqBlobMount: + return ociauth.NewScope(ociauth.ResourceScope{ + ResourceType: ociauth.TypeRepository, + Resource: r.Repo, + Action: ociauth.ActionPush, + }, ociauth.ResourceScope{ + ResourceType: ociauth.TypeRepository, + Resource: r.FromRepo, + Action: ociauth.ActionPull, + }) + case ocirequest.ReqCatalogList: + return ociauth.NewScope(ociauth.CatalogScope) + default: + panic(fmt.Errorf("unexpected request kind %v", r.Kind)) + } +} + +func newRequest(ctx context.Context, rreq *ocirequest.Request, body io.Reader) (*http.Request, error) { + method, u, err := rreq.Construct() + if err != nil { + return nil, err + } + ctx = ociauth.ContextWithRequestInfo(ctx, ociauth.RequestInfo{ + RequiredScope: scopeForRequest(rreq), + }) + return http.NewRequestWithContext(ctx, method, u, body) 
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/deleter.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/deleter.go
new file mode 100644
index 0000000000..9a66c21b76
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/deleter.go
@@ -0,0 +1,56 @@
+// Copyright 2023 CUE Labs AG
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ociclient
+
+import (
+	"context"
+	"net/http"
+
+	"cuelabs.dev/go/oci/ociregistry"
+	"cuelabs.dev/go/oci/ociregistry/internal/ocirequest"
+)
+
+func (c *client) DeleteBlob(ctx context.Context, repoName string, digest ociregistry.Digest) error {
+	return c.delete(ctx, &ocirequest.Request{
+		Kind:   ocirequest.ReqBlobDelete,
+		Repo:   repoName,
+		Digest: string(digest),
+	})
+}
+
+func (c *client) DeleteManifest(ctx context.Context, repoName string, digest ociregistry.Digest) error {
+	return c.delete(ctx, &ocirequest.Request{
+		Kind:   ocirequest.ReqManifestDelete,
+		Repo:   repoName,
+		Digest: string(digest),
+	})
+}
+
+func (c *client) DeleteTag(ctx context.Context, repoName string, tagName string) error {
+	return c.delete(ctx, &ocirequest.Request{
+		Kind: ocirequest.ReqManifestDelete,
+		Repo: repoName,
+		Tag:  tagName,
+	})
+}
+
+func (c *client) delete(ctx context.Context, rreq *ocirequest.Request) error {
+	resp, err := c.doRequest(ctx, rreq, http.StatusAccepted)
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/error.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/error.go
new file mode 100644
index 0000000000..21f8901aa1
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/error.go
@@ -0,0 +1,113 @@
+// Copyright 2023 CUE Labs AG
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ociclient
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"mime"
+	"net/http"
+	"strings"
+
+	"cuelabs.dev/go/oci/ociregistry"
+)
+
+// errorBodySizeLimit holds the maximum number of response bytes allowed in
+// the server's error response. A typical error message is around 200
+// bytes. Hence, 8 KiB should be sufficient.
+const errorBodySizeLimit = 8 * 1024
+
+// makeError forms an error from a non-OK response.
+//
+// It reads but does not close resp.Body.
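+//
+// For reference, a spec-conforming error body has this shape
+// (hypothetical values):
+//
+//	{"errors": [{"code": "NAME_UNKNOWN", "message": "repository name not known to registry"}]}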
+func makeError(resp *http.Response) error { + var data []byte + var err error + if resp.Body != nil { + data, err = io.ReadAll(io.LimitReader(resp.Body, errorBodySizeLimit+1)) + if err != nil { + err = fmt.Errorf("cannot read error body: %v", err) + } else if len(data) > errorBodySizeLimit { + err = fmt.Errorf("error body too large") + } else { + err = makeError1(resp, data) + } + } + // We always include the status code and response in the error. + return ociregistry.NewHTTPError(err, resp.StatusCode, resp, data) +} + +func makeError1(resp *http.Response, bodyData []byte) error { + if resp.Request.Method == "HEAD" { + // When we've made a HEAD request, we can't see any of + // the actual error, so we'll have to make up something + // from the HTTP status. + // TODO would we do better if we interpreted the HTTP status + // relative to the actual method that was called in order + // to come up with a more plausible error? + var err error + switch resp.StatusCode { + case http.StatusNotFound: + err = ociregistry.ErrNameUnknown + case http.StatusUnauthorized: + err = ociregistry.ErrUnauthorized + case http.StatusForbidden: + err = ociregistry.ErrDenied + case http.StatusTooManyRequests: + err = ociregistry.ErrTooManyRequests + case http.StatusBadRequest: + err = ociregistry.ErrUnsupported + default: + // Our caller will turn this into a non-nil error. + return nil + } + return err + } + if ctype := resp.Header.Get("Content-Type"); !isJSONMediaType(ctype) { + return fmt.Errorf("non-JSON error response %q; body %q", ctype, bodyData) + } + var errs ociregistry.WireErrors + if err := json.Unmarshal(bodyData, &errs); err != nil { + return fmt.Errorf("%s: malformed error response: %v", resp.Status, err) + } + if len(errs.Errors) == 0 { + return fmt.Errorf("%s: no errors in body (probably a server issue)", resp.Status) + } + return &errs +} + +// isJSONMediaType reports whether the content type implies +// that the content is JSON. +func isJSONMediaType(contentType string) bool { + mediaType, _, _ := mime.ParseMediaType(contentType) + m := strings.TrimPrefix(mediaType, "application/") + if len(m) == len(mediaType) { + return false + } + // Look for +json suffix. See https://tools.ietf.org/html/rfc6838#section-4.2.8 + // We recognize multiple suffixes too (e.g. application/something+json+other) + // as that seems to be a possibility. + for { + i := strings.Index(m, "+") + if i == -1 { + return m == "json" + } + if m[0:i] == "json" { + return true + } + m = m[i+1:] + } +} diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/lister.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/lister.go new file mode 100644 index 0000000000..b629712e2b --- /dev/null +++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/lister.go @@ -0,0 +1,180 @@ +// Copyright 2023 CUE Labs AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ociclient + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "cuelabs.dev/go/oci/ociregistry" + "cuelabs.dev/go/oci/ociregistry/internal/ocirequest" +) + +func (c *client) Repositories(ctx context.Context, startAfter string) ociregistry.Seq[string] { + return c.pager(ctx, &ocirequest.Request{ + Kind: ocirequest.ReqCatalogList, + ListN: c.listPageSize, + ListLast: startAfter, + }, func(resp *http.Response) ([]string, error) { + data, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var catalog struct { + Repos []string `json:"repositories"` + } + if err := json.Unmarshal(data, &catalog); err != nil { + return nil, fmt.Errorf("cannot unmarshal catalog response: %v", err) + } + return catalog.Repos, nil + }) +} + +func (c *client) Tags(ctx context.Context, repoName, startAfter string) ociregistry.Seq[string] { + return c.pager(ctx, &ocirequest.Request{ + Kind: ocirequest.ReqTagsList, + Repo: repoName, + ListN: c.listPageSize, + ListLast: startAfter, + }, func(resp *http.Response) ([]string, error) { + data, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var tagsResponse struct { + Repo string `json:"name"` + Tags []string `json:"tags"` + } + if err := json.Unmarshal(data, &tagsResponse); err != nil { + return nil, fmt.Errorf("cannot unmarshal tags list response: %v", err) + } + return tagsResponse.Tags, nil + }) +} + +func (c *client) Referrers(ctx context.Context, repoName string, digest ociregistry.Digest, artifactType string) ociregistry.Seq[ociregistry.Descriptor] { + // TODO paging + resp, err := c.doRequest(ctx, &ocirequest.Request{ + Kind: ocirequest.ReqReferrersList, + Repo: repoName, + Digest: string(digest), + ListN: c.listPageSize, + }) + if err != nil { + return ociregistry.ErrorSeq[ociregistry.Descriptor](err) + } + + data, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return ociregistry.ErrorSeq[ociregistry.Descriptor](err) + } + var referrersResponse ocispec.Index + if err := json.Unmarshal(data, &referrersResponse); err != nil { + return ociregistry.ErrorSeq[ociregistry.Descriptor](fmt.Errorf("cannot unmarshal referrers response: %v", err)) + } + return ociregistry.SliceSeq(referrersResponse.Manifests) +} + +// pager returns an iterator for a list entry point. It starts by sending the given +// initial request and parses each response into its component items using +// parseResponse. It tries to use the Link header in each response to continue +// the iteration, falling back to using the "last" query parameter. +func (c *client) pager(ctx context.Context, initialReq *ocirequest.Request, parseResponse func(*http.Response) ([]string, error)) ociregistry.Seq[string] { + return func(yield func(string, error) bool) { + // We assume that the same scope is applicable to all page requests. + req, err := newRequest(ctx, initialReq, nil) + if err != nil { + yield("", err) + return + } + for { + resp, err := c.do(req) + if err != nil { + yield("", err) + return + } + items, err := parseResponse(resp) + resp.Body.Close() + if err != nil { + yield("", err) + return + } + // TODO sanity check that items are in lexical order? 
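+ // Hand the current page's items to the consumer one at a time;
+ // a false return from yield tells us to stop early.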
+ for _, item := range items {
+ if !yield(item, nil) {
+ return
+ }
+ }
+ if len(items) < initialReq.ListN {
+ // From the distribution spec:
+ //	The response to such a request MAY return fewer than <int> results,
+ //	but only when the total number of tags attached to the repository
+ //	is less than <int>.
+ return
+ }
+ req, err = nextLink(ctx, resp, initialReq, items[len(items)-1])
+ if err != nil {
+ yield("", fmt.Errorf("invalid Link header in response: %v", err))
+ return
+ }
+ }
+ }
+}
+
+// nextLink tries to form a request that can be sent to obtain the next page
+// in a set of list results.
+// The given response holds the response received from the previous
+// list request; initialReq holds the request that initiated the listing,
+// and last holds the final item returned in the previous response.
+func nextLink(ctx context.Context, resp *http.Response, initialReq *ocirequest.Request, last string) (*http.Request, error) {
+ link0 := resp.Header.Get("Link")
+ if link0 == "" {
+ // This is beyond the first page and there was no Link
+ // in the previous response (the standard doesn't mandate
+ // one), so add a "last" parameter to the initial request.
+ rreq := *initialReq
+ rreq.ListLast = last
+ req, err := newRequest(ctx, &rreq, nil)
+ if err != nil {
+ // Given that we could form the initial request, this should
+ // never happen.
+ return nil, fmt.Errorf("cannot form next request: %v", err)
+ }
+ return req, nil
+ }
+ // Parse the link header according to RFC 5988.
+ // TODO perhaps we shouldn't ignore the relation type?
+ link, ok := strings.CutPrefix(link0, "<")
+ if !ok {
+ return nil, fmt.Errorf("no initial < character in Link=%q", link0)
+ }
+ link, _, ok = strings.Cut(link, ">")
+ if !ok {
+ return nil, fmt.Errorf("no > character in Link=%q", link0)
+ }
+ // Parse it with respect to the originating request, as it's probably relative.
+ linkURL, err := resp.Request.URL.Parse(link)
+ if err != nil {
+ return nil, fmt.Errorf("invalid URL in Link=%q", link0)
+ }
+ return http.NewRequestWithContext(ctx, "GET", linkURL.String(), nil)
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/reader.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/reader.go
new file mode 100644
index 0000000000..68885846e8
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/reader.go
@@ -0,0 +1,186 @@
+// Copyright 2023 CUE Labs AG
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
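+
+// Annotation, not part of the upstream file: in GetBlobRange below, a
+// negative end offset selects everything from the start offset to the end
+// of the blob. Assuming reg holds an ociregistry.Interface and dgst is a
+// known blob digest, reading from byte 1024 onwards might look like:
+//
+//	r, err := reg.GetBlobRange(ctx, "some/repo", dgst, 1024, -1)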
+
+package ociclient
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+
+ "cuelabs.dev/go/oci/ociregistry"
+ "cuelabs.dev/go/oci/ociregistry/internal/ocirequest"
+ "github.com/opencontainers/go-digest"
+)
+
+func (c *client) GetBlob(ctx context.Context, repo string, digest ociregistry.Digest) (ociregistry.BlobReader, error) {
+ return c.read(ctx, &ocirequest.Request{
+ Kind: ocirequest.ReqBlobGet,
+ Repo: repo,
+ Digest: string(digest),
+ })
+}
+
+func (c *client) GetBlobRange(ctx context.Context, repo string, digest ociregistry.Digest, o0, o1 int64) (_ ociregistry.BlobReader, _err error) {
+ if o0 == 0 && o1 < 0 {
+ return c.GetBlob(ctx, repo, digest)
+ }
+ rreq := &ocirequest.Request{
+ Kind: ocirequest.ReqBlobGet,
+ Repo: repo,
+ Digest: string(digest),
+ }
+ req, err := newRequest(ctx, rreq, nil)
+ if err != nil {
+ return nil, err
+ }
+ if o1 < 0 {
+ req.Header.Set("Range", fmt.Sprintf("bytes=%d-", o0))
+ } else {
+ req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", o0, o1-1))
+ }
+ resp, err := c.do(req, http.StatusOK, http.StatusPartialContent)
+ if err != nil {
+ return nil, err
+ }
+ // TODO this is wrong when the server returns a 200 response.
+ // Fix that either by returning ErrUnsupported or by reading the whole
+ // blob and returning only the required portion.
+ defer closeOnError(&_err, resp.Body)
+ desc, err := descriptorFromResponse(resp, ociregistry.Digest(rreq.Digest), requireSize)
+ if err != nil {
+ return nil, fmt.Errorf("invalid descriptor in response: %v", err)
+ }
+ return newBlobReaderUnverified(resp.Body, desc), nil
+}
+
+func (c *client) ResolveBlob(ctx context.Context, repo string, digest ociregistry.Digest) (ociregistry.Descriptor, error) {
+ return c.resolve(ctx, &ocirequest.Request{
+ Kind: ocirequest.ReqBlobHead,
+ Repo: repo,
+ Digest: string(digest),
+ })
+}
+
+func (c *client) ResolveManifest(ctx context.Context, repo string, digest ociregistry.Digest) (ociregistry.Descriptor, error) {
+ return c.resolve(ctx, &ocirequest.Request{
+ Kind: ocirequest.ReqManifestHead,
+ Repo: repo,
+ Digest: string(digest),
+ })
+}
+
+func (c *client) ResolveTag(ctx context.Context, repo string, tag string) (ociregistry.Descriptor, error) {
+ return c.resolve(ctx, &ocirequest.Request{
+ Kind: ocirequest.ReqManifestHead,
+ Repo: repo,
+ Tag: tag,
+ })
+}
+
+func (c *client) resolve(ctx context.Context, rreq *ocirequest.Request) (ociregistry.Descriptor, error) {
+ resp, err := c.doRequest(ctx, rreq)
+ if err != nil {
+ return ociregistry.Descriptor{}, err
+ }
+ resp.Body.Close()
+ desc, err := descriptorFromResponse(resp, ociregistry.Digest(rreq.Digest), requireSize|requireDigest)
+ if err != nil {
+ return ociregistry.Descriptor{}, fmt.Errorf("invalid descriptor in response: %v", err)
+ }
+ return desc, nil
+}
+
+func (c *client) GetManifest(ctx context.Context, repo string, digest ociregistry.Digest) (ociregistry.BlobReader, error) {
+ return c.read(ctx, &ocirequest.Request{
+ Kind: ocirequest.ReqManifestGet,
+ Repo: repo,
+ Digest: string(digest),
+ })
+}
+
+func (c *client) GetTag(ctx context.Context, repo string, tagName string) (ociregistry.BlobReader, error) {
+ return c.read(ctx, &ocirequest.Request{
+ Kind: ocirequest.ReqManifestGet,
+ Repo: repo,
+ Tag: tagName,
+ })
+}
+
+// inMemThreshold holds the maximum number of bytes of manifest content
+// that we'll hold in memory to obtain a digest before falling back to
+// doing a HEAD request.
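+// (Annotation: on registries such as ECR the HEAD response does carry the
+// digest, presumably via the Docker-Content-Digest header.)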
+// +// This is hopefully large enough to be considerably larger than most +// manifests but small enough to fit comfortably into RAM on most +// platforms. +// +// Note: this is only used when talking to registries that fail to return +// a digest when doing a GET on a tag. +const inMemThreshold = 128 * 1024 + +func (c *client) read(ctx context.Context, rreq *ocirequest.Request) (_ ociregistry.BlobReader, _err error) { + resp, err := c.doRequest(ctx, rreq) + if err != nil { + return nil, err + } + defer closeOnError(&_err, resp.Body) + desc, err := descriptorFromResponse(resp, ociregistry.Digest(rreq.Digest), requireSize) + if err != nil { + return nil, fmt.Errorf("invalid descriptor in response: %v", err) + } + if desc.Digest == "" { + // Returning a digest isn't mandatory according to the spec, and + // at least one registry (AWS's ECR) fails to return a digest + // when doing a GET of a tag. + // We know the request must be a tag-getting + // request because all other requests take a digest not a tag + // but sanity check anyway. + if rreq.Kind != ocirequest.ReqManifestGet { + return nil, fmt.Errorf("internal error: no digest available for non-tag request") + } + + // If the manifest is of a reasonable size, just read it into memory + // and calculate the digest that way, otherwise issue a HEAD + // request which should hopefully (and does in the ECR case) + // give us the digest we need. + if desc.Size <= inMemThreshold { + data, err := io.ReadAll(io.LimitReader(resp.Body, desc.Size+1)) + if err != nil { + return nil, fmt.Errorf("failed to read body to determine digest: %v", err) + } + if int64(len(data)) != desc.Size { + return nil, fmt.Errorf("body size mismatch") + } + desc.Digest = digest.FromBytes(data) + resp.Body.Close() + resp.Body = io.NopCloser(bytes.NewReader(data)) + } else { + rreq1 := rreq + rreq1.Kind = ocirequest.ReqManifestHead + resp1, err := c.doRequest(ctx, rreq1) + if err != nil { + return nil, err + } + resp1.Body.Close() + desc, err = descriptorFromResponse(resp1, ociregistry.Digest(rreq1.Digest), requireSize|requireDigest) + if err != nil { + return nil, err + } + } + } + return newBlobReader(resp.Body, desc), nil +} diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/writer.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/writer.go new file mode 100644 index 0000000000..47ac6bc4c1 --- /dev/null +++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/writer.go @@ -0,0 +1,433 @@ +// Copyright 2023 CUE Labs AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ociclient + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + + "github.com/opencontainers/go-digest" + + "cuelabs.dev/go/oci/ociregistry" + "cuelabs.dev/go/oci/ociregistry/internal/ocirequest" + "cuelabs.dev/go/oci/ociregistry/ociauth" +) + +// This file implements the ociregistry.Writer methods. 
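+//
+// Annotation, not part of the upstream source: a sketch of pushing a small
+// blob with a precomputed descriptor, assuming reg holds an
+// ociregistry.Interface value:
+//
+//	data := []byte("example contents")
+//	desc := ociregistry.Descriptor{
+//		MediaType: "application/octet-stream",
+//		Digest:    digest.FromBytes(data),
+//		Size:      int64(len(data)),
+//	}
+//	desc, err := reg.PushBlob(ctx, "some/repo", desc, bytes.NewReader(data))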
+ +func (c *client) PushManifest(ctx context.Context, repo string, tag string, contents []byte, mediaType string) (ociregistry.Descriptor, error) { + if mediaType == "" { + return ociregistry.Descriptor{}, fmt.Errorf("PushManifest called with empty mediaType") + } + desc := ociregistry.Descriptor{ + Digest: digest.FromBytes(contents), + Size: int64(len(contents)), + MediaType: mediaType, + } + + rreq := &ocirequest.Request{ + Kind: ocirequest.ReqManifestPut, + Repo: repo, + Tag: tag, + Digest: string(desc.Digest), + } + req, err := newRequest(ctx, rreq, bytes.NewReader(contents)) + if err != nil { + return ociregistry.Descriptor{}, err + } + req.Header.Set("Content-Type", mediaType) + req.ContentLength = desc.Size + resp, err := c.do(req, http.StatusCreated) + if err != nil { + return ociregistry.Descriptor{}, err + } + resp.Body.Close() + return desc, nil +} + +func (c *client) MountBlob(ctx context.Context, fromRepo, toRepo string, dig ociregistry.Digest) (ociregistry.Descriptor, error) { + rreq := &ocirequest.Request{ + Kind: ocirequest.ReqBlobMount, + Repo: toRepo, + FromRepo: fromRepo, + Digest: string(dig), + } + resp, err := c.doRequest(ctx, rreq, http.StatusCreated, http.StatusAccepted) + if err != nil { + return ociregistry.Descriptor{}, err + } + resp.Body.Close() + if resp.StatusCode == http.StatusAccepted { + // Mount isn't supported and technically the upload session has begun, + // but we aren't in a great position to be able to continue it, so let's just + // return Unsupported. + return ociregistry.Descriptor{}, fmt.Errorf("registry does not support mounts: %w", ociregistry.ErrUnsupported) + } + // TODO: is it OK to omit the size from the returned descriptor here? + return descriptorFromResponse(resp, dig, requireDigest) +} + +func (c *client) PushBlob(ctx context.Context, repo string, desc ociregistry.Descriptor, r io.Reader) (_ ociregistry.Descriptor, _err error) { + // TODO use the single-post blob-upload method (ReqBlobUploadBlob) + // See: + // https://github.com/distribution/distribution/issues/4065 + // https://github.com/golang/go/issues/63152 + rreq := &ocirequest.Request{ + Kind: ocirequest.ReqBlobStartUpload, + Repo: repo, + } + req, err := newRequest(ctx, rreq, nil) + if err != nil { + return ociregistry.Descriptor{}, err + } + resp, err := c.do(req, http.StatusAccepted) + if err != nil { + return ociregistry.Descriptor{}, err + } + resp.Body.Close() + location, err := locationFromResponse(resp) + if err != nil { + return ociregistry.Descriptor{}, err + } + + // We've got the upload location. Now PUT the content. + + ctx = ociauth.ContextWithRequestInfo(ctx, ociauth.RequestInfo{ + RequiredScope: scopeForRequest(rreq), + }) + // Note: we can't use ocirequest.Request here because that's + // specific to the ociserver implementation in this case. + req, err = http.NewRequestWithContext(ctx, "PUT", "", r) + if err != nil { + return ociregistry.Descriptor{}, err + } + req.URL = urlWithDigest(location, string(desc.Digest)) + req.ContentLength = desc.Size + req.Header.Set("Content-Type", "application/octet-stream") + // TODO: per the spec, the content-range header here is unnecessary. + req.Header.Set("Content-Range", ocirequest.RangeString(0, desc.Size)) + resp, err = c.do(req, http.StatusCreated) + if err != nil { + return ociregistry.Descriptor{}, err + } + defer closeOnError(&_err, resp.Body) + resp.Body.Close() + return desc, nil +} + +// TODO is this a reasonable default? We have to +// weigh up in-memory cost vs round-trip overhead. 
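+// (Annotation: 64 KiB presumably keeps per-upload buffering modest while
+// still amortizing the cost of one PATCH round trip per chunk.)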
+// TODO: make this default configurable. +const defaultChunkSize = 64 * 1024 + +func (c *client) PushBlobChunked(ctx context.Context, repo string, chunkSize int) (ociregistry.BlobWriter, error) { + if chunkSize <= 0 { + chunkSize = defaultChunkSize + } + resp, err := c.doRequest(ctx, &ocirequest.Request{ + Kind: ocirequest.ReqBlobStartUpload, + Repo: repo, + }, http.StatusAccepted) + if err != nil { + return nil, err + } + resp.Body.Close() + location, err := locationFromResponse(resp) + if err != nil { + return nil, err + } + ctx = ociauth.ContextWithRequestInfo(ctx, ociauth.RequestInfo{ + RequiredScope: ociauth.NewScope(ociauth.ResourceScope{ + ResourceType: "repository", + Resource: repo, + Action: "push", + }), + }) + return &blobWriter{ + ctx: ctx, + client: c, + chunkSize: chunkSizeFromResponse(resp, chunkSize), + chunk: make([]byte, 0, chunkSize), + location: location, + }, nil +} + +func (c *client) PushBlobChunkedResume(ctx context.Context, repo string, id string, offset int64, chunkSize int) (ociregistry.BlobWriter, error) { + if id == "" { + return nil, fmt.Errorf("id must be non-empty to resume a chunked upload") + } + if chunkSize <= 0 { + chunkSize = defaultChunkSize + } + var location *url.URL + switch { + case offset == -1: + // Try to find what offset we're meant to be writing at + // by doing a GET to the location. + // TODO does resuming an upload require push or pull scope or both? + ctx := ociauth.ContextWithRequestInfo(ctx, ociauth.RequestInfo{ + RequiredScope: ociauth.NewScope(ociauth.ResourceScope{ + ResourceType: "repository", + Resource: repo, + Action: "push", + }, ociauth.ResourceScope{ + ResourceType: "repository", + Resource: repo, + Action: "pull", + }), + }) + req, err := http.NewRequestWithContext(ctx, "GET", id, nil) + if err != nil { + return nil, err + } + resp, err := c.do(req, http.StatusNoContent) + if err != nil { + return nil, fmt.Errorf("cannot recover chunk offset: %v", err) + } + location, err = locationFromResponse(resp) + if err != nil { + return nil, fmt.Errorf("cannot get location from response: %v", err) + } + rangeStr := resp.Header.Get("Range") + p0, p1, ok := ocirequest.ParseRange(rangeStr) + if !ok { + return nil, fmt.Errorf("invalid range %q in response", rangeStr) + } + if p0 != 0 { + return nil, fmt.Errorf("range %q does not start with 0", rangeStr) + } + chunkSize = chunkSizeFromResponse(resp, chunkSize) + offset = p1 + case offset < 0: + return nil, fmt.Errorf("invalid offset; must be -1 or non-negative") + default: + var err error + location, err = url.Parse(id) // Note that this mirrors [BlobWriter.ID]. + if err != nil { + return nil, fmt.Errorf("provided ID is not a valid location URL") + } + if !strings.HasPrefix(location.Path, "/") { + // Our BlobWriter.ID method always returns a fully + // qualified absolute URL, so this must be a mistake + // on the part of the caller. + // We allow a relative URL even though we don't + // ever return one to make things a bit easier for tests. + return nil, fmt.Errorf("provided upload ID %q has unexpected relative URL path", id) + } + } + ctx = ociauth.ContextWithRequestInfo(ctx, ociauth.RequestInfo{ + RequiredScope: ociauth.NewScope(ociauth.ResourceScope{ + ResourceType: "repository", + Resource: repo, + Action: "push", + }), + }) + return &blobWriter{ + ctx: ctx, + client: c, + chunkSize: chunkSize, + size: offset, + flushed: offset, + location: location, + }, nil +} + +type blobWriter struct { + client *client + chunkSize int + ctx context.Context + + // mu guards the fields below it. 
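+ // (Write, Close, Size, ID and Commit below all acquire it.)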
+ mu sync.Mutex + closed bool + chunk []byte + closeErr error + + // size holds the size of the entire upload as seen from the + // client perspective. Each call to Write increases this immediately. + size int64 + + // flushed holds the size of the upload as flushed to the server. + // Each successfully flushed chunk increases this. + flushed int64 + location *url.URL +} + +func (w *blobWriter) Write(buf []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + + // We use > rather than >= here so that using a chunk size of 100 + // and writing 100 bytes does not actually flush, which would result in a PATCH + // then followed by an empty-bodied PUT with the call to Commit. + // Instead, we want the writes to not flush at all, and Commit to PUT the entire chunk. + if len(w.chunk)+len(buf) > w.chunkSize { + if err := w.flush(buf, ""); err != nil { + return 0, err + } + } else { + if w.chunk == nil { + w.chunk = make([]byte, 0, w.chunkSize) + } + w.chunk = append(w.chunk, buf...) + } + w.size += int64(len(buf)) + return len(buf), nil +} + +// flush flushes any outstanding upload data to the server. +// If commitDigest is non-empty, this is the final segment of data in the blob: +// the blob is being committed and the digest should hold the digest of the entire blob content. +func (w *blobWriter) flush(buf []byte, commitDigest ociregistry.Digest) error { + if commitDigest == "" && len(buf)+len(w.chunk) == 0 { + return nil + } + // Start a new PATCH request to send the currently outstanding data. + method := "PATCH" + expect := http.StatusAccepted + reqURL := w.location + if commitDigest != "" { + // This is the final piece of data, so send it as the final PUT request + // (committing the whole blob) which avoids an extra round trip. + method = "PUT" + expect = http.StatusCreated + reqURL = urlWithDigest(reqURL, string(commitDigest)) + } + req, err := http.NewRequestWithContext(w.ctx, method, "", concatBody(w.chunk, buf)) + if err != nil { + return fmt.Errorf("cannot make PATCH request: %v", err) + } + req.URL = reqURL + req.ContentLength = int64(len(w.chunk) + len(buf)) + // TODO: per the spec, the content-range header here is unnecessary + // if we are doing a final PUT without a body. + req.Header.Set("Content-Range", ocirequest.RangeString(w.flushed, w.flushed+req.ContentLength)) + resp, err := w.client.do(req, expect) + if err != nil { + return err + } + resp.Body.Close() + location, err := locationFromResponse(resp) + if err != nil { + return fmt.Errorf("bad Location in response: %v", err) + } + // TODO is there something we could be doing with the Range header in the response? 
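+ // Record the Location from this response: per the distribution spec,
+ // each upload response names the URL to use for the next request.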
+ w.location = location
+ w.flushed += req.ContentLength
+ w.chunk = w.chunk[:0]
+ return nil
+}
+
+func concatBody(b1, b2 []byte) io.Reader {
+ if len(b1)+len(b2) == 0 {
+ return nil // note that net/http treats a nil request body differently
+ }
+ if len(b1) == 0 {
+ return bytes.NewReader(b2)
+ }
+ if len(b2) == 0 {
+ return bytes.NewReader(b1)
+ }
+ return io.MultiReader(
+ bytes.NewReader(b1),
+ bytes.NewReader(b2),
+ )
+}
+
+func (w *blobWriter) Close() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if w.closed {
+ return w.closeErr
+ }
+ err := w.flush(nil, "")
+ w.closed = true
+ w.closeErr = err
+ return err
+}
+
+func (w *blobWriter) Size() int64 {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ return w.size
+}
+
+func (w *blobWriter) ChunkSize() int {
+ return w.chunkSize
+}
+
+func (w *blobWriter) ID() string {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ return w.location.String()
+}
+
+func (w *blobWriter) Commit(digest ociregistry.Digest) (ociregistry.Descriptor, error) {
+ if digest == "" {
+ return ociregistry.Descriptor{}, fmt.Errorf("cannot commit with an empty digest")
+ }
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if err := w.flush(nil, digest); err != nil {
+ return ociregistry.Descriptor{}, fmt.Errorf("cannot flush data before commit: %w", err)
+ }
+ return ociregistry.Descriptor{
+ MediaType: "application/octet-stream",
+ Size: w.size,
+ Digest: digest,
+ }, nil
+}
+
+func (w *blobWriter) Cancel() error {
+ return nil
+}
+
+// urlWithDigest returns u with the digest query parameter set, taking care not
+// to disrupt the initial URL (thus avoiding the charge of "manually
+// assembling the location"; see [here]).
+//
+// [here]: https://github.com/opencontainers/distribution-spec/blob/main/spec.md#post-then-put
+func urlWithDigest(u0 *url.URL, digest string) *url.URL {
+ u := *u0
+ digest = url.QueryEscape(digest)
+ switch {
+ case u.ForceQuery:
+ // The URL already ended in a "?" with no actual query parameters.
+ u.RawQuery = "digest=" + digest
+ u.ForceQuery = false
+ case u.RawQuery != "":
+ // There's already a query parameter present.
+ u.RawQuery += "&digest=" + digest
+ default:
+ u.RawQuery = "digest=" + digest
+ }
+ return &u
+}
+
+// See https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pushing-a-blob-in-chunks
+func chunkSizeFromResponse(resp *http.Response, chunkSize int) int {
+ minChunkSize, err := strconv.Atoi(resp.Header.Get("OCI-Chunk-Min-Length"))
+ if err == nil && minChunkSize > chunkSize {
+ return minChunkSize
+ }
+ return chunkSize
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociref/reference.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociref/reference.go
new file mode 100644
index 0000000000..650b041a0d
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociref/reference.go
@@ -0,0 +1,233 @@
+// Copyright 2023 CUE Labs AG
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ociref supports parsing cross-registry OCI registry references.
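+//
+// For example (an illustrative annotation, not part of the upstream file):
+//
+//	ref, err := ociref.Parse("registry.example.com/foo/bar:v1.0.0")
+//	// on success: ref.Host == "registry.example.com",
+//	// ref.Repository == "foo/bar" and ref.Tag == "v1.0.0"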
+package ociref
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "cuelabs.dev/go/oci/ociregistry"
+)
+
+// The following regular expressions are derived from code in the
+// [github.com/distribution/distribution/v3/reference] package.
+const (
+ // alphanumeric defines the alphanumeric atom, typically a
+ // component of names. This only allows lower case characters and digits.
+ alphanumeric = `[a-z0-9]+`
+
+ // separator defines the separators allowed to be embedded in name
+ // components. This allows one period, one or two underscore and multiple
+ // dashes. Repeated dashes and underscores are intentionally treated
+ // differently. In order to support valid hostnames as name components,
+ // supporting repeated dash was added. Additionally double underscore is
+ // now allowed as a separator to loosen the restriction for previously
+ // supported names.
+ // TODO the distribution spec doesn't allow these variations.
+ separator = `(?:[._]|__|[-]+)`
+
+ // domainNameComponent restricts the registry domain component of a
+ // repository name to start with a component as defined by DomainRegexp.
+ domainNameComponent = `(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?)`
+
+ // ipv6address matches an IPv6 address enclosed between square brackets.
+ // It may be represented in many ways, see rfc5952. Only IPv6 in compressed
+ // or uncompressed format are allowed, IPv6 zone identifiers (rfc6874) or
+ // special addresses such as IPv4-Mapped are deliberately excluded.
+ ipv6address = `(?:\[[a-fA-F0-9:]+\])`
+
+ // port matches a port-number (e.g. "80"), without the port separator.
+ port = `[0-9]+`
+
+ // domainName defines the structure of potential domain components
+ // that may be part of image names. This is purposely a subset of what is
+ // allowed by DNS to ensure backwards compatibility with Docker image
+ // names. This includes IPv4 addresses in decimal format.
+ //
+ // Note: we purposely exclude domain names without dots here,
+ // because otherwise we can't tell if the first component is
+ // a host name or not when it doesn't have a port.
+ // When it does have a port, the distinction is clear.
+ //
+ domainName = `(?:` + domainNameComponent + `(?:\.` + domainNameComponent + `)+` + `)`
+
+ // host defines the structure of potential domains based on the URI
+ // Host subcomponent on rfc3986. It may be a subset of DNS domain name,
+ // or an IPv4 address in decimal format, or an IPv6 address between square
+ // brackets (excluding zone identifiers as defined by rfc6874 or special
+ // addresses such as IPv4-Mapped).
+ host = `(?:` + domainName + `|` + ipv6address + `)`
+
+ // domainAndPort matches a host with an optional port, or a bare
+ // domain-name component followed by a mandatory port, as allowed
+ // by the URI Host subcomponent on rfc3986 to ensure backwards
+ // compatibility with Docker image names.
+ // Note that we require the port when the host name looks like a regular
+ // name component.
+ domainAndPort = `(?:` + host + `(?:` + `:` + port + `)?` + `|` + domainNameComponent + `:` + port + `)`
+
+ // pathComponent restricts path-components to start with an alphanumeric
+ // character, with following parts able to be separated by a separator
+ // (one period, one or two underscore and multiple dashes).
+ pathComponent = `(?:` + alphanumeric + `(?:` + separator + alphanumeric + `)*` + `)`
+
+ // repoName matches the name of a repository. It consists of one
+ // or more forward slash (/) delimited path-components:
+ //
+ // pathComponent[[/pathComponent] ...]
// e.g., "library/ubuntu" + repoName = pathComponent + `(?:` + `/` + pathComponent + `)*` +) + +var referencePat = regexp.MustCompile( + `^(?:` + + `(?:` + `(` + domainAndPort + `)` + `/` + `)?` + // capture 1: host + `(` + repoName + `)` + // capture 2: repository name + `(?:` + `:([^@]+))?` + // capture 3: tag; rely on Go logic to test validity. + `(?:` + `@(.+))?` + // capture 4: digest; rely on go-digest to find issues + `)$`, +) + +var hostPat = regexp.MustCompile(`^(?:` + domainAndPort + `)$`) +var repoPat = regexp.MustCompile(`^(?:` + repoName + `)$`) + +// Reference represents an entry in an OCI repository. +type Reference struct { + // Host holds the host name of the registry + // within which the repository is stored, optionally in + // the form host:port. This might be empty. + Host string + + // Repository holds the repository name. + Repository string + + // Tag holds the TAG part of a :TAG or :TAG@DIGEST reference. + // When Digest is set as well as Tag, the tag will be verified + // to exist and have the expected digest. + Tag string + + // Digest holds the DIGEST part of an @DIGEST reference + // or of a :TAG@DIGEST reference. + Digest ociregistry.Digest +} + +// IsValidHost reports whether s is a valid host (or host:port) part of a reference string. +func IsValidHost(s string) bool { + return hostPat.MatchString(s) +} + +// IsValidHost reports whether s is a valid repository part +// of a reference string. +func IsValidRepository(s string) bool { + return repoPat.MatchString(s) +} + +// IsValidTag reports whether s is a valid reference tag. +func IsValidTag(s string) bool { + return checkTag(s) == nil +} + +// Parse parses a reference string that must include +// a host name (or host:port pair) component. +// +// It is represented in string form as HOST[:PORT]/NAME[:TAG|@DIGEST] +// form: the same syntax accepted by "docker pull". +// Unlike "docker pull" however, there is no default registry: when +// presented with a bare repository name, Parse will return an error. +func Parse(refStr string) (Reference, error) { + ref, err := ParseRelative(refStr) + if err != nil { + return Reference{}, err + } + if ref.Host == "" { + return Reference{}, fmt.Errorf("reference does not contain host name") + } + return ref, nil +} + +// ParseRelative parses a reference string that may +// or may not include a host name component. +// +// It is represented in string form as [HOST[:PORT]/]NAME[:TAG|@DIGEST] +// form: the same syntax accepted by "docker pull". +// Unlike "docker pull" however, there is no default registry: when +// presented with a bare repository name, the Host field will be empty. +func ParseRelative(refStr string) (Reference, error) { + m := referencePat.FindStringSubmatch(refStr) + if m == nil { + return Reference{}, fmt.Errorf("invalid reference syntax (%q)", refStr) + } + var ref Reference + ref.Host, ref.Repository, ref.Tag, ref.Digest = m[1], m[2], m[3], ociregistry.Digest(m[4]) + // Check lengths and digest: we don't check these as part of the regexp + // because it's more efficient to do it in Go and we get + // nicer error messages as a result. 
+ if len(ref.Digest) > 0 {
+ if err := ref.Digest.Validate(); err != nil {
+ return Reference{}, fmt.Errorf("invalid digest %q: %v", ref.Digest, err)
+ }
+ }
+ if len(ref.Tag) > 0 {
+ if err := checkTag(ref.Tag); err != nil {
+ return Reference{}, err
+ }
+ }
+ if len(ref.Repository) > 255 {
+ return Reference{}, fmt.Errorf("repository name too long")
+ }
+ return ref, nil
+}
+
+func checkTag(s string) error {
+ if len(s) > 128 {
+ return fmt.Errorf("tag too long")
+ }
+ if !isWord(s[0]) {
+ return fmt.Errorf("tag %q does not start with word character", s)
+ }
+ for i := 1; i < len(s); i++ {
+ c := s[i]
+ if !isWord(c) && c != '.' && c != '-' {
+ return fmt.Errorf("tag %q contains invalid character %q", s, c)
+ }
+ }
+ return nil
+}
+
+func isWord(c byte) bool {
+ return c == '_' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9')
+}
+
+// String returns the string form of a reference in the form
+//
+// [HOST/]NAME[:TAG|@DIGEST]
+func (ref Reference) String() string {
+ var buf strings.Builder
+ buf.Grow(len(ref.Host) + 1 + len(ref.Repository) + 1 + len(ref.Tag) + 1 + len(ref.Digest))
+ if ref.Host != "" {
+ buf.WriteString(ref.Host)
+ buf.WriteByte('/')
+ }
+ buf.WriteString(ref.Repository)
+ if len(ref.Tag) > 0 {
+ buf.WriteByte(':')
+ buf.WriteString(ref.Tag)
+ }
+ if len(ref.Digest) > 0 {
+ buf.WriteByte('@')
+ buf.WriteString(string(ref.Digest))
+ }
+ return buf.String()
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/valid.go b/vendor/cuelabs.dev/go/oci/ociregistry/valid.go
new file mode 100644
index 0000000000..7486fa195a
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/valid.go
@@ -0,0 +1,30 @@
+package ociregistry
+
+import (
+ "regexp"
+
+ "github.com/opencontainers/go-digest"
+)
+
+var (
+ tagPattern = regexp.MustCompile(`^[a-zA-Z0-9_][a-zA-Z0-9._-]{0,127}$`)
+ repoNamePattern = regexp.MustCompile(`^[a-z0-9]+([._-][a-z0-9]+)*(/[a-z0-9]+([._-][a-z0-9]+)*)*$`)
+)
+
+// IsValidRepoName reports whether the given repository
+// name is valid according to the specification.
+func IsValidRepoName(repoName string) bool {
+ return repoNamePattern.MatchString(repoName)
+}
+
+// IsValidTag reports whether the given tag is valid
+// according to the specification.
+func IsValidTag(tag string) bool {
+ return tagPattern.MatchString(tag)
+}
+
+// IsValidDigest reports whether the digest d is well formed.
+func IsValidDigest(d string) bool {
+ _, err := digest.Parse(d)
+ return err == nil
+}
diff --git a/vendor/cuelang.org/go/AUTHORS b/vendor/cuelang.org/go/AUTHORS
deleted file mode 100644
index 884392fca0..0000000000
--- a/vendor/cuelang.org/go/AUTHORS
+++ /dev/null
@@ -1,6 +0,0 @@
-# This is the list of CUE authors for copyright purposes.
-#
-# This does not necessarily list everyone who has contributed code, since in
-# some cases, their employer may be the copyright holder. To see the full list
-# of contributors, see the revision history in source control.
-Google LLC
diff --git a/vendor/cuelang.org/go/cue/ast/ast.go b/vendor/cuelang.org/go/cue/ast/ast.go
index 0f1e2afae3..b47a33d852 100644
--- a/vendor/cuelang.org/go/cue/ast/ast.go
+++ b/vendor/cuelang.org/go/cue/ast/ast.go
@@ -14,7 +14,7 @@
 // Package ast declares the types used to represent syntax trees for CUE
 // packages.
-package ast // import "cuelang.org/go/cue/ast"
+package ast
 
 import (
 	"fmt"
@@ -48,10 +48,10 @@ type Node interface {
 // the node or nil if there is no such position.
pos() *token.Pos - // Deprecated: use ast.Comments + // Deprecated: use [Comments] Comments() []*CommentGroup - // Deprecated: use ast.AddComment + // Deprecated: use [AddComment] AddComment(*CommentGroup) commentInfo() *comments } @@ -112,7 +112,7 @@ type decl struct{} func (decl) declNode() {} -// A Label is any production that can be used as a LHS label. +// A Label is any production that can be used as an LHS label. type Label interface { Node labelNode() @@ -394,7 +394,7 @@ type BottomLit struct { expr } -// An Ident node represents an left-hand side identifier, +// An Ident node represents a left-hand side identifier, // including the underscore "_" identifier to represent top. type Ident struct { NamePos token.Pos // identifier position @@ -951,7 +951,7 @@ func (d *EmbedDecl) End() token.Pos { return d.Expr.End() } // ---------------------------------------------------------------------------- // Files and packages -// A File node represents a Go source file. +// A File node represents a CUE source file. // // The Comments list contains all comments in the source file in order of // appearance, including the comments that are pointed to from other nodes diff --git a/vendor/cuelang.org/go/cue/ast/astutil/apply.go b/vendor/cuelang.org/go/cue/ast/astutil/apply.go index 1c43629e23..08bb236a65 100644 --- a/vendor/cuelang.org/go/cue/ast/astutil/apply.go +++ b/vendor/cuelang.org/go/cue/ast/astutil/apply.go @@ -199,19 +199,6 @@ type applyVisitor interface { // Helper functions for common node lists. They may be empty. -func applyExprList(v applyVisitor, parent Cursor, list []ast.Expr) { - c := newCursor(parent, nil, nil) - for i, x := range list { - c.index = i - c.node = x - c.typ = &list[i] - applyCursor(v, c) - if x != c.node { - list[i] = c.node.(ast.Expr) - } - } -} - type declsCursor struct { *cursor decls, after, process []ast.Decl @@ -295,6 +282,19 @@ func apply[N ast.Node](v applyVisitor, parent Cursor, nodePtr *N) { } } +func applyList[N ast.Node](v applyVisitor, parent Cursor, list []N) { + c := newCursor(parent, nil, nil) + for i, node := range list { + c.index = i + c.node = node + c.typ = &list[i] + applyCursor(v, c) + if ast.Node(node) != c.node { + list[i] = c.node.(N) + } + } +} + // applyCursor traverses an AST in depth-first order: It starts by calling // v.Visit(node); node must not be nil. If the visitor w returned by // v.Visit(node) is not nil, apply is invoked recursively with visitor @@ -309,10 +309,7 @@ func applyCursor(v applyVisitor, c Cursor) { // TODO: record the comment groups and interleave with the values like for // parsing and printing? 
- comments := node.Comments() - for _, cm := range comments { - apply(v, c, &cm) - } + applyList(v, c, ast.Comments(node)) // apply children // (the order of the cases matches the order @@ -323,9 +320,7 @@ func applyCursor(v applyVisitor, c Cursor) { // nothing to do case *ast.CommentGroup: - for _, cg := range n.List { - apply(v, c, &cg) - } + applyList(v, c, n.List) case *ast.Attribute: // nothing to do @@ -335,9 +330,7 @@ func applyCursor(v applyVisitor, c Cursor) { if n.Value != nil { apply(v, c, &n.Value) } - for _, a := range n.Attrs { - apply(v, c, &a) - } + applyList(v, c, n.Attrs) case *ast.StructLit: n.Elts = applyDeclList(v, c, n.Elts) @@ -347,10 +340,10 @@ func applyCursor(v applyVisitor, c Cursor) { // nothing to do case *ast.Interpolation: - applyExprList(v, c, n.Elts) + applyList(v, c, n.Elts) case *ast.ListLit: - applyExprList(v, c, n.Elts) + applyList(v, c, n.Elts) case *ast.Ellipsis: if n.Type != nil { @@ -379,7 +372,7 @@ func applyCursor(v applyVisitor, c Cursor) { case *ast.CallExpr: apply(v, c, &n.Fun) - applyExprList(v, c, n.Args) + applyList(v, c, n.Args) case *ast.UnaryExpr: apply(v, c, &n.X) @@ -399,9 +392,7 @@ func applyCursor(v applyVisitor, c Cursor) { // nothing to do case *ast.ImportDecl: - for _, s := range n.Specs { - apply(v, c, &s) - } + applyList(v, c, n.Specs) case *ast.EmbedDecl: apply(v, c, &n.Expr) @@ -415,10 +406,7 @@ func applyCursor(v applyVisitor, c Cursor) { apply(v, c, &n.Expr) case *ast.Comprehension: - clauses := n.Clauses - for i := range n.Clauses { - apply(v, c, &clauses[i]) - } + applyList(v, c, n.Clauses) apply(v, c, &n.Value) // Files and packages @@ -462,7 +450,7 @@ func (f *applier) Before(c Cursor) applyVisitor { node := c.Node() if f.before == nil || (f.before(c) && node == c.Node()) { f.commentStack = append(f.commentStack, f.current) - f.current = commentFrame{cg: node.Comments()} + f.current = commentFrame{cg: ast.Comments(node)} f.visitComments(c, f.current.pos) return f } diff --git a/vendor/cuelang.org/go/cue/ast/astutil/resolve.go b/vendor/cuelang.org/go/cue/ast/astutil/resolve.go index cdcdfce331..9f1d020ed3 100644 --- a/vendor/cuelang.org/go/cue/ast/astutil/resolve.go +++ b/vendor/cuelang.org/go/cue/ast/astutil/resolve.go @@ -75,14 +75,14 @@ type ErrFunc func(pos token.Pos, msg string, args ...interface{}) // Resolve resolves all identifiers in a file. Unresolved identifiers are // recorded in Unresolved. It will not overwrite already resolved values. func Resolve(f *ast.File, errFn ErrFunc) { - walk(&scope{errFn: errFn, identFn: resolveIdent}, f) + walkVisitor(f, &scope{errFn: errFn, identFn: resolveIdent}) } // Resolve resolves all identifiers in an expression. // It will not overwrite already resolved values. func ResolveExpr(e ast.Expr, errFn ErrFunc) { f := &ast.File{} - walk(&scope{file: f, errFn: errFn, identFn: resolveIdent}, e) + walkVisitor(e, &scope{file: f, errFn: errFn, identFn: resolveIdent}) } // A Scope maintains the set of named language entities declared @@ -122,13 +122,9 @@ func newScope(f *ast.File, outer *scope, node ast.Node, decls []ast.Decl) *scope label := x.Label if a, ok := x.Label.(*ast.Alias); ok { - // TODO(legacy): use name := a.Ident.Name once quoted - // identifiers are no longer supported. 
- label, _ = a.Expr.(ast.Label) - if name, _, _ := ast.LabelName(a.Ident); name != "" { - if _, ok := label.(*ast.ListLit); !ok { - s.insert(name, x, a) - } + name := a.Ident.Name + if _, ok := a.Expr.(*ast.ListLit); !ok { + s.insert(name, x, a) } } @@ -260,7 +256,7 @@ func (s *scope) Before(n ast.Node) (w visitor) { s := newScope(x, s, x, x.Decls) // Support imports. for _, d := range x.Decls { - walk(s, d) + walkVisitor(d, s) } return nil @@ -269,7 +265,7 @@ func (s *scope) Before(n ast.Node) (w visitor) { case *ast.Comprehension: s = scopeClauses(s, x.Clauses) - walk(s, x.Value) + walkVisitor(x.Value, s) return nil case *ast.Field: @@ -281,10 +277,10 @@ func (s *scope) Before(n ast.Node) (w visitor) { switch label := n.(type) { case *ast.ParenExpr: - walk(s, label) + walkVisitor(label, s) case *ast.Interpolation: - walk(s, label) + walkVisitor(label, s) case *ast.ListLit: if len(label.Elts) != 1 { @@ -308,10 +304,7 @@ func (s *scope) Before(n ast.Node) (w visitor) { // illegal name clashes, and it allows giving better error // messages. This puts the burden on clients of this library // to detect illegal usage, though. - name, err := ast.ParseIdent(a.Ident) - if err == nil { - s.insert(name, a.Expr, a) - } + s.insert(a.Ident.Name, a.Expr, a) } ast.Walk(expr, nil, func(n ast.Node) { @@ -324,7 +317,7 @@ func (s *scope) Before(n ast.Node) (w visitor) { } } }) - walk(s, expr) + walkVisitor(expr, s) } if n := x.Value; n != nil { @@ -336,7 +329,7 @@ func (s *scope) Before(n ast.Node) (w visitor) { n = alias.Expr } s.inField = true - walk(s, n) + walkVisitor(n, s) s.inField = false } @@ -349,7 +342,7 @@ func (s *scope) Before(n ast.Node) (w visitor) { delete(s.index, name) // The same name may still appear in another scope if x.Expr != nil { - walk(s, x.Expr) + walkVisitor(x.Expr, s) } s.index[name] = saved return nil @@ -361,7 +354,7 @@ func (s *scope) Before(n ast.Node) (w visitor) { delete(s.index, name) // The same name may still appear in another scope if x.Expr != nil { - walk(s, x.Expr) + walkVisitor(x.Expr, s) } s.index[name] = saved return nil @@ -374,7 +367,7 @@ func (s *scope) Before(n ast.Node) (w visitor) { // that resolve in a list. 
case *ast.SelectorExpr: - walk(s, x.X) + walkVisitor(x.X, s) return nil case *ast.Ident: @@ -392,12 +385,12 @@ func resolveIdent(s *scope, x *ast.Ident) bool { return false } if _, obj, node := s.lookup(name); node.node != nil { - switch { - case x.Node == nil: + switch x.Node { + case nil: x.Node = node.node x.Scope = obj - case x.Node == node.node: + case node.node: x.Scope = obj default: // x.Node != node @@ -417,29 +410,20 @@ func scopeClauses(s *scope, clauses []ast.Clause) *scope { for _, c := range clauses { switch x := c.(type) { case *ast.ForClause: - walk(s, x.Source) + walkVisitor(x.Source, s) s = newScope(s.file, s, x, nil) if x.Key != nil { - name, err := ast.ParseIdent(x.Key) - if err == nil { - s.insert(name, x.Key, x) - } - } - name, err := ast.ParseIdent(x.Value) - if err == nil { - s.insert(name, x.Value, x) + s.insert(x.Key.Name, x.Key, x) } + s.insert(x.Value.Name, x.Value, x) case *ast.LetClause: - walk(s, x.Expr) + walkVisitor(x.Expr, s) s = newScope(s.file, s, x, nil) - name, err := ast.ParseIdent(x.Ident) - if err == nil { - s.insert(name, x.Ident, x) - } + s.insert(x.Ident.Name, x.Ident, x) default: - walk(s, c) + walkVisitor(c, s) } } return s diff --git a/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go b/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go index 55ffa1fd9a..c0cfee8d8e 100644 --- a/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go +++ b/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go @@ -31,7 +31,7 @@ import ( // - change a predeclared identifier reference to use the __ident form, // instead of introducing an alias. -// Sanitize rewrites File f in place to be well formed after automated +// Sanitize rewrites File f in place to be well-formed after automated // construction of an AST. // // Rewrites: @@ -50,11 +50,11 @@ func Sanitize(f *ast.File) error { } // Gather all names. - walk(&scope{ + walkVisitor(f, &scope{ errFn: z.errf, nameFn: z.addName, identFn: z.markUsed, - }, f) + }) if z.errs != nil { return z.errs } @@ -67,7 +67,7 @@ func Sanitize(f *ast.File) error { index: make(map[string]entry), } z.fileScope = s - walk(s, f) + walkVisitor(f, s) if z.errs != nil { return z.errs } @@ -317,7 +317,7 @@ func (z *sanitizer) handleIdent(s *scope, n *ast.Ident) bool { } // uniqueName returns a new name globally unique name of the form -// base_XX ... base_XXXXXXXXXXXXXX or _base or the same pattern with a '_' +// base_NN ... base_NNNNNNNNNNNNNN or _base or the same pattern with a '_' // prefix if hidden is true. // // It prefers short extensions over large ones, while ensuring the likelihood of diff --git a/vendor/cuelang.org/go/cue/ast/astutil/util.go b/vendor/cuelang.org/go/cue/ast/astutil/util.go index 539d7e3256..2af00db95a 100644 --- a/vendor/cuelang.org/go/cue/ast/astutil/util.go +++ b/vendor/cuelang.org/go/cue/ast/astutil/util.go @@ -78,7 +78,7 @@ func CopyComments(to, from ast.Node) { if from == nil { return } - ast.SetComments(to, from.Comments()) + ast.SetComments(to, ast.Comments(from)) } // CopyPosition sets the position of one node to another. 
@@ -95,7 +95,7 @@ func CopyMeta(to, from ast.Node) ast.Node { if from == nil { return to } - ast.SetComments(to, from.Comments()) + ast.SetComments(to, ast.Comments(from)) ast.SetPos(to, from.Pos()) return to } diff --git a/vendor/cuelang.org/go/cue/ast/astutil/walk.go b/vendor/cuelang.org/go/cue/ast/astutil/walk.go index d6bc9b24cf..6175a98af2 100644 --- a/vendor/cuelang.org/go/cue/ast/astutil/walk.go +++ b/vendor/cuelang.org/go/cue/ast/astutil/walk.go @@ -14,186 +14,41 @@ package astutil -import ( - "fmt" +import "cuelang.org/go/cue/ast" - "cuelang.org/go/cue/ast" -) - -// TODO: use ast.Walk or adopt that version to allow visitors. - -// A visitor's before method is invoked for each node encountered by Walk. -// If the result visitor w is not nil, Walk visits each of the children -// of node with the visitor w, followed by a call of w.After. -type visitor interface { - Before(node ast.Node) (w visitor) - After(node ast.Node) +// walkVisitor traverses an AST in depth-first order with a [visitor]. +// +// TODO(mvdan): refactor away the need for walkVisitor; +// Resolve and Sanitize should be able to use ast.Walk directly. +func walkVisitor(node ast.Node, v visitor) { + sv := &stackVisitor{stack: []visitor{v}} + ast.Walk(node, sv.Before, sv.After) } -// Helper functions for common node lists. They may be empty. - -func walkExprList(v visitor, list []ast.Expr) { - for _, x := range list { - walk(v, x) - } +// stackVisitor helps implement visitor support on top of ast.Walk. +type stackVisitor struct { + stack []visitor } -func walkDeclList(v visitor, list []ast.Decl) { - for _, x := range list { - walk(v, x) +func (v *stackVisitor) Before(node ast.Node) bool { + current := v.stack[len(v.stack)-1] + next := current.Before(node) + if next == nil { + return false } + v.stack = append(v.stack, next) + return true } -// walk traverses an AST in depth-first order: It starts by calling -// v.Visit(node); node must not be nil. If the visitor w returned by -// v.Visit(node) is not nil, walk is invoked recursively with visitor -// w for each of the non-nil children of node, followed by a call of -// w.Visit(nil). -func walk(v visitor, node ast.Node) { - if v = v.Before(node); v == nil { - return - } - - // TODO: record the comment groups and interleave with the values like for - // parsing and printing? 
- for _, c := range node.Comments() { - walk(v, c) - } - - // walk children - // (the order of the cases matches the order - // of the corresponding node types in go) - switch n := node.(type) { - // Comments and fields - case *ast.Comment: - // nothing to do - - case *ast.CommentGroup: - for _, c := range n.List { - walk(v, c) - } - - case *ast.Attribute: - // nothing to do - - case *ast.Field: - walk(v, n.Label) - if n.Value != nil { - walk(v, n.Value) - } - for _, a := range n.Attrs { - walk(v, a) - } - - case *ast.Func: - walkExprList(v, n.Args) - walk(v, n.Ret) - - case *ast.StructLit: - for _, f := range n.Elts { - walk(v, f) - } - - // Expressions - case *ast.BottomLit, *ast.BadExpr, *ast.Ident, *ast.BasicLit: - // nothing to do - - case *ast.Interpolation: - for _, e := range n.Elts { - walk(v, e) - } - - case *ast.ListLit: - walkExprList(v, n.Elts) - - case *ast.Ellipsis: - if n.Type != nil { - walk(v, n.Type) - } - - case *ast.ParenExpr: - walk(v, n.X) - - case *ast.SelectorExpr: - walk(v, n.X) - walk(v, n.Sel) - - case *ast.IndexExpr: - walk(v, n.X) - walk(v, n.Index) - - case *ast.SliceExpr: - walk(v, n.X) - if n.Low != nil { - walk(v, n.Low) - } - if n.High != nil { - walk(v, n.High) - } - - case *ast.CallExpr: - walk(v, n.Fun) - walkExprList(v, n.Args) - - case *ast.UnaryExpr: - walk(v, n.X) - - case *ast.BinaryExpr: - walk(v, n.X) - walk(v, n.Y) - - // Declarations - case *ast.ImportSpec: - if n.Name != nil { - walk(v, n.Name) - } - walk(v, n.Path) - - case *ast.BadDecl: - // nothing to do - - case *ast.ImportDecl: - for _, s := range n.Specs { - walk(v, s) - } - - case *ast.EmbedDecl: - walk(v, n.Expr) - - case *ast.Alias: - walk(v, n.Ident) - walk(v, n.Expr) - - case *ast.Comprehension: - for _, c := range n.Clauses { - walk(v, c) - } - walk(v, n.Value) - - // Files and packages - case *ast.File: - walkDeclList(v, n.Decls) - - case *ast.Package: - // The package identifier isn't really an identifier. Skip it. - - case *ast.LetClause: - walk(v, n.Ident) - walk(v, n.Expr) - - case *ast.ForClause: - if n.Key != nil { - walk(v, n.Key) - } - walk(v, n.Value) - walk(v, n.Source) - - case *ast.IfClause: - walk(v, n.Condition) - - default: - panic(fmt.Sprintf("Walk: unexpected node type %T", n)) - } +func (v *stackVisitor) After(node ast.Node) { + v.stack[len(v.stack)-1] = nil // set visitor to nil so it can be garbage collected + v.stack = v.stack[:len(v.stack)-1] +} - v.After(node) +// A visitor's before method is invoked for each node encountered by Walk. +// If the result visitor w is true, Walk visits each of the children +// of node with the visitor w, followed by a call of w.After. +type visitor interface { + Before(node ast.Node) (w visitor) + After(node ast.Node) } diff --git a/vendor/cuelang.org/go/cue/ast/ident.go b/vendor/cuelang.org/go/cue/ast/ident.go index dfbfee05a3..66f814c813 100644 --- a/vendor/cuelang.org/go/cue/ast/ident.go +++ b/vendor/cuelang.org/go/cue/ast/ident.go @@ -70,61 +70,6 @@ func IsValidIdent(ident string) bool { return true } -// ParseIdent unquotes a possibly quoted identifier and validates -// if the result is valid. -// -// Deprecated: quoted identifiers are deprecated. Use aliases. 
-func ParseIdent(n *Ident) (string, error) { - return parseIdent(n.NamePos, n.Name) -} - -func parseIdent(pos token.Pos, ident string) (string, error) { - if ident == "" { - return "", errors.Newf(pos, "empty identifier") - } - quoted := false - if ident[0] == '`' { - u, err := strconv.Unquote(ident) - if err != nil { - return "", errors.Newf(pos, "invalid quoted identifier") - } - ident = u - quoted = true - } - - p := 0 - if strings.HasPrefix(ident, "_") { - p++ - if len(ident) == 1 { - return ident, nil - } - } - if strings.HasPrefix(ident[p:], "#") { - p++ - // if len(ident) == p { - // return "", errors.Newf(pos, "invalid identifier '_#'") - // } - } - - if p == 0 || ident[p-1] == '#' { - if r, _ := utf8.DecodeRuneInString(ident[p:]); isDigit(r) { - return "", errors.Newf(pos, "invalid character '%s' in identifier", string(r)) - } - } - - for _, r := range ident[p:] { - if isLetter(r) || isDigit(r) || r == '_' || r == '$' { - continue - } - if r == '-' && quoted { - continue - } - return "", errors.Newf(pos, "invalid character '%s' in identifier", string(r)) - } - - return ident, nil -} - // LabelName reports the name of a label, whether it is an identifier // (it binds a value to a scope), and whether it is valid. // Keywords that are allowed in label positions are interpreted accordingly. @@ -150,14 +95,11 @@ func LabelName(l Label) (name string, isIdent bool, err error) { "cannot reference fields with square brackets labels outside the field value") case *Ident: - // TODO(legacy): use name = n.Name - name, err = ParseIdent(n) - if err != nil { - return "", false, err + name = n.Name + if !IsValidIdent(name) { + return "", false, errors.Newf(l.Pos(), "invalid identifier") } - isIdent = true - // TODO(legacy): remove this return once quoted identifiers are removed. - return name, isIdent, err + return name, true, err case *BasicLit: switch n.Kind { @@ -178,17 +120,13 @@ func LabelName(l Label) (name string, isIdent bool, err error) { return "", false, errors.Wrapf(ErrIsExpression, l.Pos(), "cannot use numbers as fields") } + return name, isIdent, err default: // This includes interpolation and template labels. return "", false, errors.Wrapf(ErrIsExpression, l.Pos(), "label is an expression") } - if !IsValidIdent(name) { - isIdent = false - } - return name, isIdent, err - } // ErrIsExpression reports whether a label is an expression. diff --git a/vendor/cuelang.org/go/cue/ast/walk.go b/vendor/cuelang.org/go/cue/ast/walk.go index 571d193916..53429f975d 100644 --- a/vendor/cuelang.org/go/cue/ast/walk.go +++ b/vendor/cuelang.org/go/cue/ast/walk.go @@ -16,55 +16,26 @@ package ast import ( "fmt" - - "cuelang.org/go/cue/token" ) +func walkList[N Node](list []N, before func(Node) bool, after func(Node)) { + for _, node := range list { + Walk(node, before, after) + } +} + // Walk traverses an AST in depth-first order: It starts by calling f(node); // node must not be nil. If before returns true, Walk invokes f recursively for // each of the non-nil children of node, followed by a call of after. Both // functions may be nil. If before is nil, it is assumed to always return true. func Walk(node Node, before func(Node) bool, after func(Node)) { - walk(&inspector{before: before, after: after}, node) -} - -// A visitor's before method is invoked for each node encountered by Walk. -// If the result visitor w is true, Walk visits each of the children -// of node with the visitor w, followed by a call of w.After. 
-type visitor interface { - Before(node Node) (w visitor) - After(node Node) -} - -// Helper functions for common node lists. They may be empty. - -func walkExprList(v visitor, list []Expr) { - for _, x := range list { - walk(v, x) - } -} - -func walkDeclList(v visitor, list []Decl) { - for _, x := range list { - walk(v, x) - } -} - -// walk traverses an AST in depth-first order: It starts by calling -// v.Visit(node); node must not be nil. If the visitor w returned by -// v.Visit(node) is not nil, walk is invoked recursively with visitor -// w for each of the non-nil children of node, followed by a call of -// w.Visit(nil). -func walk(v visitor, node Node) { - if v = v.Before(node); v == nil { + if before != nil && !before(node) { return } // TODO: record the comment groups and interleave with the values like for // parsing and printing? - for _, c := range Comments(node) { - walk(v, c) - } + walkList(Comments(node), before, after) // walk children // (the order of the cases matches the order @@ -75,189 +46,121 @@ func walk(v visitor, node Node) { // nothing to do case *CommentGroup: - for _, c := range n.List { - walk(v, c) - } + walkList(n.List, before, after) case *Attribute: // nothing to do case *Field: - walk(v, n.Label) + Walk(n.Label, before, after) if n.Value != nil { - walk(v, n.Value) - } - for _, a := range n.Attrs { - walk(v, a) + Walk(n.Value, before, after) } + walkList(n.Attrs, before, after) case *Func: - walkExprList(v, n.Args) - walk(v, n.Ret) + walkList(n.Args, before, after) + Walk(n.Ret, before, after) case *StructLit: - walkDeclList(v, n.Elts) + walkList(n.Elts, before, after) // Expressions case *BottomLit, *BadExpr, *Ident, *BasicLit: // nothing to do case *Interpolation: - for _, e := range n.Elts { - walk(v, e) - } + walkList(n.Elts, before, after) case *ListLit: - walkExprList(v, n.Elts) + walkList(n.Elts, before, after) case *Ellipsis: if n.Type != nil { - walk(v, n.Type) + Walk(n.Type, before, after) } case *ParenExpr: - walk(v, n.X) + Walk(n.X, before, after) case *SelectorExpr: - walk(v, n.X) - walk(v, n.Sel) + Walk(n.X, before, after) + Walk(n.Sel, before, after) case *IndexExpr: - walk(v, n.X) - walk(v, n.Index) + Walk(n.X, before, after) + Walk(n.Index, before, after) case *SliceExpr: - walk(v, n.X) + Walk(n.X, before, after) if n.Low != nil { - walk(v, n.Low) + Walk(n.Low, before, after) } if n.High != nil { - walk(v, n.High) + Walk(n.High, before, after) } case *CallExpr: - walk(v, n.Fun) - walkExprList(v, n.Args) + Walk(n.Fun, before, after) + walkList(n.Args, before, after) case *UnaryExpr: - walk(v, n.X) + Walk(n.X, before, after) case *BinaryExpr: - walk(v, n.X) - walk(v, n.Y) + Walk(n.X, before, after) + Walk(n.Y, before, after) // Declarations case *ImportSpec: if n.Name != nil { - walk(v, n.Name) + Walk(n.Name, before, after) } - walk(v, n.Path) + Walk(n.Path, before, after) case *BadDecl: // nothing to do case *ImportDecl: - for _, s := range n.Specs { - walk(v, s) - } + walkList(n.Specs, before, after) case *EmbedDecl: - walk(v, n.Expr) + Walk(n.Expr, before, after) case *LetClause: - walk(v, n.Ident) - walk(v, n.Expr) + Walk(n.Ident, before, after) + Walk(n.Expr, before, after) case *Alias: - walk(v, n.Ident) - walk(v, n.Expr) + Walk(n.Ident, before, after) + Walk(n.Expr, before, after) case *Comprehension: - for _, c := range n.Clauses { - walk(v, c) - } - walk(v, n.Value) + walkList(n.Clauses, before, after) + Walk(n.Value, before, after) // Files and packages case *File: - walkDeclList(v, n.Decls) + walkList(n.Decls, before, after) case 
*Package: - walk(v, n.Name) + Walk(n.Name, before, after) case *ForClause: if n.Key != nil { - walk(v, n.Key) + Walk(n.Key, before, after) } - walk(v, n.Value) - walk(v, n.Source) + Walk(n.Value, before, after) + Walk(n.Source, before, after) case *IfClause: - walk(v, n.Condition) + Walk(n.Condition, before, after) default: panic(fmt.Sprintf("Walk: unexpected node type %T", n)) } - v.After(node) -} - -type inspector struct { - before func(Node) bool - after func(Node) - - commentStack []commentFrame - current commentFrame -} - -type commentFrame struct { - cg []*CommentGroup - pos int8 -} - -func (f *inspector) Before(node Node) visitor { - if f.before == nil || f.before(node) { - f.commentStack = append(f.commentStack, f.current) - f.current = commentFrame{cg: Comments(node)} - f.visitComments(f.current.pos) - return f - } - return nil -} - -func (f *inspector) After(node Node) { - f.visitComments(127) - p := len(f.commentStack) - 1 - f.current = f.commentStack[p] - f.commentStack = f.commentStack[:p] - f.current.pos++ - if f.after != nil { - f.after(node) - } -} - -func (f *inspector) Token(t token.Token) { - f.current.pos++ -} - -func (f *inspector) visitComments(pos int8) { - c := &f.current - for ; len(c.cg) > 0; c.cg = c.cg[1:] { - cg := c.cg[0] - if cg.Position == pos { - continue - } - if f.before == nil || f.before(cg) { - for _, c := range cg.List { - if f.before == nil || f.before(c) { - if f.after != nil { - f.after(c) - } - } - } - if f.after != nil { - f.after(cg) - } - } + if after != nil { + after(node) } } diff --git a/vendor/cuelang.org/go/cue/build.go b/vendor/cuelang.org/go/cue/build.go index 287a4d0d59..8d5a3f7919 100644 --- a/vendor/cuelang.org/go/cue/build.go +++ b/vendor/cuelang.org/go/cue/build.go @@ -31,7 +31,7 @@ import ( // The zero value of Runtime works for legacy reasons, but // should not be used. It may panic at some point. // -// Deprecated: use Context. +// Deprecated: use [Context]. type Runtime runtime.Runtime func (r *Runtime) runtime() *runtime.Runtime { @@ -57,7 +57,8 @@ func (r *Runtime) complete(p *build.Instance, v *adt.Vertex) (*Instance, error) // name in position information. The source may import builtin packages. Use // Build to allow importing non-builtin packages. // -// Deprecated: use Parse or ParseBytes. The use of Instance is being phased out. +// Deprecated: use [Context] with methods like [Context.CompileString] or [Context.CompileBytes]. +// The use of [Instance] is being phased out. func (r *hiddenRuntime) Compile(filename string, source interface{}) (*Instance, error) { cfg := &runtime.Config{Filename: filename} v, p := r.runtime().Compile(cfg, source) @@ -67,7 +68,7 @@ func (r *hiddenRuntime) Compile(filename string, source interface{}) (*Instance, // CompileFile compiles the given source file into an Instance. The source may // import builtin packages. Use Build to allow importing non-builtin packages. // -// Deprecated: use BuildFile. The use of Instance is being phased out. +// Deprecated: use [Context.BuildFile]. The use of [Instance] is being phased out. func (r *hiddenRuntime) CompileFile(file *ast.File) (*Instance, error) { v, p := r.runtime().CompileFile(nil, file) return r.complete(p, v) @@ -77,7 +78,7 @@ func (r *hiddenRuntime) CompileFile(file *ast.File) (*Instance, error) { // may import builtin packages. Use Build to allow importing non-builtin // packages. // -// Deprecated: use BuildExpr. The use of Instance is being phased out. +// Deprecated: use [Context.BuildExpr]. The use of [Instance] is being phased out. 
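
The deprecation notices above now point uniformly at [Context]. A rough migration sketch for code that used Runtime.Compile, using cuecontext to obtain a Context (the CUE source is invented):

package main

import (
	"fmt"

	"cuelang.org/go/cue"
	"cuelang.org/go/cue/cuecontext"
)

func main() {
	const src = `
who: "world"
msg: "hello \(who)"
`
	ctx := cuecontext.New()
	v := ctx.CompileString(src)
	if err := v.Err(); err != nil {
		panic(err)
	}
	// LookupPath replaces the deprecated Instance.Lookup style of access.
	msg, err := v.LookupPath(cue.ParsePath("msg")).String()
	if err != nil {
		panic(err)
	}
	fmt.Println(msg) // hello world
}
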
func (r *hiddenRuntime) CompileExpr(expr ast.Expr) (*Instance, error) { f, err := astutil.ToFile(expr) if err != nil { @@ -148,7 +149,7 @@ func (r *hiddenRuntime) build(instances []*build.Instance) ([]*Instance, error) // FromExpr creates an instance from an expression. // Any references must be resolved beforehand. // -// Deprecated: use CompileExpr +// Deprecated: use [Context.BuildExpr]. The use of [Instance] is being phased out. func (r *hiddenRuntime) FromExpr(expr ast.Expr) (*Instance, error) { return r.CompileFile(&ast.File{ Decls: []ast.Decl{&ast.EmbedDecl{Expr: expr}}, diff --git a/vendor/cuelang.org/go/cue/build/doc.go b/vendor/cuelang.org/go/cue/build/doc.go index 52421c65d8..84577fb41b 100644 --- a/vendor/cuelang.org/go/cue/build/doc.go +++ b/vendor/cuelang.org/go/cue/build/doc.go @@ -13,4 +13,4 @@ // limitations under the License. // Package build defines collections of CUE files to build an instance. -package build // import "cuelang.org/go/cue/build" +package build diff --git a/vendor/cuelang.org/go/cue/build/import.go b/vendor/cuelang.org/go/cue/build/import.go index 996edb0afe..71ae484961 100644 --- a/vendor/cuelang.org/go/cue/build/import.go +++ b/vendor/cuelang.org/go/cue/build/import.go @@ -18,7 +18,6 @@ import ( "sort" "strconv" - "cuelang.org/go/cue/ast" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" ) @@ -54,23 +53,17 @@ func (inst *Instance) complete() errors.Error { ) for _, f := range inst.Files { - for _, decl := range f.Decls { - d, ok := decl.(*ast.ImportDecl) - if !ok { - continue - } - for _, spec := range d.Specs { - quoted := spec.Path.Value - path, err := strconv.Unquote(quoted) - if err != nil { - inst.Err = errors.Append(inst.Err, - errors.Newf( - spec.Path.Pos(), - "%s: parser returned invalid quoted string: <%s>", - f.Filename, quoted)) - } - imported[path] = append(imported[path], spec.Pos()) + for _, spec := range f.Imports { + quoted := spec.Path.Value + path, err := strconv.Unquote(quoted) + if err != nil { + inst.Err = errors.Append(inst.Err, + errors.Newf( + spec.Path.Pos(), + "%s: parser returned invalid quoted string: <%s>", + f.Filename, quoted)) } + imported[path] = append(imported[path], spec.Pos()) } } @@ -89,10 +82,10 @@ func (inst *Instance) complete() errors.Error { if inst.loadFunc != nil { for i, path := range paths { - isLocal := IsLocalImport(path) - if isLocal { - // path = dirToImportPath(filepath.Join(dir, path)) - } + // isLocal := IsLocalImport(path) + // if isLocal { + // path = dirToImportPath(filepath.Join(dir, path)) + // } imp := c.imports[path] if imp == nil { diff --git a/vendor/cuelang.org/go/cue/build/instance.go b/vendor/cuelang.org/go/cue/build/instance.go index cc0abb8ae0..42270ece90 100644 --- a/vendor/cuelang.org/go/cue/build/instance.go +++ b/vendor/cuelang.org/go/cue/build/instance.go @@ -26,7 +26,6 @@ import ( "cuelang.org/go/cue/errors" "cuelang.org/go/cue/parser" "cuelang.org/go/cue/token" - "cuelang.org/go/internal" ) // An Instance describes the collection of files, and its imports, necessary @@ -239,9 +238,9 @@ func (inst *Instance) AddSyntax(file *ast.File) errors.Error { astutil.Resolve(file, func(pos token.Pos, msg string, args ...interface{}) { inst.Err = errors.Append(inst.Err, errors.Newf(pos, msg, args...)) }) - _, pkg, pos := internal.PackageInfo(file) - if pkg != "" && pkg != "_" && !inst.setPkg(pkg) && pkg != inst.PkgName { - err := errors.Newf(pos, + pkg := file.PackageName() + if pkg != "" && pkg != "_" && !inst.User && !inst.setPkg(pkg) && pkg != inst.PkgName { + err := 
errors.Newf(file.Pos(), "package name %q conflicts with previous package name %q", pkg, inst.PkgName) inst.ReportError(err) diff --git a/vendor/cuelang.org/go/cue/context.go b/vendor/cuelang.org/go/cue/context.go index 32373551fb..e56a8a917e 100644 --- a/vendor/cuelang.org/go/cue/context.go +++ b/vendor/cuelang.org/go/cue/context.go @@ -250,6 +250,10 @@ func (c *Context) CompileBytes(b []byte, options ...BuildOption) Value { func (c *Context) make(v *adt.Vertex) Value { opCtx := newContext(c.runtime()) + // TODO: this is currently needed to ensure that node is properly recognized + // as evaluated. Not dereferencing nodes, however, will have the benefit of + // retaining more information. Remove the indirection when the code will be + // able to properly handle this. x := newValueRoot(c.runtime(), opCtx, v) adt.AddStats(opCtx) return x diff --git a/vendor/cuelang.org/go/cue/cue.go b/vendor/cuelang.org/go/cue/cue.go index ac68917517..1b1c878f70 100644 --- a/vendor/cuelang.org/go/cue/cue.go +++ b/vendor/cuelang.org/go/cue/cue.go @@ -15,7 +15,7 @@ // Package cue is the main API for CUE evaluation. // // [Value] is the main type that represents CUE evaluations. -// Values are created with a [cuecontext.Context]. +// Values are created with a [cuelang.org/go/cue/cuecontext.Context]. // Only values created from the same Context can be involved in the same operation. // Values created from the same Context are not safe for concurrent use, // which we intend to change in the future. @@ -32,10 +32,10 @@ // Note that the following types are DEPRECATED and their usage should be // avoided if possible: // -// [FieldInfo] -// [Instance] -// [Runtime] -// [Struct] +// - [FieldInfo] +// - [Instance] +// - [Runtime] +// - [Struct] // // Many types also have deprecated methods. Code that already uses deprecated // methods can keep using them for at least some time. We aim to provide a diff --git a/vendor/cuelang.org/go/cue/cuecontext/cuecontext.go b/vendor/cuelang.org/go/cue/cuecontext/cuecontext.go index 1d25463a7d..60fb2a9e2e 100644 --- a/vendor/cuelang.org/go/cue/cuecontext/cuecontext.go +++ b/vendor/cuelang.org/go/cue/cuecontext/cuecontext.go @@ -15,8 +15,13 @@ package cuecontext import ( + "fmt" + "cuelang.org/go/cue" + "cuelang.org/go/internal" "cuelang.org/go/internal/core/runtime" + "cuelang.org/go/internal/cuedebug" + "cuelang.org/go/internal/envflag" _ "cuelang.org/go/pkg" ) @@ -26,9 +31,20 @@ type Option struct { apply func(r *runtime.Runtime) } +// defaultFlags defines the debug flags that are set by default. +var defaultFlags cuedebug.Config + +func init() { + if err := envflag.Parse(&defaultFlags, ""); err != nil { + panic(err) + } +} + // New creates a new Context. func New(options ...Option) *cue.Context { r := runtime.New() + // Ensure default behavior if the flags are not set explicitly. + r.SetDebugOptions(&defaultFlags) for _, o := range options { o.apply(r) } @@ -46,3 +62,46 @@ func Interpreter(i ExternInterpreter) Option { r.SetInterpreter(i) }} } + +type EvalVersion = internal.EvaluatorVersion + +const ( + // EvalDefault is the latest stable version of the evaluator. + EvalDefault EvalVersion = EvalV2 + + // EvalExperiment refers to the latest unstable version of the evaluator. + // Note that this version may change without notice. + EvalExperiment EvalVersion = EvalV3 + + // EvalV2 is the currently latest stable version of the evaluator. + // It was introduced in CUE version 0.3 and is being maintained until 2024. 
+ EvalV2 EvalVersion = internal.DefaultVersion + + // EvalV3 is the currently experimental version of the evaluator. + // It was introduced in 2024 and brought a new disjunction algorithm, + // a new closedness algorithm, a new core scheduler, and adds performance + // enhancements like structure sharing. + EvalV3 EvalVersion = internal.DevVersion +) + +// EvaluatorVersion indicates which version of the evaluator to use. Currently +// only experimental versions can be selected as an alternative. +func EvaluatorVersion(v EvalVersion) Option { + return Option{func(r *runtime.Runtime) { + r.SetVersion(v) + }} +} + +// CUE_DEBUG takes a string with the same contents as CUE_DEBUG and configures +// the context with the relevant debug options. It panics for unknown or +// malformed options. +func CUE_DEBUG(s string) Option { + var c cuedebug.Config + if err := envflag.Parse(&c, s); err != nil { + panic(fmt.Errorf("cuecontext.CUE_DEBUG: %v", err)) + } + + return Option{func(r *runtime.Runtime) { + r.SetDebugOptions(&c) + }} +} diff --git a/vendor/cuelang.org/go/cue/decode.go b/vendor/cuelang.org/go/cue/decode.go index d81c92a7ae..b282306a30 100644 --- a/vendor/cuelang.org/go/cue/decode.go +++ b/vendor/cuelang.org/go/cue/decode.go @@ -16,10 +16,11 @@ package cue import ( "bytes" + "cmp" "encoding" "encoding/json" "reflect" - "sort" + "slices" "strconv" "strings" "sync" @@ -101,7 +102,7 @@ func (d *decoder) decode(x reflect.Value, v Value, isPtr bool) { switch x.Kind() { case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Interface: // nullable types - if v.Null() == nil || !v.IsConcrete() { + if v.IsNull() || !v.IsConcrete() { d.clear(x) return } @@ -114,7 +115,7 @@ func (d *decoder) decode(x reflect.Value, v Value, isPtr bool) { } } - ij, it, x := indirect(x, v.Null() == nil) + ij, it, x := indirect(x, v.IsNull()) if ij != nil { b, err := v.marshalJSON() @@ -488,19 +489,16 @@ type goField struct { omitEmpty bool } -// byIndex sorts goField by index sequence. -type byIndex []goField - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false +func compareFieldByIndex(a, b goField) int { + for i, x := range a.index { + if i >= len(b.index) { + break } - if xik != x[j].index[k] { - return xik < x[j].index[k] + if c := cmp.Compare(x, b.index[i]); c != 0 { + return c } } - return len(x[i].index) < len(x[j].index) + return cmp.Compare(len(a.index), len(b.index)) } // typeFields returns a list of fields that JSON should recognize for the given type. @@ -614,21 +612,24 @@ func typeFields(t reflect.Type) structFields { } } - sort.Slice(fields, func(i, j int) bool { - x := fields + slices.SortFunc(fields, func(a, b goField) int { // sort field by name, breaking ties with depth, then // breaking ties with "name came from json tag", then // breaking ties with index sequence. 
- if x[i].name != x[j].name { - return x[i].name < x[j].name + if c := cmp.Compare(a.name, b.name); c != 0 { + return c } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) + if c := cmp.Compare(len(a.index), len(b.index)); c != 0 { + return c } - if x[i].tag != x[j].tag { - return x[i].tag + if a.tag != b.tag { + if a.tag { + return 1 + } else { + return -1 + } } - return byIndex(x).Less(i, j) + return compareFieldByIndex(a, b) }) // Delete all fields that are hidden by the Go rules for embedded fields, @@ -660,7 +661,7 @@ func typeFields(t reflect.Type) structFields { } fields = out - sort.Slice(fields, byIndex(fields).Less) + slices.SortFunc(fields, compareFieldByIndex) nameIndex := make(map[string]int, len(fields)) for i, field := range fields { diff --git a/vendor/cuelang.org/go/cue/errors/errors.go b/vendor/cuelang.org/go/cue/errors/errors.go index 8fb87039e7..31cb96377c 100644 --- a/vendor/cuelang.org/go/cue/errors/errors.go +++ b/vendor/cuelang.org/go/cue/errors/errors.go @@ -17,18 +17,17 @@ // The pivotal error type in CUE packages is the interface type Error. // The information available in such errors can be most easily retrieved using // the Path, Positions, and Print functions. -package errors // import "cuelang.org/go/cue/errors" +package errors import ( + "cmp" "errors" "fmt" "io" "path/filepath" - "sort" + "slices" "strings" - "github.com/mpvl/unique" - "cuelang.org/go/cue/token" ) @@ -85,7 +84,8 @@ func NewMessagef(format string, args ...interface{}) Message { } // NewMessage creates an error message for human consumption. -// Deprecated: Use NewMessagef instead. +// +// Deprecated: Use [NewMessagef] instead. func NewMessage(format string, args []interface{}) Message { return NewMessagef(format, args...) } @@ -133,12 +133,11 @@ func Positions(err error) []token.Pos { a := make([]token.Pos, 0, 3) - sortOffset := 0 pos := e.Position() if pos.IsValid() { a = append(a, pos) - sortOffset = 1 } + sortOffset := len(a) for _, p := range e.InputPositions() { if p.IsValid() && p != pos { @@ -146,19 +145,10 @@ func Positions(err error) []token.Pos { } } - byPos := byPos(a[sortOffset:]) - sort.Sort(byPos) - k := unique.ToFront(byPos) - return a[:k+sortOffset] + slices.SortFunc(a[sortOffset:], comparePos) + return slices.Compact(a) } -type byPos []token.Pos - -func (s *byPos) Truncate(n int) { (*s) = (*s)[:n] } -func (s byPos) Len() int { return len(s) } -func (s byPos) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byPos) Less(i, j int) bool { return comparePos(s[i], s[j]) == -1 } - // Path returns the path of an Error if err is of that type. func Path(err error) []string { if e := Error(nil); errors.As(err, &e) { @@ -274,13 +264,12 @@ var _ Error = &posError{} // the offending token, and the error condition is described // by Msg. type posError struct { - pos token.Pos - inputs []token.Pos + pos token.Pos Message } func (e *posError) Path() []string { return nil } -func (e *posError) InputPositions() []token.Pos { return e.inputs } +func (e *posError) InputPositions() []token.Pos { return nil } func (e *posError) Position() token.Pos { return e.pos } // Append combines two errors, flattening Lists as necessary. @@ -367,66 +356,26 @@ func (p *list) Add(err Error) { // Reset resets an List to no errors. func (p *list) Reset() { *p = (*p)[:0] } -// List implements the sort Interface. 
-func (p list) Len() int { return len(p) } -func (p list) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p list) Less(i, j int) bool { - if c := comparePos(p[i].Position(), p[j].Position()); c != 0 { - return c == -1 - } - // Note that it is not sufficient to simply compare file offsets because - // the offsets do not reflect modified line information (through //line - // comments). - - if !equalPath(p[i].Path(), p[j].Path()) { - return lessPath(p[i].Path(), p[j].Path()) - } - return p[i].Error() < p[j].Error() -} - -func lessOrMore(isLess bool) int { - if isLess { - return -1 - } - return 1 -} - func comparePos(a, b token.Pos) int { - if a.Filename() != b.Filename() { - return lessOrMore(a.Filename() < b.Filename()) + if c := cmp.Compare(a.Filename(), b.Filename()); c != 0 { + return c } - if a.Line() != b.Line() { - return lessOrMore(a.Line() < b.Line()) + if c := cmp.Compare(a.Line(), b.Line()); c != 0 { + return c } - if a.Column() != b.Column() { - return lessOrMore(a.Column() < b.Column()) - } - return 0 + return cmp.Compare(a.Column(), b.Column()) } -func lessPath(a, b []string) bool { +func comparePath(a, b []string) int { for i, x := range a { if i >= len(b) { - return false - } - if x != b[i] { - return x < b[i] + break } - } - return len(a) < len(b) -} - -func equalPath(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i, x := range a { - if x != b[i] { - return false + if c := cmp.Compare(x, b[i]); c != 0 { + return c } } - return true + return cmp.Compare(len(a), len(b)) } // Sanitize sorts multiple errors and removes duplicates on a best effort basis. @@ -459,7 +408,19 @@ func (p list) sanitize() list { // other errors are sorted by error message, and before any *posError // entry. func (p list) Sort() { - sort.Sort(p) + slices.SortFunc(p, func(a, b Error) int { + if c := comparePos(a.Position(), b.Position()); c != 0 { + return c + } + // Note that it is not sufficient to simply compare file offsets because + // the offsets do not reflect modified line information (through //line + // comments). + if c := comparePath(a.Path(), b.Path()); c != 0 { + return c + } + return cmp.Compare(a.Error(), b.Error()) + + }) } // RemoveMultiples sorts an List and removes all but the first error per line. @@ -486,7 +447,7 @@ func approximateEqual(a, b Error) bool { return aPos.Filename() == bPos.Filename() && aPos.Line() == bPos.Line() && aPos.Column() == bPos.Column() && - equalPath(a.Path(), b.Path()) + comparePath(a.Path(), b.Path()) == 0 } // An List implements the error interface. @@ -589,19 +550,14 @@ func writeErr(w io.Writer, err Error) { for { u := errors.Unwrap(err) - printed := false msg, args := err.Msg() - s := fmt.Sprintf(msg, args...) - if s != "" || u == nil { // print at least something - _, _ = io.WriteString(w, s) - printed = true - } + n, _ := fmt.Fprintf(w, msg, args...) if u == nil { break } - if printed { + if n > 0 { _, _ = io.WriteString(w, ": ") } err, _ = u.(Error) diff --git a/vendor/cuelang.org/go/cue/format/format.go b/vendor/cuelang.org/go/cue/format/format.go index cd1d460f59..fc80d4f9a0 100644 --- a/vendor/cuelang.org/go/cue/format/format.go +++ b/vendor/cuelang.org/go/cue/format/format.go @@ -13,7 +13,7 @@ // limitations under the License. // Package format implements standard formatting of CUE configurations. -package format // import "cuelang.org/go/cue/format" +package format // TODO: this package is in need of a rewrite. When doing so, the API should // allow for reformatting an AST, without actually writing bytes. 
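
The decode.go and errors.go hunks above follow the same migration: sort.Sort/sort.Slice with ad-hoc Less methods become slices.SortFunc over three-way comparators chained from cmp.Compare, and Positions additionally deduplicates with slices.Compact. A self-contained sketch of that pattern, with an invented pos type standing in for token.Pos:

package main

import (
	"cmp"
	"fmt"
	"slices"
)

type pos struct {
	file      string
	line, col int
}

// comparePos chains cmp.Compare calls into a single three-way comparator,
// mirroring the comparePos/comparePath rewrite in errors.go above.
func comparePos(a, b pos) int {
	if c := cmp.Compare(a.file, b.file); c != 0 {
		return c
	}
	if c := cmp.Compare(a.line, b.line); c != 0 {
		return c
	}
	return cmp.Compare(a.col, b.col)
}

func main() {
	ps := []pos{{"b.cue", 1, 1}, {"a.cue", 2, 1}, {"a.cue", 2, 1}, {"a.cue", 1, 9}}
	slices.SortFunc(ps, comparePos)
	// Sorting first makes duplicates adjacent, so slices.Compact can drop
	// them, as Positions does after its SortFunc call.
	ps = slices.Compact(ps)
	fmt.Println(ps) // [{a.cue 1 9} {a.cue 2 1} {b.cue 1 1}]
}
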
@@ -202,7 +202,7 @@ func newFormatter(p *printer) *formatter { type whiteSpace int const ( - ignore whiteSpace = 0 + _ whiteSpace = 0 // write a space, or disallow it blank whiteSpace = 1 << iota @@ -262,13 +262,6 @@ func (f *formatter) formfeed() whiteSpace { return formfeed } -func (f *formatter) wsOverride(def whiteSpace) whiteSpace { - if f.current.override == ignore { - return def - } - return f.current.override -} - func (f *formatter) onOneLine(node ast.Node) bool { a := node.Pos() b := node.End() @@ -289,7 +282,7 @@ func (f *formatter) before(node ast.Node) bool { if ok && len(s.Elts) <= 1 && f.current.nodeSep != blank && f.onOneLine(node) { f.current.nodeSep = blank } - f.current.cg = node.Comments() + f.current.cg = ast.Comments(node) f.visitComments(f.current.pos) return true } diff --git a/vendor/cuelang.org/go/cue/format/node.go b/vendor/cuelang.org/go/cue/format/node.go index 1aff237491..70d571ac07 100644 --- a/vendor/cuelang.org/go/cue/format/node.go +++ b/vendor/cuelang.org/go/cue/format/node.go @@ -87,16 +87,16 @@ func nestDepth(f *ast.Field) int { // TODO: be more accurate and move to astutil func hasDocComments(d ast.Decl) bool { - if len(d.Comments()) > 0 { + if len(ast.Comments(d)) > 0 { return true } switch x := d.(type) { case *ast.Field: - return len(x.Label.Comments()) > 0 + return len(ast.Comments(x.Label)) > 0 case *ast.Alias: - return len(x.Ident.Comments()) > 0 + return len(ast.Comments(x.Ident)) > 0 case *ast.LetClause: - return len(x.Ident.Comments()) > 0 + return len(ast.Comments(x.Ident)) > 0 } return false } @@ -188,6 +188,26 @@ func (f *formatter) walkListElems(list []ast.Expr) { f.before(nil) for _, x := range list { f.before(x) + + // This is a hack to ensure that comments are printed correctly in lists. + // A comment must be printed after each element in a list, but we can't + // print a comma at the end of a comment because it will be considered + // part of the comment and ignored. + // To fix this we collect all comments that appear after the element, + // and only handle them after it's formatted. + var commentsAfter []*ast.CommentGroup + splitComments := x.Pos().IsValid() + if splitComments { + for _, cg := range ast.Comments(x) { + if x.Pos().Before(cg.Pos()) { + commentsAfter = append(commentsAfter, cg) + } + } + } + + if splitComments { + f.current.cg = nil + } switch n := x.(type) { case *ast.Comprehension: f.walkClauseList(n.Clauses, blank) @@ -208,6 +228,10 @@ func (f *formatter) walkListElems(list []ast.Expr) { f.exprRaw(n, token.LowestPrec, 1) } f.print(comma, blank) + + if splitComments { + f.current.cg = commentsAfter + } f.after(x) } f.after(nil) @@ -235,7 +259,7 @@ func (f *formatter) inlineField(n *ast.Field) *ast.Field { regular := internal.IsRegularField(n) // shortcut single-element structs. // If the label has a valid position, we assume that an unspecified - // Lbrace signals the intend to collapse fields. + // Lbrace signals the intent to collapse fields. if !n.Label.Pos().IsValid() && !(f.printer.cfg.simplify && regular) { return nil } @@ -304,7 +328,7 @@ func (f *formatter) decl(decl ast.Decl) { nextFF := f.nextNeedsFormfeed(n.Value) tab := vtab - if nextFF { + if nextFF || f.prevLbraceOnLine { tab = blank } @@ -469,7 +493,7 @@ func (f *formatter) label(l ast.Label, constraint token.Token) { // if the AST is not generated by the parser. 
name := n.Name if !ast.IsValidIdent(name) { - name = literal.String.Quote(n.Name) + name = literal.Label.Quote(n.Name) } f.print(n.NamePos, name) @@ -479,7 +503,7 @@ func (f *formatter) label(l ast.Label, constraint token.Token) { // according to spec. if strings.HasPrefix(str, `"""`) || strings.HasPrefix(str, "#") { if u, err := literal.Unquote(str); err == nil { - str = literal.String.Quote(u) + str = literal.Label.Quote(u) } } f.print(n.ValuePos, str) @@ -631,20 +655,20 @@ func (f *formatter) exprRaw(expr ast.Expr, prec1, depth int) { switch { case len(x.Elts) == 0: - if !x.Rbrace.HasRelPos() { - // collapse curly braces if the body is empty. - ffAlt := blank | nooverride - for _, c := range x.Comments() { - if c.Position == 1 { - ffAlt = ff - } + // collapse curly braces if the body is empty. + ffAlt := blank | nooverride + for _, c := range x.Comments() { + if c.Position == 1 { + ffAlt = ff + break } - ff = ffAlt } + ff = ffAlt case !x.Rbrace.HasRelPos() || !x.Elts[0].Pos().HasRelPos(): ws |= newline | nooverride } f.print(x.Lbrace, token.LBRACE, &l, ws, ff, indent) + f.prevLbraceOnLine = l == f.lineout f.walkDeclList(x.Elts) f.matchUnindent() @@ -659,7 +683,20 @@ func (f *formatter) exprRaw(expr ast.Expr, prec1, depth int) { f.print(ws, x.Rbrace, token.RBRACE) case *ast.ListLit: - f.print(x.Lbrack, token.LBRACK, noblank, indent) + ws := noblank | indent + if len(x.Elts) == 0 { + // collapse square brackets if the body is empty. + collapseWs := blank | nooverride + for _, c := range x.Comments() { + if c.Position == 1 { + collapseWs = ws + break + } + } + ws |= collapseWs + } + + f.print(x.Lbrack, token.LBRACK, ws) f.walkListElems(x.Elts) f.print(trailcomma, noblank) f.visitComments(f.current.pos) @@ -849,7 +886,7 @@ func (f *formatter) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int) { prec := x.Op.Precedence() if prec < prec1 { // parenthesis needed - // Note: The parser inserts an syntax.ParenExpr node; thus this case + // Note: The parser inserts a syntax.ParenExpr node; thus this case // can only occur if the AST is created in a different way. 
// defer p.pushComment(nil).pop() f.print(token.LPAREN, nooverride) diff --git a/vendor/cuelang.org/go/cue/format/printer.go b/vendor/cuelang.org/go/cue/format/printer.go index 3e02ca637e..a9ffc02717 100644 --- a/vendor/cuelang.org/go/cue/format/printer.go +++ b/vendor/cuelang.org/go/cue/format/printer.go @@ -41,9 +41,10 @@ type printer struct { lastTok token.Token // last token printed (syntax.ILLEGAL if it's whitespace) - output []byte - indent int - spaceBefore bool + output []byte + indent int + spaceBefore bool + prevLbraceOnLine bool // true if a '{' has been written on the current line errs errors.Error } @@ -269,17 +270,17 @@ func (p *printer) writeWhitespace(ws whiteSpace) { case ws&newsection != 0: p.maybeIndentLine(ws) p.writeByte('\f', 2) - p.lineout += 2 + p.incrementLine(2) p.spaceBefore = true case ws&formfeed != 0: p.maybeIndentLine(ws) p.writeByte('\f', 1) - p.lineout++ + p.incrementLine(1) p.spaceBefore = true case ws&newline != 0: p.maybeIndentLine(ws) p.writeByte('\n', 1) - p.lineout++ + p.incrementLine(1) p.spaceBefore = true case ws&declcomma != 0: p.writeByte(',', 1) @@ -295,6 +296,13 @@ func (p *printer) writeWhitespace(ws whiteSpace) { } } +func (p *printer) incrementLine(n int) { + if n != 0 { + p.prevLbraceOnLine = false + } + p.lineout += line(n) +} + func (p *printer) markLineIndent(ws whiteSpace) { p.indentStack = append(p.indentStack, ws) } diff --git a/vendor/cuelang.org/go/cue/format/simplify.go b/vendor/cuelang.org/go/cue/format/simplify.go index f4981978cc..d36c58e700 100644 --- a/vendor/cuelang.org/go/cue/format/simplify.go +++ b/vendor/cuelang.org/go/cue/format/simplify.go @@ -52,7 +52,9 @@ func (s *labelSimplifier) processDecls(decls []ast.Decl) { for _, d := range decls { switch x := d.(type) { case *ast.Field: - x.Label = astutil.Apply(x.Label, sc.replace, nil).(ast.Label) + if _, ok := x.Label.(*ast.BasicLit); ok { + x.Label = astutil.Apply(x.Label, nil, sc.replace).(ast.Label) + } } } } @@ -109,5 +111,5 @@ func (s *labelSimplifier) replace(c astutil.Cursor) bool { c.Replace(ast.NewIdent(str)) } } - return true + return false } diff --git a/vendor/cuelang.org/go/cue/instance.go b/vendor/cuelang.org/go/cue/instance.go index ee1f74faf3..cea6ffc33a 100644 --- a/vendor/cuelang.org/go/cue/instance.go +++ b/vendor/cuelang.org/go/cue/instance.go @@ -18,7 +18,6 @@ import ( "cuelang.org/go/cue/ast" "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" - "cuelang.org/go/internal" "cuelang.org/go/internal/core/adt" "cuelang.org/go/internal/core/compile" "cuelang.org/go/internal/core/runtime" @@ -138,21 +137,6 @@ func getImportFromPath(x *runtime.Runtime, id string) *Instance { return inst } -func init() { - internal.MakeInstance = func(value interface{}) interface{} { - v := value.(Value) - x := v.eval(v.ctx()) - st, ok := x.(*adt.Vertex) - if !ok { - st = &adt.Vertex{} - st.AddConjunct(adt.MakeRootConjunct(nil, x)) - } - return addInst(v.idx, &Instance{ - root: st, - }) - } -} - // newInstance creates a new instance. Use Insert to populate the instance. func newInstance(x *runtime.Runtime, p *build.Instance, v *adt.Vertex) *Instance { // TODO: associate root source with structLit. @@ -260,7 +244,7 @@ func Merge(inst ...*Instance) *Instance { // identifier to bind to the top-level field in inst. The top-level fields in // inst take precedence over predeclared identifier and builtin functions. 
// -// Deprecated: use Context.Build +// Deprecated: use [Context.BuildInstance] func (inst *hiddenInstance) Build(p *build.Instance) *Instance { p.Complete() @@ -289,28 +273,24 @@ func (inst *hiddenInstance) Build(p *build.Instance) *Instance { return i } -func (inst *Instance) value() Value { - return newVertexRoot(inst.index, newContext(inst.index), inst.root) -} - // Lookup reports the value at a path starting from the top level struct. The // Exists method of the returned value will report false if the path did not // exist. The Err method reports if any error occurred during evaluation. The // empty path returns the top-level configuration struct. Use LookupDef for definitions or LookupField for // any kind of field. // -// Deprecated: use Value.LookupPath +// Deprecated: use [Value.LookupPath] func (inst *hiddenInstance) Lookup(path ...string) Value { - return inst.value().Lookup(path...) + return inst.Value().Lookup(path...) } // LookupDef reports the definition with the given name within struct v. The // Exists method of the returned value will report false if the definition did // not exist. The Err method reports if any error occurred during evaluation. // -// Deprecated: use Value.LookupPath +// Deprecated: use [Value.LookupPath] func (inst *hiddenInstance) LookupDef(path string) Value { - return inst.value().LookupDef(path) + return inst.Value().LookupDef(path) } // LookupField reports a Field at a path starting from v, or an error if the @@ -318,12 +298,9 @@ func (inst *hiddenInstance) LookupDef(path string) Value { // // It cannot look up hidden or unexported fields. // -// Deprecated: this API does not work with new-style definitions. Use -// FieldByName defined on inst.Value(). -// -// Deprecated: use Value.LookupPath +// Deprecated: use [Value.LookupPath] func (inst *hiddenInstance) LookupField(path ...string) (f FieldInfo, err error) { - v := inst.value() + v := inst.Value() for _, k := range path { s, err := v.Struct() if err != nil { @@ -349,7 +326,7 @@ func (inst *hiddenInstance) LookupField(path ...string) (f FieldInfo, err error) // a Value. In the latter case, it will panic if the Value is not from the same // Runtime. // -// Deprecated: use Value.FillPath() +// Deprecated: use [Value.FillPath] func (inst *hiddenInstance) Fill(x interface{}, path ...string) (*Instance, error) { v := inst.Value().Fill(x, path...) diff --git a/vendor/cuelang.org/go/cue/literal/num.go b/vendor/cuelang.org/go/cue/literal/num.go index 64925e2f4f..17a1c1ef02 100644 --- a/vendor/cuelang.org/go/cue/literal/num.go +++ b/vendor/cuelang.org/go/cue/literal/num.go @@ -102,10 +102,13 @@ func ParseNum(s string, n *NumInfo) error { if !n.next() { return n.errorf("invalid number %q", s) } - if n.ch == '-' { + switch n.ch { + case '-': n.neg = true n.buf = append(n.buf, '-') n.next() + case '+': + n.next() } seenDecimalPoint := false if n.ch == '.' { diff --git a/vendor/cuelang.org/go/cue/literal/quote.go b/vendor/cuelang.org/go/cue/literal/quote.go index 8550bb2419..bcf8e78a08 100644 --- a/vendor/cuelang.org/go/cue/literal/quote.go +++ b/vendor/cuelang.org/go/cue/literal/quote.go @@ -65,6 +65,7 @@ func tabs(n int) string { // WithOptionalIndent is like WithTabIndent, but only returns a multiline // strings if it doesn't contain any newline characters. 
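
For reference, the earlier format/node.go hunk switches label quoting from literal.String.Quote to literal.Label.Quote, both Forms like the ones documented in this file. A minimal sketch of using the two forms; the inputs are invented and the exact quoted output is left to the library:

package main

import (
	"fmt"

	"cuelang.org/go/cue/literal"
)

func main() {
	// String is the default form for CUE string literals; Label is the
	// variant tuned for label positions, per the comment in this file.
	fmt.Println(literal.String.Quote("foo\nbar"))
	fmt.Println(literal.Label.Quote("foo-bar"))
}
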
func (f Form) WithOptionalTabIndent(tabs int) Form { + // TODO(mvdan): remove this optimization once Go 1.23 lands with https://go.dev/cl/536615 if tabs < len(tabIndent) { f.indent = tabIndent[:tabs] } else { @@ -95,7 +96,7 @@ var ( // TODO: ExactString: quotes to bytes type if the string cannot be // represented without loss of accuracy. - // Label is like Text, but optimized for labels. + // Label is like String, but optimized for labels. Label Form = stringForm // Bytes defines the format of bytes literal. diff --git a/vendor/cuelang.org/go/cue/load/config.go b/vendor/cuelang.org/go/cue/load/config.go index 8d131dcf5b..8506a4ef83 100644 --- a/vendor/cuelang.org/go/cue/load/config.go +++ b/vendor/cuelang.org/go/cue/load/config.go @@ -15,25 +15,27 @@ package load import ( + "context" + "fmt" "io" "os" "path/filepath" - "strings" - "cuelabs.dev/go/oci/ociregistry" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" "cuelang.org/go/internal" - "cuelang.org/go/internal/mod/modfile" + "cuelang.org/go/internal/cueexperiment" + "cuelang.org/go/mod/modconfig" + "cuelang.org/go/mod/modfile" + "cuelang.org/go/mod/module" ) const ( cueSuffix = ".cue" modDir = "cue.mod" moduleFile = "module.cue" - pkgDir = "pkg" ) // FromArgsUsage is a partial usage message that applications calling @@ -129,12 +131,17 @@ type Config struct { // A Module is a collection of packages and instances that are within the // directory hierarchy rooted at the module root. The module root can be // marked with a cue.mod file. + // If this is a relative path, it will be interpreted relative to [Config.Dir]. ModuleRoot string // Module specifies the module prefix. If not empty, this value must match // the module field of an existing cue.mod file. Module string + // AcceptLegacyModules causes the module resolution code + // to accept module files that lack a language.version field. + AcceptLegacyModules bool + // modFile holds the contents of the module file, or nil // if no module file was present. If non-nil, then // after calling Config.complete, modFile.Module will be @@ -240,17 +247,13 @@ type Config struct { // a package. Tools bool - // filesMode indicates that files are specified - // explicitly on the command line. - filesMode bool - // If DataFiles is set, the loader includes entries for directories that // have no CUE files, but have recognized data files that could be converted // to CUE. DataFiles bool // StdRoot specifies an alternative directory for standard libraries. - // This is mostly used for bootstrapping. + // Deprecated: this has no effect. StdRoot string // ParseFile is called to read and parse each file when preparing a @@ -277,11 +280,20 @@ type Config struct { // Registry is used to fetch CUE module dependencies. // - // When nil, dependencies will be resolved in legacy mode: - // reading from cue.mod/pkg, cue.mod/usr, and cue.mod/gen. + // When nil, if the modules experiment is enabled + // (CUE_EXPERIMENT=modules), [modconfig.NewRegistry] + // will be used to create a registry instance using the + // usual cmd/cue conventions for environment variables + // (but see the Env field below). // - // THIS IS EXPERIMENTAL FOR NOW. DO NOT USE. - Registry ociregistry.Interface + // THIS IS EXPERIMENTAL. API MIGHT CHANGE. + Registry modconfig.Registry + + // Env provides environment variables for use in the configuration. + // Currently this is only used in the construction of the Registry + // value (see above). 
If this is nil, the current process's environment + // will be used. + Env []string fileSystem fileSystem } @@ -293,33 +305,20 @@ func (c *Config) stdin() io.Reader { return c.Stdin } -func toImportPath(dir string) importPath { - return importPath(filepath.ToSlash(dir)) -} - type importPath string type fsPath string -func addImportQualifier(pkg importPath, name string) (importPath, errors.Error) { - if name != "" { - s := string(pkg) - if i := strings.LastIndexByte(s, '/'); i >= 0 { - s = s[i+1:] - } - if i := strings.LastIndexByte(s, ':'); i >= 0 { - // should never happen, but just in case. - s = s[i+1:] - if s != name { - return "", errors.Newf(token.NoPos, - "non-matching package names (%s != %s)", s, name) - } - } else if s != name { - pkg += importPath(":" + name) - } +func addImportQualifier(pkg importPath, name string) (importPath, error) { + if name == "" { + return pkg, nil } - - return pkg, nil + ip := module.ParseImportPath(string(pkg)) + if ip.ExplicitQualifier && ip.Qualifier != name { + return "", fmt.Errorf("non-matching package names (%s != %s)", ip.Qualifier, name) + } + ip.Qualifier = name + return importPath(ip.String()), nil } // Complete updates the configuration information. After calling complete, @@ -352,43 +351,110 @@ func (c Config) complete() (cfg *Config, err error) { // TODO: we could populate this already with absolute file paths, // but relative paths cannot be added. Consider what is reasonable. - if err := c.fileSystem.init(&c); err != nil { + if err := c.fileSystem.init(c.Dir, c.Overlay); err != nil { return nil, err } // TODO: determine root on a package basis. Maybe we even need a // pkgname.cue.mod // Look to see if there is a cue.mod. + // + // TODO(mvdan): note that setting Config.ModuleRoot to a directory + // without a cue.mod file does not result in any error, which is confusing + // or can lead to not using the right CUE module silently. if c.ModuleRoot == "" { // Only consider the current directory by default c.ModuleRoot = c.Dir - if root := c.findRoot(c.Dir); root != "" { + if root := c.findModRoot(c.Dir); root != "" { c.ModuleRoot = root } } else if !filepath.IsAbs(c.ModuleRoot) { c.ModuleRoot = filepath.Join(c.Dir, c.ModuleRoot) } + // Note: if cueexperiment.Flags.Modules _isn't_ set but c.Registry + // is, we consider that a good enough hint that modules support + // should be enabled and hence don't return an error in that case. + if cueexperiment.Flags.Modules && c.Registry == nil { + registry, err := modconfig.NewRegistry(&modconfig.Config{ + Env: c.Env, + }) + if err != nil { + // If there's an error in the registry configuration, + // don't error immediately, but only when we actually + // need to resolve modules. + registry = errorRegistry{err} + } + c.Registry = registry + } if err := c.loadModule(); err != nil { return nil, err } return &c, nil } -func (c Config) isRoot(dir string) bool { - fs := &c.fileSystem +// loadModule loads the module file, resolves and downloads module +// dependencies. It sets c.Module if it's empty or checks it for +// consistency with the module file otherwise. +func (c *Config) loadModule() error { + // TODO: also make this work if run from outside the module? 
+ mod := filepath.Join(c.ModuleRoot, modDir) + info, cerr := c.fileSystem.stat(mod) + if cerr != nil { + return nil + } + if !info.IsDir() { + return fmt.Errorf("cue.mod files are no longer supported; use cue.mod/module.cue") + } + mod = filepath.Join(mod, moduleFile) + f, cerr := c.fileSystem.openFile(mod) + if cerr != nil { + return nil + } + defer f.Close() + data, err := io.ReadAll(f) + if err != nil { + return err + } + parseModFile := modfile.ParseNonStrict + if c.Registry == nil { + parseModFile = modfile.ParseLegacy + } else if c.AcceptLegacyModules { + // Note: technically this does not support all legacy module + // files because some old module files might contain non-concrete + // data, but that seems like an OK restriction for now at least, + // given that no actual instances of non-concrete data in + // module files have been discovered in the wild. + parseModFile = modfile.FixLegacy + } + mf, err := parseModFile(data, mod) + if err != nil { + return err + } + c.modFile = mf + if mf.QualifiedModule() == "" { + // Backward compatibility: allow empty module.cue file. + // TODO maybe check that the rest of the fields are empty too? + return nil + } + if c.Module != "" && c.Module != mf.Module { + return errors.Newf(token.NoPos, "inconsistent modules: got %q, want %q", mf.Module, c.Module) + } + c.Module = mf.QualifiedModule() + return nil +} + +func (c Config) isModRoot(dir string) bool { // Note: cue.mod used to be a file. We still allow both to match. - _, err := fs.stat(filepath.Join(dir, modDir)) + _, err := c.fileSystem.stat(filepath.Join(dir, modDir)) return err == nil } -// findRoot returns the module root that's ancestor +// findModRoot returns the module root that's ancestor // of the given absolute directory path, or "" if none was found. -func (c Config) findRoot(absDir string) string { - fs := &c.fileSystem - +func (c Config) findModRoot(absDir string) string { abs := absDir for { - if c.isRoot(abs) { + if c.isModRoot(abs) { return abs } d := filepath.Dir(abs) @@ -398,21 +464,7 @@ func (c Config) findRoot(absDir string) string { return "" } if len(d) >= len(abs) { - break // reached top of file system, no cue.mod - } - abs = d - } - abs = absDir - - // TODO(legacy): remove this capability at some point. - for { - info, err := fs.stat(filepath.Join(abs, pkgDir)) - if err == nil && info.IsDir() { - return abs - } - d := filepath.Dir(abs) - if len(d) >= len(abs) { - return "" // reached top of file system, no pkg dir. + return "" // reached top of file system, no cue.mod } abs = d } @@ -422,6 +474,23 @@ func (c *Config) newErrInstance(err error) *build.Instance { i := c.Context.NewInstance("", nil) i.Root = c.ModuleRoot i.Module = c.Module - i.Err = errors.Promote(err, "instance") + i.Err = errors.Promote(err, "") return i } + +// errorRegistry implements [modconfig.Registry] by returning err from all methods. 
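
The complete method above intentionally defers registry construction errors via errorRegistry, so a bad configuration only surfaces if module resolution is actually attempted. A sketch of that deferred-error pattern in isolation, with fetcher and errFetcher as invented stand-ins for modconfig.Registry:

package main

import (
	"errors"
	"fmt"
)

// fetcher stands in for the real modconfig.Registry interface.
type fetcher interface {
	Fetch(path string) (string, error)
}

// errFetcher returns its stored error from every method, so a bad
// configuration is reported only when a fetch is actually attempted.
type errFetcher struct{ err error }

func (f errFetcher) Fetch(path string) (string, error) { return "", f.err }

func main() {
	var f fetcher = errFetcher{err: errors.New("invalid registry configuration")}
	if _, err := f.Fetch("example.com/mod"); err != nil {
		fmt.Println("deferred failure:", err)
	}
}
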
+type errorRegistry struct { + err error +} + +func (r errorRegistry) Requirements(ctx context.Context, m module.Version) ([]module.Version, error) { + return nil, r.err +} + +func (r errorRegistry) Fetch(ctx context.Context, m module.Version) (module.SourceLoc, error) { + return module.SourceLoc{}, r.err +} + +func (r errorRegistry) ModuleVersions(ctx context.Context, mpath string) ([]string, error) { + return nil, r.err +} diff --git a/vendor/cuelang.org/go/cue/load/doc.go b/vendor/cuelang.org/go/cue/load/doc.go index d1b599f21c..6be5c34346 100644 --- a/vendor/cuelang.org/go/cue/load/doc.go +++ b/vendor/cuelang.org/go/cue/load/doc.go @@ -13,4 +13,4 @@ // limitations under the License. // Package load loads CUE instances. -package load // import "cuelang.org/go/cue/load" +package load diff --git a/vendor/cuelang.org/go/cue/load/errors.go b/vendor/cuelang.org/go/cue/load/errors.go index 4ac9d0919f..02b063ce6e 100644 --- a/vendor/cuelang.org/go/cue/load/errors.go +++ b/vendor/cuelang.org/go/cue/load/errors.go @@ -65,7 +65,7 @@ func (p *PackageError) Error() string { type NoFilesError struct { Package *build.Instance - ignored bool // whether any Go files were ignored due to build tags + ignored bool // whether any CUE files were ignored due to build tags } func (e *NoFilesError) Position() token.Pos { return token.NoPos } @@ -126,7 +126,7 @@ func (e *MultiplePackageError) InputPositions() []token.Pos { return nil } func (e *MultiplePackageError) Path() []string { return nil } func (e *MultiplePackageError) Msg() (string, []interface{}) { - return "found packages %q (%s) and %s (%s) in %q", []interface{}{ + return "found packages %q (%s) and %q (%s) in %q", []interface{}{ e.Packages[0], e.Files[0], e.Packages[1], diff --git a/vendor/cuelang.org/go/cue/load/fs.go b/vendor/cuelang.org/go/cue/load/fs.go index 41f388364d..8ec96dce3d 100644 --- a/vendor/cuelang.org/go/cue/load/fs.go +++ b/vendor/cuelang.org/go/cue/load/fs.go @@ -16,17 +16,20 @@ package load import ( "bytes" + "cmp" + "fmt" "io" iofs "io/fs" "os" "path/filepath" - "sort" + "slices" "strings" "time" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" + "cuelang.org/go/mod/module" ) type overlayFile struct { @@ -37,9 +40,14 @@ type overlayFile struct { isDir bool } -func (f *overlayFile) Name() string { return f.basename } -func (f *overlayFile) Size() int64 { return int64(len(f.contents)) } -func (f *overlayFile) Mode() os.FileMode { return 0644 } +func (f *overlayFile) Name() string { return f.basename } +func (f *overlayFile) Size() int64 { return int64(len(f.contents)) } +func (f *overlayFile) Mode() iofs.FileMode { + if f.isDir { + return iofs.ModeDir | 0o555 + } + return 0o444 +} func (f *overlayFile) ModTime() time.Time { return f.modtime } func (f *overlayFile) IsDir() bool { return f.isDir } func (f *overlayFile) Sys() interface{} { return nil } @@ -60,19 +68,42 @@ func (fs *fileSystem) getDir(dir string, create bool) map[string]*overlayFile { return m } -func (fs *fileSystem) init(c *Config) error { - fs.cwd = c.Dir +// ioFS returns an implementation of [io/fs.FS] that holds +// the contents of fs under the given filepath root. +// +// Note: we can't return an FS implementation that covers the +// entirety of fs because the overlay paths may not all share +// a common root. 
+// +// Note also: the returned FS also implements +// [modpkgload.OSRootFS] so that we can map +// the resulting source locations back to the filesystem +// paths required by most of the `cue/load` package +// implementation. +func (fs *fileSystem) ioFS(root string) iofs.FS { + dir := fs.getDir(root, false) + if dir == nil { + return module.OSDirFS(root) + } + return &ioFS{ + fs: fs, + root: root, + } +} - overlay := c.Overlay +func (fs *fileSystem) init(cwd string, overlay map[string]Source) error { + fs.cwd = cwd fs.overlayDirs = map[string]map[string]*overlayFile{} // Organize overlay for filename, src := range overlay { + if !filepath.IsAbs(filename) { + return fmt.Errorf("non-absolute file path %q in overlay", filename) + } // TODO: do we need to further clean the path or check that the // specified files are within the root/ absolute files? dir, base := filepath.Split(filename) m := fs.getDir(dir, true) - b, file, err := src.contents() if err != nil { return err @@ -103,66 +134,11 @@ func (fs *fileSystem) init(c *Config) error { return nil } -func (fs *fileSystem) joinPath(elem ...string) string { - return filepath.Join(elem...) -} - -func (fs *fileSystem) splitPathList(s string) []string { - return filepath.SplitList(s) -} - -func (fs *fileSystem) isAbsPath(path string) bool { - return filepath.IsAbs(path) -} - func (fs *fileSystem) makeAbs(path string) string { - if fs.isAbsPath(path) { + if filepath.IsAbs(path) { return path } - return filepath.Clean(filepath.Join(fs.cwd, path)) -} - -func (fs *fileSystem) isDir(path string) bool { - path = fs.makeAbs(path) - if fs.getDir(path, false) != nil { - return true - } - fi, err := os.Stat(path) - return err == nil && fi.IsDir() -} - -func (fs *fileSystem) hasSubdir(root, dir string) (rel string, ok bool) { - // Try using paths we received. - if rel, ok = hasSubdir(root, dir); ok { - return - } - - // Try expanding symlinks and comparing - // expanded against unexpanded and - // expanded against expanded. 
- rootSym, _ := filepath.EvalSymlinks(root) - dirSym, _ := filepath.EvalSymlinks(dir) - - if rel, ok = hasSubdir(rootSym, dir); ok { - return - } - if rel, ok = hasSubdir(root, dirSym); ok { - return - } - return hasSubdir(rootSym, dirSym) -} - -func hasSubdir(root, dir string) (rel string, ok bool) { - const sep = string(filepath.Separator) - root = filepath.Clean(root) - if !strings.HasSuffix(root, sep) { - root += sep - } - dir = filepath.Clean(dir) - if !strings.HasPrefix(dir, root) { - return "", false - } - return filepath.ToSlash(dir[len(root):]), true + return filepath.Join(fs.cwd, path) } func (fs *fileSystem) readDir(path string) ([]iofs.DirEntry, errors.Error) { @@ -174,23 +150,24 @@ func (fs *fileSystem) readDir(path string) ([]iofs.DirEntry, errors.Error) { return nil, errors.Wrapf(err, token.NoPos, "readDir") } } - if m != nil { - done := map[string]bool{} - for i, fi := range items { - done[fi.Name()] = true - if o := m[fi.Name()]; o != nil { - items[i] = iofs.FileInfoToDirEntry(o) - } + if m == nil { + return items, nil + } + done := map[string]bool{} + for i, fi := range items { + done[fi.Name()] = true + if o := m[fi.Name()]; o != nil { + items[i] = iofs.FileInfoToDirEntry(o) } - for _, o := range m { - if !done[o.Name()] { - items = append(items, iofs.FileInfoToDirEntry(o)) - } + } + for _, o := range m { + if !done[o.Name()] { + items = append(items, iofs.FileInfoToDirEntry(o)) } - sort.Slice(items, func(i, j int) bool { - return items[i].Name() < items[j].Name() - }) } + slices.SortFunc(items, func(a, b iofs.DirEntry) int { + return cmp.Compare(a.Name(), b.Name()) + }) return items, nil } @@ -202,7 +179,7 @@ func (fs *fileSystem) getOverlay(path string) *overlayFile { return nil } -func (fs *fileSystem) stat(path string) (os.FileInfo, errors.Error) { +func (fs *fileSystem) stat(path string) (iofs.FileInfo, errors.Error) { path = fs.makeAbs(path) if fi := fs.getOverlay(path); fi != nil { return fi, nil @@ -214,7 +191,7 @@ func (fs *fileSystem) stat(path string) (os.FileInfo, errors.Error) { return fi, nil } -func (fs *fileSystem) lstat(path string) (os.FileInfo, errors.Error) { +func (fs *fileSystem) lstat(path string) (iofs.FileInfo, errors.Error) { path = fs.makeAbs(path) if fi := fs.getOverlay(path); fi != nil { return fi, nil @@ -280,7 +257,7 @@ func (fs *fileSystem) walkRec(path string, entry iofs.DirEntry, f walkFunc) erro } for _, entry := range dir { - filename := fs.joinPath(path, entry.Name()) + filename := filepath.Join(path, entry.Name()) err = fs.walkRec(filename, entry, f) if err != nil { if !entry.IsDir() || err != skipDir { @@ -290,3 +267,120 @@ func (fs *fileSystem) walkRec(path string, entry iofs.DirEntry, f walkFunc) erro } return nil } + +var _ interface { + iofs.FS + iofs.ReadDirFS + iofs.ReadFileFS + module.OSRootFS +} = (*ioFS)(nil) + +type ioFS struct { + fs *fileSystem + root string +} + +func (fs *ioFS) OSRoot() string { + return fs.root +} + +func (fs *ioFS) Open(name string) (iofs.File, error) { + fpath, err := fs.absPathFromFSPath(name) + if err != nil { + return nil, err + } + r, err := fs.fs.openFile(fpath) + if err != nil { + return nil, err // TODO convert filepath in error to fs path + } + return &ioFSFile{ + fs: fs.fs, + path: fpath, + rc: r, + }, nil +} + +func (fs *ioFS) absPathFromFSPath(name string) (string, error) { + if !iofs.ValidPath(name) { + return "", fmt.Errorf("invalid io/fs path %q", name) + } + // Technically we should mimic Go's internal/safefilepath.fromFS + // functionality here, but as we're using this in a 
relatively limited + // context, we can just prohibit some characters. + if strings.ContainsAny(name, ":\\") { + return "", fmt.Errorf("invalid io/fs path %q", name) + } + return filepath.Join(fs.root, name), nil +} + +// ReadDir implements [io/fs.ReadDirFS]. +func (fs *ioFS) ReadDir(name string) ([]iofs.DirEntry, error) { + fpath, err := fs.absPathFromFSPath(name) + if err != nil { + return nil, err + } + return fs.fs.readDir(fpath) +} + +// ReadFile implements [io/fs.ReadFileFS]. +func (fs *ioFS) ReadFile(name string) ([]byte, error) { + fpath, err := fs.absPathFromFSPath(name) + if err != nil { + return nil, err + } + if fi := fs.fs.getOverlay(fpath); fi != nil { + return bytes.Clone(fi.contents), nil + } + return os.ReadFile(fpath) +} + +// ioFSFile implements [io/fs.File] for the overlay filesystem. +type ioFSFile struct { + fs *fileSystem + path string + rc io.ReadCloser + entries []iofs.DirEntry +} + +var _ interface { + iofs.File + iofs.ReadDirFile +} = (*ioFSFile)(nil) + +func (f *ioFSFile) Stat() (iofs.FileInfo, error) { + return f.fs.stat(f.path) +} + +func (f *ioFSFile) Read(buf []byte) (int, error) { + return f.rc.Read(buf) +} + +func (f *ioFSFile) Close() error { + return f.rc.Close() +} + +func (f *ioFSFile) ReadDir(n int) ([]iofs.DirEntry, error) { + if f.entries == nil { + entries, err := f.fs.readDir(f.path) + if err != nil { + return entries, err + } + if entries == nil { + entries = []iofs.DirEntry{} + } + f.entries = entries + } + if n <= 0 { + entries := f.entries + f.entries = f.entries[len(f.entries):] + return entries, nil + } + var err error + if n >= len(f.entries) { + n = len(f.entries) + err = io.EOF + } + entries := f.entries[:n] + f.entries = f.entries[n:] + return entries, err +} diff --git a/vendor/cuelang.org/go/cue/load/import.go b/vendor/cuelang.org/go/cue/load/import.go index 2842d48c86..8140dfaccf 100644 --- a/vendor/cuelang.org/go/cue/load/import.go +++ b/vendor/cuelang.org/go/cue/load/import.go @@ -15,20 +15,21 @@ package load import ( - "context" + "cmp" "fmt" + "io" + "io/fs" "os" pathpkg "path" "path/filepath" - "sort" + "slices" "strings" - "cuelang.org/go/cue/ast" "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" "cuelang.org/go/internal/filetypes" - "cuelang.org/go/internal/mod/module" + "cuelang.org/go/mod/module" ) // importPkg returns details about the CUE package named by the import path, @@ -81,7 +82,6 @@ func (l *loader) importPkg(pos token.Pos, p *build.Instance) []*build.Instance { if p.PkgName == "" { if l.cfg.Package == "*" { - fp.ignoreOther = true fp.allPackages = true p.PkgName = "_" } else { @@ -97,20 +97,16 @@ func (l *loader) importPkg(pos token.Pos, p *build.Instance) []*build.Instance { genDir := GenPath(cfg.ModuleRoot) if strings.HasPrefix(p.Dir, genDir) { dirs = append(dirs, [2]string{genDir, p.Dir}) - // TODO(legacy): don't support "pkg" // && p.PkgName != "_" - if filepath.Base(genDir) != "pkg" { - for _, sub := range []string{"pkg", "usr"} { - rel, err := filepath.Rel(genDir, p.Dir) - if err != nil { - // should not happen - return retErr( - errors.Wrapf(err, token.NoPos, "invalid path")) - } - base := filepath.Join(cfg.ModuleRoot, modDir, sub) - dir := filepath.Join(base, rel) - dirs = append(dirs, [2]string{base, dir}) + for _, sub := range []string{"pkg", "usr"} { + rel, err := filepath.Rel(genDir, p.Dir) + if err != nil { + // should not happen + return retErr(errors.Wrapf(err, token.NoPos, "invalid path")) } + base := filepath.Join(cfg.ModuleRoot, modDir, sub) + dir := 
filepath.Join(base, rel) + dirs = append(dirs, [2]string{base, dir}) } } else { dirs = append(dirs, [2]string{cfg.ModuleRoot, p.Dir}) @@ -136,39 +132,36 @@ func (l *loader) importPkg(pos token.Pos, p *build.Instance) []*build.Instance { // have the same module scope and that there are no invalid modules. inModule := false // if pkg == "_" for _, d := range dirs { - if l.cfg.findRoot(d[1]) != "" { + if l.cfg.findModRoot(d[1]) != "" { inModule = true break } } + // Walk the parent directories up to the module root to add their files as well, + // since a package foo/bar/baz inherits from parent packages foo/bar and foo. + // See https://cuelang.org/docs/concept/modules-packages-instances/#instances. for _, d := range dirs { - for dir := filepath.Clean(d[1]); ctxt.isDir(dir); { - files, err := ctxt.readDir(dir) - if err != nil && !os.IsNotExist(err) { - return retErr(errors.Wrapf(err, pos, "import failed reading dir %v", dirs[0][1])) + dir := filepath.Clean(d[1]) + for { + sd, ok := l.dirCachedBuildFiles[dir] + if !ok { + sd = l.scanDir(dir) + l.dirCachedBuildFiles[dir] = sd } - for _, f := range files { - if f.IsDir() { - continue - } - if f.Name() == "-" { - if _, err := cfg.fileSystem.stat("-"); !os.IsNotExist(err) { - continue - } - } - file, err := filetypes.ParseFile(f.Name(), filetypes.Input) - if err != nil { - p.UnknownFiles = append(p.UnknownFiles, &build.File{ - Filename: f.Name(), - ExcludeReason: errors.Newf(token.NoPos, "unknown filetype"), - }) - continue // skip unrecognized file types + if err := sd.err; err != nil { + if errors.Is(err, fs.ErrNotExist) { + break } - fp.add(pos, dir, file, importComment) + return retErr(errors.Wrapf(err, token.NoPos, "import failed reading dir %v", dir)) + } + p.UnknownFiles = append(p.UnknownFiles, sd.unknownFiles...) + for _, f := range sd.buildFiles { + bf := *f + fp.add(dir, &bf, importComment) } - if p.PkgName == "" || !inModule || l.cfg.isRoot(dir) || dir == d[0] { + if p.PkgName == "" || !inModule || l.cfg.isModRoot(dir) || dir == d[0] { break } @@ -192,7 +185,7 @@ func (l *loader) importPkg(pos token.Pos, p *build.Instance) []*build.Instance { impPath, err := addImportQualifier(importPath(p.ImportPath), p.PkgName) p.ImportPath = string(impPath) if err != nil { - p.ReportError(err) + p.ReportError(errors.Promote(err, "")) } all = append(all, p) @@ -202,29 +195,86 @@ func (l *loader) importPkg(pos token.Pos, p *build.Instance) []*build.Instance { return all } - l.addFiles(cfg.ModuleRoot, p) + l.addFiles(p) _ = p.Complete() } - sort.Slice(all, func(i, j int) bool { - return all[i].Dir < all[j].Dir + slices.SortFunc(all, func(a, b *build.Instance) int { + // Instances may share the same directory but have different package names. + // Sort by directory first, then by package name. + if c := cmp.Compare(a.Dir, b.Dir); c != 0 { + return c + } + + return cmp.Compare(a.PkgName, b.PkgName) }) return all } -// _loadFunc is the method used for the value of l.loadFunc. 
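
The loop above consults l.dirCachedBuildFiles so that each directory is scanned at most once while importPkg walks from a package directory up toward the module root. A generic sketch of that memoization, with invented types rather than the loader's own:

package main

import (
	"fmt"
	"os"
)

// scanResult caches both the scan output and any error, matching how
// cachedFileFiles keeps sd.err alongside the build files.
type scanResult struct {
	names []string
	err   error
}

type scanner struct {
	cache map[string]scanResult
}

func (s *scanner) scan(dir string) scanResult {
	if r, ok := s.cache[dir]; ok {
		return r // repeat lookups never touch the filesystem
	}
	var r scanResult
	entries, err := os.ReadDir(dir)
	if err != nil {
		r.err = err
	} else {
		for _, e := range entries {
			if !e.IsDir() {
				r.names = append(r.names, e.Name())
			}
		}
	}
	s.cache[dir] = r
	return r
}

func main() {
	s := &scanner{cache: map[string]scanResult{}}
	first := s.scan(".")
	second := s.scan(".") // served from cache, including any error
	fmt.Println(len(first.names) == len(second.names), first.err, second.err)
}
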
-func (l *loader) _loadFunc(pos token.Pos, path string) *build.Instance { +func (l *loader) scanDir(dir string) cachedFileFiles { + sd := cachedFileFiles{} + files, err := l.cfg.fileSystem.readDir(dir) + if err != nil { + sd.err = err + return sd + } + for _, f := range files { + if f.IsDir() { + continue + } + if f.Name() == "-" { + if _, err := l.cfg.fileSystem.stat("-"); !os.IsNotExist(err) { + continue + } + } + file, err := filetypes.ParseFile(f.Name(), filetypes.Input) + if err != nil { + sd.unknownFiles = append(sd.unknownFiles, &build.File{ + Filename: f.Name(), + ExcludeReason: errors.Newf(token.NoPos, "unknown filetype"), + }) + continue // skip unrecognized file types + } + sd.buildFiles = append(sd.buildFiles, file) + } + return sd +} + +func setFileSource(cfg *Config, f *build.File) error { + if f.Source != nil { + return nil + } + fullPath := f.Filename + if fullPath == "-" { + b, err := io.ReadAll(cfg.stdin()) + if err != nil { + return errors.Newf(token.NoPos, "read stdin: %v", err) + } + f.Source = b + return nil + } + if !filepath.IsAbs(fullPath) { + fullPath = filepath.Join(cfg.Dir, fullPath) + // Ensure that encoding.NewDecoder will work correctly. + f.Filename = fullPath + } + if fi := cfg.fileSystem.getOverlay(fullPath); fi != nil { + if fi.file != nil { + f.Source = fi.file + } else { + f.Source = fi.contents + } + } + return nil +} + +func (l *loader) loadFunc(pos token.Pos, path string) *build.Instance { impPath := importPath(path) if isLocalImport(path) { return l.cfg.newErrInstance(errors.Newf(pos, "relative import paths not allowed (%q)", path)) } - // is it a builtin? - if strings.IndexByte(strings.Split(path, "/")[0], '.') == -1 { - if l.cfg.StdRoot != "" { - p := l.newInstance(pos, impPath) - _ = l.importPkg(pos, p) - return p - } + if isStdlibPackage(path) { + // It looks like a builtin. return nil } @@ -239,10 +289,6 @@ func (l *loader) newRelInstance(pos token.Pos, path, pkgName string) *build.Inst if !isLocalImport(path) { panic(fmt.Errorf("non-relative import path %q passed to newRelInstance", path)) } - fs := l.cfg.fileSystem - - var err errors.Error - dir := path p := l.cfg.Context.NewInstance(path, l.loadFunc) p.PkgName = pkgName @@ -251,22 +297,33 @@ func (l *loader) newRelInstance(pos token.Pos, path, pkgName string) *build.Inst p.Root = l.cfg.ModuleRoot p.Module = l.cfg.Module - dir = filepath.Join(l.cfg.Dir, filepath.FromSlash(path)) - + var err errors.Error if path != cleanImport(path) { err = errors.Append(err, l.errPkgf(nil, "non-canonical import path: %q should be %q", path, pathpkg.Clean(path))) } - if importPath, e := l.importPathFromAbsDir(fsPath(dir), path); e != nil { + dir := filepath.Join(l.cfg.Dir, filepath.FromSlash(path)) + if pkgPath, e := importPathFromAbsDir(l.cfg, dir, path); e != nil { // Detect later to keep error messages consistent. } else { - p.ImportPath = string(importPath) + // Add package qualifier if the configuration requires it. + name := l.cfg.Package + switch name { + case "_", "*": + name = "" + } + pkgPath, e := addImportQualifier(pkgPath, name) + if e != nil { + // Detect later to keep error messages consistent. 
+ } else { + p.ImportPath = string(pkgPath) + } } p.Dir = dir - if fs.isAbsPath(path) || strings.HasPrefix(path, "/") { + if filepath.IsAbs(path) || strings.HasPrefix(path, "/") { err = errors.Append(err, errors.Newf(pos, "absolute import path %q not allowed", path)) } @@ -278,61 +335,50 @@ func (l *loader) newRelInstance(pos token.Pos, path, pkgName string) *build.Inst return p } -func (l *loader) importPathFromAbsDir(absDir fsPath, key string) (importPath, errors.Error) { - if l.cfg.ModuleRoot == "" { - return "", errors.Newf(token.NoPos, - "cannot determine import path for %q (root undefined)", key) +func importPathFromAbsDir(c *Config, absDir string, origPath string) (importPath, error) { + if c.ModuleRoot == "" { + return "", fmt.Errorf("cannot determine import path for %q (root undefined)", origPath) } - dir := filepath.Clean(string(absDir)) - if !strings.HasPrefix(dir, l.cfg.ModuleRoot) { - return "", errors.Newf(token.NoPos, - "cannot determine import path for %q (dir outside of root)", key) + dir := filepath.Clean(absDir) + if !strings.HasPrefix(dir, c.ModuleRoot) { + return "", fmt.Errorf("cannot determine import path for %q (dir outside of root)", origPath) } - pkg := filepath.ToSlash(dir[len(l.cfg.ModuleRoot):]) + pkg := filepath.ToSlash(dir[len(c.ModuleRoot):]) switch { case strings.HasPrefix(pkg, "/cue.mod/"): pkg = pkg[len("/cue.mod/"):] if pkg == "" { - return "", errors.Newf(token.NoPos, - "invalid package %q (root of %s)", key, modDir) + return "", fmt.Errorf("invalid package %q (root of %s)", origPath, modDir) } - // TODO(legacy): remove. - case strings.HasPrefix(pkg, "/pkg/"): - pkg = pkg[len("/pkg/"):] - if pkg == "" { - return "", errors.Newf(token.NoPos, - "invalid package %q (root of %s)", key, pkgDir) - } - - case l.cfg.Module == "": - return "", errors.Newf(token.NoPos, - "cannot determine import path for %q (no module)", key) + case c.Module == "": + return "", fmt.Errorf("cannot determine import path for %q (no module)", origPath) default: - pkg = l.cfg.Module + pkg - } - - name := l.cfg.Package - switch name { - case "_", "*": - name = "" + impPath := module.ParseImportPath(c.Module) + impPath.Path += pkg + impPath.Qualifier = "" + pkg = impPath.String() } - - return addImportQualifier(importPath(pkg), name) + return importPath(pkg), nil } func (l *loader) newInstance(pos token.Pos, p importPath) *build.Instance { - dir, name, err := l.absDirFromImportPath(pos, p) + dir, err := l.absDirFromImportPath(pos, p) i := l.cfg.Context.NewInstance(dir, l.loadFunc) + i.Err = errors.Append(i.Err, err) i.Dir = dir - i.PkgName = name + + parts := module.ParseImportPath(string(p)) + i.PkgName = parts.Qualifier + if i.PkgName == "" { + i.Err = errors.Append(i.Err, l.errPkgf([]token.Pos{pos}, "cannot determine package name for %q; set it explicitly with ':'", p)) + } i.DisplayPath = string(p) i.ImportPath = string(p) i.Root = l.cfg.ModuleRoot i.Module = l.cfg.Module - i.Err = errors.Append(i.Err, err) return i } @@ -341,81 +387,106 @@ func (l *loader) newInstance(pos token.Pos, p importPath) *build.Instance { // and a package name. The root directory must be set. // // The returned directory may not exist. 
-func (l *loader) absDirFromImportPath(pos token.Pos, p importPath) (absDir, name string, err errors.Error) { - if l.cfg.ModuleRoot == "" { - return "", "", errors.Newf(pos, "cannot import %q (root undefined)", p) +func (l *loader) absDirFromImportPath(pos token.Pos, p importPath) (string, errors.Error) { + dir, err := l.absDirFromImportPath1(pos, p) + if err != nil { + // Any error trying to determine the package location + // is a PackageError. + return "", l.errPkgf([]token.Pos{pos}, "%s", err.Error()) } + return dir, nil +} +func (l *loader) absDirFromImportPath1(pos token.Pos, p importPath) (absDir string, err error) { + if p == "" { + return "", fmt.Errorf("empty import path") + } + if l.cfg.ModuleRoot == "" { + return "", fmt.Errorf("cannot import %q (root undefined)", p) + } + if isStdlibPackage(string(p)) { + return "", fmt.Errorf("standard library import path %q cannot be imported as a CUE package", p) + } // Extract the package name. + parts := module.ParseImportPath(string(p)) + unqualified := parts.Unqualified().String() + if l.cfg.Registry != nil { + if l.pkgs == nil { + return "", fmt.Errorf("imports are unavailable because there is no cue.mod/module.cue file") + } + // TODO predicate registry-aware lookup on module.cue-declared CUE version? - name = string(p) - switch i := strings.LastIndexAny(name, "/:"); { - case i < 0: - case p[i] == ':': - name = string(p[i+1:]) - p = p[:i] - - default: // p[i] == '/' - mp, _, ok := module.SplitPathVersion(string(p)) - if ok { - // import of the form: example.com/foo/bar@v1 - if i := strings.LastIndex(mp, "/"); i >= 0 { - name = mp[i+1:] - } + // Note: use the canonical form of the import path because + // that's the form passed to [modpkgload.LoadPackages] + // and hence it's available by that name via Pkg. + pkg := l.pkgs.Pkg(parts.Canonical().String()) + // TODO(mvdan): using "unqualified" for the errors below doesn't seem right, + // should we not be using either the original path or the canonical path? + // The unqualified import path should only be used for filepath.FromSlash further below. + if pkg == nil { + return "", fmt.Errorf("no dependency found for package %q", unqualified) + } + if err := pkg.Error(); err != nil { + return "", fmt.Errorf("cannot find package %q: %v", unqualified, err) + } + if mv := pkg.Mod(); mv.IsLocal() { + // It's a local package that's present inside one or both of the gen, usr or pkg + // directories. Even though modpkgload tells us exactly what those directories + // are, the rest of the cue/load logic expects only a single directory for now, + // so just use that. + absDir = filepath.Join(GenPath(l.cfg.ModuleRoot), parts.Path) } else { - name = string(p[i+1:]) + locs := pkg.Locations() + if len(locs) > 1 { + return "", fmt.Errorf("package %q unexpectedly found in multiple locations", unqualified) + } + if len(locs) == 0 { + return "", fmt.Errorf("no location found for package %q", unqualified) + } + var err error + absDir, err = absPathForSourceLoc(locs[0]) + if err != nil { + return "", fmt.Errorf("cannot determine source directory for package %q: %v", unqualified, err) + } } - } - // TODO: fully test that name is a valid identifier. 
- if name == "" { - err = errors.Newf(pos, "empty package name in import path %q", p) - } else if strings.IndexByte(name, '.') >= 0 { - err = errors.Newf(pos, - "cannot determine package name for %q (set explicitly with ':')", p) - } else if !ast.IsValidIdent(name) { - err = errors.Newf(pos, - "implied package identifier %q from import path %q is not valid", name, p) + return absDir, nil } - // Determine the directory. + // Determine the directory without using the registry. - sub := filepath.FromSlash(string(p)) - switch hasPrefix := strings.HasPrefix(string(p), l.cfg.Module); { + sub := filepath.FromSlash(unqualified) + switch hasPrefix := strings.HasPrefix(unqualified, l.cfg.Module); { case hasPrefix && len(sub) == len(l.cfg.Module): absDir = l.cfg.ModuleRoot - case hasPrefix && p[len(l.cfg.Module)] == '/': + case hasPrefix && unqualified[len(l.cfg.Module)] == '/': absDir = filepath.Join(l.cfg.ModuleRoot, sub[len(l.cfg.Module)+1:]) default: - // TODO predicate registry-aware lookup on module.cue-declared CUE version? - if l.cfg.Registry != nil { - var err error - absDir, err = l.externalPackageDir(p) - if err != nil { - // TODO why can't we use %w ? - return "", name, errors.Newf(token.NoPos, "cannot get directory for external module %q: %v", p, err) - } - } else { - absDir = filepath.Join(GenPath(l.cfg.ModuleRoot), sub) - } + absDir = filepath.Join(GenPath(l.cfg.ModuleRoot), sub) } - - return absDir, name, err + return absDir, err } -func (l *loader) externalPackageDir(p importPath) (dir string, err error) { - if l.deps == nil { - return "", fmt.Errorf("no dependency found for import path %q (no dependencies at all)", p) +func absPathForSourceLoc(loc module.SourceLoc) (string, error) { + osfs, ok := loc.FS.(module.OSRootFS) + if !ok { + return "", fmt.Errorf("cannot get absolute path for FS of type %T", loc.FS) } - m, subPath, err := l.deps.lookup(p) - if err != nil { - return "", err + osPath := osfs.OSRoot() + if osPath == "" { + return "", fmt.Errorf("cannot get absolute path for FS of type %T", loc.FS) } + return filepath.Join(osPath, loc.Dir), nil +} - dir, err = l.regClient.getModContents(context.TODO(), m) - if err != nil { - return "", fmt.Errorf("cannot get contents for %v: %v", m, err) +// isStdlibPackage reports whether pkgPath looks like +// an import from the standard library. +func isStdlibPackage(pkgPath string) bool { + firstElem, _, _ := strings.Cut(pkgPath, "/") + if firstElem == "" { + return false // absolute paths like "/foo/bar" } - return filepath.Join(dir, filepath.FromSlash(subPath)), nil + // Paths like ".foo/bar", "./foo/bar", or "foo.com/bar" are not standard library import paths. + return strings.IndexByte(firstElem, '.') == -1 } diff --git a/vendor/cuelang.org/go/cue/load/instances.go b/vendor/cuelang.org/go/cue/load/instances.go index b6500c9d23..9bf0a49f20 100644 --- a/vendor/cuelang.org/go/cue/load/instances.go +++ b/vendor/cuelang.org/go/cue/load/instances.go @@ -14,18 +14,19 @@ package load -// Files in package are to a large extent based on Go files from the following -// Go packages: -// - cmd/go/internal/load -// - go/build - import ( + "context" "fmt" - "os" + "sort" + "strconv" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/build" + "cuelang.org/go/internal/cueexperiment" "cuelang.org/go/internal/filetypes" + "cuelang.org/go/internal/mod/modpkgload" + "cuelang.org/go/internal/mod/modrequirements" + "cuelang.org/go/mod/module" // Trigger the unconditional loading of all core builtin packages if load // is used. 
This was deemed the simplest way to avoid having to import @@ -40,54 +41,83 @@ import ( // instance, but errors that occur loading dependencies are recorded in these // dependencies. func Instances(args []string, c *Config) []*build.Instance { + ctx := context.TODO() if c == nil { c = &Config{} } + // We want to consult the CUE_EXPERIMENT flag to see whether + // to consult external registries by default. + if err := cueexperiment.Init(); err != nil { + return []*build.Instance{c.newErrInstance(err)} + } newC, err := c.complete() if err != nil { return []*build.Instance{c.newErrInstance(err)} } c = newC - // TODO use predictable location - var deps *dependencies - var regClient *registryClient - if c.Registry != nil { - // TODO use configured cache directory. - tmpDir, err := os.MkdirTemp("", "cue-load-") - if err != nil { + if len(args) == 0 { + args = []string{"."} + } + // TODO: This requires packages to be placed before files. At some point this + // could be relaxed. + i := 0 + for ; i < len(args) && filetypes.IsPackage(args[i]); i++ { + } + pkgArgs := args[:i] + otherArgs := args[i:] + otherFiles, err := filetypes.ParseArgs(otherArgs) + if err != nil { + return []*build.Instance{c.newErrInstance(err)} + } + for _, f := range otherFiles { + if err := setFileSource(c, f); err != nil { return []*build.Instance{c.newErrInstance(err)} } - regClient, err = newRegistryClient(c.Registry, tmpDir) - if err != nil { - return []*build.Instance{c.newErrInstance(fmt.Errorf("cannot make registry client: %v", err))} - } - deps1, err := resolveDependencies(c.modFile, regClient) - if err != nil { - return []*build.Instance{c.newErrInstance(fmt.Errorf("cannot resolve dependencies: %v", err))} + } + if c.Package != "" && c.Package != "_" && c.Package != "*" { + // The caller has specified an explicit package to load. + // This is essentially the same as passing an explicit package + // qualifier to all package arguments that don't already have + // one. We add that qualifier here so that there's a distinction + // between package paths specified as arguments, which + // have the qualifier added, and package paths that are dependencies + // of those, which don't. + pkgArgs1 := make([]string, 0, len(pkgArgs)) + for _, p := range pkgArgs { + if ip := module.ParseImportPath(p); !ip.ExplicitQualifier { + ip.Qualifier = c.Package + p = ip.String() + } + pkgArgs1 = append(pkgArgs1, p) } - deps = deps1 - + pkgArgs = pkgArgs1 } + + synCache := newSyntaxCache(c) tg := newTagger(c) - l := newLoader(c, tg, deps, regClient) + // Pass all arguments that look like packages to loadPackages + // so that they'll be available when looking up the packages + // that are specified on the command line. + expandedPaths, err := expandPackageArgs(c, pkgArgs, c.Package, tg) + if err != nil { + return []*build.Instance{c.newErrInstance(err)} + } + pkgs, err := loadPackages(ctx, c, synCache, expandedPaths, otherFiles) + if err != nil { + return []*build.Instance{c.newErrInstance(err)} + } + l := newLoader(c, tg, synCache, pkgs) if c.Context == nil { c.Context = build.NewContext( - build.Loader(l.buildLoadFunc()), + build.Loader(l.loadFunc), build.ParseFile(c.ParseFile), ) } - // TODO: require packages to be placed before files. At some point this - // could be relaxed.
- i := 0 - for ; i < len(args) && filetypes.IsPackage(args[i]); i++ { - } - a := []*build.Instance{} - - if len(args) == 0 || i > 0 { - for _, m := range l.importPaths(args[:i]) { + if len(pkgArgs) > 0 { + for _, m := range l.importPaths(pkgArgs) { if m.Err != nil { inst := c.newErrInstance(m.Err) a = append(a, inst) @@ -97,12 +127,8 @@ func Instances(args []string, c *Config) []*build.Instance { } } - if args = args[i:]; len(args) > 0 { - files, err := filetypes.ParseArgs(args) - if err != nil { - return []*build.Instance{c.newErrInstance(err)} - } - a = append(a, l.cueFilesPackage(files)) + if len(otherFiles) > 0 { + a = append(a, l.cueFilesPackage(otherFiles)) } for _, p := range a { @@ -140,3 +166,63 @@ func Instances(args []string, c *Config) []*build.Instance { return a } + +// loadPackages returns the packages loaded from the given package list, +// also including any imports found in the given build files. +func loadPackages(ctx context.Context, cfg *Config, synCache *syntaxCache, pkgs []resolvedPackageArg, otherFiles []*build.File) (*modpkgload.Packages, error) { + if cfg.Registry == nil || cfg.modFile == nil || cfg.modFile.Module == "" { + return nil, nil + } + reqs := modrequirements.NewRequirements( + cfg.modFile.QualifiedModule(), + cfg.Registry, + cfg.modFile.DepVersions(), + cfg.modFile.DefaultMajorVersions(), + ) + mainModLoc := module.SourceLoc{ + FS: cfg.fileSystem.ioFS(cfg.ModuleRoot), + Dir: ".", + } + pkgPaths := make(map[string]bool) + // Add any packages specified directly on the command line. + for _, pkg := range pkgs { + pkgPaths[pkg.resolvedCanonical] = true + } + // Add any imports found in other files. + for _, f := range otherFiles { + if f.Encoding != build.CUE { + // not a CUE file; assume it has no imports for now. + continue + } + syntaxes, err := synCache.getSyntax(f) + if err != nil { + return nil, fmt.Errorf("cannot get syntax for %q: %v", f.Filename, err) + } + for _, syntax := range syntaxes { + for _, imp := range syntax.Imports { + pkgPath, err := strconv.Unquote(imp.Path.Value) + if err != nil { + // Should never happen. + return nil, fmt.Errorf("invalid import path %q in %s", imp.Path.Value, f.Filename) + } + // Canonicalize the path. + pkgPath = module.ParseImportPath(pkgPath).Canonical().String() + pkgPaths[pkgPath] = true + } + } + } + // TODO use maps.Keys when we can. + pkgPathSlice := make([]string, 0, len(pkgPaths)) + for p := range pkgPaths { + pkgPathSlice = append(pkgPathSlice, p) + } + sort.Strings(pkgPathSlice) + return modpkgload.LoadPackages( + ctx, + cfg.Module, + mainModLoc, + reqs, + cfg.Registry, + pkgPathSlice, + ), nil +} diff --git a/vendor/cuelang.org/go/cue/load/loader.go b/vendor/cuelang.org/go/cue/load/loader.go index e2f0752333..eed3e743da 100644 --- a/vendor/cuelang.org/go/cue/load/loader.go +++ b/vendor/cuelang.org/go/cue/load/loader.go @@ -22,10 +22,14 @@ package load import ( "path/filepath" + "cuelang.org/go/cue" + "cuelang.org/go/cue/ast" "cuelang.org/go/cue/build" + "cuelang.org/go/cue/cuecontext" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" "cuelang.org/go/internal/encoding" + "cuelang.org/go/internal/mod/modpkgload" // Trigger the unconditional loading of all core builtin packages if load // is used.
This was deemed the simplest way to avoid having to import @@ -35,27 +39,39 @@ import ( ) type loader struct { - cfg *Config - tagger *tagger - stk importStack - loadFunc build.LoadFunc - deps *dependencies - regClient *registryClient + cfg *Config + tagger *tagger + stk importStack + pkgs *modpkgload.Packages + + // syntaxCache caches the work involved when decoding a file into an *ast.File. + // This can happen multiple times for the same file, for example when it is present in + // multiple different build instances in the same directory hierarchy. + syntaxCache *syntaxCache + + // dirCachedBuildFiles caches the work involved when reading a directory + // and determining what build files it contains. + // It is keyed by directory name. + // When we descend into subdirectories to load patterns such as ./... + // we often end up loading parent directories many times over; + // this cache amortizes that work. + dirCachedBuildFiles map[string]cachedFileFiles } -func newLoader(c *Config, tg *tagger, deps *dependencies, regClient *registryClient) *loader { - l := &loader{ - cfg: c, - tagger: tg, - deps: deps, - regClient: regClient, - } - l.loadFunc = l._loadFunc - return l +type cachedFileFiles struct { + err errors.Error + buildFiles []*build.File + unknownFiles []*build.File } -func (l *loader) buildLoadFunc() build.LoadFunc { - return l.loadFunc +func newLoader(c *Config, tg *tagger, syntaxCache *syntaxCache, pkgs *modpkgload.Packages) *loader { + return &loader{ + cfg: c, + tagger: tg, + pkgs: pkgs, + dirCachedBuildFiles: map[string]cachedFileFiles{}, + syntaxCache: syntaxCache, + } } func (l *loader) abs(filename string) string { @@ -77,10 +93,8 @@ func (l *loader) errPkgf(importPos []token.Pos, format string, args ...interface // cueFilesPackage creates a package for building a collection of CUE files // (typically named on the command line). 
func (l *loader) cueFilesPackage(files []*build.File) *build.Instance { - cfg := l.cfg - cfg.filesMode = true // ModInit() // TODO: support modules - pkg := l.cfg.Context.NewInstance(cfg.Dir, l.loadFunc) + pkg := l.cfg.Context.NewInstance(l.cfg.Dir, l.loadFunc) for _, bf := range files { f := bf.Filename @@ -88,24 +102,28 @@ func (l *loader) cueFilesPackage(files []*build.File) *build.Instance { continue } if !filepath.IsAbs(f) { - f = filepath.Join(cfg.Dir, f) + f = filepath.Join(l.cfg.Dir, f) } - fi, err := cfg.fileSystem.stat(f) + fi, err := l.cfg.fileSystem.stat(f) if err != nil { - return cfg.newErrInstance(errors.Wrapf(err, token.NoPos, "could not find file %v", f)) + return l.cfg.newErrInstance(errors.Wrapf(err, token.NoPos, "could not find file %v", f)) } if fi.IsDir() { - return cfg.newErrInstance(errors.Newf(token.NoPos, "file is a directory %v", f)) + return l.cfg.newErrInstance(errors.Newf(token.NoPos, "file is a directory %v", f)) } } - fp := newFileProcessor(cfg, pkg, l.tagger) - for _, file := range files { - fp.add(token.NoPos, cfg.Dir, file, allowAnonymous) + fp := newFileProcessor(l.cfg, pkg, l.tagger) + if l.cfg.Package == "*" { + fp.allPackages = true + pkg.PkgName = "_" + } + for _, bf := range files { + fp.add(l.cfg.Dir, bf, allowAnonymous|allowExcludedFiles) } // TODO: ModImportFromFiles(files) - pkg.Dir = cfg.Dir + pkg.Dir = l.cfg.Dir rewriteFiles(pkg, pkg.Dir, true) for _, err := range errors.Errors(fp.finalize(pkg)) { // ImportDir(&ctxt, dir, 0) var x *NoFilesError @@ -120,31 +138,62 @@ func (l *loader) cueFilesPackage(files []*build.File) *build.Instance { // bp.ImportPath = ModDirImportPath(dir) // } - l.addFiles(cfg.Dir, pkg) - pkg.User = true - l.stk.Push("user") + l.addFiles(pkg) + _ = pkg.Complete() - l.stk.Pop() - pkg.User = true - //pkg.LocalPrefix = dirToImportPath(dir) pkg.DisplayPath = "command-line-arguments" return pkg } -func (l *loader) addFiles(dir string, p *build.Instance) { - for _, f := range p.BuildFiles { - d := encoding.NewDecoder(f, &encoding.Config{ - Stdin: l.cfg.stdin(), - ParseFile: l.cfg.ParseFile, - }) - for ; !d.Done(); d.Next() { - _ = p.AddSyntax(d.File()) - } - if err := d.Err(); err != nil { +// addFiles populates p.Files by reading CUE syntax from p.BuildFiles. +func (l *loader) addFiles(p *build.Instance) { + for _, bf := range p.BuildFiles { + files, err := l.syntaxCache.getSyntax(bf) + if err != nil { p.ReportError(errors.Promote(err, "load")) } - d.Close() + for _, f := range files { + _ = p.AddSyntax(f) + } + } +} + +type syntaxCache struct { + config encoding.Config + ctx *cue.Context + cache map[string]syntaxCacheEntry +} + +type syntaxCacheEntry struct { + err error + files []*ast.File +} + +func newSyntaxCache(cfg *Config) *syntaxCache { + return &syntaxCache{ + config: encoding.Config{ + Stdin: cfg.stdin(), + ParseFile: cfg.ParseFile, + }, + ctx: cuecontext.New(), + cache: make(map[string]syntaxCacheEntry), + } +} + +// getSyntax returns the CUE syntax corresponding to the build file bf.
+func (c *syntaxCache) getSyntax(bf *build.File) ([]*ast.File, error) { + syntax, ok := c.cache[bf.Filename] + if ok { + return syntax.files, syntax.err + } + d := encoding.NewDecoder(c.ctx, bf, &c.config) + for ; !d.Done(); d.Next() { + syntax.files = append(syntax.files, d.File()) } + d.Close() + syntax.err = d.Err() + c.cache[bf.Filename] = syntax + return syntax.files, syntax.err } diff --git a/vendor/cuelang.org/go/cue/load/loader_common.go b/vendor/cuelang.org/go/cue/load/loader_common.go index 4068948030..e5544a36e7 100644 --- a/vendor/cuelang.org/go/cue/load/loader_common.go +++ b/vendor/cuelang.org/go/cue/load/loader_common.go @@ -16,39 +16,34 @@ package load import ( "bytes" - "path" + "cmp" pathpkg "path" "path/filepath" + "slices" "sort" "strconv" "strings" "unicode" "unicode/utf8" - "cuelang.org/go/cue/ast" "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/parser" "cuelang.org/go/cue/token" - "cuelang.org/go/internal" ) // An importMode controls the behavior of the Import method. type importMode uint const ( - // If findOnly is set, Import stops after locating the directory - // that should contain the sources for a package. It does not - // read any files in the directory. - findOnly importMode = 1 << iota - // If importComment is set, parse import comments on package statements. // Import returns an error if it finds a comment it cannot understand // or finds conflicting comments in multiple source files. // See golang.org/s/go14customimport for more information. - importComment + importComment importMode = 1 << iota allowAnonymous + allowExcludedFiles ) func rewriteFiles(p *build.Instance, root string, isLocal bool) { @@ -64,16 +59,16 @@ func rewriteFiles(p *build.Instance, root string, isLocal bool) { // normalizeFiles sorts the files so that files contained by a parent directory // always come before files contained in sub-directories, and that filenames in // the same directory are sorted lexically byte-wise, like Go's `<` operator. -func normalizeFiles(a []*build.File) { - sort.Slice(a, func(i, j int) bool { - fi := a[i].Filename - fj := a[j].Filename - ci := strings.Count(fi, string(filepath.Separator)) - cj := strings.Count(fj, string(filepath.Separator)) - if ci != cj { - return ci < cj +func normalizeFiles(files []*build.File) { + slices.SortFunc(files, func(a, b *build.File) int { + fa := a.Filename + fb := b.Filename + ca := strings.Count(fa, string(filepath.Separator)) + cb := strings.Count(fb, string(filepath.Separator)) + if c := cmp.Compare(ca, cb); c != 0 { + return c } - return fi < fj + return cmp.Compare(fa, fb) }) } @@ -108,7 +103,6 @@ type fileProcessor struct { firstCommentFile string imported map[string][]token.Pos allTags map[string]bool - allFiles bool ignoreOther bool // ignore files from other packages allPackages bool @@ -167,14 +161,15 @@ func (fp *fileProcessor) finalize(p *build.Instance) errors.Error { return nil } -func (fp *fileProcessor) add(pos token.Pos, root string, file *build.File, mode importMode) (added bool) { +// add adds the given file to the appropriate package in fp. 
+func (fp *fileProcessor) add(root string, file *build.File, mode importMode) (added bool) { fullPath := file.Filename if fullPath != "-" { if !filepath.IsAbs(fullPath) { fullPath = filepath.Join(root, fullPath) } + file.Filename = fullPath } - file.Filename = fullPath base := filepath.Base(fullPath) @@ -188,8 +183,11 @@ func (fp *fileProcessor) add(pos token.Pos, root string, file *build.File, mode p.InvalidFiles = append(p.InvalidFiles, file) return true } + if err := setFileSource(fp.c, file); err != nil { + return badFile(errors.Promote(err, "")) + } - match, data, err := matchFile(fp.c, file, true, fp.allFiles, fp.allTags) + match, data, err := matchFile(fp.c, file, true, fp.allTags, mode) switch { case match: @@ -211,16 +209,17 @@ func (fp *fileProcessor) add(pos token.Pos, root string, file *build.File, mode return false } - pf, perr := parser.ParseFile(fullPath, data, parser.ImportsOnly, parser.ParseComments) + pf, perr := parser.ParseFile(fullPath, data, parser.ImportsOnly) if perr != nil { badFile(errors.Promote(perr, "add failed")) return true } - _, pkg, pos := internal.PackageInfo(pf) + pkg := pf.PackageName() if pkg == "" { pkg = "_" } + pos := pf.Pos() switch { case pkg == p.PkgName, mode&allowAnonymous != 0: @@ -249,7 +248,7 @@ func (fp *fileProcessor) add(pos token.Pos, root string, file *build.File, mode } if !fp.c.AllCUEFiles { - if err := shouldBuildFile(pf, fp); err != nil { + if err := shouldBuildFile(pf, fp.tagger); err != nil { if !errors.Is(err, errExclude) { fp.err = errors.Append(fp.err, err) } @@ -270,11 +269,13 @@ func (fp *fileProcessor) add(pos token.Pos, root string, file *build.File, mode p.IgnoredFiles = append(p.IgnoredFiles, file) return false } - return badFile(&MultiplePackageError{ - Dir: p.Dir, - Packages: []string{p.PkgName, pkg}, - Files: []string{fp.firstFile, base}, - }) + if !fp.allPackages { + return badFile(&MultiplePackageError{ + Dir: p.Dir, + Packages: []string{p.PkgName, pkg}, + Files: []string{fp.firstFile, base}, + }) + } } } @@ -282,7 +283,7 @@ func (fp *fileProcessor) add(pos token.Pos, root string, file *build.File, mode isTool := strings.HasSuffix(base, "_tool"+cueSuffix) if mode&importComment != 0 { - qcom, line := findimportComment(data) + qcom, line := findImportComment(data) if line != 0 { com, err := strconv.Unquote(qcom) if err != nil { @@ -296,23 +297,17 @@ func (fp *fileProcessor) add(pos token.Pos, root string, file *build.File, mode } } - for _, decl := range pf.Decls { - d, ok := decl.(*ast.ImportDecl) - if !ok { - continue + for _, spec := range pf.Imports { + quoted := spec.Path.Value + path, err := strconv.Unquote(quoted) + if err != nil { + badFile(errors.Newf( + spec.Path.Pos(), + "%s: parser returned invalid quoted string: <%s>", fullPath, quoted, + )) } - for _, spec := range d.Specs { - quoted := spec.Path.Value - path, err := strconv.Unquote(quoted) - if err != nil { - badFile(errors.Newf( - spec.Path.Pos(), - "%s: parser returned invalid quoted string: <%s>", fullPath, quoted, - )) - } - if !isTest || fp.c.Tests { - fp.imported[path] = append(fp.imported[path], spec.Pos()) - } + if !isTest || fp.c.Tests { + fp.imported[path] = append(fp.imported[path], spec.Pos()) } } switch { @@ -338,7 +333,7 @@ func (fp *fileProcessor) add(pos token.Pos, root string, file *build.File, mode return true } -func findimportComment(data []byte) (s string, line int) { +func findImportComment(data []byte) (s string, line int) { // expect keyword package word, data := parseWord(data) if string(word) != "package" { @@ -447,8 +442,7 @@ 
func isLocalImport(path string) bool { func warnUnmatched(matches []*match) { for _, m := range matches { if len(m.Pkgs) == 0 { - m.Err = - errors.Newf(token.NoPos, "cue: %q matched no packages\n", m.Pattern) + m.Err = errors.Newf(token.NoPos, "cue: %q matched no packages", m.Pattern) } } } @@ -469,14 +463,14 @@ func cleanPatterns(patterns []string) []string { a = strings.Replace(a, `\`, `/`, -1) } - // Put argument in canonical form, but preserve leading ./. + // Put argument in canonical form, but preserve leading "./". if strings.HasPrefix(a, "./") { - a = "./" + path.Clean(a) + a = "./" + pathpkg.Clean(a) if a == "./." { a = "." } - } else { - a = path.Clean(a) + } else if a != "" { + a = pathpkg.Clean(a) } out = append(out, a) } @@ -484,26 +478,12 @@ func cleanPatterns(patterns []string) []string { } // isMetaPackage checks if name is a reserved package name that expands to multiple packages. +// TODO: none of these package names are actually recognized anywhere else +// and at least one (cmd) doesn't seem like it belongs in the CUE world. func isMetaPackage(name string) bool { return name == "std" || name == "cmd" || name == "all" } -// hasPathPrefix reports whether the path s begins with the -// elements in prefix. -func hasPathPrefix(s, prefix string) bool { - switch { - default: - return false - case len(s) == len(prefix): - return s == prefix - case len(s) > len(prefix): - if prefix != "" && prefix[len(prefix)-1] == '/' { - return strings.HasPrefix(s, prefix) - } - return s[len(prefix)] == '/' && s[:len(prefix)] == prefix - } -} - // hasFilepathPrefix reports whether the path s begins with the // elements in prefix. func hasFilepathPrefix(s, prefix string) bool { @@ -519,100 +499,3 @@ func hasFilepathPrefix(s, prefix string) bool { return s[len(prefix)] == filepath.Separator && s[:len(prefix)] == prefix } } - -// isStandardImportPath reports whether $GOROOT/src/path should be considered -// part of the standard distribution. For historical reasons we allow people to add -// their own code to $GOROOT instead of using $GOPATH, but we assume that -// code will start with a domain name (dot in the first element). -// -// Note that this function is meant to evaluate whether a directory found in GOROOT -// should be treated as part of the standard library. It should not be used to decide -// that a directory found in GOPATH should be rejected: directories in GOPATH -// need not have dots in the first element, and they just take their chances -// with future collisions in the standard library. -func isStandardImportPath(path string) bool { - i := strings.Index(path, "/") - if i < 0 { - i = len(path) - } - elem := path[:i] - return !strings.Contains(elem, ".") -} - -// isRelativePath reports whether pattern should be interpreted as a directory -// path relative to the current directory, as opposed to a pattern matching -// import paths. -func isRelativePath(pattern string) bool { - return strings.HasPrefix(pattern, "./") || strings.HasPrefix(pattern, "../") || pattern == "." || pattern == ".." -} - -// inDir checks whether path is in the file tree rooted at dir. -// If so, inDir returns an equivalent path relative to dir. -// If not, inDir returns an empty string. -// inDir makes some effort to succeed even in the presence of symbolic links. -// TODO(rsc): Replace internal/test.inDir with a call to this function for Go 1.12. 
-func inDir(path, dir string) string { - if rel := inDirLex(path, dir); rel != "" { - return rel - } - xpath, err := filepath.EvalSymlinks(path) - if err != nil || xpath == path { - xpath = "" - } else { - if rel := inDirLex(xpath, dir); rel != "" { - return rel - } - } - - xdir, err := filepath.EvalSymlinks(dir) - if err == nil && xdir != dir { - if rel := inDirLex(path, xdir); rel != "" { - return rel - } - if xpath != "" { - if rel := inDirLex(xpath, xdir); rel != "" { - return rel - } - } - } - return "" -} - -// inDirLex is like inDir but only checks the lexical form of the file names. -// It does not consider symbolic links. -// TODO(rsc): This is a copy of str.HasFilePathPrefix, modified to -// return the suffix. Most uses of str.HasFilePathPrefix should probably -// be calling InDir instead. -func inDirLex(path, dir string) string { - pv := strings.ToUpper(filepath.VolumeName(path)) - dv := strings.ToUpper(filepath.VolumeName(dir)) - path = path[len(pv):] - dir = dir[len(dv):] - switch { - default: - return "" - case pv != dv: - return "" - case len(path) == len(dir): - if path == dir { - return "." - } - return "" - case dir == "": - return path - case len(path) > len(dir): - if dir[len(dir)-1] == filepath.Separator { - if path[:len(dir)] == dir { - return path[len(dir):] - } - return "" - } - if path[len(dir)] == filepath.Separator && path[:len(dir)] == dir { - if len(path) == len(dir)+1 { - return "." - } - return path[len(dir)+1:] - } - return "" - } -} diff --git a/vendor/cuelang.org/go/cue/load/match.go b/vendor/cuelang.org/go/cue/load/match.go index 487948d0c6..99720be020 100644 --- a/vendor/cuelang.org/go/cue/load/match.go +++ b/vendor/cuelang.org/go/cue/load/match.go @@ -15,14 +15,13 @@ package load import ( - "io" "path/filepath" - "regexp" "strings" "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" + "cuelang.org/go/internal/cueimports" ) // A match represents the result of matching a single package pattern. @@ -50,31 +49,18 @@ func (e excludeError) Is(err error) bool { return err == errExclude } // considers text until the first non-comment. // If allTags is non-nil, matchFile records any encountered build tag // by setting allTags[tag] = true. -func matchFile(cfg *Config, file *build.File, returnImports, allFiles bool, allTags map[string]bool) (match bool, data []byte, err errors.Error) { - if fi := cfg.fileSystem.getOverlay(file.Filename); fi != nil { - if fi.file != nil { - file.Source = fi.file - } else { - file.Source = fi.contents - } - } - +func matchFile(cfg *Config, file *build.File, returnImports bool, allTags map[string]bool, mode importMode) (match bool, data []byte, err errors.Error) { + // Note: file.Source should already have been set by setFileSource just + // after the build.File value was created. if file.Encoding != build.CUE { return false, nil, nil // not a CUE file, don't record. 
} - if file.Filename == "-" { - b, err2 := io.ReadAll(cfg.stdin()) - if err2 != nil { - err = errors.Newf(token.NoPos, "read stdin: %v", err) - return - } - file.Source = b - return true, b, nil // don't check shouldBuild for stdin + return true, file.Source.([]byte), nil // don't check shouldBuild for stdin } name := filepath.Base(file.Filename) - if !cfg.filesMode { + if (mode & allowExcludedFiles) == 0 { for _, prefix := range []string{".", "_"} { if strings.HasPrefix(name, prefix) { return false, nil, &excludeError{ @@ -89,7 +75,7 @@ func matchFile(cfg *Config, file *build.File, returnImports, allFiles bool, allT return false, nil, err } - data, err = readImports(f, false, nil) + data, err = cueimports.Read(f) f.Close() if err != nil { return false, nil, @@ -98,87 +84,3 @@ func matchFile(cfg *Config, file *build.File, returnImports, allFiles bool, allT return true, data, nil } - -// treeCanMatchPattern(pattern)(name) reports whether -// name or children of name can possibly match pattern. -// Pattern is the same limited glob accepted by matchPattern. -func treeCanMatchPattern(pattern string) func(name string) bool { - wildCard := false - if i := strings.Index(pattern, "..."); i >= 0 { - wildCard = true - pattern = pattern[:i] - } - return func(name string) bool { - return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || - wildCard && strings.HasPrefix(name, pattern) - } -} - -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -// Unfortunately, there are two special cases. Quoting "go help packages": -// -// First, /... at the end of the pattern can match an empty string, -// so that net/... matches both net and packages in its subdirectories, like net/http. -// Second, any slash-separted pattern element containing a wildcard never -// participates in a match of the "vendor" element in the path of a vendored -// package, so that ./... does not match packages in subdirectories of -// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. -// Note, however, that a directory named vendor that itself contains code -// is not a vendored package: cmd/vendor would be a command named vendor, -// and the pattern cmd/... matches it. -func matchPattern(pattern string) func(name string) bool { - // Convert pattern to regular expression. - // The strategy for the trailing /... is to nest it in an explicit ? expression. - // The strategy for the vendor exclusion is to change the unmatchable - // vendor strings to a disallowed code point (vendorChar) and to use - // "(anything but that codepoint)*" as the implementation of the ... wildcard. - // This is a bit complicated but the obvious alternative, - // namely a hand-written search like in most shell glob matchers, - // is too easy to make accidentally exponential. - // Using package regexp guarantees linear-time matching. 
- - const vendorChar = "\x00" - - if strings.Contains(pattern, vendorChar) { - return func(name string) bool { return false } - } - - re := regexp.QuoteMeta(pattern) - re = replaceVendor(re, vendorChar) - switch { - case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): - re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` - case re == vendorChar+`/\.\.\.`: - re = `(/vendor|/` + vendorChar + `/\.\.\.)` - case strings.HasSuffix(re, `/\.\.\.`): - re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` - } - re = strings.Replace(re, `\.\.\.`, `[^`+vendorChar+`]*`, -1) - - reg := regexp.MustCompile(`^` + re + `$`) - - return func(name string) bool { - if strings.Contains(name, vendorChar) { - return false - } - return reg.MatchString(replaceVendor(name, vendorChar)) - } -} - -// replaceVendor returns the result of replacing -// non-trailing vendor path elements in x with repl. -func replaceVendor(x, repl string) string { - if !strings.Contains(x, "vendor") { - return x - } - elem := strings.Split(x, "/") - for i := 0; i < len(elem)-1; i++ { - if elem[i] == "vendor" { - elem[i] = repl - } - } - return strings.Join(elem, "/") -} diff --git a/vendor/cuelang.org/go/cue/load/module.go b/vendor/cuelang.org/go/cue/load/module.go deleted file mode 100644 index 2c85570a99..0000000000 --- a/vendor/cuelang.org/go/cue/load/module.go +++ /dev/null @@ -1,194 +0,0 @@ -package load - -import ( - "context" - "fmt" - "io" - "path/filepath" - "strings" - - "cuelang.org/go/cue/errors" - "cuelang.org/go/cue/token" - "cuelang.org/go/internal/mod/modfile" - "cuelang.org/go/internal/mod/module" - "cuelang.org/go/internal/mod/mvs" - "cuelang.org/go/internal/mod/semver" -) - -// loadModule loads the module file, resolves and downloads module -// dependencies. It sets c.Module if it's empty or checks it for -// consistency with the module file otherwise. -func (c *Config) loadModule() error { - // TODO: also make this work if run from outside the module? - mod := filepath.Join(c.ModuleRoot, modDir) - info, cerr := c.fileSystem.stat(mod) - if cerr != nil { - return nil - } - // TODO remove support for legacy non-directory module.cue file - // by returning an error if info.IsDir is false. - if info.IsDir() { - mod = filepath.Join(mod, moduleFile) - } - f, cerr := c.fileSystem.openFile(mod) - if cerr != nil { - return nil - } - defer f.Close() - data, err := io.ReadAll(f) - if err != nil { - return err - } - parseModFile := modfile.ParseNonStrict - if c.Registry == nil { - parseModFile = modfile.ParseLegacy - } - mf, err := parseModFile(data, mod) - if err != nil { - return err - } - c.modFile = mf - if mf.Module == "" { - // Backward compatibility: allow empty module.cue file. - // TODO maybe check that the rest of the fields are empty too? - return nil - } - if c.Module != "" && c.Module != mf.Module { - return errors.Newf(token.NoPos, "inconsistent modules: got %q, want %q", mf.Module, c.Module) - } - c.Module = mf.Module - return nil -} - -type dependencies struct { - mainModule *modfile.File - versions []module.Version -} - -// lookup returns the module corresponding to the given import path, and the relative path -// of the package beneath that. -// -// It assumes that modules are not nested. 
-func (deps *dependencies) lookup(pkgPath importPath) (v module.Version, subPath string, err error) { - type answer struct { - v module.Version - subPath string - } - var possible []answer - for _, dep := range deps.versions { - if subPath, ok := isParent(dep, pkgPath); ok { - possible = append(possible, answer{dep, subPath}) - } - } - switch len(possible) { - case 0: - return module.Version{}, "", fmt.Errorf("no dependency found for import path %q", pkgPath) - case 1: - return possible[0].v, possible[0].subPath, nil - } - var found *answer - for i, a := range possible { - dep, ok := deps.mainModule.Deps[a.v.Path()] - if ok && dep.Default { - if found != nil { - // More than one default. - // TODO this should be impossible and checked by modfile. - return module.Version{}, "", fmt.Errorf("more than one default module for import path %q", pkgPath) - } - found = &possible[i] - } - } - if found == nil { - return module.Version{}, "", fmt.Errorf("no default module found for import path %q", pkgPath) - } - return found.v, found.subPath, nil -} - -// resolveDependencies resolves all the versions of all the modules in the given module file, -// using regClient to fetch dependency information. -func resolveDependencies(mainModFile *modfile.File, regClient *registryClient) (*dependencies, error) { - vs, err := mvs.BuildList[module.Version](mainModFile.DepVersions(), &mvsReqs{ - mainModule: mainModFile, - regClient: regClient, - }) - if err != nil { - return nil, err - } - return &dependencies{ - mainModule: mainModFile, - versions: vs, - }, nil -} - -// mvsReqs implements mvs.Reqs by fetching information using -// regClient. -type mvsReqs struct { - module.Versions - mainModule *modfile.File - regClient *registryClient -} - -// Required implements mvs.Reqs.Required. -func (reqs *mvsReqs) Required(m module.Version) (vs []module.Version, err error) { - if m.Path() == reqs.mainModule.Module { - return reqs.mainModule.DepVersions(), nil - } - mf, err := reqs.regClient.fetchModFile(context.TODO(), m) - if err != nil { - return nil, err - } - return mf.DepVersions(), nil -} - -// Required implements mvs.Reqs.Max. -func (reqs *mvsReqs) Max(v1, v2 string) string { - if cmpVersion(v1, v2) < 0 { - return v2 - } - return v1 -} - -// cmpVersion implements the comparison for versions in the module loader. -// -// It is consistent with semver.Compare except that as a special case, -// the version "" is considered higher than all other versions. -// The main module (also known as the target) has no version and must be chosen -// over other versions of the same module in the module dependency graph. -func cmpVersion(v1, v2 string) int { - if v2 == "" { - if v1 == "" { - return 0 - } - return -1 - } - if v1 == "" { - return 1 - } - return semver.Compare(v1, v2) -} - -// isParent reports whether the module modv contains the package with the given -// path, and if so, returns its relative path within that module. -func isParent(modv module.Version, pkgPath importPath) (subPath string, ok bool) { - modBase := modv.BasePath() - pkgBase, pkgMajor, pkgHasVersion := module.SplitPathVersion(string(pkgPath)) - if !pkgHasVersion { - pkgBase = string(pkgPath) - } - - if !strings.HasPrefix(pkgBase, modBase) { - return "", false - } - if len(pkgBase) == len(modBase) { - subPath = "." - } else if pkgBase[len(modBase)] != '/' { - return "", false - } else { - subPath = pkgBase[len(modBase)+1:] - } - // It's potentially a match, but we need to check the major version too. 
- if !pkgHasVersion || semver.Major(modv.Version()) == pkgMajor { - return subPath, true - } - return "", false -} diff --git a/vendor/cuelang.org/go/cue/load/moduleschema.cue b/vendor/cuelang.org/go/cue/load/moduleschema.cue deleted file mode 100644 index 0f381f19ed..0000000000 --- a/vendor/cuelang.org/go/cue/load/moduleschema.cue +++ /dev/null @@ -1,23 +0,0 @@ -// module indicates the module's import path. -// For legacy reasons, we allow a missing module -// directory and an empty module directive. -module?: #Module | "" - -// deps holds dependency information for modules, keyed by module path. -deps?: [#Module]: { - // TODO numexist(<=1, replace, replaceAll) - // TODO numexist(>=1, v, exclude, replace, replaceAll) - - // v indicates the required version of the module. - // It is usually a #Semver but it can also name a branch - // of the module. cue mod tidy will rewrite such branch - // names to their canonical versions. - v?: string -} - -// #Module constraints a module path. -// TODO encode the module path rules as regexp: -// WIP: (([\-_~a-zA-Z0-9][.\-_~a-zA-Z0-9]*[\-_~a-zA-Z0-9])|([\-_~a-zA-Z0-9]))(/([\-_~a-zA-Z0-9][.\-_~a-zA-Z0-9]*[\-_~a-zA-Z0-9])|([\-_~a-zA-Z0-9]))* -#Module: =~"^[^@]+$" - -// TODO add the rest of the module schema definition. diff --git a/vendor/cuelang.org/go/cue/load/package.go b/vendor/cuelang.org/go/cue/load/package.go index 79759a5a3d..af3edfaffa 100644 --- a/vendor/cuelang.org/go/cue/load/package.go +++ b/vendor/cuelang.org/go/cue/load/package.go @@ -14,10 +14,6 @@ package load -import ( - "unicode/utf8" -) - // Package rules: // // - the package clause defines a namespace. @@ -30,20 +26,3 @@ import ( // The contents of a namespace depends on the directory that is selected as the // starting point to load a package. An instance defines a package-directory // pair. - -// safeArg reports whether arg is a "safe" command-line argument, -// meaning that when it appears in a command-line, it probably -// doesn't have some special meaning other than its own name. -// Obviously args beginning with - are not safe (they look like flags). -// Less obviously, args beginning with @ are not safe (they look like -// GNU binutils flagfile specifiers, sometimes called "response files"). -// To be conservative, we reject almost any arg beginning with non-alphanumeric ASCII. -// We accept leading . _ and / as likely in file system paths. -// There is a copy of this function in cmd/compile/internal/gc/noder.go. -func safeArg(name string) bool { - if name == "" { - return false - } - c := name[0] - return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf -} diff --git a/vendor/cuelang.org/go/cue/load/registry.go b/vendor/cuelang.org/go/cue/load/registry.go deleted file mode 100644 index c2efeb5933..0000000000 --- a/vendor/cuelang.org/go/cue/load/registry.go +++ /dev/null @@ -1,95 +0,0 @@ -package load - -import ( - "context" - "fmt" - "io" - "os" - "path" - "path/filepath" - - "cuelabs.dev/go/oci/ociregistry" - "cuelang.org/go/internal/mod/modfile" - "cuelang.org/go/internal/mod/modregistry" - "cuelang.org/go/internal/mod/module" - "cuelang.org/go/internal/mod/modzip" -) - -// registryClient implements the protocol for talking to -// the registry server. 
-type registryClient struct { - // TODO caching - client *modregistry.Client - cacheDir string -} - -// newRegistryClient returns a registry client that talks to -// the given base URL and stores downloaded module information -// in the given cache directory. It assumes that information -// in the registry is immutable, so if it's in the cache, a module -// will not be downloaded again. -func newRegistryClient(registry ociregistry.Interface, cacheDir string) (*registryClient, error) { - return ®istryClient{ - client: modregistry.NewClient(registry), - cacheDir: cacheDir, - }, nil -} - -// fetchModFile returns the parsed contents of the cue.mod/module.cue file -// for the given module. -func (c *registryClient) fetchModFile(ctx context.Context, m module.Version) (*modfile.File, error) { - data, err := c.fetchRawModFile(ctx, m) - if err != nil { - return nil, err - } - mf, err := modfile.Parse(data, path.Join(m.Path(), "cue.mod/module.cue")) - if err != nil { - return nil, err - } - return mf, nil -} - -// fetchModFile returns the contents of the cue.mod/module.cue file -// for the given module without parsing it. -func (c *registryClient) fetchRawModFile(ctx context.Context, mv module.Version) ([]byte, error) { - m, err := c.client.GetModule(ctx, mv) - if err != nil { - return nil, err - } - return m.ModuleFile(ctx) -} - -// getModContents downloads the module with the given version -// and returns the directory where it's stored. -func (c *registryClient) getModContents(ctx context.Context, mv module.Version) (string, error) { - modPath := filepath.Join(c.cacheDir, mv.String()) - if _, err := os.Stat(modPath); err == nil { - return modPath, nil - } - m, err := c.client.GetModule(ctx, mv) - if err != nil { - return "", err - } - r, err := m.GetZip(ctx) - if err != nil { - return "", err - } - defer r.Close() - zipfile := filepath.Join(c.cacheDir, mv.String()+".zip") - if err := os.MkdirAll(filepath.Dir(zipfile), 0o777); err != nil { - return "", fmt.Errorf("cannot create parent directory for zip file: %v", err) - } - f, err := os.Create(zipfile) - if err != nil { - return "", fmt.Errorf("cannot create zipfile: %v", err) - } - - defer f.Close() // TODO check error on close - if _, err := io.Copy(f, r); err != nil { - return "", fmt.Errorf("cannot copy data to zip file %q: %v", zipfile, err) - } - if err := modzip.Unzip(modPath, mv, zipfile); err != nil { - return "", fmt.Errorf("cannot unzip %v: %v", mv, err) - } - return modPath, nil -} diff --git a/vendor/cuelang.org/go/cue/load/search.go b/vendor/cuelang.org/go/cue/load/search.go index c821d1b796..622382c434 100644 --- a/vendor/cuelang.org/go/cue/load/search.go +++ b/vendor/cuelang.org/go/cue/load/search.go @@ -15,8 +15,7 @@ package load import ( - // TODO: remove this usage - + "fmt" "io/fs" "path" "path/filepath" @@ -25,6 +24,8 @@ import ( "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" "cuelang.org/go/cue/token" + "cuelang.org/go/internal/mod/modimports" + "cuelang.org/go/mod/module" ) // TODO: should be matched from module file only. @@ -122,7 +123,7 @@ func (l *loader) matchPackages(pattern, pkgName string) *match { // matchPackagesInFS is like allPackages but is passed a pattern // beginning ./ or ../, meaning it should scan the tree rooted // at the given directory. There are ... in the pattern too. -// (See go help packages for pattern syntax.) +// (See cue help inputs for pattern syntax.) 
func (l *loader) matchPackagesInFS(pattern, pkgName string) *match { c := l.cfg m := &match{ @@ -150,14 +151,12 @@ func (l *loader) matchPackagesInFS(pattern, pkgName string) *match { } pkgDir := filepath.Join(root, modDir) - // TODO(legacy): remove - pkgDir2 := filepath.Join(root, "pkg") _ = c.fileSystem.walk(root, func(path string, entry fs.DirEntry, err errors.Error) errors.Error { if err != nil || !entry.IsDir() { return nil } - if path == pkgDir || path == pkgDir2 { + if path == pkgDir { return skipDir } @@ -224,7 +223,7 @@ func (l *loader) importPaths(patterns []string) []*match { return matches } -// importPathsQuiet is like ImportPaths but does not warn about patterns with no matches. +// importPathsQuiet is like importPaths but does not warn about patterns with no matches. func (l *loader) importPathsQuiet(patterns []string) []*match { var out []*match for _, a := range cleanPatterns(patterns) { @@ -269,3 +268,335 @@ func (l *loader) importPathsQuiet(patterns []string) []*match { } return out } + +type resolvedPackageArg struct { + // The original field may be needed once we want to replace the original + // package pattern matching code, as it is necessary to populate Instance.DisplayPath. + original string + resolvedCanonical string +} + +func expandPackageArgs(c *Config, pkgArgs []string, pkgQual string, tg *tagger) ([]resolvedPackageArg, error) { + expanded := make([]resolvedPackageArg, 0, len(pkgArgs)) + for _, p := range pkgArgs { + var err error + expanded, err = appendExpandedPackageArg(c, expanded, p, pkgQual, tg) + if err != nil { + return nil, err + } + } + return expanded, nil +} + +// appendExpandedPackageArg appends all the package paths matched by p to pkgPaths +// and returns the result. It also cleans the paths and makes them absolute. +// +// pkgQual is used to determine which packages to match when wildcards are expanded. +// Its semantics follow those of [Config.Package]. +func appendExpandedPackageArg(c *Config, pkgPaths []resolvedPackageArg, p string, pkgQual string, tg *tagger) ([]resolvedPackageArg, error) { + origp := p + if filepath.IsAbs(p) { + return nil, fmt.Errorf("cannot use absolute directory %q as package path", p) + } + // Arguments are supposed to be import paths, but + // as a courtesy to Windows developers, rewrite \ to / + // in command-line arguments. Handles .\... and so on. + p = filepath.ToSlash(p) + + ip := module.ParseImportPath(p) + + isRel := strings.HasPrefix(ip.Path, "./") + // Put argument in canonical form. + ip.Path = path.Clean(ip.Path) + if isRel && ip.Path != "." { + // Preserve leading "./". + ip.Path = "./" + ip.Path + } + isLocal := isLocalImport(ip.Path) + // Note that when c.Module is empty, c.ModuleRoot is sometimes, + // but not always, the same as c.Dir. Specifically it might point + // to the directory containing a cue.mod directory even if that + // directory doesn't actually contain a module.cue file. + moduleRoot := c.ModuleRoot + if isLocal { + if c.Module != "" { + // Make local import paths into absolute paths inside + // the module root. + absPath := path.Join(c.Dir, ip.Path) + pkgPath, err := importPathFromAbsDir(c, absPath, origp) + if err != nil { + return nil, err + } + ip1 := module.ParseImportPath(string(pkgPath)) + // Leave ip.Qualifier and ip.ExplicitQualifier intact. + ip.Path = ip1.Path + ip.Version = ip1.Version + } else { + // There's no module, so we can't make + // the import path absolute. 
+ moduleRoot = c.Dir + } + } + if !strings.Contains(ip.Path, "...") { + if isLocal && !ip.ExplicitQualifier { + // A package qualifier has not been explicitly specified for a local + // import path so we need to walk the package directory to find the + // packages in it. We have a special rule for local imports because it's + // inconvenient always to have to specify a package qualifier when + // there's only one package in the current directory but the last + // component of its package path does not match its name. + return appendExpandedUnqualifiedPackagePath(pkgPaths, origp, ip, pkgQual, module.SourceLoc{ + FS: c.fileSystem.ioFS(moduleRoot), + Dir: ".", + }, c.Module, tg) + } + return append(pkgPaths, resolvedPackageArg{origp, ip.Canonical().String()}), nil + } + // Strip the module prefix, leaving only the directory relative + // to the module root. + ip, ok := cutModulePrefix(ip, c.Module) + if !ok { + return nil, fmt.Errorf("pattern not allowed in external package path %q", origp) + } + return appendExpandedWildcardPackagePath(pkgPaths, ip, pkgQual, module.SourceLoc{ + FS: c.fileSystem.ioFS(moduleRoot), + Dir: ".", + }, c.Module, tg) +} + +// appendExpandedUnqualifiedPackagePath expands the given import path, +// which is relative to the root of the module, into its resolved and +// qualified package paths according to the following rules (the first rule +// that applies is used) +// +// 1. if pkgQual is "*", it chooses all the packages present in the +// package directory. +// 2. if pkgQual is "_", it looks for a package file with no package name. +// 3. if there's a package named after ip.Qualifier it chooses that +// 4. if there's exactly one package in the directory it will choose that. +// 5. if there's more than one package in the directory, it returns a MultiplePackageError. +// 6. if there are no package files in the directory, it just appends the import path as is, leaving it +// to later logic to produce an error in this case. +func appendExpandedUnqualifiedPackagePath(pkgPaths []resolvedPackageArg, origp string, ip module.ImportPath, pkgQual string, mainModRoot module.SourceLoc, mainModPath string, tg *tagger) (_ []resolvedPackageArg, _err error) { + ipRel, ok := cutModulePrefix(ip, mainModPath) + if !ok { + // Should never happen. + return nil, fmt.Errorf("internal error: local import path %q in module %q has resulted in non-internal package %q", origp, mainModPath, ip) + } + dir := path.Join(mainModRoot.Dir, ipRel.Path) + info, err := fs.Stat(mainModRoot.FS, dir) + if err != nil { + // The package directory doesn't exist. + // Treat it like an empty directory and let later logic deal with it. + return append(pkgPaths, resolvedPackageArg{origp, ip.String()}), nil + } + if !info.IsDir() { + return nil, fmt.Errorf("%s is a file and not a package directory", origp) + } + iter := modimports.PackageFiles(mainModRoot.FS, dir, "*") + + // 1. if pkgQual is "*", it appends all the packages present in the package directory. + if pkgQual == "*" { + wasAdded := make(map[string]bool) + iter(func(f modimports.ModuleFile, err error) bool { + if err != nil { + _err = err + return false + } + if err := shouldBuildFile(f.Syntax, tg); err != nil { + // Later build logic should pick up and report the same error. 
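The six rules documented for appendExpandedUnqualifiedPackagePath are easier to see in isolation. Below is a hypothetical in-memory rendering of rules 2-5; the real code drives them from a modimports iterator, and names such as resolveQualifier are invented for illustration:

    package main

    import "fmt"

    // resolveQualifier applies rules 2-5 over the package names found in a
    // single directory. Rule 1 ("*" selects everything) and rule 6 (no
    // package files at all) are handled by the caller in the real code.
    func resolveQualifier(names []string, qualifier, pkgQual string) (string, error) {
    	var candidates []string
    	for _, n := range names {
    		// Rule 2: pkgQual "_" selects a file with no package name.
    		// Rule 3: a package named after the qualifier wins outright.
    		if (n != "" && n == qualifier) || (pkgQual == "_" && n == "") {
    			return n, nil
    		}
    		if n != "" {
    			candidates = append(candidates, n)
    		}
    	}
    	if len(candidates) == 0 {
    		return "", fmt.Errorf("no package files") // rule 6; reported later in the real code
    	}
    	for _, n := range candidates[1:] {
    		if n != candidates[0] {
    			// Rule 5: more than one distinct package is an error.
    			return "", fmt.Errorf("multiple packages: %s and %s", candidates[0], n)
    		}
    	}
    	// Rule 4: exactly one package in the directory.
    	return candidates[0], nil
    }

    func main() {
    	fmt.Println(resolveQualifier([]string{"foo", "bar"}, "bar", ""))
    }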
+ return true + } + pkgName := f.Syntax.PackageName() + if wasAdded[pkgName] { + return true + } + wasAdded[pkgName] = true + ip := ip + ip.Qualifier = pkgName + p := ip.String() + pkgPaths = append(pkgPaths, resolvedPackageArg{p, p}) + return true + }) + if _err != nil { + return nil, _err + } + return pkgPaths, nil + } + var files []modimports.ModuleFile + foundQualifier := false + // TODO(rog): for f, err := range iter { + iter(func(f modimports.ModuleFile, err error) bool { + if err != nil { + _err = err + return false + } + pkgName := f.Syntax.PackageName() + // 2. if pkgQual is "_", it looks for a package file with no package name. + // 3. if there's a package named after ip.Qualifier it chooses that + if (pkgName != "" && pkgName == ip.Qualifier) || (pkgQual == "_" && pkgName == "") { + foundQualifier = true + return false + } + if pkgName != "" { + files = append(files, f) + } + return true + }) + if _err != nil { + return nil, _err + } + if foundQualifier { + // We found the actual package that was implied by the import path (or pkgQual == "_"). + // This takes precedence over anything else. + return append(pkgPaths, resolvedPackageArg{origp, ip.String()}), nil + } + if len(files) == 0 { + // 6. if there are no package files in the directory, it just appends the import path as is, + // leaving it to later logic to produce an error in this case. + return append(pkgPaths, resolvedPackageArg{origp, ip.String()}), nil + } + pkgName := files[0].Syntax.PackageName() + for _, f := range files[1:] { + // 5. if there's more than one package in the directory, it returns a MultiplePackageError. + if pkgName1 := f.Syntax.PackageName(); pkgName1 != pkgName { + return nil, &MultiplePackageError{ + Dir: dir, + Packages: []string{pkgName, pkgName1}, + Files: []string{ + path.Base(files[0].FilePath), + path.Base(f.FilePath), + }, + } + } + } + // 4. if there's exactly one package in the directory it will choose that. + ip.Qualifier = pkgName + return append(pkgPaths, resolvedPackageArg{origp, ip.String()}), nil +} + +// appendExpandedWildcardPackagePath expands the given pattern into any packages that it matches +// and appends the results to pkgPaths. It returns an error if the pattern matches nothing. +// +// Note: +// * We know that pattern contains "..." +// * We know that pattern is relative to the module root +func appendExpandedWildcardPackagePath(pkgPaths []resolvedPackageArg, pattern module.ImportPath, pkgQual string, mainModRoot module.SourceLoc, mainModPath string, tg *tagger) (_ []resolvedPackageArg, _err error) { + modIpath := module.ParseImportPath(mainModPath) + // Find directory to begin the scan. + // Could be smarter but this one optimization is enough for now, + // since ... is usually at the end of a path. + // TODO: strip package qualifier. + i := strings.Index(pattern.Path, "...") + dir, _ := path.Split(pattern.Path[:i]) + dir = path.Join(mainModRoot.Dir, dir) + var isSelected func(string) bool + switch pkgQual { + case "_": + isSelected = func(pkgName string) bool { + return pkgName == "" + } + case "*": + isSelected = func(pkgName string) bool { + return true + } + case "": + isSelected = func(pkgName string) bool { + // The package ambiguity logic will be triggered if there's more than one + // package in the same directory. 
+ return pkgName != "" + } + default: + isSelected = func(pkgName string) bool { + return pkgName == pkgQual + } + } + + var prevFile modimports.ModuleFile + var prevImportPath module.ImportPath + iter := modimports.AllModuleFiles(mainModRoot.FS, dir) + iter(func(f modimports.ModuleFile, err error) bool { + if err != nil { + return false + } + if err := shouldBuildFile(f.Syntax, tg); err != nil { + // Later build logic should pick up and report the same error. + return true + } + pkgName := f.Syntax.PackageName() + if !isSelected(pkgName) { + return true + } + if pkgName == "" { + pkgName = "_" + } + ip := module.ImportPath{ + Path: path.Join(modIpath.Path, path.Dir(f.FilePath)), + Qualifier: pkgName, + Version: modIpath.Version, + } + if modIpath.Path == "" { + // There's no module, so make sure that the path still looks like a relative import path. + if !strings.HasPrefix(ip.Path, "../") { + ip.Path = "./" + ip.Path + } + } + if ip == prevImportPath { + // TODO(rog): this isn't sufficient for full deduplication: we can get an alternation of different + // package names within the same directory. We'll need to maintain a map. + return true + } + if pkgQual == "" { + // Note: we can look at the previous item only rather than maintaining a map + // because modimports.AllModuleFiles guarantees that files in the same + // package are always adjacent. + if prevFile.FilePath != "" && prevImportPath.Path == ip.Path && ip.Qualifier != prevImportPath.Qualifier { + // A wildcard isn't currently allowed to match multiple packages + // in a single directory. + _err = &MultiplePackageError{ + Dir: path.Dir(f.FilePath), + Packages: []string{prevImportPath.Qualifier, ip.Qualifier}, + Files: []string{ + path.Base(prevFile.FilePath), + path.Base(f.FilePath), + }, + } + return false + } + } + pkgPaths = append(pkgPaths, resolvedPackageArg{ip.String(), ip.String()}) + prevFile, prevImportPath = f, ip + return true + }) + return pkgPaths, _err +} + +// cutModulePrefix strips the given module path from p and reports whether p is inside mod. +// It returns a relative package path within m. +// +// If p does not contain a major version suffix but otherwise matches mod, it counts as a match. +func cutModulePrefix(p module.ImportPath, mod string) (module.ImportPath, bool) { + if mod == "" { + return p, true + } + modPath, modVers, ok := module.SplitPathVersion(mod) + if !ok { + modPath = mod + } + if !strings.HasPrefix(p.Path, modPath) { + return module.ImportPath{}, false + } + if p.Path == modPath { + p.Path = "." + return p, true + } + if p.Path[len(modPath)] != '/' { + return module.ImportPath{}, false + } + if p.Version != "" && modVers != "" && p.Version != modVers { + return module.ImportPath{}, false + } + p.Path = "." + p.Path[len(modPath):] + p.Version = "" + return p, true +} diff --git a/vendor/cuelang.org/go/cue/load/tags.go b/vendor/cuelang.org/go/cue/load/tags.go index 1f399ab5bb..89c4e268db 100644 --- a/vendor/cuelang.org/go/cue/load/tags.go +++ b/vendor/cuelang.org/go/cue/load/tags.go @@ -85,7 +85,10 @@ func DefaultTagVars() map[string]TagVar { "username": { Func: func() (ast.Expr, error) { u, err := user.Current() - return varToString(u.Username, err) + if err != nil { + return nil, err + } + return ast.NewString(u.Username), nil }, }, "hostname": { @@ -326,8 +329,8 @@ func (tg *tagger) injectTags(tags []string) errors.Error { // shouldBuildFile determines whether a File should be included based on its // attributes. 
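cutModulePrefix, added above, guards two edge cases that a bare strings.HasPrefix would miss: the match must end at a path-component boundary, and a major-version suffix on the module path (e.g. "@v1") must be ignored. A stdlib-only sketch of the same boundary logic, leaving out the import-path version comparison the real helper also performs:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // cutModPrefix reports whether pkg lies inside module mod and, if so,
    // returns the "./"-relative path. The "@vN" suffix on mod is ignored,
    // mirroring cutModulePrefix's use of module.SplitPathVersion.
    func cutModPrefix(pkg, mod string) (string, bool) {
    	modPath, _, _ := strings.Cut(mod, "@")
    	if pkg == modPath {
    		return ".", true
    	}
    	rest, ok := strings.CutPrefix(pkg, modPath+"/")
    	if !ok {
    		// "example.com/foobar" must not match module "example.com/foo".
    		return "", false
    	}
    	return "./" + rest, true
    }

    func main() {
    	fmt.Println(cutModPrefix("example.com/m/sub/pkg", "example.com/m@v1")) // ./sub/pkg true
    	fmt.Println(cutModPrefix("example.com/module2", "example.com/module")) // "" false
    }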
-func shouldBuildFile(f *ast.File, fp *fileProcessor) errors.Error { - tags := fp.c.Tags +func shouldBuildFile(f *ast.File, tagger *tagger) errors.Error { + tags := tagger.cfg.Tags a, errs := getBuildAttr(f) if errs != nil { @@ -349,7 +352,7 @@ func shouldBuildFile(f *ast.File, fp *fileProcessor) errors.Error { tagMap[t] = !strings.ContainsRune(t, '=') } - c := checker{tags: tagMap, tagger: fp.tagger} + c := checker{tags: tagMap, tagger: tagger} include := c.shouldInclude(expr) if c.err != nil { return c.err diff --git a/vendor/cuelang.org/go/cue/load/test.cue b/vendor/cuelang.org/go/cue/load/test.cue deleted file mode 100644 index 3b90117c22..0000000000 --- a/vendor/cuelang.org/go/cue/load/test.cue +++ /dev/null @@ -1,3 +0,0 @@ -package test - -"Hello world!" \ No newline at end of file diff --git a/vendor/cuelang.org/go/cue/marshal.go b/vendor/cuelang.org/go/cue/marshal.go index dabf86d987..6e1bbf9171 100644 --- a/vendor/cuelang.org/go/cue/marshal.go +++ b/vendor/cuelang.org/go/cue/marshal.go @@ -128,19 +128,20 @@ func (r *Runtime) Unmarshal(b []byte) ([]*Instance, error) { // // The stored instances are functionally the same, but preserving of file // information is only done on a best-effort basis. -func (r *Runtime) Marshal(instances ...*Instance) (b []byte, err error) { +func (r *Runtime) Marshal(values ...InstanceOrValue) (b []byte, err error) { staged := []instanceData{} done := map[string]int{} var errs errors.Error - var stageInstance func(i *Instance) (pos int) - stageInstance = func(i *Instance) (pos int) { - if p, ok := done[i.ImportPath]; ok { + var stageInstance func(i Value) (pos int) + stageInstance = func(i Value) (pos int) { + inst := i.BuildInstance() + if p, ok := done[inst.ImportPath]; ok { return p } // TODO: support exporting instance - file, _ := export.Def(r.runtime(), i.inst.ID(), i.root) + file, _ := export.Def(r.runtime(), inst.ID(), i.instance().root) imports := []string{} file.VisitImports(func(i *ast.ImportDecl) { for _, spec := range i.Specs { @@ -149,15 +150,14 @@ func (r *Runtime) Marshal(instances ...*Instance) (b []byte, err error) { } }) - if i.PkgName != "" { - p, name, _ := internal.PackageInfo(file) - if p == nil { - pkg := &ast.Package{Name: ast.NewIdent(i.PkgName)} + if inst.PkgName != "" { + pi := internal.GetPackageInfo(file) + if pi.Package == nil { + pkg := &ast.Package{Name: ast.NewIdent(inst.PkgName)} file.Decls = append([]ast.Decl{pkg}, file.Decls...) - } else if name != i.PkgName { - // p is guaranteed to be generated by Def, so it is "safe" to - // modify. - p.Name = ast.NewIdent(i.PkgName) + } else if pi.Name != inst.PkgName { + // pi is guaranteed to be generated by Def, so it is "safe" to modify. + pi.Package.Name = ast.NewIdent(inst.PkgName) } } @@ -165,12 +165,12 @@ func (r *Runtime) Marshal(instances ...*Instance) (b []byte, err error) { errs = errors.Append(errs, errors.Promote(err, "marshal")) filename := "unmarshal" - if i.inst != nil && len(i.inst.Files) == 1 { - filename = i.inst.Files[0].Filename + if len(inst.Files) == 1 { + filename = inst.Files[0].Filename - dir := i.Dir - if i.inst != nil && i.inst.Root != "" { - dir = i.inst.Root + dir := inst.Dir + if inst.Root != "" { + dir = inst.Root } if dir != "" { filename = filepath.FromSlash(filename) @@ -180,7 +180,7 @@ func (r *Runtime) Marshal(instances ...*Instance) (b []byte, err error) { } // TODO: this should probably be changed upstream, but as the path // is for reference purposes only, this is safe. 
- importPath := filepath.ToSlash(i.ImportPath) + importPath := filepath.ToSlash(i.instance().ImportPath) staged = append(staged, instanceData{ Path: importPath, @@ -194,14 +194,14 @@ func (r *Runtime) Marshal(instances ...*Instance) (b []byte, err error) { if i == nil || !strings.Contains(imp, ".") { continue // a builtin package. } - stageInstance(i) + stageInstance(i.Value()) } return p } - for _, i := range instances { - staged[stageInstance(i)].Root = true + for _, val := range values { + staged[stageInstance(val.Value())].Root = true } buf := &bytes.Buffer{} diff --git a/vendor/cuelang.org/go/cue/op.go b/vendor/cuelang.org/go/cue/op.go index 22b31a5d12..6f08e4e04a 100644 --- a/vendor/cuelang.org/go/cue/op.go +++ b/vendor/cuelang.org/go/cue/op.go @@ -15,7 +15,6 @@ package cue import ( - "cuelang.org/go/cue/token" "cuelang.org/go/internal/core/adt" ) @@ -60,123 +59,3 @@ const ( InterpolationOp Op = adt.InterpolationOp ) - -// isCmp reports whether an op is a comparator. -func (op op) isCmp() bool { - return opEql <= op && op <= opGeq -} - -func (op op) unifyType() (unchecked, ok bool) { - if op == opUnifyUnchecked { - return true, true - } - return false, op == opUnify -} - -type op uint16 - -const ( - opUnknown op = iota - - opUnify - opUnifyUnchecked - opDisjunction - - opLand - opLor - opNot - - opEql - opNeq - opMat - opNMat - - opLss - opGtr - opLeq - opGeq - - opAdd - opSub - opMul - opQuo - opRem - - opIDiv - opIMod - opIQuo - opIRem -) - -var opStrings = []string{ - opUnknown: "??", - - opUnify: "&", - // opUnifyUnchecked is internal only. Syntactically this is - // represented as embedding. - opUnifyUnchecked: "&!", - opDisjunction: "|", - - opLand: "&&", - opLor: "||", - opNot: "!", - - opEql: "==", - opNeq: "!=", - opMat: "=~", - opNMat: "!~", - - opLss: "<", - opGtr: ">", - opLeq: "<=", - opGeq: ">=", - - opAdd: "+", - opSub: "-", - opMul: "*", - opQuo: "/", - - opIDiv: "div", - opIMod: "mod", - opIQuo: "quo", - opIRem: "rem", -} - -func (op op) String() string { return opStrings[op] } - -var tokenMap = map[token.Token]op{ - token.OR: opDisjunction, // | - token.AND: opUnify, // & - - token.ADD: opAdd, // + - token.SUB: opSub, // - - token.MUL: opMul, // * - token.QUO: opQuo, // / - - token.IDIV: opIDiv, // div - token.IMOD: opIMod, // mod - token.IQUO: opIQuo, // quo - token.IREM: opIRem, // rem - - token.LAND: opLand, // && - token.LOR: opLor, // || - - token.EQL: opEql, // == - token.LSS: opLss, // < - token.GTR: opGtr, // > - token.NOT: opNot, // ! - - token.NEQ: opNeq, // != - token.LEQ: opLeq, // <= - token.GEQ: opGeq, // >= - token.MAT: opMat, // =~ - token.NMAT: opNMat, // !~ -} - -var opMap = map[op]token.Token{} - -func init() { - for t, o := range tokenMap { - opMap[o] = t - } -} diff --git a/vendor/cuelang.org/go/cue/parser/doc.go b/vendor/cuelang.org/go/cue/parser/doc.go index adde13989b..417eaff3b2 100644 --- a/vendor/cuelang.org/go/cue/parser/doc.go +++ b/vendor/cuelang.org/go/cue/parser/doc.go @@ -20,4 +20,4 @@ // The parser accepts a larger language than is syntactically permitted by the // CUE spec, for simplicity, and for improved robustness in the presence of // syntax errors. 
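The private op type and its token tables deleted above duplicated information that the public cue.Op constants (aliases of the adt ops) already expose; user code observes operators through Value.Expr rather than through any token-to-op mapping. A small usage sketch; the OrOp outcome for a plain disjunction is expected behavior, not something this diff itself asserts:

    package main

    import (
    	"fmt"

    	"cuelang.org/go/cue"
    	"cuelang.org/go/cue/cuecontext"
    )

    func main() {
    	v := cuecontext.New().CompileString("a: 1 | 2")
    	a := v.LookupPath(cue.ParsePath("a"))

    	// Expr exposes the operation as a cue.Op plus its operands,
    	// so no private token<->op table is needed.
    	op, args := a.Expr()
    	fmt.Println(op == cue.OrOp, len(args)) // expected: true 2
    }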
-package parser // import "cuelang.org/go/cue/parser" +package parser diff --git a/vendor/cuelang.org/go/cue/parser/interface.go b/vendor/cuelang.org/go/cue/parser/interface.go index af498773c5..337f225289 100644 --- a/vendor/cuelang.org/go/cue/parser/interface.go +++ b/vendor/cuelang.org/go/cue/parser/interface.go @@ -118,8 +118,10 @@ const ( ) // FileOffset specifies the File position info to use. +// +// Deprecated: this has no effect. func FileOffset(pos int) Option { - return func(p *parser) { p.offset = pos } + return func(p *parser) {} } // A mode value is a set of flags (or 0). @@ -159,7 +161,7 @@ const ( func ParseFile(filename string, src interface{}, mode ...Option) (f *ast.File, err error) { // get source - text, err := source.Read(filename, src) + text, err := source.ReadAll(filename, src) if err != nil { return nil, err } @@ -172,9 +174,8 @@ func ParseFile(filename string, src interface{}, mode ...Option) (f *ast.File, e // set result values if f == nil { - // source is not a valid Go source file - satisfy - // ParseFile API and return a valid (but) empty - // *File + // source is not a valid CUE source file - satisfy + // ParseFile API and return a valid (but) empty *File f = &ast.File{ // Scope: NewScope(nil), } @@ -201,7 +202,7 @@ func ParseFile(filename string, src interface{}, mode ...Option) (f *ast.File, e // be nil. func ParseExpr(filename string, src interface{}, mode ...Option) (ast.Expr, error) { // get source - text, err := source.Read(filename, src) + text, err := source.ReadAll(filename, src) if err != nil { return nil, err } diff --git a/vendor/cuelang.org/go/cue/parser/parser.go b/vendor/cuelang.org/go/cue/parser/parser.go index 5a3e336b52..4b88a1cae3 100644 --- a/vendor/cuelang.org/go/cue/parser/parser.go +++ b/vendor/cuelang.org/go/cue/parser/parser.go @@ -25,15 +25,11 @@ import ( "cuelang.org/go/cue/scanner" "cuelang.org/go/cue/token" "cuelang.org/go/internal" - "cuelang.org/go/internal/astinternal" ) -var debugStr = astinternal.DebugStr - // The parser structure holds the parser's internal state. type parser struct { file *token.File - offset int errors errors.Error scanner scanner.Scanner @@ -53,11 +49,11 @@ type parser struct { lit string // token literal // Error recovery - // (used to limit the number of calls to syncXXX functions + // (used to limit the number of calls to sync... functions // w/o making scanning progress - avoids potential endless // loops across multiple parser functions during error recovery) syncPos token.Pos // last synchronization position - syncCnt int // number of calls to syncXXX without progress + syncCnt int // number of calls to sync... functions without progress // Non-syntactic parser control exprLev int // < 0: in control clause, >= 0: in expression @@ -68,11 +64,10 @@ type parser struct { } func (p *parser) init(filename string, src []byte, mode []Option) { - p.offset = -1 for _, f := range mode { f(p) } - p.file = token.NewFile(filename, p.offset, len(src)) + p.file = token.NewFile(filename, -1, len(src)) var m scanner.Mode if p.mode&parseCommentsMode != 0 { @@ -116,7 +111,7 @@ func (p *parser) openComments() *commentState { groups = append(groups, cg) } } - groups = append(groups, c.lastChild.Comments()...) + groups = append(groups, ast.Comments(c.lastChild)...) 
for _, cg := range c.groups { if cg.Position != 0 { cg.Position = c.lastPos @@ -167,7 +162,7 @@ func (p *parser) closeList() { if c.lastChild != nil { for _, cg := range c.groups { cg.Position = c.lastPos - c.lastChild.AddComment(cg) + ast.AddComment(c.lastChild, cg) } c.groups = nil } @@ -208,7 +203,7 @@ func (c *commentState) closeNode(p *parser, n ast.Node) ast.Node { for _, cg := range c.groups { if n != nil { if cg != nil { - n.AddComment(cg) + ast.AddComment(n, cg) } } } @@ -1672,7 +1667,7 @@ func (p *parser) parseFile() *ast.File { c := p.comments // Don't bother parsing the rest if we had errors scanning the first - // Likely not a Go source file at all. + // Likely not a CUE source file at all. if p.errors != nil { return nil } diff --git a/vendor/cuelang.org/go/cue/path.go b/vendor/cuelang.org/go/cue/path.go index 41cfd4753b..cfa03c3dc5 100644 --- a/vendor/cuelang.org/go/cue/path.go +++ b/vendor/cuelang.org/go/cue/path.go @@ -467,9 +467,9 @@ func isHiddenOrDefinition(s string) bool { return strings.HasPrefix(s, "#") || strings.HasPrefix(s, "_") } -// Hid returns a selector for a hidden field. It panics is pkg is empty. +// Hid returns a selector for a hidden field. It panics if pkg is empty. // Hidden fields are scoped by package, and pkg indicates for which package -// the hidden field must apply.For anonymous packages, it must be set to "_". +// the hidden field must apply. For anonymous packages, it must be set to "_". func Hid(name, pkg string) Selector { if !ast.IsValidIdent(name) { panic(fmt.Sprintf("invalid identifier %s", name)) diff --git a/vendor/cuelang.org/go/cue/scanner/scanner.go b/vendor/cuelang.org/go/cue/scanner/scanner.go index b1f7d9e993..edf483becc 100644 --- a/vendor/cuelang.org/go/cue/scanner/scanner.go +++ b/vendor/cuelang.org/go/cue/scanner/scanner.go @@ -15,7 +15,7 @@ // Package scanner implements a scanner for CUE source text. It takes a []byte // as source which can then be tokenized through repeated calls to the Scan // method. -package scanner // import "cuelang.org/go/cue/scanner" +package scanner import ( "bytes" diff --git a/vendor/cuelang.org/go/cue/token/position.go b/vendor/cuelang.org/go/cue/token/position.go index 577254f82a..a0542df4ad 100644 --- a/vendor/cuelang.org/go/cue/token/position.go +++ b/vendor/cuelang.org/go/cue/token/position.go @@ -123,16 +123,16 @@ const ( // not rendered at all. Elided - // NoSpace indicates there is no whitespace after this token. + // NoSpace indicates there is no whitespace before this token. NoSpace - // Blank means there is horizontal space after this token. + // Blank means there is horizontal space before this token. Blank - // Newline means there is a single newline after this token. + // Newline means there is a single newline before this token. Newline - // NewSection means there are two or more newlines after this token. + // NewSection means there are two or more newlines before this token. NewSection relMask = 0xf @@ -149,7 +149,7 @@ func (p RelPos) Pos() Pos { return Pos{nil, int(p)} } -// HasRelPos repors whether p has a relative position. +// HasRelPos reports whether p has a relative position. func (p Pos) HasRelPos() bool { return p.offset&relMask != 0 @@ -175,7 +175,7 @@ func (p Pos) IsValid() bool { } // IsNewline reports whether the relative information suggests this node should -// be printed on a new lien. +// be printed on a new line. 
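The corrected RelPos comments above change how the constants should be read: a RelPos describes the whitespace before its token, not after it, which is what the formatter consults when deciding whether a node opens a new line or section. A quick sketch using only APIs visible in this hunk; the offset and the NewSection choice are illustrative:

    package main

    import (
    	"fmt"

    	"cuelang.org/go/cue/token"
    )

    func main() {
    	src := []byte("a: 1\n\nb: 2\n")
    	f := token.NewFile("example.cue", -1, len(src))
    	f.SetLinesForContent(src)

    	// "b" at offset 6 follows a blank line, so NewSection records that
    	// two or more newlines precede the token.
    	p := f.Pos(6, token.NewSection)
    	fmt.Println(p.HasRelPos(), p.IsNewline()) // true true (NewSection >= Newline)
    }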
func (p Pos) IsNewline() bool { return p.RelPos() >= Newline } @@ -199,26 +199,35 @@ func toPos(x index) int { // ----------------------------------------------------------------------------- // File +// index represents an offset into the file. +// It's 1-based rather than zero-based so that +// we can distinguish the zero Pos from a Pos that +// just has a zero offset. type index int // A File has a name, size, and line offset table. type File struct { mutex sync.RWMutex name string // file name as provided to AddFile - base index // Pos index range for this file is [base...base+size] - size index // file size as provided to AddFile + // base is deprecated and stored only so that [File.Base] + // can continue to return the same value passed to [NewFile]. + base index + size index // file size as provided to AddFile // lines and infos are protected by set.mutex lines []index // lines contains the offset of the first character for each line (the first entry is always 0) infos []lineInfo } -// NewFile returns a new file. -func NewFile(filename string, base, size int) *File { - if base < 0 { - base = 1 +// NewFile returns a new file with the given OS file name. The size provides the +// size of the whole file. +// +// The second argument is deprecated. It has no effect. +func NewFile(filename string, deprecatedBase, size int) *File { + if deprecatedBase < 0 { + deprecatedBase = 1 } - return &File{sync.RWMutex{}, filename, index(base), index(size), []index{0}, nil} + return &File{sync.RWMutex{}, filename, index(deprecatedBase), index(size), []index{0}, nil} } // Name returns the file name of file f as registered with AddFile. @@ -226,12 +235,14 @@ func (f *File) Name() string { return f.name } -// Base returns the base offset of file f as registered with AddFile. +// Base returns the base offset of file f as passed to NewFile. +// +// Deprecated: this method just returns the (deprecated) second argument passed to NewFile. func (f *File) Base() int { return int(f.base) } -// Size returns the size of file f as registered with AddFile. +// Size returns the size of file f as passed to NewFile. func (f *File) Size() int { return int(f.size) } @@ -278,6 +289,19 @@ func (f *File) MergeLine(line int) { f.lines = f.lines[:len(f.lines)-1] } +// Lines returns the effective line offset table of the form described by [File.SetLines]. +// Callers must not mutate the result. +func (f *File) Lines() []int { + var lines []int + f.mutex.Lock() + // Unfortunate that we have to loop, but we use our own type. + for _, line := range f.lines { + lines = append(lines, int(line)) + } + f.mutex.Unlock() + return lines +} + // SetLines sets the line offsets for a file and reports whether it succeeded. // The line offsets are the offsets of the first character of each line; // for instance for the content "ab\nc\n" the line offsets are {0, 3}. @@ -359,7 +383,7 @@ func (f *File) Pos(offset int, rel RelPos) Pos { if index(offset) > f.size { panic("illegal file offset") } - return Pos{f, toPos(f.base+index(offset)) + int(rel)} + return Pos{f, toPos(1+index(offset)) + int(rel)} } // Offset returns the offset for the given file position p; @@ -367,10 +391,10 @@ func (f *File) Pos(offset int, rel RelPos) Pos { // f.Offset(f.Pos(offset)) == offset. 
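With the base now effectively fixed at 1, a position is a pure function of its file offset, and NewFile's second argument survives only so that File.Base can echo it back. The documented invariant f.Offset(f.Pos(offset)) == offset and the new Lines accessor can be exercised directly; a sketch, reusing the "ab\nc\n" example from the SetLines documentation:

    package main

    import (
    	"fmt"

    	"cuelang.org/go/cue/token"
    )

    func main() {
    	src := []byte("ab\nc\n")
    	f := token.NewFile("example.cue", -1, len(src)) // second argument is deprecated and ignored

    	// Line offsets are the offsets of each line's first character:
    	// {0, 3} for "ab\nc\n", as documented for SetLines.
    	f.SetLinesForContent(src)
    	fmt.Println(f.Lines()) // [0 3]

    	// Positions round-trip through offsets regardless of any base.
    	p := f.Pos(3, token.NoRelPos)
    	fmt.Println(f.Offset(p) == 3) // true
    }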
func (f *File) Offset(p Pos) int { x := p.index() - if x < f.base || x > f.base+index(f.size) { + if x < 1 || x > 1+index(f.size) { panic("illegal Pos value") } - return int(x - f.base) + return int(x - 1) } // Line returns the line number for the given file position p; @@ -405,7 +429,7 @@ func (f *File) unpack(offset index, adjusted bool) (filename string, line, colum } func (f *File) position(p Pos, adjusted bool) (pos Position) { - offset := p.index() - f.base + offset := p.index() - 1 pos.Offset = int(offset) pos.Filename, pos.Line, pos.Column = f.unpack(offset, adjusted) return @@ -418,7 +442,7 @@ func (f *File) position(p Pos, adjusted bool) (pos Position) { func (f *File) PositionFor(p Pos, adjusted bool) (pos Position) { x := p.index() if p != NoPos { - if x < f.base || x > f.base+f.size { + if x < 1 || x > 1+f.size { panic("illegal Pos value") } pos = f.position(p, adjusted) diff --git a/vendor/cuelang.org/go/cue/token/token.go b/vendor/cuelang.org/go/cue/token/token.go index f09d2a38c4..05579d4049 100644 --- a/vendor/cuelang.org/go/cue/token/token.go +++ b/vendor/cuelang.org/go/cue/token/token.go @@ -14,36 +14,43 @@ // Package token defines constants representing the lexical tokens of the Go // programming language and basic operations on tokens (printing, predicates). -package token // import "cuelang.org/go/cue/token" - -import "strconv" +package token // Token is the set of lexical tokens of the CUE configuration language. type Token int +//go:generate go run golang.org/x/tools/cmd/stringer -type=Token -linecomment + // The list of tokens. const ( // Special tokens ILLEGAL Token = iota EOF COMMENT - ATTRIBUTE // @foo(bar,baz=4) + // e.g. @foo(bar,baz=4) + ATTRIBUTE - literalBeg // Identifiers and basic type literals // (these tokens stand for classes of literals) - IDENT // main, _tmp - INT // 12_345Mi, 0700, 0xdeadbeef, 1.2M - FLOAT // 123.45, - // DURATION // 3m4s TODO - STRING // "abc" - INTERPOLATION // a part of a template string, e.g. `"age: \(` - BOTTOM // _|_ + literalBeg + // e.g. main, _tmp + IDENT + // e.g. 12_345Mi, 0700, 0xdeadbeef, 1.2M + INT + // e.g. 123.45 + FLOAT + // e.g. 3m4s; TODO + // DURATION + // e.g. "abc" + STRING + // a part of a template string, e.g. 
`"age: \(` + INTERPOLATION + BOTTOM // _|_ literalEnd - operatorBeg // Operators and delimiters + operatorBeg ADD // + SUB // - MUL // * @@ -92,106 +99,20 @@ const ( keywordBeg - IF - FOR - IN - LET - FUNC // experimental + IF // if + FOR // for + IN // in + LET // let + // experimental + FUNC // func - TRUE - FALSE - NULL + TRUE // true + FALSE // false + NULL // null keywordEnd ) -var tokens = [...]string{ - ILLEGAL: "ILLEGAL", - - EOF: "EOF", - COMMENT: "COMMENT", - - IDENT: "IDENT", - INT: "INT", - FLOAT: "FLOAT", - STRING: "STRING", - INTERPOLATION: "INTERPOLATION", - ATTRIBUTE: "ATTRIBUTE", - - ADD: "+", - SUB: "-", - MUL: "*", - POW: "^", - QUO: "/", - - IQUO: "quo", - IREM: "rem", - IDIV: "div", - IMOD: "mod", - - AND: "&", - OR: "|", - - LAND: "&&", - LOR: "||", - - BIND: "=", - EQL: "==", - LSS: "<", - GTR: ">", - NOT: "!", - ARROW: "<-", - - NEQ: "!=", - LEQ: "<=", - GEQ: ">=", - - MAT: "=~", - NMAT: "!~", - - LPAREN: "(", - LBRACK: "[", - LBRACE: "{", - COMMA: ",", - PERIOD: ".", - ELLIPSIS: "...", - - RPAREN: ")", - RBRACK: "]", - RBRACE: "}", - SEMICOLON: ";", - COLON: ":", - OPTION: "?", - - BOTTOM: "_|_", - - FALSE: "false", - TRUE: "true", - NULL: "null", - - FOR: "for", - IF: "if", - IN: "in", - LET: "let", - FUNC: "func", -} - -// String returns the string corresponding to the token tok. -// For operators, delimiters, and keywords the string is the actual -// token character sequence (e.g., for the token ADD, the string is -// "+"). For all other tokens the string corresponds to the token -// constant name (e.g. for the token IDENT, the string is "IDENT"). -func (tok Token) String() string { - s := "" - if 0 <= tok && tok < Token(len(tokens)) { - s = tokens[tok] - } - if s == "" { - s = "token(" + strconv.Itoa(int(tok)) + ")" - } - return s -} - // A set of constants for precedence-based expression parsing. // Non-operators have lowest precedence, followed by operators // starting with precedence 1 up to unary operators. The highest @@ -236,8 +157,8 @@ var keywords map[string]Token func init() { keywords = make(map[string]Token) - for i := keywordBeg + 1; i < keywordEnd; i++ { - keywords[tokens[i]] = i + for tok := keywordBeg + 1; tok < keywordEnd; tok++ { + keywords[tok.String()] = tok } } diff --git a/vendor/cuelang.org/go/cue/token/token_string.go b/vendor/cuelang.org/go/cue/token/token_string.go new file mode 100644 index 0000000000..e845d9c2c4 --- /dev/null +++ b/vendor/cuelang.org/go/cue/token/token_string.go @@ -0,0 +1,82 @@ +// Code generated by "stringer -type=Token -linecomment"; DO NOT EDIT. + +package token + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ILLEGAL-0] + _ = x[EOF-1] + _ = x[COMMENT-2] + _ = x[ATTRIBUTE-3] + _ = x[literalBeg-4] + _ = x[IDENT-5] + _ = x[INT-6] + _ = x[FLOAT-7] + _ = x[STRING-8] + _ = x[INTERPOLATION-9] + _ = x[BOTTOM-10] + _ = x[literalEnd-11] + _ = x[operatorBeg-12] + _ = x[ADD-13] + _ = x[SUB-14] + _ = x[MUL-15] + _ = x[POW-16] + _ = x[QUO-17] + _ = x[IQUO-18] + _ = x[IREM-19] + _ = x[IDIV-20] + _ = x[IMOD-21] + _ = x[AND-22] + _ = x[OR-23] + _ = x[LAND-24] + _ = x[LOR-25] + _ = x[BIND-26] + _ = x[EQL-27] + _ = x[LSS-28] + _ = x[GTR-29] + _ = x[NOT-30] + _ = x[ARROW-31] + _ = x[NEQ-32] + _ = x[LEQ-33] + _ = x[GEQ-34] + _ = x[MAT-35] + _ = x[NMAT-36] + _ = x[LPAREN-37] + _ = x[LBRACK-38] + _ = x[LBRACE-39] + _ = x[COMMA-40] + _ = x[PERIOD-41] + _ = x[ELLIPSIS-42] + _ = x[RPAREN-43] + _ = x[RBRACK-44] + _ = x[RBRACE-45] + _ = x[SEMICOLON-46] + _ = x[COLON-47] + _ = x[OPTION-48] + _ = x[operatorEnd-49] + _ = x[keywordBeg-50] + _ = x[IF-51] + _ = x[FOR-52] + _ = x[IN-53] + _ = x[LET-54] + _ = x[FUNC-55] + _ = x[TRUE-56] + _ = x[FALSE-57] + _ = x[NULL-58] + _ = x[keywordEnd-59] +} + +const _Token_name = "ILLEGALEOFCOMMENTATTRIBUTEliteralBegIDENTINTFLOATSTRINGINTERPOLATION_|_literalEndoperatorBeg+-*^/quoremdivmod&|&&||===<>!<-!=<=>==~!~([{,....)]};:?operatorEndkeywordBegifforinletfunctruefalsenullkeywordEnd" + +var _Token_index = [...]uint8{0, 7, 10, 17, 26, 36, 41, 44, 49, 55, 68, 71, 81, 92, 93, 94, 95, 96, 97, 100, 103, 106, 109, 110, 111, 113, 115, 116, 118, 119, 120, 121, 123, 125, 127, 129, 131, 133, 134, 135, 136, 137, 138, 141, 142, 143, 144, 145, 146, 147, 158, 168, 170, 173, 175, 178, 182, 186, 191, 195, 205} + +func (i Token) String() string { + if i < 0 || i >= Token(len(_Token_index)-1) { + return "Token(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Token_name[_Token_index[i]:_Token_index[i+1]] +} diff --git a/vendor/cuelang.org/go/cue/types.go b/vendor/cuelang.org/go/cue/types.go index 015118e01b..100714562d 100644 --- a/vendor/cuelang.org/go/cue/types.go +++ b/vendor/cuelang.org/go/cue/types.go @@ -223,8 +223,8 @@ type Iterator struct { type hiddenIterator = Iterator -// Next advances the iterator to the next value and reports whether there was -// any. It must be called before the first call to Value or Key. +// Next advances the iterator to the next value and reports whether there was any. +// It must be called before the first call to [Iterator.Value] or [Iterator.Selector]. func (i *Iterator) Next() bool { if i.p >= len(i.arcs) { i.cur = Value{} @@ -233,15 +233,15 @@ func (i *Iterator) Next() bool { arc := i.arcs[i.p] arc.Finalize(i.ctx) p := linkParent(i.val.parent_, i.val.v, arc) - i.cur = makeValue(i.val.idx, arc, p) i.f = arc.Label i.arcType = arc.ArcType + i.cur = makeValue(i.val.idx, arc, p) i.p++ return true } -// Value returns the current value in the list. It will panic if Next advanced -// past the last entry. +// Value returns the current value in the list. +// It will panic if [Iterator.Next] advanced past the last entry. func (i *Iterator) Value() Value { return i.cur } @@ -259,8 +259,8 @@ func (i *Iterator) Selector() Selector { // Label reports the label of the value if i iterates over struct fields and "" // otherwise. // -// Slated to be deprecated: use i.Selector().String(). Note that this will give -// more accurate string representations. +// Slated to be deprecated: use [Iterator.Selector] and [Selector.String]. +// Note that this will give more accurate string representations. 
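The iterator doc fixes above spell out the contract: Next must be called before the first Value or Selector, and Selector supersedes the soon-to-be-deprecated Label. A typical field loop under that contract (the two-field input is arbitrary):

    package main

    import (
    	"fmt"

    	"cuelang.org/go/cue/cuecontext"
    )

    func main() {
    	v := cuecontext.New().CompileString("a: 1\nb: 2")

    	iter, err := v.Fields()
    	if err != nil {
    		panic(err)
    	}
    	// Next advances first; Value and Selector are only valid afterwards.
    	for iter.Next() {
    		fmt.Println(iter.Selector(), iter.Value())
    	}
    }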
func (i *hiddenIterator) Label() string { if i.f == 0 { return "" @@ -468,23 +468,6 @@ func (v Value) Uint64() (uint64, error) { return i, nil } -// trimZeros trims 0's for better JSON representations. -func trimZeros(s string) string { - n1 := len(s) - s2 := strings.TrimRight(s, "0") - n2 := len(s2) - if p := strings.IndexByte(s2, '.'); p != -1 { - if p == n2-1 { - return s[:len(s2)+1] - } - return s2 - } - if n1-n2 <= 4 { - return s - } - return fmt.Sprint(s2, "e+", n1-n2) -} - var ( smallestPosFloat64 *apd.Decimal smallestNegFloat64 *apd.Decimal @@ -634,6 +617,7 @@ func newValueRoot(idx *runtime.Runtime, ctx *adt.OpContext, x adt.Expr) Value { func newChildValue(o *structValue, i int) Value { arc := o.at(i) + // TODO: fix linkage to parent. return makeValue(o.v.idx, arc, linkParent(o.v.parent_, o.v.v, arc)) } @@ -641,16 +625,26 @@ func newChildValue(o *structValue, i int) Value { // otherwise. func Dereference(v Value) Value { n := v.v - if n == nil || len(n.Conjuncts) != 1 { + if n == nil { + return v + } + + c, count := n.SingleConjunct() + if count != 1 { return v } - c := n.Conjuncts[0] - r, _ := c.Expr().(adt.Resolver) + env, expr := c.EnvExpr() + + // TODO: consider supporting unwrapping of structs or comprehensions around + // a single embedded reference. + r, _ := expr.(adt.Resolver) if r == nil { return v } + c = adt.MakeRootConjunct(env, expr) + ctx := v.ctx() n, b := ctx.Resolve(c, r) if b != nil { @@ -981,6 +975,7 @@ func (v Value) Syntax(opts ...Option) ast.Node { ShowDocs: o.docs, ShowErrors: o.showErrors, InlineImports: o.inlineImports, + Fragment: o.raw, } pkgID := v.instance().ID() @@ -1066,9 +1061,11 @@ func (v hiddenValue) Split() []Value { return nil } a := []Value{} - for _, x := range v.v.Conjuncts { - a = append(a, remakeValue(v, x.Env, x.Expr())) - } + v.v.VisitLeafConjuncts(func(x adt.Conjunct) bool { + env, expr := x.EnvExpr() + a = append(a, remakeValue(v, env, expr)) + return true + }) return a } @@ -1080,10 +1077,17 @@ func (v Value) Source() ast.Node { if v.v == nil { return nil } - if len(v.v.Conjuncts) == 1 { - return v.v.Conjuncts[0].Source() + count := 0 + var src ast.Node + v.v.VisitLeafConjuncts(func(c adt.Conjunct) bool { + src = c.Source() + count++ + return true + }) + if count > 1 || src == nil { + src = v.v.Value().Source() } - return v.v.Value().Source() + return src } // If v exactly represents a package, BuildInstance returns @@ -1121,24 +1125,25 @@ func (v Value) Pos() token.Pos { } // Pick the most-concrete field. var p token.Pos - for _, c := range v.v.Conjuncts { + v.v.VisitLeafConjuncts(func(c adt.Conjunct) bool { x := c.Elem() pp := pos(x) if pp == token.NoPos { - continue + return true } p = pp // Prefer struct conjuncts with actual fields. if s, ok := x.(*adt.StructLit); ok && len(s.Fields) > 0 { - break + return false } - } + return true + }) return p } // TODO: IsFinal: this value can never be changed. -// IsClosed reports whether a list of struct is closed. It reports false when +// IsClosed reports whether a list or struct is closed. It reports false when // the value is not a list or struct. // // Deprecated: use Allows(AnyString) and Allows(AnyIndex) or Kind/IncompleteKind. @@ -1150,6 +1155,8 @@ func (v hiddenValue) IsClosed() bool { case ListKind: return v.v.IsClosedList() case StructKind: + // TODO: remove this more expensive computation once the old evaluator + // is removed. 
return !v.Allows(AnyString) } return false @@ -1160,6 +1167,9 @@ func (v hiddenValue) IsClosed() bool { // Allows does not take into account validators like list.MaxItems(4). This may // change in the future. func (v Value) Allows(sel Selector) bool { + if v.v.HasEllipsis { + return true + } c := v.ctx() f := sel.sel.feature(c) return v.v.Accept(c, f) @@ -1173,7 +1183,7 @@ func (v Value) IsConcrete() bool { if v.v == nil { return false // any is neither concrete, not a list or struct. } - if b, ok := v.v.BaseValue.(*adt.Bottom); ok { + if b := v.v.Bottom(); b != nil { return !b.IsIncomplete() } if !adt.IsConcrete(v.v) { @@ -1198,12 +1208,37 @@ func (v Value) Exists() bool { if v.v == nil { return false } - if err, ok := v.v.BaseValue.(*adt.Bottom); ok { + if err := v.v.Bottom(); err != nil { return !err.NotExists } return true } +// isKind reports whether a value matches a particular kind. +// It is like checkKind, except that it doesn't construct an error value. +// Note that when v is bottom, the method always returns false. +func (v Value) isKind(ctx *adt.OpContext, want adt.Kind) bool { + if v.v == nil { + return false + } + x := v.eval(ctx) + if _, ok := x.(*adt.Bottom); ok { + return false + } + k := x.Kind() + if want != adt.BottomKind { + if k&want == adt.BottomKind { + return false + } + if !adt.IsConcrete(x) { + return false + } + } + return true +} + +// checkKind returns a bottom error if a value does not match a particular kind, +// describing the reason why. Note that when v is bottom, it is always returned as-is. func (v Value) checkKind(ctx *adt.OpContext, want adt.Kind) *adt.Bottom { if v.v == nil { return errNotExists @@ -1269,7 +1304,7 @@ func (v Value) Len() Value { // Elem returns the value of undefined element types of lists and structs. // -// Deprecated: use LookupPath in combination with "AnyString" or "AnyIndex". +// Deprecated: use [Value.LookupPath] in combination with "AnyString" or "AnyIndex". func (v hiddenValue) Elem() (Value, bool) { sel := AnyString if v.v.IsList() { @@ -1305,10 +1340,11 @@ func (v Value) Null() error { return nil } -// // IsNull reports whether v is null. -// func (v Value) IsNull() bool { -// return v.Null() == nil -// } +// IsNull reports whether v is null. +func (v Value) IsNull() bool { + v, _ = v.Default() + return v.isKind(v.ctx(), adt.NullKind) +} // Bool returns the bool value of v or false and an error if v is not a boolean. func (v Value) Bool() (bool, error) { @@ -1377,8 +1413,8 @@ func (v Value) structValOpts(ctx *adt.OpContext, o options) (s structValue, err obj := v.v - switch b, ok := v.v.BaseValue.(*adt.Bottom); { - case ok && b.IsIncomplete() && !o.concrete && !o.final: + switch b := v.v.Bottom(); { + case b != nil && b.IsIncomplete() && !o.concrete && !o.final: // Allow scalar values if hidden or definition fields are requested. case !o.omitHidden, !o.omitDefinitions: @@ -1405,7 +1441,7 @@ func (v Value) structValOpts(ctx *adt.OpContext, o options) (s structValue, err if f.IsHidden() && o.omitHidden { continue } - arc := obj.Lookup(f) + arc := obj.LookupRaw(f) if arc == nil { continue } @@ -1420,7 +1456,7 @@ func (v Value) structValOpts(ctx *adt.OpContext, o options) (s structValue, err // it avoids hiding errors in required fields. 
if o.omitOptional || o.concrete || o.final { arc = &adt.Vertex{ - Label: arc.Label, + Label: f, Parent: arc.Parent, Conjuncts: arc.Conjuncts, BaseValue: adt.NewRequiredNotPresentError(ctx, arc), @@ -1436,7 +1472,7 @@ func (v Value) structValOpts(ctx *adt.OpContext, o options) (s structValue, err // Struct returns the underlying struct of a value or an error if the value // is not a struct. // -// Deprecated: use Fields. +// Deprecated: use [Value.Fields]. func (v hiddenValue) Struct() (*Struct, error) { ctx := v.ctx() obj, err := v.structValOpts(ctx, options{}) @@ -1605,7 +1641,7 @@ func appendPath(a []Selector, v Value) []Selector { // LookupDef is equal to LookupPath(MakePath(Def(name))). // -// Deprecated: use LookupPath. +// Deprecated: use [Value.LookupPath]. func (v hiddenValue) LookupDef(name string) Value { return v.LookupPath(MakePath(Def(name))) } @@ -1616,7 +1652,7 @@ var errNotFound = errors.Newf(token.NoPos, "field not found") // look up a definition or hidden field (starting with `_` or `_#`). Otherwise // it interprets name as an arbitrary string for a regular field. // -// Deprecated: use LookupPath. +// Deprecated: use [Value.LookupPath]. func (v hiddenValue) FieldByName(name string, isIdent bool) (f FieldInfo, err error) { s, err := v.Struct() if err != nil { @@ -1627,7 +1663,7 @@ func (v hiddenValue) FieldByName(name string, isIdent bool) (f FieldInfo, err er // LookupField reports information about a field of v. // -// Deprecated: use LookupPath +// Deprecated: use [Value.LookupPath]. func (v hiddenValue) LookupField(name string) (FieldInfo, error) { s, err := v.Struct() if err != nil { @@ -1665,7 +1701,7 @@ func (v hiddenValue) LookupField(name string) (FieldInfo, error) { // Any reference in v referring to the value at the given path will resolve // to x in the newly created value. The resulting value is not validated. // -// Deprecated: use FillPath. +// Deprecated: use [Value.FillPath]. func (v hiddenValue) Fill(x interface{}, path ...string) Value { if v.v == nil { return v @@ -1771,14 +1807,18 @@ func (v Value) FillPath(p Path, x interface{}) Value { // The returned function returns the value that would be unified with field // given its name. // -// Deprecated: use LookupPath in combination with using optional selectors. +// Deprecated: use [Value.LookupPath] in combination with using optional selectors. func (v hiddenValue) Template() func(label string) Value { if v.v == nil { return nil } + // Implementation for the old evaluator. types := v.v.OptionalTypes() - if types&(adt.HasAdditional|adt.HasPattern) == 0 { + switch { + case types&(adt.HasAdditional|adt.HasPattern) != 0: + case v.v.PatternConstraints != nil: + default: return nil } @@ -1977,13 +2017,15 @@ func (v hiddenValue) Reference() (inst *Instance, path []string) { // is not a reference. func (v Value) ReferencePath() (root Value, p Path) { // TODO: don't include references to hidden fields. - if v.v == nil || len(v.v.Conjuncts) != 1 { + c, count := v.v.SingleConjunct() + if count != 1 { return Value{}, Path{} } ctx := v.ctx() - c := v.v.Conjuncts[0] - x, path := reference(v.idx, ctx, c.Env, c.Expr()) + env, expr := c.EnvExpr() + + x, path := reference(v.idx, ctx, env, expr) if x == nil { return Value{}, Path{} } @@ -2120,7 +2162,7 @@ func DisallowCycles(disallow bool) Option { // ResolveReferences forces the evaluation of references when outputting. 
// -// Deprecated: Syntax will now always attempt to resolve dangling references and +// Deprecated: [Value.Syntax] will now always attempt to resolve dangling references and // make the output self-contained. When [Final] or [Concrete] are used, // it will already attempt to resolve all references. // See also [InlineImports]. @@ -2149,7 +2191,13 @@ func ErrorsAsValues(show bool) Option { return func(p *options) { p.showErrors = show } } -// Raw tells Syntax to generate the value as is without any simplifications. +// Raw tells Syntax to generate the value as is without any simplifications and +// without ensuring a value is self contained. Any references are left dangling. +// The generated syntax tree can be compiled by passing the Value from which it +// was generated to scope. +// +// The option InlineImports overrides this option with respect to ensuring the +// output is self contained. func Raw() Option { return func(p *options) { p.raw = true } } @@ -2307,42 +2355,45 @@ func (v Value) Expr() (Op, []Value) { if v.v.IsData() { expr = v.v.Value() + goto process - } else { - switch len(v.v.Conjuncts) { - case 0: - if v.v.BaseValue == nil { - return NoOp, []Value{makeValue(v.idx, v.v, v.parent_)} // TODO: v? - } - expr = v.v.Value() - - case 1: - // the default case, processed below. - c := v.v.Conjuncts[0] - env = c.Env - expr = c.Expr() - if w, ok := expr.(*adt.Vertex); ok { - return Value{v.idx, w, v.parent_}.Expr() - } + } - default: - a := []Value{} - ctx := v.ctx() - for _, c := range v.v.Conjuncts { - // Keep parent here. TODO: do we need remove the requirement - // from other conjuncts? - n := &adt.Vertex{ - Parent: v.v.Parent, - Label: v.v.Label, - } - n.AddConjunct(c) - n.Finalize(ctx) - a = append(a, makeValue(v.idx, n, v.parent_)) - } - return adt.AndOp, a + switch c, count := v.v.SingleConjunct(); count { + case 0: + if v.v.BaseValue == nil { + return NoOp, []Value{makeValue(v.idx, v.v, v.parent_)} // TODO: v? + } + expr = v.v.Value() + + case 1: + // the default case, processed below. + env, expr = c.EnvExpr() + if w, ok := expr.(*adt.Vertex); ok { + return Value{v.idx, w, v.parent_}.Expr() } + + default: + a := []Value{} + ctx := v.ctx() + v.v.VisitLeafConjuncts(func(c adt.Conjunct) bool { + // Keep parent here. TODO: do we need remove the requirement + // from other conjuncts? + n := &adt.Vertex{ + Parent: v.v.Parent, + Label: v.v.Label, + } + n.AddConjunct(c) + n.Finalize(ctx) + a = append(a, makeValue(v.idx, n, v.parent_)) + return true + }) + + return adt.AndOp, a } +process: + // TODO: replace appends with []Value{}. For not leave. a := []Value{} op := NoOp diff --git a/vendor/cuelang.org/go/encoding/gocode/gocodec/codec.go b/vendor/cuelang.org/go/encoding/gocode/gocodec/codec.go deleted file mode 100644 index 87879cb20c..0000000000 --- a/vendor/cuelang.org/go/encoding/gocode/gocodec/codec.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2019 CUE Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
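The expanded Raw documentation above describes a round trip: emit unsimplified syntax with dangling references, then compile it back using the originating value as the scope. A sketch of that idiom; pairing it with BuildExpr and Scope is this example's assumption rather than something the diff itself exercises:

    package main

    import (
    	"fmt"

    	"cuelang.org/go/cue"
    	"cuelang.org/go/cue/ast"
    	"cuelang.org/go/cue/cuecontext"
    )

    func main() {
    	ctx := cuecontext.New()
    	v := ctx.CompileString("x: 3\ny: x + 1")
    	y := v.LookupPath(cue.ParsePath("y"))

    	// Raw keeps the reference to x dangling instead of resolving it.
    	node := y.Syntax(cue.Raw())
    	if expr, ok := node.(ast.Expr); ok {
    		// Compiling back should succeed when the originating value is the scope.
    		back := ctx.BuildExpr(expr, cue.Scope(v))
    		fmt.Println(back.Err() == nil)
    	}
    }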
- -// Package codec converts Go to and from CUE and validates Go values based on -// CUE constraints. -// -// CUE constraints can be used to validate Go types as well as fill out -// missing struct fields that are implied from the constraints and the values -// already defined by the struct value. -package gocodec - -import ( - "sync" - - "cuelang.org/go/cue" - "cuelang.org/go/cue/cuecontext" - "cuelang.org/go/internal/value" -) - -// Config has no options yet, but is defined for future extensibility. -type Config struct { -} - -// A Codec decodes and encodes CUE from and to Go values and validates and -// completes Go values based on CUE templates. -type Codec struct { - runtime *cue.Context - mutex sync.RWMutex -} - -// New creates a new Codec for the given instance. -// -// It is safe to use the methods of Codec concurrently as long as the given -// Runtime is not used elsewhere while using Codec. However, only the concurrent -// use of Decode, Validate, and Complete is efficient. -// -// Note: calling this with a *cue.Runtime value is deprecated. -func New[Ctx *cue.Runtime | *cue.Context](ctx Ctx, c *Config) *Codec { - return &Codec{runtime: value.ConvertToContext(ctx)} -} - -// ExtractType extracts a CUE value from a Go type. -// -// The type represented by x is converted as the underlying type. Specific -// values, such as map or slice elements or field values of structs are ignored. -// If x is of type reflect.Type, the type represented by x is extracted. -// -// Fields of structs can be annoted using additional constrains using the 'cue' -// field tag. The value of the tag is a CUE expression, which may contain -// references to the JSON name of other fields in a struct. -// -// type Sum struct { -// A int `cue:"c-b" json:"a,omitempty"` -// B int `cue:"c-a" json:"b,omitempty"` -// C int `cue:"a+b" json:"c,omitempty"` -// } -func (c *Codec) ExtractType(x interface{}) (cue.Value, error) { - // ExtractType cannot introduce new fields on repeated calls. We could - // consider optimizing the lock usage based on this property. - c.mutex.Lock() - defer c.mutex.Unlock() - - return fromGoType(c.runtime, x) -} - -// TODO: allow extracting constraints and type info separately? - -// Decode converts x to a CUE value. -// -// If x is of type reflect.Value it will convert the value represented by x. -func (c *Codec) Decode(x interface{}) (cue.Value, error) { - c.mutex.Lock() - defer c.mutex.Unlock() - - // Depending on the type, can introduce new labels on repeated calls. - return fromGoValue(c.runtime, x, false) -} - -// Encode converts v to a Go value. -func (c *Codec) Encode(v cue.Value, x interface{}) error { - c.mutex.RLock() - defer c.mutex.RUnlock() - - return v.Decode(x) -} - -var defaultCodec = New(value.ConvertToRuntime(cuecontext.New()), nil) - -// Validate calls Validate on a default Codec for the type of x. -func Validate(x interface{}) error { - c := defaultCodec - c.mutex.RLock() - defer c.mutex.RUnlock() - - r := defaultCodec.runtime - v, err := fromGoType(r, x) - if err != nil { - return err - } - w, err := fromGoValue(r, x, false) - if err != nil { - return err - } - v = v.Unify(w) - if err := v.Validate(); err != nil { - return err - } - return nil -} - -// Validate checks whether x satisfies the constraints defined by v. -// -// The given value must be created using the same Runtime with which c was -// initialized. 
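Although the vendored gocodec copy is deleted here, the recipe its Validate implemented survives in the public API: derive constraints from the Go type, derive a value from the Go value, unify, and validate. A rough equivalent, assuming Context.EncodeType and Context.Encode stand in adequately for the deleted fromGoType/fromGoValue (whether they honor the cue struct tags described above is not shown by this diff):

    package main

    import (
    	"fmt"

    	"cuelang.org/go/cue"
    	"cuelang.org/go/cue/cuecontext"
    )

    type Sum struct {
    	A int `json:"a,omitempty"`
    	B int `json:"b,omitempty"`
    }

    // validateGo mirrors the deleted Codec.Validate: constraints implied by
    // the Go type are unified with the concrete Go value, then validated.
    func validateGo(ctx *cue.Context, x interface{}) error {
    	schema := ctx.EncodeType(x) // roughly fromGoType
    	val := ctx.Encode(x)        // roughly fromGoValue
    	return schema.Unify(val).Validate()
    }

    func main() {
    	fmt.Println(validateGo(cuecontext.New(), Sum{A: 1, B: 2}))
    }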
-func (c *Codec) Validate(v cue.Value, x interface{}) error { - c.mutex.RLock() - defer c.mutex.RUnlock() - - r := checkAndForkContext(c.runtime, v) - w, err := fromGoValue(r, x, false) - if err != nil { - return err - } - return w.Unify(v).Err() -} - -// Complete sets previously undefined values in x that can be uniquely -// determined form the constraints defined by v if validation passes, or returns -// an error, without modifying anything, otherwise. -// -// Only undefined values are modified. A value is considered undefined if it is -// pointer type and is nil or if it is a field with a zero value that has a json -// tag with the omitempty flag. -// -// The given value must be created using the same Runtime with which c was -// initialized. -// -// Complete does a JSON round trip. This means that data not preserved in such a -// round trip, such as the location name of a time.Time, is lost after a -// successful update. -func (c *Codec) Complete(v cue.Value, x interface{}) error { - c.mutex.RLock() - defer c.mutex.RUnlock() - - r := checkAndForkContext(c.runtime, v) - w, err := fromGoValue(r, x, true) - if err != nil { - return err - } - - w = w.Unify(v) - if err := w.Validate(cue.Concrete(true)); err != nil { - return err - } - return w.Decode(x) -} - -func fromGoValue(r *cue.Context, x interface{}, allowDefault bool) (cue.Value, error) { - v := value.FromGoValue(r, x, allowDefault) - if err := v.Err(); err != nil { - return v, err - } - return v, nil -} - -func fromGoType(r *cue.Context, x interface{}) (cue.Value, error) { - v := value.FromGoType(r, x) - if err := v.Err(); err != nil { - return v, err - } - return v, nil -} - -func checkAndForkContext(r *cue.Context, v cue.Value) *cue.Context { - rr := v.Context() - if r != rr { - panic("value not from same runtime") - } - return rr -} diff --git a/vendor/cuelang.org/go/encoding/json/json.go b/vendor/cuelang.org/go/encoding/json/json.go index 69c8986518..28f79f5553 100644 --- a/vendor/cuelang.org/go/encoding/json/json.go +++ b/vendor/cuelang.org/go/encoding/json/json.go @@ -28,7 +28,6 @@ import ( "cuelang.org/go/cue/literal" "cuelang.org/go/cue/parser" "cuelang.org/go/cue/token" - "cuelang.org/go/internal/value" ) // Valid reports whether data is a valid JSON encoding. @@ -42,15 +41,14 @@ func Validate(b []byte, v cue.Value) error { if !json.Valid(b) { return fmt.Errorf("json: invalid JSON") } - r := value.ConvertToRuntime(v.Context()) - inst, err := r.Compile("json.Validate", b) - if err != nil { + v2 := v.Context().CompileBytes(b, cue.Filename("json.Validate")) + if err := v2.Err(); err != nil { return err } - v = v.Unify(inst.Value()) - if v.Err() != nil { - return v.Err() + v = v.Unify(v2) + if err := v.Err(); err != nil { + return err } return v.Validate(cue.Final()) } @@ -99,19 +97,17 @@ func extract(path string, b []byte) (ast.Expr, error) { // The runtime may be nil if Decode isn't used. func NewDecoder(r *cue.Runtime, path string, src io.Reader) *Decoder { return &Decoder{ - r: r, - path: path, - dec: json.NewDecoder(src), - offset: 1, + r: r, + path: path, + dec: json.NewDecoder(src), } } // A Decoder converts JSON values to CUE. type Decoder struct { - r *cue.Runtime - path string - dec *json.Decoder - offset int + r *cue.Runtime + path string + dec *json.Decoder } // Extract converts the current JSON value to a CUE ast. 
It returns io.EOF @@ -131,13 +127,12 @@ func (d *Decoder) extract() (ast.Expr, error) { if err == io.EOF { return nil, err } - offset := d.offset - d.offset += len(raw) if err != nil { - pos := token.NewFile(d.path, offset, len(raw)).Pos(0, 0) + pos := token.NewFile(d.path, -1, len(raw)).Pos(0, 0) return nil, errors.Wrapf(err, pos, "invalid JSON for file %q", d.path) } - expr, err := parser.ParseExpr(d.path, []byte(raw), parser.FileOffset(offset)) + expr, err := parser.ParseExpr(d.path, []byte(raw)) + if err != nil { return nil, err } diff --git a/vendor/cuelang.org/go/encoding/openapi/decode.go b/vendor/cuelang.org/go/encoding/openapi/decode.go index e911cc11de..d60e82d686 100644 --- a/vendor/cuelang.org/go/encoding/openapi/decode.go +++ b/vendor/cuelang.org/go/encoding/openapi/decode.go @@ -51,8 +51,8 @@ func Extract(data cue.InstanceOrValue, c *Config) (*ast.File, error) { v := data.Value() - doc, _ := v.Lookup("info", "title").String() // Required - if s, _ := v.Lookup("info", "description").String(); s != "" { + doc, _ := v.LookupPath(cue.MakePath(cue.Str("info"), cue.Str("title"))).String() // Required + if s, _ := v.LookupPath(cue.MakePath(cue.Str("info"), cue.Str("description"))).String(); s != "" { doc += "\n\n" + s } cg := internal.NewComment(true, doc) @@ -84,7 +84,7 @@ func Extract(data cue.InstanceOrValue, c *Config) (*ast.File, error) { // add(internal.NewAttr("openapi", "version="+ version)) // } - if info := v.Lookup("info"); info.Exists() { + if info := v.LookupPath(cue.MakePath(cue.Str("info"))); info.Exists() { decls := []interface{}{} if st, ok := info.Syntax().(*ast.StructLit); ok { // Remove title. diff --git a/vendor/cuelang.org/go/encoding/openapi/doc.go b/vendor/cuelang.org/go/encoding/openapi/doc.go index 3c388befa0..a8eecb6955 100644 --- a/vendor/cuelang.org/go/encoding/openapi/doc.go +++ b/vendor/cuelang.org/go/encoding/openapi/doc.go @@ -17,5 +17,5 @@ // // It currently handles OpenAPI Schema components only. // -// See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#schemaObject. 
+// See https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.0.md#schema-object package openapi diff --git a/vendor/cuelang.org/go/encoding/openapi/openapi.go b/vendor/cuelang.org/go/encoding/openapi/openapi.go index a39ec92c11..49d71cd490 100644 --- a/vendor/cuelang.org/go/encoding/openapi/openapi.go +++ b/vendor/cuelang.org/go/encoding/openapi/openapi.go @@ -165,8 +165,8 @@ func (c *Config) compose(inst cue.InstanceOrValue, schemas *ast.StructLit) (x *a errs = errors.Append(errs, errors.Newf(i.Value().Pos(), "info must be a struct")) } - title, _ = i.Value().Lookup("title").String() - version, _ = i.Value().Lookup("version").String() + title, _ = i.Value().LookupPath(cue.MakePath(cue.Str("title"))).String() + version, _ = i.Value().LookupPath(cue.MakePath(cue.Str("version"))).String() default: errs = errors.Append(errs, errors.Newf(i.Value().Pos(), @@ -186,7 +186,7 @@ func (c *Config) compose(inst cue.InstanceOrValue, schemas *ast.StructLit) (x *a } if version == "" { - version, _ = val.Lookup("$version").String() + version, _ = val.LookupPath(cue.MakePath(cue.Str("$version"))).String() if version == "" { version = "no version" } diff --git a/vendor/cuelang.org/go/encoding/openapi/types.go b/vendor/cuelang.org/go/encoding/openapi/types.go index 6554c9683f..aae0e107f5 100644 --- a/vendor/cuelang.org/go/encoding/openapi/types.go +++ b/vendor/cuelang.org/go/encoding/openapi/types.go @@ -25,7 +25,7 @@ import ( "cuelang.org/go/cue/token" ) -// See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#data-types +// See https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.0.md#data-types var cueToOpenAPI = map[string]string{ "int32": "int32", "int64": "int64", diff --git a/vendor/cuelang.org/go/encoding/protobuf/jsonpb/decoder.go b/vendor/cuelang.org/go/encoding/protobuf/jsonpb/decoder.go index ad4ecc3948..14c55c3ff7 100644 --- a/vendor/cuelang.org/go/encoding/protobuf/jsonpb/decoder.go +++ b/vendor/cuelang.org/go/encoding/protobuf/jsonpb/decoder.go @@ -18,6 +18,8 @@ import ( "encoding/base64" "strings" + "github.com/cockroachdb/apd/v3" + "cuelang.org/go/cue" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/ast/astutil" @@ -25,7 +27,6 @@ import ( "cuelang.org/go/cue/literal" "cuelang.org/go/cue/token" "cuelang.org/go/encoding/protobuf/pbinternal" - "github.com/cockroachdb/apd/v3" ) // Option is an option. @@ -77,7 +78,7 @@ func NewDecoder(schema cue.Value, options ...Option) *Decoder { // according to the protocol buffer to JSON mapping defined in the protocol // buffer spec. // -// RewriteFile is idempotent, calling it multiples times on an expression gives +// RewriteFile is idempotent, calling it multiple times on an expression gives // the same result. 
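+//
+// A minimal usage sketch (illustrative only, not part of this change; the
+// schema and JSON input below are made-up examples):
+//
+//	schema := cuecontext.New().CompileString(`{data?: bytes}`)
+//	f, err := parser.ParseFile("msg.json", `{"data": "aGVsbG8="}`)
+//	if err == nil {
+//		err = jsonpb.NewDecoder(schema).RewriteFile(f)
+//	}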
func (d *Decoder) RewriteFile(file *ast.File) error { var r rewriter diff --git a/vendor/cuelang.org/go/encoding/protobuf/parse.go b/vendor/cuelang.org/go/encoding/protobuf/parse.go index 8881a031db..09929f53bd 100644 --- a/vendor/cuelang.org/go/encoding/protobuf/parse.go +++ b/vendor/cuelang.org/go/encoding/protobuf/parse.go @@ -47,7 +47,7 @@ func (s *Extractor) parse(filename string, src interface{}) (p *protoConverter, s.fileCache[filename] = result{p, err} }() - b, err := source.Read(filename, src) + b, err := source.ReadAll(filename, src) if err != nil { return nil, err } @@ -61,7 +61,7 @@ func (s *Extractor) parse(filename string, src interface{}) (p *protoConverter, return nil, errors.Newf(token.NoPos, "protobuf: %v", err) } - tfile := token.NewFile(filename, 0, len(b)) + tfile := token.NewFile(filename, -1, len(b)) tfile.SetLinesForContent(b) p = &protoConverter{ diff --git a/vendor/cuelang.org/go/encoding/protobuf/protobuf.go b/vendor/cuelang.org/go/encoding/protobuf/protobuf.go index 2d4a35efd3..efb7924e80 100644 --- a/vendor/cuelang.org/go/encoding/protobuf/protobuf.go +++ b/vendor/cuelang.org/go/encoding/protobuf/protobuf.go @@ -22,14 +22,14 @@ // # Package Paths // // If a .proto file contains a go_package directive, it will be used as the -// destination package fo the generated .cue files. A common use case is to +// destination package for the generated .cue files. A common use case is to // generate the CUE in the same directory as the .proto definition. If a // destination package is not within the current CUE module, it will be written // relative to the pkg directory. // // If a .proto file does not specify go_package, it will convert a proto package // "google.parent.sub" to the import path "googleapis.com/google/parent/sub". -// It is safe to mix package with and without a go_package within the same +// It is safe to mix packages with and without a go_package within the same // project. // // # Type Mappings @@ -64,6 +64,8 @@ // Timestamp time.Time See struct.proto. // Duration time.Duration See struct.proto. // +// # Annotations +// // Protobuf definitions can be annotated with CUE constraints that are included // in the generated CUE: // @@ -87,11 +89,9 @@ package protobuf import ( "os" "path/filepath" - "sort" + "slices" "strings" - "github.com/mpvl/unique" - "cuelang.org/go/cue/ast" "cuelang.org/go/cue/build" "cuelang.org/go/cue/errors" @@ -99,6 +99,7 @@ import ( "cuelang.org/go/cue/parser" "cuelang.org/go/cue/token" "cuelang.org/go/internal" + "cuelang.org/go/mod/module" // Generated protobuf CUE may use builtins. Ensure that these can always be // found, even if the user does not use cue/load or another package that @@ -181,13 +182,27 @@ type result struct { // it will be observable by the Err method fo the Extractor. It is safe, // however, to only check errors after building the output. func NewExtractor(c *Config) *Extractor { + var modulePath string + // We don't want to consider the module's major version as + // part of the path when checking to see a protobuf package + // declares itself as part of that module. + // TODO(rogpeppe) the Go package path might itself include a major + // version, so we should probably consider that too. 
+ if c.Module != "" { + var ok bool + modulePath, _, ok = module.SplitPathVersion(c.Module) + if !ok { + modulePath = c.Module + + } + } cwd, _ := os.Getwd() b := &Extractor{ root: c.Root, cwd: cwd, paths: c.Paths, pkgName: c.PkgName, - module: c.Module, + module: modulePath, enumMode: c.EnumMode, fileCache: map[string]result{}, imports: map[string]*build.Instance{}, @@ -300,20 +315,20 @@ func (b *Extractor) Instances() (instances []*build.Instance, err error) { for _, p := range b.imports { instances = append(instances, p) - sort.Strings(p.ImportPaths) - unique.Strings(&p.ImportPaths) + slices.Sort(p.ImportPaths) + p.ImportPaths = slices.Compact(p.ImportPaths) for _, i := range p.ImportPaths { if imp := b.imports[i]; imp != nil { p.Imports = append(p.Imports, imp) } } - sort.Slice(p.Files, func(i, j int) bool { - return p.Files[i].Filename < p.Files[j].Filename + slices.SortFunc(p.Files, func(a, b *ast.File) int { + return strings.Compare(a.Filename, b.Filename) }) } - sort.Slice(instances, func(i, j int) bool { - return instances[i].ImportPath < instances[j].ImportPath + slices.SortFunc(instances, func(a, b *build.Instance) int { + return strings.Compare(a.ImportPath, b.ImportPath) }) if err != nil { diff --git a/vendor/cuelang.org/go/encoding/protobuf/textproto/decoder.go b/vendor/cuelang.org/go/encoding/protobuf/textproto/decoder.go index d5f95472d8..e917342d30 100644 --- a/vendor/cuelang.org/go/encoding/protobuf/textproto/decoder.go +++ b/vendor/cuelang.org/go/encoding/protobuf/textproto/decoder.go @@ -77,7 +77,7 @@ func (d *Decoder) Parse(schema cue.Value, filename string, b []byte) (ast.Expr, // dec.errs = nil - f := token.NewFile(filename, 0, len(b)) + f := token.NewFile(filename, -1, len(b)) f.SetLinesForContent(b) dec.file = f diff --git a/vendor/cuelang.org/go/encoding/protobuf/textproto/encoder.go b/vendor/cuelang.org/go/encoding/protobuf/textproto/encoder.go index 11c511d628..787b45148d 100644 --- a/vendor/cuelang.org/go/encoding/protobuf/textproto/encoder.go +++ b/vendor/cuelang.org/go/encoding/protobuf/textproto/encoder.go @@ -15,7 +15,7 @@ package textproto import ( - "fmt" + "strconv" "strings" "cuelang.org/go/cue" @@ -179,7 +179,11 @@ func (e *encoder) encodeValue(n *pbast.Node, v cue.Value) { n.Values = append(n.Values, sn.Values...) 
case cue.BoolKind: - value = fmt.Sprint(v) + t, err := v.Bool() + if err != nil { + e.addErr(err) + } + value = strconv.FormatBool(t) n.Values = append(n.Values, &pbast.Value{Value: value}) case cue.IntKind, cue.FloatKind, cue.NumberKind: diff --git a/vendor/cuelang.org/go/encoding/protobuf/util.go b/vendor/cuelang.org/go/encoding/protobuf/util.go index def05fb181..13afa50326 100644 --- a/vendor/cuelang.org/go/encoding/protobuf/util.go +++ b/vendor/cuelang.org/go/encoding/protobuf/util.go @@ -49,8 +49,8 @@ func addComments(f ast.Node, i int, doc, inline *proto.Comment) bool { if cg != nil && len(cg.List) > 0 && i > 0 { cg.List[0].Slash = newSection } - f.AddComment(cg) - f.AddComment(comment(inline, false)) + ast.AddComment(f, cg) + ast.AddComment(f, comment(inline, false)) return doc != nil } diff --git a/vendor/cuelang.org/go/internal/astinternal/debugstr.go b/vendor/cuelang.org/go/internal/astinternal/debugstr.go index 81cba96659..83091af5c1 100644 --- a/vendor/cuelang.org/go/internal/astinternal/debugstr.go +++ b/vendor/cuelang.org/go/internal/astinternal/debugstr.go @@ -27,7 +27,7 @@ import ( func DebugStr(x interface{}) (out string) { if n, ok := x.(ast.Node); ok { comments := "" - for _, g := range n.Comments() { + for _, g := range ast.Comments(n) { comments += DebugStr(g) } if comments != "" { diff --git a/vendor/cuelang.org/go/internal/attrs.go b/vendor/cuelang.org/go/internal/attrs.go index 44707bb1ee..6e50e3f1d0 100644 --- a/vendor/cuelang.org/go/internal/attrs.go +++ b/vendor/cuelang.org/go/internal/attrs.go @@ -148,7 +148,7 @@ func ParseAttrBody(pos token.Pos, s string) (a Attr) { // Create temporary token.File so that scanner has something // to work with. // TODO it's probably possible to do this without allocations. - tmpFile := token.NewFile("", 0, len(s)) + tmpFile := token.NewFile("", -1, len(s)) if len(s) > 0 { tmpFile.AddLine(len(s) - 1) } diff --git a/vendor/cuelang.org/go/internal/cli/cli.go b/vendor/cuelang.org/go/internal/cli/cli.go index f6ffd251fe..d4702e4cc4 100644 --- a/vendor/cuelang.org/go/internal/cli/cli.go +++ b/vendor/cuelang.org/go/internal/cli/cli.go @@ -32,7 +32,7 @@ func ParseValue(pos token.Pos, name, str string, k cue.Kind) (x ast.Expr, errs e expr, err = parser.ParseExpr(name, str) if err != nil { errs = errors.Wrapf(err, pos, - "invalid number for environment variable %s", name) + "invalid number for injection tag %q", name) } } @@ -41,7 +41,7 @@ func ParseValue(pos token.Pos, name, str string, k cue.Kind) (x ast.Expr, errs e b, ok := boolValues[str] if !ok { errs = errors.Append(errs, errors.Newf(pos, - "invalid boolean value %q for environment variable %s", str, name)) + "invalid boolean value %q for injection tag %q", str, name)) } else if expr != nil || k&cue.StringKind != 0 { // Convert into an expression bl := ast.NewBool(b) @@ -70,7 +70,7 @@ func ParseValue(pos token.Pos, name, str string, k cue.Kind) (x ast.Expr, errs e return x, nil case errs == nil: return nil, errors.Newf(pos, - "invalid type for environment variable %s", name) + "invalid type for injection tag %q", name) } return nil, errs } diff --git a/vendor/cuelang.org/go/internal/core/adt/adt.go b/vendor/cuelang.org/go/internal/core/adt/adt.go index 5d114b8382..a6722c1e23 100644 --- a/vendor/cuelang.org/go/internal/core/adt/adt.go +++ b/vendor/cuelang.org/go/internal/core/adt/adt.go @@ -39,7 +39,7 @@ func Resolve(ctx *OpContext, c Conjunct) *Vertex { v = x case Resolver: - r, err := ctx.resolveState(c, x, finalized) + r, err := ctx.resolveState(c, x, attempt(finalized, allKnown)) 
if err != nil { v = err break } @@ -60,7 +60,7 @@ func Resolve(ctx *OpContext, c Conjunct) *Vertex { return ToVertex(v) } -// A Node is any abstract data type representing an value or expression. +// A Node is any abstract data type representing a value or expression. type Node interface { Source() ast.Node node() // enforce internal. @@ -108,14 +108,14 @@ type Evaluator interface { // evaluate evaluates the underlying expression. If the expression // is incomplete, it may record the error in ctx and return nil. - evaluate(ctx *OpContext, state vertexStatus) Value + evaluate(ctx *OpContext, state combinedFlags) Value } // A Resolver represents a reference somewhere else within a tree that resolves // a value. type Resolver interface { Node - resolve(ctx *OpContext, state vertexStatus) *Vertex + resolve(ctx *OpContext, state combinedFlags) *Vertex } type YieldFunc func(env *Environment) @@ -251,6 +251,9 @@ func (x *Ellipsis) expr() Expr { } return x.Value } +func (*ConjunctGroup) declNode() {} +func (*ConjunctGroup) elemNode() {} +func (*ConjunctGroup) expr() {} var top = &Top{} @@ -338,6 +341,7 @@ func (*Comprehension) elemNode() {} func (*Vertex) node() {} func (*Conjunction) node() {} +func (*ConjunctGroup) node() {} func (*Disjunction) node() {} func (*BoundValue) node() {} func (*Builtin) node() {} diff --git a/vendor/cuelang.org/go/internal/core/adt/binop.go b/vendor/cuelang.org/go/internal/core/adt/binop.go index 1837e7c99e..fefcd48838 100644 --- a/vendor/cuelang.org/go/internal/core/adt/binop.go +++ b/vendor/cuelang.org/go/internal/core/adt/binop.go @@ -208,6 +208,13 @@ func BinOp(c *OpContext, op Op, left, right Value) Value { n := c.newInlineVertex(nil, nil, MakeConjunct(c.Env(0), list, c.ci)) n.CompleteArcs(c) + // NOTE: if we set isData to true, whoever processes the result will + // avoid having to process the expressions again. This improves + // performance. It also changes a potential cycle error message + // to a more concrete message, as if this result was unified as is. + // TODO: uncomment this and see if we like the result. + // n.isData = true + return n } diff --git a/vendor/cuelang.org/go/internal/core/adt/closed.go b/vendor/cuelang.org/go/internal/core/adt/closed.go index 51fd6d2bcb..fe2baf2004 100644 --- a/vendor/cuelang.org/go/internal/core/adt/closed.go +++ b/vendor/cuelang.org/go/internal/core/adt/closed.go @@ -344,6 +344,9 @@ func isClosed(v *Vertex) bool { // Accept determines whether f is allowed in n. It uses the OpContext for // caching administrative fields. func Accept(ctx *OpContext, n *Vertex, f Feature) (found, required bool) { + if ctx.isDevVersion() { + return n.accept(ctx, f), true + } ctx.generation++ ctx.todo = nil diff --git a/vendor/cuelang.org/go/internal/core/adt/closed2.go b/vendor/cuelang.org/go/internal/core/adt/closed2.go index c2c1b61133..ce25d47009 100644 --- a/vendor/cuelang.org/go/internal/core/adt/closed2.go +++ b/vendor/cuelang.org/go/internal/core/adt/closed2.go @@ -32,6 +32,8 @@ func isComplexStruct(ctx *OpContext, v *Vertex) bool { // TODO: clean up code and error messages. Reduce duplication in some related // code. func verifyArc2(ctx *OpContext, f Feature, v *Vertex, isClosed bool) (found bool, err *Bottom) { + unreachableForDev(ctx) + // Don't check computed, temporary vertices.
if v.Label == InvalidLabel { return true, nil diff --git a/vendor/cuelang.org/go/internal/core/adt/composite.go b/vendor/cuelang.org/go/internal/core/adt/composite.go index d315e2828c..ec2fe461b6 100644 --- a/vendor/cuelang.org/go/internal/core/adt/composite.go +++ b/vendor/cuelang.org/go/internal/core/adt/composite.go @@ -102,7 +102,7 @@ type cacheKey struct { } func (e *Environment) up(ctx *OpContext, count int32) *Environment { - for ; count > 0; count-- { + for i := int32(0); i < count; i++ { e = e.Up ctx.Assertf(ctx.Pos(), e.Vertex != nil, "Environment.up encountered a nil vertex") } @@ -127,7 +127,7 @@ func (e *Environment) evalCached(c *OpContext, x Expr) Value { // Save and restore errors to ensure that only relevant errors are // associated with the cache. err := c.errs - v = c.evalState(x, partial) // TODO: should this be finalized? + v = c.evalState(x, require(partial, allKnown)) // TODO: should this be finalized? c.e, c.src = env, src c.errs = err if b, ok := v.(*Bottom); !ok || !b.IsIncomplete() { @@ -157,12 +157,16 @@ type Vertex struct { // eval: *, BaseValue: * -- finalized // state *nodeContext - // TODO: move the following status fields to nodeContext. + + // cc manages the closedness logic for this Vertex. It is created + // by rootCloseContext. + // TODO: move back to nodeContext, but be sure not to clone it. + cc *closeContext // Label is the feature leading to this vertex. Label Feature - // TODO: move the following status fields to nodeContext. + // TODO: move the following fields to nodeContext. // status indicates the evaluation progress of this vertex. status vertexStatus @@ -182,6 +186,10 @@ type Vertex struct { // conjuncts, or ancestor conjuncts, is a definition. Closed bool + // HasEllipsis indicates that this Vertex is open by means of an ellipsis. + // TODO: combine this field with Closed once we have removed the old evaluator. + HasEllipsis bool + // MultiLet indicates whether multiple let fields were added from // different sources. If true, a LetReference must be resolved using // the per-Environment value cache. @@ -192,11 +200,6 @@ type Vertex struct { // or any other operation that relies on the set of arcs being constant. LockArcs bool - // disallowedField means that this arc is not allowed according - // to the closedness rules. This is used to avoid duplicate error reporting. - // TODO: perhaps rename to notAllowedErrorEmitted. - disallowedField bool - // IsDynamic signifies whether this struct is computed as part of an // expression and not part of the static evaluation tree. // Used for cycle detection. @@ -207,6 +210,20 @@ type Vertex struct { // hasPendingArc is set if this Vertex has a void arc (e.g. for comprehensions) hasPendingArc bool + // IsDisjunct indicates this Vertex is a disjunct resulting from a + // disjunction evaluation. + IsDisjunct bool + + // IsShared is true if BaseValue holds a Vertex of a node of another path. + // If a node is shared, the user should be careful with traversal. + // The debug printer, for instance, takes extra care not to print in a loop. + IsShared bool + + // IsCyclic is true if a node is cyclic, for instance if its value is + // a cyclic reference to a shared node or if the value is a conjunction + // of which at least one value is cyclic (not yet supported). + IsCyclic bool + // ArcType indicates the level of optionality of this arc. ArcType ArcType @@ -238,6 +255,10 @@ type Vertex struct { // // This value may be nil, in which case the Arcs are considered to define // the final value of this Vertex.
+ // + // TODO: all access to Conjuncts should go through functions like + // VisitLeafConjuncts and VisitAllConjuncts. We should probably make this + // an unexported field. Conjuncts []Conjunct // Structs is a slice of struct literals that contributed to this value. @@ -245,6 +266,37 @@ type Vertex struct { Structs []*StructInfo } +func deref(v *Vertex) *Vertex { + v = v.DerefValue() + n := v.state + if n != nil { + v = n.underlying + } + if v == nil { + panic("unexpected nil underlying with non-nil state") + } + return v +} + +func equalDeref(a, b *Vertex) bool { + return deref(a) == deref(b) +} + +// rootCloseContext creates a closeContext for this Vertex or returns the +// existing one. +func (v *Vertex) rootCloseContext(ctx *OpContext) *closeContext { + if v.cc == nil { + v.cc = &closeContext{ + group: (*ConjunctGroup)(&v.Conjuncts), + parent: nil, + src: v, + parentConjuncts: v, + } + v.cc.incDependent(ctx, ROOT, nil) // matched in REF(decrement:nodeDone) + } + return v.cc +} + // newInlineVertex creates a Vertex that is needed for computation, but for // which there is no CUE path defined from the root Vertex. func (ctx *OpContext) newInlineVertex(parent *Vertex, v BaseValue, a ...Conjunct) *Vertex { @@ -259,9 +311,26 @@ func (ctx *OpContext) newInlineVertex(parent *Vertex, v BaseValue, a ...Conjunct // updateArcType updates v.ArcType if t is more restrictive. func (v *Vertex) updateArcType(t ArcType) { - if t < v.ArcType { - v.ArcType = t + if t >= v.ArcType { + return } + if v.ArcType == ArcNotPresent { + return + } + s := v.state + if (s != nil || v.isFinal()) && s.ctx.isDevVersion() { + c := s.ctx + if s.scheduler.frozen.meets(arcTypeKnown) { + parent := v.Parent + parent.reportFieldCycleError(c, c.Source().Pos(), v.Label) + return + } + } + if v.Parent != nil && v.Parent.ArcType == ArcPending && v.Parent.state != nil && v.Parent.state.ctx.isDevVersion() { + // TODO: check that state is always non-nil. + v.Parent.state.unshare() + } + v.ArcType = t } // isDefined indicates whether this arc is a "value" field, and not a constraint @@ -324,6 +393,22 @@ const ( // We could also define types for required fields and potentially lets. ) +func (a ArcType) String() string { + switch a { + case ArcMember: + return "Member" + case ArcOptional: + return "Optional" + case ArcRequired: + return "Required" + case ArcPending: + return "Pending" + case ArcNotPresent: + return "NotPresent" + } + return fmt.Sprintf("ArcType(%d)", a) +} + // definitelyExists reports whether an arc is a constraint or member arc. // TODO: we should check that users of this call ensure there are no // ArcPendings. @@ -471,7 +556,11 @@ func (v *Vertex) IsUnprocessed() bool { } func (v *Vertex) updateStatus(s vertexStatus) { - Assertf(v.status <= s+1, "attempt to regress status from %d to %d", v.Status(), s) + if !isCyclePlaceholder(v.BaseValue) { + if !v.IsErr() && v.state != nil { + Assertf(v.state.ctx, v.status <= s+1, "attempt to regress status from %d to %d", v.Status(), s) + } + } if s == finalized && v.BaseValue == nil { // TODO: for debugging. @@ -501,6 +590,65 @@ func (v *Vertex) setParentDone() { } } +// VisitLeafConjuncts visits all conjuncts that are leafs of the ConjunctGroup tree. 
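+// Traversal stops as soon as f returns false. For example (an illustrative
+// in-package sketch, assuming a populated Vertex v and an imported fmt):
+//
+//	v.VisitLeafConjuncts(func(c Conjunct) bool {
+//		fmt.Println(c.Expr())
+//		return true // returning false would stop the walk early
+//	})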
+func (v *Vertex) VisitLeafConjuncts(f func(Conjunct) bool) { + visitConjuncts(v.Conjuncts, f) +} + +func visitConjuncts(a []Conjunct, f func(Conjunct) bool) bool { + for _, c := range a { + switch x := c.x.(type) { + case *ConjunctGroup: + if !visitConjuncts(*x, f) { + return false + } + default: + if !f(c) { + return false + } + } + } + return true +} + +// VisitAllConjuncts visits all conjuncts of v, including ConjunctGroups. +// Note that ConjunctGroups do not have an Environment associated with them. +func (v *Vertex) VisitAllConjuncts(f func(c Conjunct, isLeaf bool)) { + visitAllConjuncts(v.Conjuncts, f) +} + +func visitAllConjuncts(a []Conjunct, f func(c Conjunct, isLeaf bool)) { + for _, c := range a { + switch x := c.x.(type) { + case *ConjunctGroup: + f(c, false) + visitAllConjuncts(*x, f) + default: + f(c, true) + } + } +} + +// SingleConjunct returns the single leaf conjunct of v, if any. The returned +// count is 0 if there are no conjuncts, 1 if there is exactly one, and 2 if +// there are more than one. +// +// This is an often-used operation. +func (v *Vertex) SingleConjunct() (c Conjunct, count int) { + if v == nil { + return c, 0 + } + v.VisitLeafConjuncts(func(x Conjunct) bool { + c = x + if count++; count > 1 { + return false + } + return true + }) + + return c, count +} + // Value returns the Value of v without definitions if it is a scalar // or itself otherwise. func (v *Vertex) Value() Value { @@ -529,6 +677,15 @@ func (v *Vertex) isUndefined() bool { return false } +// isFinal reports whether this node may no longer be modified. +func (v *Vertex) isFinal() bool { + // TODO(deref): the accounting of what is final should be recorded + // in the original node. Remove this dereference once the old + // evaluator has been removed. + v = v.DerefValue() + return v.status == finalized +} + func (x *Vertex) IsConcrete() bool { return x.Concreteness() <= Concrete } @@ -623,17 +780,26 @@ func toDataAll(ctx *OpContext, v BaseValue) BaseValue { // return v.Value == cycle // } +// IsErr is a convenience function to check whether a Vertex currently +// represents an error. It does not finalize the value, so it is possible that +// v may become erroneous after this call. func (v *Vertex) IsErr() bool { // if v.Status() > Evaluating { - if _, ok := v.BaseValue.(*Bottom); ok { - return true - } - // } - return false + return v.Bottom() != nil } +// Err finalizes v, if it isn't finalized yet, and returns the error if v +// evaluates to an error, or nil otherwise. func (v *Vertex) Err(c *OpContext) *Bottom { v.Finalize(c) + return v.Bottom() +} + +// Bottom returns the error value of v if v is currently erroneous, or nil +// otherwise. It does not finalize the value, so it is possible that v may +// become erroneous after this call. +func (v *Vertex) Bottom() *Bottom { + // TODO: should we consider errors recorded in the state? + v = v.DerefValue() if b, ok := v.BaseValue.(*Bottom); ok { return b } @@ -647,13 +813,17 @@ func (v *Vertex) Finalize(c *OpContext) { // case the caller did not handle existing errors in the context. err := c.errs c.errs = nil - c.unify(v, finalized) + c.unify(v, final(finalized, allKnown)) c.errs = err } // CompleteArcs ensures the set of arcs has been computed.
func (v *Vertex) CompleteArcs(c *OpContext) { - c.unify(v, conjuncts) + c.unify(v, final(conjuncts, allKnown)) +} + +func (v *Vertex) CompleteArcsOnly(c *OpContext) { + c.unify(v, final(conjuncts, fieldSetKnown)) } func (v *Vertex) AddErr(ctx *OpContext, b *Bottom) { @@ -667,6 +837,7 @@ func (v *Vertex) SetValue(ctx *OpContext, value BaseValue) *Bottom { func (v *Vertex) setValue(ctx *OpContext, state vertexStatus, value BaseValue) *Bottom { v.BaseValue = value + // TODO: should not set status here for new evaluator. v.updateStatus(state) return nil } @@ -693,7 +864,11 @@ func Unwrap(v Value) Value { if !ok { return v } - x = x.Indirect() + // TODO(deref): BaseValue is currently overloaded to track cycles as well + // as the actual or dereferenced value. Once the old evaluator can be + // removed, we should use the new cycle tracking mechanism for cycle + // detection and keep BaseValue clean. + x = x.DerefValue() if n := x.state; n != nil && isCyclePlaceholder(x.BaseValue) { if n.errs != nil && !n.errs.IsIncomplete() { return n.errs @@ -705,19 +880,6 @@ func Unwrap(v Value) Value { return x.Value() } -// Indirect unrolls indirections of Vertex values. These may be introduced, -// for instance, by temporary bindings such as comprehension values. -// It returns v itself if v does not point to another Vertex. -func (v *Vertex) Indirect() *Vertex { - for { - arc, ok := v.BaseValue.(*Vertex) - if !ok { - return v - } - v = arc - } -} - // OptionalType is a bit field of the type of optional constraints in use by an // Acceptor. type OptionalType int8 @@ -735,14 +897,14 @@ func (v *Vertex) Kind() Kind { // This is possible when evaluating comprehensions. It is potentially // not known at this time what the type is. switch { - // TODO: using this line would be more stable. - // case v.status != finalized && v.state != nil: + case v.state != nil && v.state.kind == BottomKind: + return BottomKind + case v.BaseValue != nil && !isCyclePlaceholder(v.BaseValue): + return v.BaseValue.Kind() case v.state != nil: return v.state.kind - case v.BaseValue == nil: - return TopKind default: - return v.BaseValue.Kind() + return TopKind } } @@ -770,10 +932,22 @@ func (v *Vertex) accepts(ok, required bool) bool { } func (v *Vertex) IsClosedStruct() bool { + // TODO: uncomment this. This fixes a bunch of closedness bugs + // in the old and new evaluator. For compatibility's sake, though, we + // keep it as is for now. + // if v.Closed { + // return true + // } + // if v.HasEllipsis { + // return false + // } switch x := v.BaseValue.(type) { default: return false + case *Vertex: + return v.Closed && !v.HasEllipsis + case *StructMarker: if x.NeedClose { return true @@ -793,10 +967,23 @@ func (v *Vertex) IsClosedList() bool { // TODO: return error instead of boolean? (or at least have a version that does.) func (v *Vertex) Accept(ctx *OpContext, f Feature) bool { + // TODO(#543): remove this check. + if f.IsDef() { + return true + } + if f.IsHidden() || f.IsLet() { return true } + // TODO(deref): right now a dereferenced value holds all the necessary + // closedness information. In the future we may want to allow sharing nodes + // with different closedness information. In that case, we should reconsider + // the use of this dereference. Consider, for instance: + // + // #a: b // this node is currently not shared, but could be.
+ // b: {c: 1} + v = v.DerefValue() if x, ok := v.BaseValue.(*Disjunction); ok { for _, v := range x.Values { if x, ok := v.(*Vertex); ok && x.Accept(ctx, f) { @@ -828,6 +1015,14 @@ func (v *Vertex) Accept(ctx *OpContext, f Feature) bool { } } + // TODO: move this check to IsClosedStruct. Right now this causes too many + // changes in the debug output, and it also appears to be not entirely + // correct. + if v.HasEllipsis { + return true + + } + if !v.IsClosedStruct() || v.Lookup(f) != nil { return true } @@ -854,6 +1049,24 @@ func (v *Vertex) MatchAndInsert(ctx *OpContext, arc *Vertex) { } s.MatchAndInsert(ctx, arc) } + + // This is the equivalent for the new implementation. + if pcs := v.PatternConstraints; pcs != nil { + for _, pc := range pcs.Pairs { + if matchPattern(ctx, pc.Pattern, arc.Label) { + for _, c := range pc.Constraint.Conjuncts { + env := *(c.Env) + if arc.Label.Index() < MaxIndex { + env.DynamicLabel = arc.Label + } + c.Env = &env + + root := arc.rootCloseContext(ctx) + root.insertConjunct(ctx, root, c, c.CloseInfo, ArcMember, true, false) + } + } + } + } } func (v *Vertex) IsList() bool { @@ -865,7 +1078,28 @@ func (v *Vertex) IsList() bool { func (v *Vertex) Lookup(f Feature) *Vertex { for _, a := range v.Arcs { if a.Label == f { - a = a.Indirect() + // TODO(P1)/TODO(deref): this indirection should ultimately be + // eliminated: the original node may have useful information (like + // original conjuncts) that are eliminated after indirection. We + // should leave it up to the user of Lookup at what point an + // indirection is necessary. + a = a.DerefValue() + return a + } + } + return nil +} + +// LookupRaw returns the Arc with label f if it exists or nil otherwise. +// +// TODO: with the introduction of structure sharing, it is not always correct +// to indirect the arc. At the very least, this discards potential useful +// information. We introduce LookupRaw to avoid having to delete the +// information. Ultimately, this should become Lookup, or better, we should +// have a higher-level API for accessing values. +func (v *Vertex) LookupRaw(f Feature) *Vertex { + for _, a := range v.Arcs { + if a.Label == f { return a } } @@ -884,6 +1118,10 @@ func (v *Vertex) Elems() []*Vertex { return a } +func (v *Vertex) Init(c *OpContext) { + v.getState(c) +} + // GetArc returns a Vertex for the outgoing arc with label f. It creates and // ads one if it doesn't yet exist. func (v *Vertex) GetArc(c *OpContext, f Feature, t ArcType) (arc *Vertex, isNew bool) { @@ -893,6 +1131,10 @@ func (v *Vertex) GetArc(c *OpContext, f Feature, t ArcType) (arc *Vertex, isNew return arc, false } + if c.isDevVersion() { + return nil, false + } + if v.LockArcs { // TODO(errors): add positions. if f.IsInt() { @@ -951,7 +1193,11 @@ func (v *Vertex) hasConjunct(c Conjunct) (added bool) { default: v.ArcType = ArcMember } - for _, x := range v.Conjuncts { + return hasConjunct(v.Conjuncts, c) +} + +func hasConjunct(cs []Conjunct, c Conjunct) bool { + for _, x := range cs { // TODO: disregard certain fields from comparison (e.g. Refs)? if x.CloseInfo.closeInfo == c.CloseInfo.closeInfo && x.x == c.x && @@ -963,6 +1209,8 @@ func (v *Vertex) hasConjunct(c Conjunct) (added bool) { } func (n *nodeContext) addConjunction(c Conjunct, index int) { + unreachableForDev(n.ctx) + // NOTE: This does not split binary expressions for comprehensions. // TODO: split for comprehensions and rewrap? 
if x, ok := c.Elem().(*BinaryExpr); ok && x.Op == AndOp { @@ -978,7 +1226,11 @@ func (n *nodeContext) addConjunction(c Conjunct, index int) { func (v *Vertex) addConjunctUnchecked(c Conjunct) { index := len(v.Conjuncts) v.Conjuncts = append(v.Conjuncts, c) - if n := v.state; n != nil { + if n := v.state; n != nil && !n.ctx.isDevVersion() { + // TODO(notify): consider this as a central place to send out + // notifications. At the moment this is not necessary, but it may + // be if we move the notification mechanism outside of the path of + // running tasks. n.addConjunction(c, index) // TODO: can we remove notifyConjunct here? This method is only @@ -992,6 +1244,8 @@ func (v *Vertex) addConjunctUnchecked(c Conjunct) { // addConjunctDynamic adds a conjunct to a vertex and immediately evaluates // it, whilst doing the same for any vertices on the notify list, recursively. func (n *nodeContext) addConjunctDynamic(c Conjunct) { + unreachableForDev(n.ctx) + n.node.Conjuncts = append(n.node.Conjuncts, c) n.addExprConjunct(c, partial) n.notifyConjunct(c) @@ -999,6 +1253,8 @@ func (n *nodeContext) addConjunctDynamic(c Conjunct) { } func (n *nodeContext) notifyConjunct(c Conjunct) { + unreachableForDev(n.ctx) + for _, rec := range n.notify { arc := rec.v if !arc.hasConjunct(c) { @@ -1006,7 +1262,7 @@ func (n *nodeContext) notifyConjunct(c Conjunct) { // TODO: continuing here is likely to result in a faulty // (incomplete) configuration. But this may be okay. The // CUE_DEBUG=0 flag disables this assertion. - n.ctx.Assertf(n.ctx.pos(), Debug, "unexpected nil state") + n.ctx.Assertf(n.ctx.pos(), !n.ctx.Strict, "unexpected nil state") n.ctx.addErrf(0, n.ctx.pos(), "cannot add to field %v", arc.Label) continue } @@ -1051,7 +1307,7 @@ func appendPath(a []Feature, v *Vertex) []Feature { return a } -// An Conjunct is an Environment-Expr pair. The Environment is the starting +// A Conjunct is an Environment-Expr pair. The Environment is the starting // point for reference lookup for any reference contained in X. type Conjunct struct { Env *Environment @@ -1109,24 +1365,66 @@ func (c *Conjunct) Elem() Elem { } } -// Expr retrieves the expression form of the contained conjunct. -// If it is a field or comprehension, it will return its associated value. +// Expr retrieves the expression form of the contained conjunct. If it is a +// field or comprehension, it will return its associated value. This is only to +// be used for syntactic operations where evaluation of the expression is not +// required. To get an expression paired with the correct environment, use +// EnvExpr. +// +// TODO: rename to RawExpr. func (c *Conjunct) Expr() Expr { return ToExpr(c.x) } +// EnvExpr returns the expression form of the contained conjunct alongside an +// Environment in which this expression should be evaluated. +func (c Conjunct) EnvExpr() (*Environment, Expr) { + return EnvExpr(c.Env, c.Elem()) +} + +// EnvExpr returns the expression represented by Elem alongside an Environment +// with the necessary adjustments in which the resulting expression can be +// evaluated. +func EnvExpr(env *Environment, elem Elem) (*Environment, Expr) { + for { + switch x := elem.(type) { + case *ConjunctGroup: + if len(*x) == 1 { + c := (*x)[0] + env = c.Env + elem = c.Elem() + continue + } + case *Comprehension: + env = linkChildren(env, x) + c := MakeConjunct(env, x.Value, CloseInfo{}) + elem = c.Elem() + continue + } + break + } + return env, ToExpr(elem) +} + // ToExpr extracts the underlying expression for a Node. 
If something is already // an Expr, it will return it as is, if it is a field, it will return its value, // and for comprehensions it returns the yielded struct. func ToExpr(n Node) Expr { - switch x := n.(type) { - case Expr: - return x - case interface{ expr() Expr }: - return x.expr() - case *Comprehension: - return ToExpr(x.Value) - default: - panic("unreachable") + for { + switch x := n.(type) { + case *ConjunctGroup: + if len(*x) != 1 { + return x + } + n = (*x)[0].x + case Expr: + return x + case interface{ expr() Expr }: + n = x.expr() + case *Comprehension: + n = x.Value + default: + panic("unreachable") + } } } diff --git a/vendor/cuelang.org/go/internal/core/adt/comprehension.go b/vendor/cuelang.org/go/internal/core/adt/comprehension.go index 0f0cbeb78b..5cdd2ed61c 100644 --- a/vendor/cuelang.org/go/internal/core/adt/comprehension.go +++ b/vendor/cuelang.org/go/internal/core/adt/comprehension.go @@ -151,13 +151,16 @@ func (n *nodeContext) insertComprehension( } if ec.done && len(ec.envs) == 0 { + n.decComprehension(c) return } x := c.Value - ci = ci.SpawnEmbed(c) - ci.closeInfo.span |= ComprehensionSpan + if !n.ctx.isDevVersion() { + ci = ci.SpawnEmbed(c) + ci.closeInfo.span |= ComprehensionSpan + } var decls []Decl switch v := ToExpr(x).(type) { @@ -174,7 +177,8 @@ func (n *nodeContext) insertComprehension( Syntax: c.Syntax, Clauses: c.Clauses, Value: f, - arcType: f.ArcType, + arcType: f.ArcType, // TODO: can be derived, remove this field. + cc: ci.cc, comp: ec, parent: c, @@ -182,9 +186,16 @@ func (n *nodeContext) insertComprehension( } conjunct := MakeConjunct(env, c, ci) - n.node.state.insertFieldUnchecked(f.Label, ArcPending, conjunct) + if n.ctx.isDevVersion() { + n.assertInitialized() + _, c.arcCC = n.insertArcCC(f.Label, ArcPending, conjunct, conjunct.CloseInfo, false) + c.cc = ci.cc + ci.cc.incDependent(n.ctx, COMP, c.arcCC) + } else { + n.insertFieldUnchecked(f.Label, ArcPending, conjunct) + } + fields = append(fields, f) - // TODO: adjust ci to embed? case *LetField: // TODO: consider merging this case with the LetField case. 
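As context for the EnvExpr helper added in composite.go above: it peels off single-element ConjunctGroups and comprehension wrappers until it reaches a plain expression, and returns the Environment in which that expression must be evaluated. A rough in-package sketch of the intended call pattern (illustrative only, not part of this patch):

	// Given a Conjunct c that may wrap a ConjunctGroup or a Comprehension:
	env, expr := c.EnvExpr()
	// expr is the underlying expression with wrappers peeled off, and env is
	// the adjusted Environment in which expr must be evaluated; evaluating
	// expr against c.Env directly would be wrong for comprehension values.
	_, _ = env, expr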
@@ -203,7 +214,8 @@ func (n *nodeContext) insertComprehension( } conjunct := MakeConjunct(env, c, ci) - arc := n.node.state.insertFieldUnchecked(f.Label, ArcMember, conjunct) + n.assertInitialized() + arc := n.insertFieldUnchecked(f.Label, ArcMember, conjunct) arc.MultiLet = f.IsMulti fields = append(fields, f) @@ -232,7 +244,7 @@ func (n *nodeContext) insertComprehension( case !ec.done: ec.structs = append(ec.structs, st) case len(ec.envs) > 0: - st.Init() + st.Init(n.ctx) } } @@ -251,13 +263,19 @@ func (n *nodeContext) insertComprehension( } } - n.comprehensions = append(n.comprehensions, envYield{ - envComprehension: ec, - leaf: c, - env: env, - id: ci, - expr: x, - }) + if n.ctx.isDevVersion() { + t := n.scheduleTask(handleComprehension, env, x, ci) + t.comp = ec + t.leaf = c + } else { + n.comprehensions = append(n.comprehensions, envYield{ + envComprehension: ec, + leaf: c, + env: env, + id: ci, + expr: x, + }) + } } type compState struct { @@ -274,14 +292,14 @@ func (c *OpContext) yield( node *Vertex, // errors are associated with this node env *Environment, // env for field for which this yield is called comp *Comprehension, - state vertexStatus, + state combinedFlags, f YieldFunc, // called for every result ) *Bottom { s := &compState{ ctx: c, comp: comp, f: f, - state: state, + state: state.vertexStatus(), } y := comp.Clauses[0] @@ -321,6 +339,8 @@ func (s *compState) yield(env *Environment) (ok bool) { // embeddings before inserting the results to ensure that the order of // evaluation does not matter. func (n *nodeContext) injectComprehensions(state vertexStatus) (progress bool) { + unreachableForDev(n.ctx) + workRemaining := false // We use variables, instead of range, as the list may grow dynamically. @@ -371,6 +391,8 @@ func (n *nodeContext) injectComprehensions(state vertexStatus) (progress bool) { // as iterating over the node in which they are defined. Such comprehensions // are legal as long as they do not modify the arc set of the node. func (n *nodeContext) injectSelfComprehensions(state vertexStatus) { + unreachableForDev(n.ctx) + // We use variables, instead of range, as the list may grow dynamically. for i := 0; i < len(n.selfComprehensions); i++ { n.processComprehension(&n.selfComprehensions[i], state) @@ -382,6 +404,27 @@ func (n *nodeContext) injectSelfComprehensions(state vertexStatus) { // It returns an incomplete error if there was one. Fatal errors are // processed as a "successfully" completed computation. func (n *nodeContext) processComprehension(d *envYield, state vertexStatus) *Bottom { + err := n.processComprehensionInner(d, state) + + // NOTE: we cannot move this to defer in processComprehensionInner, as we + // use panics to implement "yielding" (and possibly coroutines in the + // future). + n.decComprehension(d.leaf) + + return err +} + +func (n *nodeContext) decComprehension(p *Comprehension) { + for ; p != nil; p = p.parent { + cc := p.cc + if cc != nil { + cc.decDependent(n.ctx, COMP, p.arcCC) + } + p.cc = nil + } +} + +func (n *nodeContext) processComprehensionInner(d *envYield, state vertexStatus) *Bottom { ctx := n.ctx // Compute environments, if needed. 
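The machinery above implements CUE comprehensions. At the public API level, the behavior being reworked here looks like the following self-contained illustration (the field names are made up and unrelated to the vendored code itself):

	package main

	import (
		"fmt"

		"cuelang.org/go/cue"
		"cuelang.org/go/cue/cuecontext"
	)

	func main() {
		v := cuecontext.New().CompileString(`
			src: {a: 1, b: 2}
			out: {for k, x in src {(k): x + 1}}
		`)
		// Prints the struct produced by the comprehension: {a: 2, b: 3}.
		fmt.Println(v.LookupPath(cue.ParsePath("out")))
	}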
@@ -391,7 +434,7 @@ func (n *nodeContext) processComprehension(d *envYield, state vertexStatus) *Bot envs = append(envs, env) } - if err := ctx.yield(d.vertex, d.env, d.comp, state, f); err != nil { + if err := ctx.yield(d.vertex, d.env, d.comp, oldOnly(state), f); err != nil { if err.IsIncomplete() { return err } @@ -410,7 +453,7 @@ func (n *nodeContext) processComprehension(d *envYield, state vertexStatus) *Bot if len(d.envs) > 0 { for _, s := range d.structs { - s.Init() + s.Init(n.ctx) } } d.structs = nil @@ -420,20 +463,59 @@ func (n *nodeContext) processComprehension(d *envYield, state vertexStatus) *Bot d.inserted = true if len(d.envs) == 0 { + c := d.leaf + for p := c.arcCC; p != nil; p = p.parent { + // because the parent referrer will reach a zero count before this + // node will reach a zero count, we need to propagate the arcType. + p.updateArcType(ArcNotPresent) + } return nil } v := n.node + f := v.Label for c := d.leaf; c.parent != nil; c = c.parent { + // because the parent referrer will reach a zero count before this + // node will reach a zero count, we need to propagate the arcType. + for arc, p := c.arcCC, c.cc; p != nil; arc, p = arc.parent, p.parent { + // TODO: remove this line once we use the arcType of the + // closeContext in notAllowedError. + arc.src.updateArcType(c.arcType) + t := arc.arcType + arc.updateArcType(c.arcType) + if p.isClosed && t >= ArcPending && !matchPattern(ctx, p.Expr, f) { + ctx.notAllowedError(p.src, arc.src) + } + } v.updateArcType(c.arcType) + if v.ArcType == ArcNotPresent { + parent := v.Parent + b := parent.reportFieldCycleError(ctx, d.comp.Syntax.Pos(), v.Label) + d.envComprehension.vertex.state.addBottom(b) + ctx.current().err = b + ctx.current().state = taskFAILED + return nil + } v = c.arc } id := d.id for _, env := range d.envs { + if n.node.ArcType == ArcNotPresent { + b := n.node.reportFieldCycleError(ctx, d.comp.Syntax.Pos(), n.node.Label) + ctx.current().err = b + n.yield() + return nil + } + env = linkChildren(env, d.leaf) - n.addExprConjunct(Conjunct{env, d.expr, id}, state) + + if ctx.isDevVersion() { + n.scheduleConjunct(Conjunct{env, d.expr, id}, id) + } else { + n.addExprConjunct(Conjunct{env, d.expr, id}, state) + } } return nil diff --git a/vendor/cuelang.org/go/internal/core/adt/conjunct.go b/vendor/cuelang.org/go/internal/core/adt/conjunct.go new file mode 100644 index 0000000000..b0701a69e7 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/conjunct.go @@ -0,0 +1,677 @@ +// Copyright 2023 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +import ( + "fmt" + + "cuelang.org/go/cue/ast" +) + +// This file contains functionality for processing conjuncts to insert the +// corresponding values in the Vertex. +// +// Conjuncts are divided into two classes: +// - literal values that need no evaluation: these are inserted directly into +// the Vertex. 
+// - field or value expressions that need to be evaluated: these are inserted +// as a task into the Vertex' associated scheduler for later evaluation. +// The implementation of these tasks can be found in tasks.go. +// +// The main entrypoint is scheduleConjunct. + +// scheduleConjunct splits c into parts to be incrementally processed and queues +// these parts up for processing. It will not itself cause recursive processing. +func (n *nodeContext) scheduleConjunct(c Conjunct, id CloseInfo) { + n.assertInitialized() + + // Explanation of switch statement: + // + // A Conjunct can be a leaf or, through a ConjunctGroup, a tree. The tree + // reflects the history of how the conjunct was inserted in terms of + // definitions and embeddings. This, in turn, is used to compute closedness. + // + // Once all conjuncts for a Vertex have been collected, this tree contains + // all the information needed to trace its history: if a Vertex is + // referenced in an expression, this tree can be used to insert the + // conjuncts keeping closedness in mind. + // + // In the collection phase, however, this is not sufficient. CUE computes + // conjuncts "out of band". This means that conjuncts accumulate in + // different parts of the tree in an indeterminate order. closeContext is + // used to account for this. + // + // Basically, if the closeContext associated with c belongs to n, we take + // it that the conjunct needs to be inserted at the point in the tree + // associated with this closeContext. If, on the other hand, the closeContext + // is not defined or does not belong to this node, we take it that this conjunct + // is inserted by means of a reference. In this case we assume that the + // computation of the tree has completed and the tree can be used to reflect + // the closedness structure. + // + // TODO: once the evaluator is done and all tests pass, consider having + // two different entry points to account for these cases. + switch cc := c.CloseInfo.cc; { + case cc == nil || cc.src != n.node: + // In this case, a Conjunct is inserted from another Arc. If the + // conjunct represents an embedding or definition, we need to create a + // new closeContext to represent this. + if id.cc == nil { + id.cc = n.node.rootCloseContext(n.ctx) + } + if id.cc == cc { + panic("inconsistent state: same closeContext") + } + var t closeNodeType + if c.CloseInfo.FromDef { + t |= closeDef + } + if c.CloseInfo.FromEmbed { + t |= closeEmbed + } + if t != 0 { + id, _ = id.spawnCloseContext(n.ctx, t) + } + if !id.cc.done { + id.cc.incDependent(n.ctx, DEFER, nil) + defer id.cc.decDependent(n.ctx, DEFER, nil) + } + + if id.cc.src != n.node { + panic("inconsistent state: nodes differ") + } + default: + + // In this case, the conjunct is inserted as the result of an expansion + // of a conjunct in place, not a reference. In this case, we must use + // the cached closeContext. + id.cc = cc + + // Note this subtlety: we MUST take the cycle info from c when this is + // an in-place evaluated node, otherwise we must take that of id.
+ id.CycleInfo = c.CloseInfo.CycleInfo + } + + if id.cc.needsCloseInSchedule != nil { + dep := id.cc.needsCloseInSchedule + id.cc.needsCloseInSchedule = nil + defer id.cc.decDependent(n.ctx, EVAL, dep) + } + + env := c.Env + + if id.cc.isDef { + n.node.Closed = true + } + + switch x := c.Elem().(type) { + case *ConjunctGroup: + for _, c := range *x { + // TODO(perf): can be one loop + + cc := c.CloseInfo.cc + if cc.src == n.node && cc.needsCloseInSchedule != nil { + // We need to handle this specifically within the ConjunctGroup + // loop, because multiple conjuncts may be using the same root + // closeContext. This can be merged once Vertex.Conjuncts is an + // interface, requiring any list to be a root conjunct. + + dep := cc.needsCloseInSchedule + cc.needsCloseInSchedule = nil + defer cc.decDependent(n.ctx, EVAL, dep) + } + } + for _, c := range *x { + n.scheduleConjunct(c, id) + } + + case *Vertex: + // TODO: move this logic to scheduleVertexConjuncts or at least ensure + // that we can also share data Vertices? + if x.IsData() { + n.unshare() + n.insertValueConjunct(env, x, id) + } else { + n.scheduleVertexConjuncts(c, x, id) + } + + case Value: + // TODO: perhaps some values could be shared. + n.unshare() + n.insertValueConjunct(env, x, id) + + case *BinaryExpr: + // NOTE: do not unshare: a conjunction could still allow structure + // sharing, such as in the case of `ref & ref`. + if x.Op == AndOp { + n.scheduleConjunct(MakeConjunct(env, x.X, id), id) + n.scheduleConjunct(MakeConjunct(env, x.Y, id), id) + return + } + + n.unshare() + // Even though disjunctions and conjunctions are excluded, the result + // may still be a list in the case of list arithmetic. This could + // be a scalar value only once this is no longer supported. + n.scheduleTask(handleExpr, env, x, id) + + case *StructLit: + n.unshare() + n.scheduleStruct(env, x, id) + + case *ListLit: + n.unshare() + + // At this point we know we have at least an empty list. + n.updateCyclicStatus(id) + + env := &Environment{ + Up: env, + Vertex: n.node, + } + n.scheduleTask(handleListLit, env, x, id) + + case *DisjunctionExpr: + n.unshare() + + // TODO(perf): reuse envDisjunct values so that we can also reuse the + // disjunct slice. + d := envDisjunct{ + env: env, + cloneID: id, + src: x, + expr: x, + } + for _, dv := range x.Values { + d.disjuncts = append(d.disjuncts, disjunct{ + expr: dv.Val, + isDefault: dv.Default, + mode: mode(x.HasDefaults, dv.Default), + }) + } + n.scheduleDisjunction(d) + + case *Comprehension: + // always a partial comprehension. + n.insertComprehension(env, x, id) + + case Resolver: + n.scheduleTask(handleResolver, env, x, id) + + case Evaluator: + n.unshare() + // Interpolation, UnaryExpr, CallExpr + n.scheduleTask(handleExpr, env, x, id) + + default: + panic("unreachable") + } + + n.ctx.stats.Conjuncts++ +} + +// scheduleStruct records all elements of this conjunct in the structure and +// then processes it. If an element needs to be inserted for evaluation, +// it may be scheduled. +func (n *nodeContext) scheduleStruct(env *Environment, + s *StructLit, + ci CloseInfo) { + n.updateCyclicStatus(ci) + + // NOTE: This is a crucial point in the code: + // Unification dereferencing happens here. The child nodes are set to + // an Environment linked to the current node. Together with the De Bruijn + // indices, this determines to which Vertex a reference resolves.
+ + childEnv := &Environment{ + Up: env, + Vertex: n.node, + } + + hasEmbed := false + hasEllipsis := false + + // TODO: do we still need this? + // shouldClose := ci.cc.isDef || ci.cc.isClosedOnce + + s.Init(n.ctx) + + // TODO: do we still need to AddStruct and do we still need to Disable? + parent := n.node.AddStruct(s, childEnv, ci) + parent.Disable = true // disable until processing is done. + ci.IsClosed = false + + // TODO: precompile +loop1: + for _, d := range s.Decls { + switch d.(type) { + case *Ellipsis: + hasEllipsis = true + break loop1 + } + } + + // TODO(perf): precompile whether struct has embedding. +loop2: + for _, d := range s.Decls { + switch d.(type) { + case *Comprehension, Expr: + // No need to increment and decrement, as there will be at least + // one entry. + if _, ok := s.Src.(*ast.File); !ok { + // If this is not a file, the struct indicates the scope/ + // boundary at which closedness should apply. This is not true + // for files. + // TODO: set this as a flag in StructLit so as to not have to + // do the somewhat dangerous cast here. + ci, _ = ci.spawnCloseContext(n.ctx, 0) + } + // Note: adding a count is not needed here, as there will be an + // embed spawn below. + hasEmbed = true + break loop2 + } + } + + // First add fixed fields and schedule expressions. + for _, d := range s.Decls { + switch x := d.(type) { + case *Field: + if x.Label.IsString() && x.ArcType == ArcMember { + n.aStruct = s + n.aStructID = ci + } + fc := MakeConjunct(childEnv, x, ci) + // fc.CloseInfo.cc = nil // TODO: should we add this? + n.insertArc(x.Label, x.ArcType, fc, ci, true) + + case *LetField: + lc := MakeConjunct(childEnv, x, ci) + n.insertArc(x.Label, ArcMember, lc, ci, true) + + case *Comprehension: + ci, cc := ci.spawnCloseContext(n.ctx, closeEmbed) + cc.incDependent(n.ctx, DEFER, nil) + defer cc.decDependent(n.ctx, DEFER, nil) + n.insertComprehension(childEnv, x, ci) + hasEmbed = true + + case *Ellipsis: + // Can be added unconditionally to patterns. + ci.cc.isDef = false + ci.cc.isClosed = false + + case *DynamicField: + if x.ArcType == ArcMember { + n.aStruct = s + n.aStructID = ci + } + n.scheduleTask(handleDynamic, childEnv, x, ci) + + case *BulkOptionalField: + + // All do not depend on each other, so can be added at once. + n.scheduleTask(handlePatternConstraint, childEnv, x, ci) + + case Expr: + // TODO: perhaps special case scalar Values to avoid creating embedding. + ci, cc := ci.spawnCloseContext(n.ctx, closeEmbed) + + // TODO: do we need to increment here? + cc.incDependent(n.ctx, DEFER, nil) // decrement deferred below + defer cc.decDependent(n.ctx, DEFER, nil) + + ec := MakeConjunct(childEnv, x, ci) + n.scheduleConjunct(ec, ci) + hasEmbed = true + } + } + if hasEllipsis { + ci.cc.hasEllipsis = true + } + if !hasEmbed { + n.aStruct = s + n.aStructID = ci + ci.cc.hasNonTop = true + } + + // TODO: probably no longer necessary. + parent.Disable = false +} + +// scheduleVertexConjuncts injects the conjuncts of src into n. If src was not fully +// evaluated, it subscribes dst for future updates. +func (n *nodeContext) scheduleVertexConjuncts(c Conjunct, arc *Vertex, closeInfo CloseInfo) { + // disjunctions, we need to dereference the underlying node. + if deref(n.node) == deref(arc) { + return + } + + if n.shareIfPossible(c, arc, closeInfo) { + arc.getState(n.ctx) + return + } + + // We need to ensure that each arc is only unified once (or at least a + // bounded number of times) with each conjunct.
Comprehensions, for instance, may + // distribute a value across many values that get unified back into the + // same value. If such a value is a disjunction, then a disjunction of N + // disjuncts will result in a factor N more unifications for each + // occurrence of such value, resulting in exponential running time. This + // is especially common for values that are used as a type. + // + // However, unification is idempotent, so each such conjunct only needs + // to be unified once. This cache checks for this and prevents an + // exponential blowup in such cases. + // + // TODO(perf): this cache ensures the conjuncts of an arc are added at most once + // per ID. However, we really need to add the conjuncts of an arc only + // once total, and then add the close information once per close ID + // (pointer can probably be shared). Aside from being more performant, + // this is probably the best way to guarantee that conjunctions are + // linear in this case. + + ciKey := closeInfo + ciKey.Refs = nil + ciKey.Inline = false + key := arcKey{arc, ciKey} + for _, k := range n.arcMap { + if key == k { + return + } + } + n.arcMap = append(n.arcMap, key) + + if IsDef(c.Expr()) { + // TODO: or should we always insert the wrapper (for errors)? + ci, dc := closeInfo.spawnCloseContext(n.ctx, closeDef) + closeInfo = ci + + dc.incDependent(n.ctx, DEFER, nil) // decrement deferred below + defer dc.decDependent(n.ctx, DEFER, nil) + } + + if !n.node.nonRooted || n.node.IsDynamic { + if state := arc.getBareState(n.ctx); state != nil { + state.addNotify2(n.node, closeInfo) + } + } + + if d, ok := arc.BaseValue.(*Disjunction); ok && false { + n.scheduleConjunct(MakeConjunct(c.Env, d, closeInfo), closeInfo) + } else { + for i := 0; i < len(arc.Conjuncts); i++ { + c := arc.Conjuncts[i] + + // Note that we are resetting the tree here. We hereby assume that + // closedness conflicts resulting from unifying the referenced arc were + // already caught there and that we can ignore further errors here. + // c.CloseInfo = closeInfo + + // We can use the original, but we know it will not be used + + n.scheduleConjunct(c, closeInfo) + } + } + + if state := arc.getBareState(n.ctx); state != nil { + n.toComplete = true + } +} + +func (n *nodeContext) addNotify2(v *Vertex, c CloseInfo) []receiver { + // No need to do the notification mechanism if we are already complete. + old := n.notify + switch { + case n.node.isFinal(): + return old + case !n.node.isInProgress(): + case n.meets(allAncestorsProcessed): + return old + } + + // Create a "root" closeContext to reflect the entry point of the + // reference into n.node relative to cc within v. After that, we can use + // assignConjunct to add new conjuncts. + + // TODO: dedup: only add if t does not already exist. First check if this + // is even possible by adding a panic. + root := n.node.rootCloseContext(n.ctx) + if root.isDecremented { + return old + } + + for _, r := range n.notify { + if r.v == v && r.cc == c.cc { + return old + } + } + + cc := c.cc + + if root.linkNotify(n.ctx, v, cc, c.CycleInfo) { + n.notify = append(n.notify, receiver{v, cc}) + } + + return old +} + +// Literal conjuncts + +func (n *nodeContext) insertValueConjunct(env *Environment, v Value, id CloseInfo) { + n.updateCyclicStatus(id) + + ctx := n.ctx + + switch x := v.(type) { + case *Vertex: + if m, ok := x.BaseValue.(*StructMarker); ok { + n.aStruct = x + n.aStructID = id + if m.NeedClose { + // TODO: In the new evaluator this is used to mark a struct + // as closed in the debug output.
Once the old evaluator is + // gone, we could simplify this. + id.IsClosed = true + if ctx.isDevVersion() { + var cc *closeContext + id, cc = id.spawnCloseContext(n.ctx, 0) + cc.isClosedOnce = true + } + } + } + + if !x.IsData() { + c := MakeConjunct(env, x, id) + n.scheduleVertexConjuncts(c, x, id) + return + } + + // TODO: evaluate value? + switch v := x.BaseValue.(type) { + default: + panic(fmt.Sprintf("invalid type %T", x.BaseValue)) + + case *ListMarker: + n.updateCyclicStatus(id) + + // TODO: arguably we know now that the type _must_ be a list. + n.scheduleTask(handleListVertex, env, x, id) + + return + + case *StructMarker: + for _, a := range x.Arcs { + if a.ArcType != ArcMember { + continue + } + // TODO(errors): report error when this is a regular field. + c := MakeConjunct(nil, a, id) + n.insertArc(a.Label, a.ArcType, c, id, true) + } + + case Value: + n.insertValueConjunct(env, v, id) + } + + return + + case *Bottom: + id.cc.hasNonTop = true + n.addBottom(x) + return + + case *Builtin: + id.cc.hasNonTop = true + if v := x.BareValidator(); v != nil { + n.insertValueConjunct(env, v, id) + return + } + } + + if !n.updateNodeType(v.Kind(), v, id) { + return + } + + switch x := v.(type) { + case *Disjunction: + // TODO(perf): reuse envDisjunct values so that we can also reuse the + // disjunct slice. + d := envDisjunct{ + env: env, + cloneID: id, + src: x, + value: x, + } + for i, dv := range x.Values { + d.disjuncts = append(d.disjuncts, disjunct{ + expr: dv, + isDefault: i < x.NumDefaults, + mode: mode(x.HasDefaults, i < x.NumDefaults), + }) + } + n.scheduleDisjunction(d) + + case *Conjunction: + // TODO: consider sharing: conjunct could be `ref & ref`, for instance, + // in which case ref could still be shared. + + for _, x := range x.Values { + n.insertValueConjunct(env, x, id) + } + + case *Top: + n.hasTop = true + id.cc.hasTop = true + + case *BasicType: + id.cc.hasNonTop = true + + case *BoundValue: + id.cc.hasNonTop = true + switch x.Op { + case LessThanOp, LessEqualOp: + if y := n.upperBound; y != nil { + n.upperBound = nil + v := SimplifyBounds(ctx, n.kind, x, y) + if err := valueError(v); err != nil { + err.AddPosition(v) + err.AddPosition(n.upperBound) + err.AddClosedPositions(id) + } + n.insertValueConjunct(env, v, id) + return + } + n.upperBound = x + + case GreaterThanOp, GreaterEqualOp: + if y := n.lowerBound; y != nil { + n.lowerBound = nil + v := SimplifyBounds(ctx, n.kind, x, y) + if err := valueError(v); err != nil { + err.AddPosition(v) + err.AddPosition(n.lowerBound) + err.AddClosedPositions(id) + } + n.insertValueConjunct(env, v, id) + return + } + n.lowerBound = x + + case EqualOp, NotEqualOp, MatchOp, NotMatchOp: + // This check serves as simplifier, but also to remove duplicates. + k := 0 + match := false + for _, c := range n.checks { + if y, ok := c.(*BoundValue); ok { + switch z := SimplifyBounds(ctx, n.kind, x, y); { + case z == y: + match = true + case z == x: + continue + } + } + n.checks[k] = c + k++ + } + n.checks = n.checks[:k] + if !match { + n.checks = append(n.checks, x) + } + return + } + + case Validator: + // This check serves as simplifier, but also to remove duplicates. + for i, y := range n.checks { + if b := SimplifyValidator(ctx, x, y); b != nil { + n.checks[i] = b + return + } + } + n.updateNodeType(x.Kind(), x, id) + n.checks = append(n.checks, x) + + case *Vertex: + // handled above. 
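+
+	// Scalar conjuncts unify by equality: if a different scalar was already
+	// recorded for this node, the case below reports a conflict.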
+ + case Value: // *NullLit, *BoolLit, *NumLit, *StringLit, *BytesLit, *Builtin + if y := n.scalar; y != nil { + if b, ok := BinOp(ctx, EqualOp, x, y).(*Bool); !ok || !b.B { + n.reportConflict(x, y, x.Kind(), y.Kind(), n.scalarID, id) + } + break + } + n.scalar = x + n.scalarID = id + n.signal(scalarKnown) + + default: + panic(fmt.Sprintf("unknown value type %T", x)) + } + + if n.lowerBound != nil && n.upperBound != nil { + if u := SimplifyBounds(ctx, n.kind, n.lowerBound, n.upperBound); u != nil { + if err := valueError(u); err != nil { + err.AddPosition(n.lowerBound) + err.AddPosition(n.upperBound) + err.AddClosedPositions(id) + } + n.lowerBound = nil + n.upperBound = nil + n.insertValueConjunct(env, u, id) + } + } +} diff --git a/vendor/cuelang.org/go/internal/core/adt/constraints.go b/vendor/cuelang.org/go/internal/core/adt/constraints.go index c2e0723990..fe78f4dd75 100644 --- a/vendor/cuelang.org/go/internal/core/adt/constraints.go +++ b/vendor/cuelang.org/go/internal/core/adt/constraints.go @@ -109,8 +109,8 @@ func (n *nodeContext) insertConstraint(pattern Value, c Conjunct) bool { // matchPattern reports whether f matches pattern. The result reflects // whether unification of pattern with f converted to a CUE value succeeds. -func matchPattern(n *nodeContext, pattern Value, f Feature) bool { - if pattern == nil { +func matchPattern(ctx *OpContext, pattern Value, f Feature) bool { + if pattern == nil || !f.IsRegular() { return false } @@ -121,13 +121,11 @@ func matchPattern(n *nodeContext, pattern Value, f Feature) bool { // avoid many bound checks), which we probably should. Especially when we // allow list constraints, like [<10]: T. var label Value - if int64(f.Index()) == MaxIndex { - f = 0 - } else if f.IsString() { - label = f.ToValue(n.ctx) + if f.IsString() && int64(f.Index()) != MaxIndex { + label = f.ToValue(ctx) } - return matchPatternValue(n.ctx, pattern, f, label) + return matchPatternValue(ctx, pattern, f, label) } // matchPatternValue matches a concrete value against f. label must be the @@ -143,6 +141,14 @@ func matchPatternValue(ctx *OpContext, pattern Value, f Feature, label Value) (r return true } + k := IntKind + if f.IsString() { + k = StringKind + } + if !k.IsAnyOf(pattern.Kind()) { + return false + } + // Fast track for the majority of cases. 
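+	// (The kind filter above already rejects impossible combinations: a
+	// pattern of StringKind, as for `[string]: T`, can never match an
+	// integer feature such as a list index.)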
switch x := pattern.(type) { case *Bottom: @@ -150,7 +156,7 @@ func matchPatternValue(ctx *OpContext, pattern Value, f Feature, label Value) (r if x == cycle { err := ctx.NewPosf(pos(pattern), "cyclic pattern constraint") for _, c := range ctx.vertex.Conjuncts { - err.AddPosition(c.Elem()) + addPositions(err, c) } ctx.AddBottom(&Bottom{ Err: err, @@ -165,7 +171,6 @@ func matchPatternValue(ctx *OpContext, pattern Value, f Feature, label Value) (r return true case *BasicType: - k := label.Kind() return x.K&k == k case *BoundValue: @@ -190,6 +195,9 @@ func matchPatternValue(ctx *OpContext, pattern Value, f Feature, label Value) (r return err == nil && xi == yi case *String: + if label == nil { + return false + } y, ok := label.(*String) return ok && x.Str == y.Str diff --git a/vendor/cuelang.org/go/internal/core/adt/context.go b/vendor/cuelang.org/go/internal/core/adt/context.go index d3f2e34508..9e42ba9402 100644 --- a/vendor/cuelang.org/go/internal/core/adt/context.go +++ b/vendor/cuelang.org/go/internal/core/adt/context.go @@ -17,7 +17,6 @@ package adt import ( "fmt" "log" - "os" "reflect" "regexp" "sort" @@ -30,25 +29,18 @@ import ( "cuelang.org/go/cue/errors" "cuelang.org/go/cue/stats" "cuelang.org/go/cue/token" + "cuelang.org/go/internal" + "cuelang.org/go/internal/cuedebug" ) -// Debug sets whether extra aggressive checking should be done. -// This should typically default to true for pre-releases and default to -// false otherwise. -var Debug bool = os.Getenv("CUE_DEBUG") != "0" - -// Verbosity sets the log level. There are currently only two levels: -// -// 0: no logging -// 1: logging -var Verbosity int - // DebugSort specifies that arcs be sorted consistently between implementations. // // 0: default // 1: sort by Feature: this should be consistent between implementations where // there is no change in the compiler and indexing code. // 2: alphabetical +// +// TODO: move to DebugFlags var DebugSort int func DebugSortArcs(c *OpContext, n *Vertex) { @@ -91,8 +83,8 @@ func DebugSortFields(c *OpContext, a []Feature) { // // It is advisable for each use of Assert to document how the error is expected // to be handled down the line. -func Assertf(b bool, format string, args ...interface{}) { - if Debug && !b { +func Assertf(c *OpContext, b bool, format string, args ...interface{}) { + if c.Strict && !b { panic(fmt.Sprintf("assertion failed: "+format, args...)) } } @@ -100,7 +92,7 @@ func Assertf(b bool, format string, args ...interface{}) { // Assertf either panics or reports an error to c if the condition is not met. func (c *OpContext) Assertf(pos token.Pos, b bool, format string, args ...interface{}) { if !b { - if Debug { + if c.Strict { panic(fmt.Sprintf("assertion failed: "+format, args...)) } c.addErrf(0, pos, format, args...) @@ -114,7 +106,7 @@ func init() { var pMap = map[*Vertex]int{} func (c *OpContext) Logf(v *Vertex, format string, args ...interface{}) { - if Verbosity == 0 { + if c.LogEval == 0 { return } if v == nil { @@ -146,7 +138,7 @@ func (c *OpContext) Logf(v *Vertex, format string, args ...interface{}) { } // PathToString creates a pretty-printed path of the given list of features. -func (c *OpContext) PathToString(r Runtime, path []Feature) string { +func (c *OpContext) PathToString(path []Feature) string { var b strings.Builder for i, f := range path { if i > 0 { @@ -174,11 +166,13 @@ type Runtime interface { // LoadType retrieves a previously stored CUE expression for a given Go // type if available. 
LoadType(t reflect.Type) (src ast.Expr, expr Expr, ok bool) + + Settings() (internal.EvaluatorVersion, cuedebug.Config) } type Config struct { Runtime - Format func(Node) string + Format func(Runtime, Node) string } // New creates an operation context. @@ -186,10 +180,14 @@ func New(v *Vertex, cfg *Config) *OpContext { if cfg.Runtime == nil { panic("nil Runtime") } + version, flags := cfg.Runtime.Settings() ctx := &OpContext{ - Runtime: cfg.Runtime, - Format: cfg.Format, - vertex: v, + Runtime: cfg.Runtime, + Format: cfg.Format, + vertex: v, + Version: version, + Config: flags, + taskContext: schedConfig, } if v != nil { ctx.e = &Environment{Up: nil, Vertex: v} @@ -197,15 +195,9 @@ func New(v *Vertex, cfg *Config) *OpContext { return ctx } -type EvaluatorVersion int - -const ( - DefaultVersion EvaluatorVersion = iota - - // The DevVersion is used for new implementations of the evaluator that - // do not cover all features of the CUE language yet. - DevVersion -) +func (c *OpContext) isDevVersion() bool { + return c.Version == internal.DevVersion +} // An OpContext implements CUE's unification operation. It only // operates on values that are created with the Runtime with which an OpContext @@ -213,9 +205,12 @@ const ( // use an OpContext at a time. type OpContext struct { Runtime - Format func(Node) string + Format func(Runtime, Node) string - Version EvaluatorVersion + cuedebug.Config + Version internal.EvaluatorVersion // Copied from Runtime + + taskContext nest int @@ -245,6 +240,15 @@ type OpContext struct { closed map[*closeInfo]*closeStats todo *closeStats + // evalDepth indicates the current depth of evaluation. It is used to + // detect structural cycles and their severity.s + evalDepth int + + // optionalMark indicates the evalDepth at which the last optional field, + // pattern constraint or other construct that may contain errors was + // encountered. A value of 0 indicates we are not within such field. + optionalMark int + // inDisjunct indicates that non-monotonic checks should be skipped. // This is used if we want to do some extra work to eliminate disjunctions // early. The result of unification should be thrown away if this check is @@ -268,6 +272,10 @@ type OpContext struct { // as an error if this is true. // TODO: strictly separate validators and functions. IsValidator bool + + // ErrorGraphs contains an analysis, represented as a Mermaid graph, for + // each node that has an error. + ErrorGraphs map[string]string } func (c *OpContext) CloseInfo() CloseInfo { return c.ci } @@ -324,7 +332,7 @@ func (c *OpContext) Env(upCount int32) *Environment { func (c *OpContext) relNode(upCount int32) *Vertex { e := c.e.up(c, upCount) - c.unify(e.Vertex, partial) + c.unify(e.Vertex, oldOnly(partial)) return e.Vertex } @@ -471,10 +479,10 @@ func (c *OpContext) PopArc(saved *Vertex) { // Should only be used to insert Conjuncts. TODO: perhaps only return Conjuncts // and error. 
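+// Resolve requests a fully finalized result with all conditions known
+// (final(finalized, allKnown)) before returning the vertex.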
func (c *OpContext) Resolve(x Conjunct, r Resolver) (*Vertex, *Bottom) { - return c.resolveState(x, r, finalized) + return c.resolveState(x, r, final(finalized, allKnown)) } -func (c *OpContext) resolveState(x Conjunct, r Resolver, state vertexStatus) (*Vertex, *Bottom) { +func (c *OpContext) resolveState(x Conjunct, r Resolver, state combinedFlags) (*Vertex, *Bottom) { s := c.PushConjunct(x) arc := r.resolve(c, state) @@ -488,7 +496,9 @@ func (c *OpContext) resolveState(x Conjunct, r Resolver, state vertexStatus) (*V return nil, arc.ChildErrors } - arc = arc.Indirect() + // Dereference any vertices that do not contribute to more knownledge about + // the node. + arc = arc.DerefNonRooted() return arc, err } @@ -497,12 +507,14 @@ func (c *OpContext) resolveState(x Conjunct, r Resolver, state vertexStatus) (*V func (c *OpContext) Lookup(env *Environment, r Resolver) (*Vertex, *Bottom) { s := c.PushState(env, r.Source()) - arc := r.resolve(c, partial) + arc := r.resolve(c, oldOnly(partial)) err := c.PopState(s) - if arc != nil { - arc = arc.Indirect() + if arc != nil && !c.isDevVersion() { + // TODO(deref): lookup should probably not use DerefValue, but + // rather only dereference disjunctions. + arc = arc.DerefValue() } return arc, err @@ -527,8 +539,11 @@ func (c *OpContext) Validate(check Validator, value Value) *Bottom { // concrete returns the concrete value of x after evaluating it. // msg is used to mention the context in which an error occurred, if any. func (c *OpContext) concrete(env *Environment, x Expr, msg interface{}) (result Value, complete bool) { + s := c.PushState(env, x.Source()) - w, complete := c.Evaluate(env, x) + state := require(partial, concreteKnown) + w := c.evalState(x, state) + _ = c.PopState(s) w, ok := c.getDefault(w) if !ok { @@ -536,6 +551,7 @@ func (c *OpContext) concrete(env *Environment, x Expr, msg interface{}) (result } v := Unwrap(w) + complete = w != nil if !IsConcrete(v) { complete = false b := c.NewErrf("non-concrete value %v in operand to %s", w, msg) @@ -543,11 +559,7 @@ func (c *OpContext) concrete(env *Environment, x Expr, msg interface{}) (result v = b } - if !complete { - return v, complete - } - - return v, true + return v, complete } // getDefault resolves a disjunction to a single value. If there is no default @@ -590,7 +602,7 @@ func (c *OpContext) getDefault(v Value) (result Value, ok bool) { func (c *OpContext) Evaluate(env *Environment, x Expr) (result Value, complete bool) { s := c.PushState(env, x.Source()) - val := c.evalState(x, partial) + val := c.evalState(x, final(partial, concreteKnown)) complete = true @@ -616,14 +628,14 @@ func (c *OpContext) Evaluate(env *Environment, x Expr) (result Value, complete b return val, true } -func (c *OpContext) evaluateRec(v Conjunct, state vertexStatus) Value { +func (c *OpContext) evaluateRec(v Conjunct, state combinedFlags) Value { x := v.Expr() s := c.PushConjunct(v) val := c.evalState(x, state) if val == nil { // Be defensive: this never happens, but just in case. - Assertf(false, "nil return value: unspecified error") + Assertf(c, false, "nil return value: unspecified error") val = &Bottom{ Code: IncompleteError, Err: c.Newf("UNANTICIPATED ERROR"), @@ -637,7 +649,7 @@ func (c *OpContext) evaluateRec(v Conjunct, state vertexStatus) Value { // value evaluates expression v within the current environment. The result may // be nil if the result is incomplete. value leaves errors untouched to that // they can be collected by the caller. 
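+// If the result is a disjunction, it is resolved to its default value.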
-func (c *OpContext) value(x Expr, state vertexStatus) (result Value) { +func (c *OpContext) value(x Expr, state combinedFlags) (result Value) { v := c.evalState(x, state) v, _ = c.getDefault(v) @@ -645,7 +657,7 @@ func (c *OpContext) value(x Expr, state vertexStatus) (result Value) { return v } -func (c *OpContext) evalState(v Expr, state vertexStatus) (result Value) { +func (c *OpContext) evalState(v Expr, state combinedFlags) (result Value) { savedSrc := c.src c.src = v.Source() err := c.errs @@ -655,11 +667,11 @@ func (c *OpContext) evalState(v Expr, state vertexStatus) (result Value) { c.errs = CombineErrors(c.src, c.errs, err) if v, ok := result.(*Vertex); ok { - if b, _ := v.BaseValue.(*Bottom); b != nil { + if b := v.Bottom(); b != nil { switch b.Code { case IncompleteError: case CycleError: - if state == partial { + if state.vertexStatus() == partial || c.isDevVersion() { break } fallthrough @@ -698,14 +710,63 @@ func (c *OpContext) evalState(v Expr, state vertexStatus) (result Value) { if arc == nil { return nil } + // TODO(deref): what is the right level of dereferencing here? + // DerefValue seems to work too. + arc = arc.DerefNonShared() + + // TODO: consider moving this after markCycle, depending on how we + // implement markCycle, or whether we need it at all. + // TODO: is this indirect necessary? + // arc = arc.Indirect() // Save the old CloseInfo and restore after evaluate to avoid detecting // spurious cycles. saved := c.ci - if n := arc.state; n != nil { + n := arc.state + if c.isDevVersion() { + n = arc.getState(c) + } + if n != nil { c.ci, _ = n.markCycle(arc, nil, x, c.ci) } c.ci.Inline = true + + if c.isDevVersion() { + if s := arc.getState(c); s != nil { + needs := state.conditions() + runMode := state.runMode() + + arc.unify(c, needs|arcTypeKnown, attemptOnly) // to set scalar + + if runMode == finalize { + // arc.unify(c, needs, attemptOnly) // to set scalar + // Freeze node. + arc.state.freeze(needs) + } else { + arc.unify(c, needs, runMode) + } + + v := arc + if v.ArcType == ArcPending { + if v.status == evaluating { + for ; v.Parent != nil && v.ArcType == ArcPending; v = v.Parent { + } + err := c.Newf("cycle with field %v", x) + b := &Bottom{Code: CycleError, Err: err} + v.setValue(c, v.status, b) + return b + // TODO: use this instead, as is usual for incomplete errors, + // and also move this block one scope up to also apply to + // defined arcs. In both cases, though, doing so results in + // some errors to be misclassified as evaluation error. + // c.AddBottom(b) + // return nil + } + c.undefinedFieldError(v, IncompleteError) + return nil + } + } + } v := c.evaluate(arc, x, state) c.ci = saved return v @@ -732,7 +793,7 @@ func (c *OpContext) wrapCycleError(src ast.Node, b *Bottom) *Bottom { // unifyNode returns a possibly partially evaluated node value. 
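+// In the new evaluator it only drives the node far enough that its arc type
+// is known, yielding to other work as needed.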
// // TODO: maybe return *Vertex, *Bottom -func (c *OpContext) unifyNode(v Expr, state vertexStatus) (result Value) { +func (c *OpContext) unifyNode(v Expr, state combinedFlags) (result Value) { savedSrc := c.src c.src = v.Source() err := c.errs @@ -742,7 +803,7 @@ func (c *OpContext) unifyNode(v Expr, state vertexStatus) (result Value) { c.errs = CombineErrors(c.src, c.errs, err) if v, ok := result.(*Vertex); ok { - if b, _ := v.BaseValue.(*Bottom); b != nil && !b.IsIncomplete() { + if b := v.Bottom(); b != nil && !b.IsIncomplete() { result = b } } @@ -781,9 +842,22 @@ func (c *OpContext) unifyNode(v Expr, state vertexStatus) (result Value) { if v == nil { return nil } + v = v.DerefValue() + + // TODO: consider moving this after markCycle, depending on how we + // implement markCycle, or whether we need it at all. + // TODO: is this indirect necessary? + // v = v.Indirect() - if v.isUndefined() || state > v.status { - c.unify(v, state) + if c.isDevVersion() { + if n := v.getState(c); n != nil { + // Always yield to not get spurious errors. + n.process(arcTypeKnown, yield) + } + } else { + if v.isUndefined() || state.vertexStatus() > v.status { + c.unify(v, state) + } } return v @@ -794,7 +868,13 @@ func (c *OpContext) unifyNode(v Expr, state vertexStatus) (result Value) { } } -func (c *OpContext) lookup(x *Vertex, pos token.Pos, l Feature, state vertexStatus) *Vertex { +func (c *OpContext) lookup(x *Vertex, pos token.Pos, l Feature, flags combinedFlags) *Vertex { + if c.isDevVersion() { + return x.lookup(c, pos, l, flags) + } + + state := flags.vertexStatus() + if l == InvalidLabel || x == nil { // TODO: is it possible to have an invalid label here? Maybe through the // API? @@ -863,9 +943,9 @@ func (c *OpContext) lookup(x *Vertex, pos token.Pos, l Feature, state vertexStat // hasAllConjuncts, but that are finalized too early, get conjuncts // processed beforehand. if state > a.status { - c.unify(a, state) + c.unify(a, deprecated(c, state)) } else if a.state != nil { - c.unify(a, partial) + c.unify(a, deprecated(c, partial)) } if a.IsConstraint() { @@ -883,6 +963,8 @@ func (c *OpContext) lookup(x *Vertex, pos token.Pos, l Feature, state vertexStat } } else { if x.state != nil { + x.state.assertInitialized() + for _, e := range x.state.exprs { if isCyclePlaceholder(e.err) { hasCycle = true @@ -893,7 +975,7 @@ func (c *OpContext) lookup(x *Vertex, pos token.Pos, l Feature, state vertexStat // As long as we have incomplete information, we cannot mark the // inability to look up a field as "final", as it may resolve down the // line. - permanent := x.status > conjuncts + permanent := x.status >= conjuncts if m, _ := x.BaseValue.(*ListMarker); m != nil && !m.IsOpen { permanent = true } @@ -973,7 +1055,7 @@ func pos(x Node) token.Pos { return x.Source().Pos() } -func (c *OpContext) node(orig Node, x Expr, scalar bool, state vertexStatus) *Vertex { +func (c *OpContext) node(orig Node, x Expr, scalar bool, state combinedFlags) *Vertex { // TODO: always get the vertex. This allows a whole bunch of trickery // down the line. 
v := c.unifyNode(x, state) @@ -1002,14 +1084,8 @@ func (c *OpContext) node(orig Node, x Expr, scalar bool, state vertexStatus) *Ve switch nv := v.(type) { case nil: - switch orig.(type) { - case *ForClause: - c.addErrf(IncompleteError, pos(x), - "cannot range over %s (incomplete)", x) - default: - c.addErrf(IncompleteError, pos(x), - "%s undefined (%s is incomplete)", orig, x) - } + c.addErrf(IncompleteError, pos(x), + "%s undefined (%s is incomplete)", orig, x) return emptyNode case *Bottom: @@ -1026,14 +1102,8 @@ func (c *OpContext) node(orig Node, x Expr, scalar bool, state vertexStatus) *Ve default: if kind := v.Kind(); kind&StructKind != 0 { - switch orig.(type) { - case *ForClause: - c.addErrf(IncompleteError, pos(x), - "cannot range over %s (incomplete type %s)", x, kind) - default: - c.addErrf(IncompleteError, pos(x), - "%s undefined as %s is incomplete (type %s)", orig, x, kind) - } + c.addErrf(IncompleteError, pos(x), + "%s undefined as %s is incomplete (type %s)", orig, x, kind) return emptyNode } else if !ok { @@ -1318,7 +1388,7 @@ func (c *OpContext) Str(x Node) string { if c.Format == nil { return fmt.Sprintf("%T", x) } - return c.Format(x) + return c.Format(c.Runtime, x) } // NewList returns a new list for the given values. diff --git a/vendor/cuelang.org/go/internal/core/adt/cycle.go b/vendor/cuelang.org/go/internal/core/adt/cycle.go index bfa7dae9a3..c22c535e2d 100644 --- a/vendor/cuelang.org/go/internal/core/adt/cycle.go +++ b/vendor/cuelang.org/go/internal/core/adt/cycle.go @@ -280,6 +280,8 @@ type cyclicConjunct struct { // the conjunct seems to be fully cyclic so far or if there is a valid reference // cycle. func (n *nodeContext) markCycle(arc *Vertex, env *Environment, x Resolver, ci CloseInfo) (_ CloseInfo, skip bool) { + n.assertInitialized() + // TODO(perf): this optimization can work if we also check for any // references pointing to arc within arc. This can be done with compiler // support. With this optimization, almost all references could avoid cycle @@ -294,7 +296,16 @@ func (n *nodeContext) markCycle(arc *Vertex, env *Environment, x Resolver, ci Cl depth := int32(0) for r := ci.Refs; r != nil; r = r.Next { if r.Ref != x { - continue + // TODO(share): this is a bit of a hack. We really should implement + // (*Vertex).cyclicReferences for the new evaluator. However, + // implementing cyclicReferences is somewhat tricky, as it requires + // referenced nodes to be evaluated, which is a guarantee we may not + // want to give. Moreover, it seems we can find a simpler solution + // based on structure sharing. So punt on this solution for now. + if r.Arc != arc || !n.ctx.isDevVersion() { + continue + } + found = true } // A reference that is within a graph that is being evaluated @@ -303,13 +314,13 @@ func (n *nodeContext) markCycle(arc *Vertex, env *Environment, x Resolver, ci Cl // graph will always be the same address. Hence, if this is a // finalized arc with a different address, it resembles a reference that // is included through a different path and is not a cycle. - if r.Arc != arc && arc.status == finalized { + if !equalDeref(r.Arc, arc) && arc.status == finalized { continue } // For dynamically created structs we mark this as an error. Otherwise // there is only an error if we have visited the arc before. 
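+		// (ci.Inline is set while an expression is evaluated inline, see
+		// evalState, so cycles through such computed values are reported
+		// immediately.)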
- if ci.Inline && (arc.IsDynamic || r.Arc == arc) { + if ci.Inline && (arc.IsDynamic || equalDeref(r.Arc, arc)) { n.reportCycleError() return ci, true } @@ -317,7 +328,14 @@ func (n *nodeContext) markCycle(arc *Vertex, env *Environment, x Resolver, ci Cl // We have a reference cycle, as distinguished from a structural // cycle. Reference cycles represent equality, and thus are equal // to top. We can stop processing here. - if r.Node == n.node { + // var nn1, nn2 *Vertex + // if u := r.Node.state.underlay; u != nil { + // nn1 = u.node + // } + // if u := n.node.state.underlay; u != nil { + // nn2 = u.node + // } + if equalDeref(r.Node, n.node) { return ci, true } @@ -341,7 +359,7 @@ func (n *nodeContext) markCycle(arc *Vertex, env *Environment, x Resolver, ci Cl // "parent" conjuncts? This mechanism seems not entirely // accurate. Maybe a pointer up to find the root and then // "spread" downwards? - if r.Ref == x && r.Arc == rr.Arc { + if r.Ref == x && equalDeref(r.Arc, rr.Arc) { n.node.Conjuncts[i].CloseInfo.IsCyclic = true break } @@ -351,6 +369,12 @@ func (n *nodeContext) markCycle(arc *Vertex, env *Environment, x Resolver, ci Cl break } + if arc.state != nil { + if d := arc.state.evalDepth; d > 0 && d >= n.ctx.optionalMark { + arc.IsCyclic = true + } + } + // The code in this switch statement registers structural cycles caught // through EvaluatingArcs to the root of the cycle. This way, any node // referencing this value can track these nodes early. This is mostly an @@ -359,6 +383,10 @@ func (n *nodeContext) markCycle(arc *Vertex, env *Environment, x Resolver, ci Cl outer: switch arc.status { case evaluatingArcs: // also Evaluating? + if arc.state.evalDepth < n.ctx.optionalMark { + break + } + // The reference may already be there if we had no-cyclic structure // invalidating the cycle. for r := arc.cyclicReferences; r != nil; r = r.Next { @@ -368,7 +396,7 @@ outer: } arc.cyclicReferences = &RefNode{ - Arc: arc, + Arc: deref(arc), Ref: x, Next: arc.cyclicReferences, } @@ -385,8 +413,8 @@ outer: } } ci.Refs = &RefNode{ - Arc: r.Arc, - Node: n.node, + Arc: deref(r.Arc), + Node: deref(n.node), Ref: x, Next: ci.Refs, @@ -402,6 +430,43 @@ outer: // y: [string]: b: y // x: y // x: c: x + // + // -> + // - in conjuncts + // - out conjuncts: these count for cycle detection. + // x: { + // [string]: <1: y> b: y + // c: x + // } + // x.c: { + // <1: y> b: y + // <2: x> y + // [string]: <3: x, y> b: y + // <2: x> c: x + // } + // x.c.b: { + // <1: y> y + // [string]: <4: y; Cyclic> b: y + // <3: x, y> b: y + // } + // x.c.b.b: { + // <3: x, y> y + // [string]: <5: x, y, Cyclic> b: y + // <4: y, Cyclic> y + // [string]: <5: x, y, Cyclic> b: y + // } + // x.c.c: { // structural cycle + // <3: x, y> b: y + // <2: x> x + // <6: x, Cyclic>: y + // [string]: <8: x, y; Cyclic> b: y + // <7: x, Cyclic>: c: x + // } + // x.c.c.b: { // structural cycle + // <3: x, y> y + // [string]: <3: x, y; Cyclic> b: y + // <8: x, y; Cyclic> y + // } // -> // x: [string]: b: y // x: c: b: y @@ -420,10 +485,14 @@ outer: // gives somewhat better error messages. // We also need to add the reference again if the depth differs, as // the depth is used for tracking "new structure". 
+ // var nn *Vertex + // if u := n.node.state.underlay; u != nil { + // nn = u.node + // } ci.Refs = &RefNode{ - Arc: arc, + Arc: deref(arc), Ref: x, - Node: n.node, + Node: deref(n.node), Next: ci.Refs, Depth: n.depth, } @@ -434,6 +503,14 @@ outer: return ci, false } + // TODO: consider if we should bail if a cycle is detected using this + // mechanism. Ultimately, especially when the old evaluator is removed + // and the status field purged, this should be used instead of the above. + // if !found && arc.state.evalDepth < n.ctx.optionalMark { + // // No cycle. + // return ci, false + // } + alreadyCycle := ci.IsCyclic ci.IsCyclic = true @@ -455,9 +532,7 @@ outer: a := p.Conjuncts count := 0 for _, c := range a { - if !c.CloseInfo.IsCyclic { - count++ - } + count += getNonCyclicCount(c) } if !alreadyCycle { count-- @@ -470,7 +545,12 @@ outer: n.hasCycle = true if !n.hasNonCycle && env != nil { + // TODO: investigate if we can get rid of cyclicConjuncts in the new + // evaluator. v := Conjunct{env, x, ci} + if n.ctx.isDevVersion() { + n.node.cc.incDependent(n.ctx, DEFER, nil) + } n.cyclicConjuncts = append(n.cyclicConjuncts, cyclicConjunct{v, arc}) return ci, true } @@ -478,19 +558,51 @@ outer: return ci, false } +func getNonCyclicCount(c Conjunct) int { + switch a, ok := c.x.(*ConjunctGroup); { + case ok: + count := 0 + for _, c := range *a { + count += getNonCyclicCount(c) + } + return count + + case !c.CloseInfo.IsCyclic: + return 1 + + default: + return 0 + } +} + // updateCyclicStatus looks for proof of non-cyclic conjuncts to override // a structural cycle. func (n *nodeContext) updateCyclicStatus(c CloseInfo) { if !c.IsCyclic { n.hasNonCycle = true for _, c := range n.cyclicConjuncts { - n.addVertexConjuncts(c.c, c.arc, false) + if n.ctx.isDevVersion() { + ci := c.c.CloseInfo + ci.cc = n.node.rootCloseContext(n.ctx) + n.scheduleVertexConjuncts(c.c, c.arc, ci) + n.node.cc.decDependent(n.ctx, DEFER, nil) + } else { + n.addVertexConjuncts(c.c, c.arc, false) + } } n.cyclicConjuncts = n.cyclicConjuncts[:0] } } func assertStructuralCycle(n *nodeContext) bool { + // TODO: is this the right place to put it? + if n.ctx.isDevVersion() { + for range n.cyclicConjuncts { + n.node.cc.decDependent(n.ctx, DEFER, nil) + } + n.cyclicConjuncts = n.cyclicConjuncts[:0] + } + if n.hasCycle && !n.hasNonCycle { n.reportCycleError() return true @@ -523,3 +635,62 @@ func makeAnonymousConjunct(env *Environment, x Expr, refs *RefNode) Conjunct { }}, } } + +// incDepth increments the evaluation depth. This should typically be called +// before descending into a child node. +func (n *nodeContext) incDepth() { + n.ctx.evalDepth++ +} + +// decDepth decrements the evaluation depth. It should be paired with a call to +// incDepth and be called after the processing of child nodes is done. +func (n *nodeContext) decDepth() { + n.ctx.evalDepth-- +} + +// markOptional marks that we are about to process an "optional element" that +// allows errors. In these cases, structural cycles are not "terminal". +// +// Examples of such constructs are: +// +// Optional fields: +// +// a: b?: a +// +// Pattern constraints: +// +// a: [string]: a +// +// Disjunctions: +// +// a: b: null | a +// +// A call to markOptional should be paired with a call to unmarkOptional. +func (n *nodeContext) markOptional() (saved int) { + saved = n.ctx.evalDepth + n.ctx.optionalMark = n.ctx.evalDepth + return saved +} + +// See markOptional. 
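+// unmarkOptional restores the optional mark saved by the paired call to
+// markOptional.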
+func (n *nodeContext) unmarkOptional(saved int) { + n.ctx.optionalMark = saved +} + +// markDepth assigns the current evaluation depth to the receiving node. +// Any previously assigned depth is saved and returned and should be restored +// using unmarkDepth after processing n. +// +// When a node is encountered with a depth set to a non-zero value this +// indicates a cycle. The cycle is an evaluation cycle when the node's depth +// is equal to the current depth and a structural cycle otherwise. +func (n *nodeContext) markDepth() (saved int) { + saved = n.evalDepth + n.evalDepth = n.ctx.evalDepth + return saved +} + +// See markDepth. +func (n *nodeContext) unmarkDepth(saved int) { + n.evalDepth = saved +} diff --git a/vendor/cuelang.org/go/internal/core/adt/debug.go b/vendor/cuelang.org/go/internal/core/adt/debug.go new file mode 100644 index 0000000000..4d0728821b --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/debug.go @@ -0,0 +1,703 @@ +// Copyright 2023 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" +) + +// RecordDebugGraph records debug output in ctx if there was an anomaly +// discovered. +func RecordDebugGraph(ctx *OpContext, v *Vertex, name string) { + graph, hasError := CreateMermaidGraph(ctx, v, true) + if hasError { + if ctx.ErrorGraphs == nil { + ctx.ErrorGraphs = map[string]string{} + } + path := ctx.PathToString(v.Path()) + ctx.ErrorGraphs[path] = graph + } +} + +var ( + // DebugDeps enables dependency tracking for debugging purposes. + // It is off by default, as it adds a significant overhead. + // + // TODO: hook this init CUE_DEBUG, once we have set this up as a single + // environment variable. For instance, CUE_DEBUG=matchdeps=1. + DebugDeps = false + + OpenGraphs = false + + // MaxGraphs is the maximum number of debug graphs to be opened. To avoid + // confusion, a panic will be raised if this number is exceeded. + MaxGraphs = 10 + + numberOpened = 0 +) + +// OpenNodeGraph takes a given mermaid graph and opens it in the system default +// browser. +func OpenNodeGraph(title, path, code, out, graph string) { + if !OpenGraphs { + return + } + if numberOpened > MaxGraphs { + panic("too many debug graphs opened") + } + numberOpened++ + + err := os.MkdirAll(path, 0755) + if err != nil { + log.Fatal(err) + } + url := filepath.Join(path, "graph.html") + + w, err := os.Create(url) + if err != nil { + log.Fatal(err) + } + defer w.Close() + + data := struct { + Title string + Code string + Out string + Graph string + }{ + Title: title, + Code: code, + Out: out, + Graph: graph, + } + + tmpl := template.Must(template.New("").Parse(` + + + + {{.Title}} + + + + + +
+<div class="mermaid">
+{{.Graph}}
+</div>
+<div style="display: flex">
+<div>
+<h3>Input</h3>
+<pre>
+{{.Code}}
+</pre>
+</div>
+<div>
+<h3>Output</h3>
+<pre>
+{{.Out}}
+</pre>
+</div>
+</div>
+ + +`)) + + err = tmpl.Execute(w, data) + if err != nil { + log.Fatal(err) + } + + openBrowser(url) +} + +// openDebugGraph opens a browser with a graph of the state of the given Vertex +// and all its dependencies that have not completed processing. +// DO NOT DELETE: this is used to insert during debugging of the evaluator +// to inspect a node. +func openDebugGraph(ctx *OpContext, v *Vertex, name string) { + graph, _ := CreateMermaidGraph(ctx, v, true) + path := filepath.Join(".debug", "TestX", name) + OpenNodeGraph(name, path, "in", "out", graph) +} + +// depKind is a type of dependency that is tracked with incDependent and +// decDependent. For each there should be matching pairs passed to these +// functions. The debugger, when used, tracks and verifies that these +// dependencies are balanced. +type depKind int + +const ( + // PARENT dependencies are used to track the completion of parent + // closedContexts within the closedness tree. + PARENT depKind = iota + 1 + + // ARC dependencies are used to track the completion of corresponding + // closedContexts in parent Vertices. + ARC + + // NOTIFY dependencies keep a note while dependent conjuncts are collected + NOTIFY // root node of source + + // TASK dependencies are used to track the completion of a task. + TASK + + // DISJUNCT is used to mark an incomplete disjunct. + DISJUNCT + + // EVAL tracks that the conjunct associated with a closeContext has been + // inserted using scheduleConjunct. A closeContext may not be deleted + // as long as the conjunct has not been evaluated yet. + // This prevents a node from being released if an ARC decrement happens + // before a node is evaluated. + EVAL + + // COMP tracks pending arcs in comprehensions. + COMP + + // ROOT dependencies are used to track that all nodes of parents are + // added to a tree. + ROOT // Always refers to self. + + // INIT dependencies are used to hold ownership of a closeContext during + // initialization and prevent it from being finalized when scheduling a + // node's conjuncts. + INIT + + // DEFER is used to track recursive processing of a node. + DEFER // Always refers to self. + + // TEST is used for testing notifications. + TEST // Always refers to self. +) + +func (k depKind) String() string { + switch k { + case PARENT: + return "PARENT" + case ARC: + return "ARC" + case NOTIFY: + return "NOTIFY" + case TASK: + return "TASK" + case DISJUNCT: + return "DISJUNCT" + case EVAL: + return "EVAL" + case COMP: + return "COMP" + case ROOT: + return "ROOT" + + case INIT: + return "INIT" + case DEFER: + return "DEFER" + case TEST: + return "TEST" + } + panic("unreachable") +} + +// ccDep is used to record counters which is used for debugging only. +// It is purpose is to be precise about matching inc/dec as well as to be able +// to traverse dependency. +type ccDep struct { + dependency *closeContext + kind depKind + decremented bool + + // task keeps a reference to a task for TASK dependencies. + task *task + // taskID indicates the sequence number of a task within a scheduler. 
+ taskID int +} + +func (c *closeContext) addDependent(ctx *OpContext, kind depKind, dependant *closeContext) *ccDep { + if !DebugDeps { + return nil + } + + if dependant == nil { + dependant = c + } + + if ctx.LogEval > 1 { + ctx.Logf(ctx.vertex, "INC(%s) %v %p parent: %p %d\n", kind, c.Label(), c, c.parent, c.conjunctCount) + } + + dep := &ccDep{kind: kind, dependency: dependant} + c.dependencies = append(c.dependencies, dep) + + return dep +} + +// matchDecrement checks that this decrement matches a previous increment. +func (c *closeContext) matchDecrement(ctx *OpContext, v *Vertex, kind depKind, dependant *closeContext) { + if !DebugDeps { + return + } + + if dependant == nil { + dependant = c + } + + if ctx.LogEval > 1 { + ctx.Logf(ctx.vertex, "DEC(%s) %v %p %d\n", kind, c.Label(), c, c.conjunctCount) + } + + for _, d := range c.dependencies { + if d.kind != kind { + continue + } + if d.dependency != dependant { + continue + } + // Only one typ-dependant pair possible. + if d.decremented { + // There might be a duplicate entry, so continue searching. + continue + } + + d.decremented = true + return + } + + panic(fmt.Sprintf("unmatched decrement: %s", kind)) +} + +// mermaidContext is used to create a dependency analysis for a node. +type mermaidContext struct { + ctx *OpContext + v *Vertex + + all bool + + hasError bool + + // roots maps the root closeContext of any Vertex to the analysis data + // for that Vertex. + roots map[*closeContext]*mermaidVertex + + // processed indicates whether the node in question has been processed + // by the dependency analysis. + processed map[*closeContext]bool + + // inConjuncts indicates whether a node is explicitly referenced by + // a Conjunct. These nodes are visualized with an additional circle. + inConjuncts map[*closeContext]bool + + // ccID maps a closeContext to a unique ID. + ccID map[*closeContext]string + + w io.Writer + + // vertices lists an analysis of all nodes related to the analyzed node. + // The first node is the node being analyzed itself. + vertices []*mermaidVertex +} + +type mermaidVertex struct { + f Feature + w *bytes.Buffer + tasks *bytes.Buffer + intra *bytes.Buffer +} + +// CreateMermaidGraph creates an analysis of relations and values involved in +// nodes with unbalanced increments. The graph is in Mermaid format. +func CreateMermaidGraph(ctx *OpContext, v *Vertex, all bool) (graph string, hasError bool) { + if !DebugDeps { + return "", false + } + + buf := &strings.Builder{} + + m := &mermaidContext{ + ctx: ctx, + v: v, + roots: map[*closeContext]*mermaidVertex{}, + processed: map[*closeContext]bool{}, + inConjuncts: map[*closeContext]bool{}, + ccID: map[*closeContext]string{}, + w: buf, + all: all, + } + + io.WriteString(m.w, "graph TD\n") + io.WriteString(m.w, " classDef err fill:#e01010,stroke:#000000,stroke-width:3,font-size:medium\n") + + indent(m.w, 1) + fmt.Fprintf(m.w, "style %s stroke-width:5\n\n", m.vertexID(v)) + // Trigger descent on first vertex. This may include other vertices when + // traversing closeContexts if they have dependencies on such vertices. + m.vertex(v) + + // Close and flush all collected vertices. + for i, v := range m.vertices { + v.closeVertex() + if i == 0 || len(m.ccID) > 0 { + m.w.Write(v.w.Bytes()) + } + } + + return buf.String(), m.hasError +} + +// vertex creates a blob of Mermaid graph representing one vertex. It has +// the following shape (where ptr(x) means pointer of x): +// +// subgraph ptr(v) +// %% root note if ROOT has not been decremented. 
+// root((cc1)) -|R|-> ptr(cc1) +// +// %% closedness graph dependencies +// ptr(cc1) +// ptr(cc2) -|P|-> ptr(cc1) +// ptr(cc2) -|E|-> ptr(cc1) %% mid schedule +// +// %% tasks +// subgraph tasks +// ptr(cc3) +// ptr(cc4) +// ptr(cc5) +// end +// +// %% outstanding tasks and the contexts they depend on +// ptr(cc3) -|T|-> ptr(cc2) +// +// subgraph notifications +// ptr(cc6) +// ptr(cc7) +// end +// end +// %% arcs from nodes to nodes in other vertices +// ptr(cc1) -|A|-> ptr(cc10) +// ptr(vx) -|N|-> ptr(cc11) +// +// +// A vertex has the following name: path(v); done +// +// Each closeContext has the following info: ptr(cc); cc.count +func (m *mermaidContext) vertex(v *Vertex) *mermaidVertex { + root := v.rootCloseContext(m.ctx) + + vc := m.roots[root] + if vc != nil { + return vc + } + + vc = &mermaidVertex{ + f: v.Label, + w: &bytes.Buffer{}, + intra: &bytes.Buffer{}, + } + m.vertices = append(m.vertices, vc) + + m.tagReferencedConjuncts(v.Conjuncts) + + m.roots[root] = vc + w := vc.w + + var status string + switch { + case v.status == finalized: + status = "finalized" + case v.state == nil: + status = "ready" + default: + status = v.state.scheduler.state.String() + } + path := m.vertexPath(v) + if v.ArcType != ArcMember { + path += fmt.Sprintf("/%v", v.ArcType) + } + + indentOnNewline(w, 1) + fmt.Fprintf(w, "subgraph %s[%s: %s]\n", m.vertexID(v), path, status) + + m.cc(root) + + return vc +} + +func (m *mermaidContext) tagReferencedConjuncts(a []Conjunct) { + for _, c := range a { + m.inConjuncts[c.CloseInfo.cc] = true + + if g, ok := c.x.(*ConjunctGroup); ok { + m.tagReferencedConjuncts([]Conjunct(*g)) + } + } +} + +func (v *mermaidVertex) closeVertex() { + w := v.w + + if v.tasks != nil { + indent(v.tasks, 2) + fmt.Fprintf(v.tasks, "end\n") + w.Write(v.tasks.Bytes()) + } + + // TODO: write all notification sources (or is this just the node?) + + indent(w, 1) + fmt.Fprintf(w, "end\n") +} + +func (m *mermaidContext) task(d *ccDep) string { + v := d.dependency.src + + // This must already exist. + vc := m.vertex(v) + + if vc.tasks == nil { + vc.tasks = &bytes.Buffer{} + indentOnNewline(vc.tasks, 2) + fmt.Fprintf(vc.tasks, "subgraph %s_tasks[tasks]\n", m.vertexID(v)) + } + + if d.task != nil && v != d.task.node.node { + panic("inconsistent task") + } + taskID := fmt.Sprintf("%s_%d", m.vertexID(v), d.taskID) + var state string + var completes condition + var kind string + if d.task != nil { + state = d.task.state.String()[:2] + completes = d.task.completes + kind = d.task.run.name + } + indentOnNewline(vc.tasks, 3) + fmt.Fprintf(vc.tasks, "%s(%d", taskID, d.taskID) + indentOnNewline(vc.tasks, 4) + io.WriteString(vc.tasks, state) + indentOnNewline(vc.tasks, 4) + io.WriteString(vc.tasks, kind) + indentOnNewline(vc.tasks, 4) + fmt.Fprintf(vc.tasks, "%x)\n", completes) + + if s := d.task.blockedOn; s != nil { + m.vertex(s.node.node) + fmt.Fprintf(m.w, "%s_tasks == BLOCKED ==> %s\n", m.vertexID(s.node.node), taskID) + } + + return taskID +} + +func (m *mermaidContext) cc(cc *closeContext) { + if m.processed[cc] { + return + } + m.processed[cc] = true + + // This must already exist. + v := m.vertex(cc.src) + + // Dependencies at different scope levels. + global := m.w + node := v.w + + for _, d := range cc.dependencies { + indentLevel := 2 + var w io.Writer + var name, link string + + switch { + case !d.decremented: + link = fmt.Sprintf(`--%s-->`, d.kind.String()) + case m.all: + link = fmt.Sprintf("-. 
%s .->", d.kind.String()[0:1]) + default: + continue + } + + // Only include still outstanding nodes. + switch d.kind { + case PARENT: + w = node + name = m.pstr(d.dependency) + case EVAL: + if cc.Label().IsLet() { + // Do not show eval links for let nodes, as they never depend + // on the parent node. Alternatively, link them to the root + // node instead. + return + } + fallthrough + case ARC, NOTIFY, DISJUNCT, COMP: + w = global + indentLevel = 1 + name = m.pstr(d.dependency) + + case TASK: + w = node + taskID := "disjunct" + if d.task != nil { + taskID = m.task(d) + } + name = fmt.Sprintf("%s((%d))", taskID, d.taskID) + case ROOT, INIT: + w = node + src := cc.src + if v.f != src.Label { + panic("incompatible labels") + } + name = fmt.Sprintf("root_%s", m.vertexID(src)) + } + + if w != nil { + dst := m.pstr(cc) + indent(w, indentLevel) + fmt.Fprintf(w, "%s %s %s\n", name, link, dst) + } + + // If the references count is 0, all direct dependencies must have + // completed as well. In this case, descending into each of them should + // not end up printing anything. In case of any bugs, these nodes will + // show up as unattached nodes. + + if dep := d.dependency; dep != nil && dep != cc { + m.cc(dep) + } + } +} + +func (m *mermaidContext) vertexPath(v *Vertex) string { + path := m.ctx.PathToString(v.Path()) + if path == "" { + return "_" + } + return path +} + +const sigPtrLen = 6 + +func (m *mermaidContext) vertexID(v *Vertex) string { + s := fmt.Sprintf("%p", v) + return "v" + s[len(s)-sigPtrLen:] +} + +func (m *mermaidContext) pstr(cc *closeContext) string { + if id, ok := m.ccID[cc]; ok { + return id + } + + ptr := fmt.Sprintf("%p", cc) + ptr = ptr[len(ptr)-sigPtrLen:] + id := fmt.Sprintf("cc%s", ptr) + m.ccID[cc] = id + + v := m.vertex(cc.src) + + w := v.w + + indent(w, 2) + w.WriteString(id) + + var open, close = "((", "))" + if m.inConjuncts[cc] { + open, close = "(((", ")))" + } + + w.WriteString(open) + w.WriteString("cc") + if cc.conjunctCount > 0 { + fmt.Fprintf(w, " c:%d: d:%d", cc.conjunctCount, cc.disjunctCount) + } + indentOnNewline(w, 3) + w.WriteString(ptr) + + flags := &bytes.Buffer{} + addFlag := func(test bool, flag byte) { + if test { + flags.WriteByte(flag) + } + } + addFlag(cc.isDef, '#') + addFlag(cc.isEmbed, 'E') + addFlag(cc.isClosed, 'c') + addFlag(cc.isClosedOnce, 'C') + addFlag(cc.hasEllipsis, 'o') + flags.WriteByte(cc.arcType.String()[0]) + io.Copy(w, flags) + + w.WriteString(close) + + switch { + case cc.conjunctCount == 0: + case cc.conjunctCount <= cc.disjunctCount: + // TODO: Extra checks for disjunctions? + // E.g.: cc.src is not a disjunction + default: + // If cc.conjunctCount > cc.disjunctCount. + // TODO: count the number of non-decremented DISJUNCT dependencies. + fmt.Fprintf(w, ":::err") + if cc.src == m.v { + m.hasError = true + } + } + + w.WriteString("\n") + + return id +} + +func indentOnNewline(w io.Writer, level int) { + w.Write([]byte{'\n'}) + indent(w, level) +} + +func indent(w io.Writer, level int) { + for i := 0; i < level; i++ { + io.WriteString(w, " ") + } +} + +// openBrowser opens the given URL in the default browser. 
+func openBrowser(url string) { + var cmd *exec.Cmd + + switch runtime.GOOS { + case "windows": + cmd = exec.Command("cmd", "/c", "start", url) + case "darwin": + cmd = exec.Command("open", url) + default: + cmd = exec.Command("xdg-open", url) + } + + err := cmd.Start() + if err != nil { + log.Fatal(err) + } + go cmd.Wait() +} diff --git a/vendor/cuelang.org/go/internal/core/adt/default.go b/vendor/cuelang.org/go/internal/core/adt/default.go index 885e96181c..7fc0b8a778 100644 --- a/vendor/cuelang.org/go/internal/core/adt/default.go +++ b/vendor/cuelang.org/go/internal/core/adt/default.go @@ -45,6 +45,7 @@ func (d *Disjunction) Default() Value { // // It also closes a list, representing its default value. func (v *Vertex) Default() *Vertex { + v = v.DerefValue() switch d := v.BaseValue.(type) { default: return v @@ -126,6 +127,22 @@ func stripNonDefaults(elem Elem) (r Elem, stripped bool) { } return x, false + case *ConjunctGroup: + // NOTE: this code requires allocations unconditional. This should be + // mitigated once we optimize conjunct groupings. + isNew := false + var a ConjunctGroup = make([]Conjunct, len(*x)) + for i, c := range *x { + a[i].x, ok = stripNonDefaults(c.Expr()) + if ok { + isNew = true + } + } + if isNew { + return &a, true + } + return x, false + default: return x, false } diff --git a/vendor/cuelang.org/go/internal/core/adt/dev.go b/vendor/cuelang.org/go/internal/core/adt/dev.go new file mode 100644 index 0000000000..28db747d3c --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/dev.go @@ -0,0 +1,79 @@ +// Copyright 2023 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +// This file contains types to help in the transition from the old to new +// evaluation model. + +func unreachableForDev(c *OpContext) { + if c.isDevVersion() { + panic("unreachable for development version") + } +} + +type combinedFlags uint32 + +// oldOnly indicates that a Vertex should only be evaluated for the old +// evaluator. 
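+//
+// For example, oldOnly(partial) packs partial into the low byte, the ignore
+// run mode into the second byte, and the allKnown condition into the upper
+// bits, so that for f := oldOnly(partial), f.vertexStatus() == partial,
+// f.runMode() == ignore, and f.conditions() == allKnown.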
+func oldOnly(state vertexStatus) combinedFlags { + return combinedFlags(state) | + combinedFlags(ignore)<<8 | + combinedFlags(allKnown)<<16 +} + +func combineMode(cond condition, mode runMode) combinedFlags { + return combinedFlags(mode)<<8 | combinedFlags(cond)<<16 +} + +func attempt(state vertexStatus, cond condition) combinedFlags { + return combinedFlags(state) | combineMode(cond, attemptOnly) +} + +func require(state vertexStatus, cond condition) combinedFlags { + return combinedFlags(state) | combineMode(cond, yield) +} + +func final(state vertexStatus, cond condition) combinedFlags { + return combinedFlags(state) | combineMode(cond, finalize) +} + +func deprecated(c *OpContext, state vertexStatus) combinedFlags { + // if c.isDevVersion() { + // panic("calling function may not be used in new evaluator") + // } + return combinedFlags(state) +} + +func (f combinedFlags) vertexStatus() vertexStatus { + return vertexStatus(f & 0xff) +} + +func (f combinedFlags) withVertexStatus(x vertexStatus) combinedFlags { + f &^= 0xff + f |= combinedFlags(x) + return f +} + +func (f combinedFlags) conditions() condition { + return condition(f >> 16) +} + +func (f combinedFlags) runMode() runMode { + return runMode(f>>8) & 0xff +} + +func (f combinedFlags) ignore() bool { + return f&(combinedFlags(ignore)<<8) != 0 +} diff --git a/vendor/cuelang.org/go/internal/core/adt/disjunct.go b/vendor/cuelang.org/go/internal/core/adt/disjunct.go index 45abffcbd9..675fab7225 100644 --- a/vendor/cuelang.org/go/internal/core/adt/disjunct.go +++ b/vendor/cuelang.org/go/internal/core/adt/disjunct.go @@ -83,8 +83,16 @@ import ( // - So only need to check exact labels for vertices. type envDisjunct struct { - env *Environment - cloneID CloseInfo + env *Environment + cloneID CloseInfo + + // fields for new evaluator + + src Node + disjuncts []disjunct + + // fields for old evaluator + expr *DisjunctionExpr value *Disjunction hasDefaults bool @@ -108,13 +116,21 @@ func (n *nodeContext) addDisjunction(env *Environment, x *DisjunctionExpr, clone } } - n.disjunctions = append(n.disjunctions, - envDisjunct{env, cloneID, x, nil, numDefaults > 0, false, false}) + n.disjunctions = append(n.disjunctions, envDisjunct{ + env: env, + cloneID: cloneID, + expr: x, + hasDefaults: numDefaults > 0, + }) } func (n *nodeContext) addDisjunctionValue(env *Environment, x *Disjunction, cloneID CloseInfo) { - n.disjunctions = append(n.disjunctions, - envDisjunct{env, cloneID, nil, x, x.HasDefaults, false, false}) + n.disjunctions = append(n.disjunctions, envDisjunct{ + env: env, + cloneID: cloneID, + value: x, + hasDefaults: x.HasDefaults, + }) } @@ -124,6 +140,8 @@ func (n *nodeContext) expandDisjuncts( parentMode defaultMode, // default mode of this disjunct recursive, last bool) { + unreachableForDev(n.ctx) + n.ctx.stats.Disjuncts++ // refNode is used to collect cyclicReferences for all disjuncts to be @@ -342,7 +360,7 @@ func (n *nodeContext) expandDisjuncts( m = combineDefault(m, info.nestedMode) case hasDefaults && !used: - Assertf(parent == notDefault, "unexpected default mode") + Assertf(n.ctx, parent == notDefault, "unexpected default mode") } } d.defaultMode = m diff --git a/vendor/cuelang.org/go/internal/core/adt/disjunct2.go b/vendor/cuelang.org/go/internal/core/adt/disjunct2.go new file mode 100644 index 0000000000..503da32a85 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/disjunct2.go @@ -0,0 +1,763 @@ +// Copyright 2024 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may 
not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+// # Overview
+//
+// This file contains the disjunction algorithm of the CUE evaluator. It works
+// in unison with the code in overlay.go.
+//
+// In principle, evaluating disjunctions is a matter of unifying each disjunct
+// with the non-disjunct values, eliminating those that fail, and seeing what
+// is left. In case of multiple disjunctions it is a simple cross product of
+// disjuncts. The key is how to do this efficiently.
+//
+// # Classification of disjunction performance
+//
+// The key to an efficient disjunction algorithm is to minimize the impact of
+// taking the cross product of disjunctions. This is especially pertinent if
+// disjunction expressions can be unified with themselves, as can be the case
+// in recursive definitions, as this can lead to exponential time complexity.
+//
+// We identify the following categories of importance for performance
+// optimization:
+//
+//   - Eliminate duplicates
+//     - For completed disjunctions
+//     - For partially computed disjuncts
+//   - Fail early / minimize work before failure
+//     - Filter disjuncts before unification (TODO)
+//       - Based on discriminator field
+//       - Based on a non-destructive unification of the disjunct and
+//         the current value computed so far
+//     - During the regular destructive unification
+//       - Traverse arcs where failure may occur
+//   - Copy on write (TODO)
+//
+// We discuss these aspects in more detail below.
+//
+// # Eliminating completed duplicates
+//
+// Eliminating completed duplicates can be achieved by comparing them for
+// equality. A disjunct can only be considered completed if all disjuncts have
+// been selected and evaluated, or at any time if processing fails.
+//
+// The following values should be recursively considered for equality:
+//
+//   - the value of the node,
+//   - the value of its arcs,
+//   - the key and value of the pattern constraints, and
+//   - the expression of the allowed fields.
+//
+// In some of these cases it may not be possible to detect if two nodes are
+// equal. For instance, two pattern constraints with two different regular
+// expressions as patterns, but that define an identical language, should be
+// considered equal. In practice, however, this is hard to distinguish.
+//
+// In the end this is mostly a matter of performance. As we noted, the biggest
+// concern is to avoid a combinatorial explosion when disjunctions are unified
+// with themselves. The hope is that we can at least catch these cases, either
+// because they will evaluate to the same values, or because we can identify
+// that the underlying expressions are the same, or both.
+//
+// # Eliminating partially-computed duplicates
+//
+// We start with some observations and issues regarding partially evaluated
+// nodes.
+//
+// ## Issue: Closedness
+//
+// Two identical CUE values with identical fields, values, and pattern
+// constraints may still need to be considered different, as they may exhibit
+// different closedness behavior.
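+// (Closedness determines which fields a struct may still accept: a struct
+// produced from a definition such as `#def` rejects fields that it does not
+// explicitly allow.)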
Consider, for instance, this example:
+//
+//	#def: {
+//		{} | {c: string} // D1
+//		{} | {a: string} // D2
+//	}
+//	x: #def
+//	x: c: "foo"
+//
+// Now, consider the case of the cross product that unifies the two empty
+// structs for `x`. Note that `x` already has a field `c`. After unifying the
+// first disjunction with `x`, both intermediate disjuncts will have the value
+// `{c: "foo"}`:
+//
+//	{c: "foo"} & ({} | {c: string})
+//	=>
+//	{c: "foo"} | {c: "foo"}
+//
+// One would think that one of these disjuncts can be eliminated. Nonetheless,
+// there is a difference. The second disjunct, which resulted from unifying
+// `{c: "foo"}` with `{c: string}`, will remain valid. The first disjunct,
+// however, will fail after it is unified and completed with the `{}` of the
+// second disjunction (D2): only at this point is it known that x was unified
+// with an empty closed struct, and that field `c` needs to be rejected.
+//
+// One possible solution would be to fully compute the cross product of `#def`
+// and use this expanded disjunction for unification, as this would mean that
+// full knowledge of closedness information is available.
+//
+// Although this is possible in some cases and can be a useful performance
+// optimization, it is not always possible to use the fully evaluated disjuncts
+// in such a precomputed cross product. For instance, if a disjunction relies on
+// a comprehension or a default value, it is not possible to fully evaluate the
+// disjunction, as merging it with another value may change the inputs for such
+// expressions later on. This means that we can only rely on partial evaluation
+// in some cases.
+//
+// ## Issue: Outstanding tasks in partial results
+//
+// Some tasks may not be completed until all conjuncts are known. For cross
+// products of disjunctions this may mean that such tasks cannot be completed
+// until all cross products are done. For instance, it is typically not
+// possible to evaluate a task that relies on taking a default value that may
+// change as more disjuncts are added. A similar argument holds for
+// comprehensions on values that may still be changed as more disjunctions
+// come in.
+//
+// ## Evaluating equality of partially evaluated nodes
+//
+// Because unevaluated expressions may depend on results that have not yet been
+// computed, we cannot reliably compare the results of a Vertex to determine
+// equality. We need a different strategy.
+//
+// The strategy we take is based on the observation that at the start of a cross
+// product, the base conjunct is the same for all disjuncts. We can factor these
+// inputs out and focus on the differences between the disjuncts. In other
+// words, we can focus solely on the differences that manifest at the insertion
+// points (or "disjunction holes") of the disjuncts.
+//
+// In short, two disjuncts are equal if:
+//
+//  1. the disjunction holes that were already processed are equal, and
+//  2. they have either no outstanding tasks, or the outstanding tasks are equal
+//
+// Coincidentally, analyzing the differences as discussed in this section is
+// very similar in nature to precomputing a disjunct and using that. The main
+// difference is that we potentially have more information to prematurely
+// evaluate expressions and thus to prematurely filter values. For instance, the
+// mixed-in value may have fixed a value that previously was not fixed. 
This
+// means that any expression referencing this value may be evaluated early and
+// can cause a disjunct to fail and be eliminated earlier.
+//
+// A disadvantage of this approach, however, is that it is not fully precise: it
+// may not filter some disjuncts that are logically identical. There are
+// strategies to further optimize this. For instance, if all remaining holes do
+// not contribute to closedness, which can be determined by walking up the
+// closedness parent chain, we may be able to safely filter disjuncts with equal
+// results.
+//
+// # Invariants
+//
+// We use the following assumptions in the implementation below:
+//
+//   - No more conjuncts are added to a disjunct after its processing begins.
+//     If a disjunction results in a value that causes more fields to be added
+//     later, this may not influence the result of the disjunction, i.e., those
+//     changes must be idempotent.
+//   - TODO: consider if any other assumptions are made.
+//
+// # Algorithm
+//
+// The evaluator accumulates all disjuncts of a Vertex in the nodeContext along
+// with the closeContext at which each was defined. A single task is scheduled
+// to process them all at once upon the first encounter of a disjunction.
+//
+// The algorithm is as follows:
+//   - Initialize the current Vertex n with the result evaluated so far as a
+//     list of "previous disjuncts".
+//   - Iterate over each disjunction
+//     - For each previous disjunct x
+//       - For each disjunct y in the current disjunction
+//         - Unify
+//         - Discard if error, store in the list of current disjuncts if
+//           it differs from all other disjuncts in this list.
+//   - Set n to the result of the disjunction.
+//
+// This algorithm is recursive: if a disjunction is encountered in a disjunct,
+// it is processed as part of the evaluation of that disjunct.
+//
+
+// A disjunct is the expanded form of a single branch of either a Disjunction
+// or a DisjunctionExpr.
+//
+// TODO(perf): encode ADT structures in the correct form so that we do not
+// have to compute these each time.
+type disjunct struct {
+	expr Expr
+	err  *Bottom
+
+	isDefault bool
+	mode      defaultMode
+}
+
+// disjunctHole associates a closeContext copy representing a disjunct hole with
+// the underlying closeContext from which it originally was branched.
+// We could include this information in the closeContext itself, but since this
+// is relatively rare, we keep it separate to avoid bloating the closeContext.
+type disjunctHole struct {
+	cc         *closeContext
+	underlying *closeContext
+}
+
+func (n *nodeContext) scheduleDisjunction(d envDisjunct) {
+	if len(n.disjunctions) == 0 {
+		// This processes all disjunctions in a single pass.
+		n.scheduleTask(handleDisjunctions, nil, nil, CloseInfo{})
+	}
+
+	// ccHole is the closeContext in which the individual disjuncts are
+	// scheduled.
+	ccHole := d.cloneID.cc
+
+	// This counter can be decremented once a disjunct has been scheduled in
+	// the clone. Note that it will not be closed in the original, as the
+	// result will be either an error; a single disjunct, in which case
+	// mergeVertex will override the original value; or multiple disjuncts, in
+	// which case the original is set to the disjunct itself.
+	ccHole.incDisjunct(n.ctx, DISJUNCT)
+
+	n.disjunctions = append(n.disjunctions, d)
+
+	n.disjunctCCs = append(n.disjunctCCs, disjunctHole{
+		cc:         ccHole, // this value is cloned in doDisjunct.
+		underlying: ccHole,
+	})
+}
+
+func initArcs(ctx *OpContext, v *Vertex) {
+	for _, a := range v.Arcs {
+		a.getState(ctx)
+		initArcs(ctx, a)
+	}
+}
+
+func (n *nodeContext) processDisjunctions() *Bottom {
+	defer func() {
+		// TODO:
+		// Clear the buffers.
+		// TODO: we may want to retain history of which disjunctions were
+		// processed. In that case we can set a disjunction position to the
+		// end of the list and schedule new tasks if this position equals the
+		// disjunction list length.
+	}()
+
+	a := n.disjunctions
+	n.disjunctions = n.disjunctions[:0]
+
+	initArcs(n.ctx, n.node)
+
+	// TODO(perf): single pass for quick filter on all disjunctions.
+	// n.node.unify(n.ctx, allKnown, attemptOnly)
+
+	// Initially we compute the cross product of a disjunction with the
+	// nodeContext as it is processed so far.
+	cross := []*nodeContext{n}
+	results := []*nodeContext{} // TODO: use n.disjuncts as buffer.
+
+	// Slow path for processing all disjunctions. Do not use `range` in case
+	// evaluation adds more disjunctions.
+	for i := 0; i < len(a); i++ {
+		d := &a[i]
+
+		mode := attemptOnly
+		if i == len(a)-1 {
+			mode = finalize
+		}
+		results = n.crossProduct(results, cross, d, mode)
+
+		// TODO: do we unwind only at the end or also intermittently?
+		switch len(results) {
+		case 0:
+			// TODO: now we have disjunct counters, do we plug holes at all?
+
+			// We add a "top" value to disable closedness checking for this
+			// disjunction to avoid a spurious "field not allowed" error.
+			// We return the errors below, which will, in turn, be reported as
+			// the error.
+			// TODO: probably no longer needed:
+			for i++; i < len(a); i++ {
+				c := MakeConjunct(d.env, top, a[i].cloneID)
+				n.scheduleConjunct(c, d.cloneID)
+			}
+
+			// Empty intermediate result. Further processing will not result in
+			// any new result, so we can terminate here.
+			// TODO(errors): investigate remaining disjunctions for errors.
+			return n.collectErrors(d)
+
+		case 1:
+			// TODO: consider injecting the disjuncts into the main nodeContext
+			// here. This would allow other values that this disjunction
+			// depends on to be evaluated. However, we should investigate
+			// whether this might lead to a situation where the order of
+			// evaluating disjunctions matters. So to be safe, we do not allow
+			// this for now.
+		}
+
+		// Swap the buffers.
+		cross, results = results, cross[:0]
+	}
+
+	switch len(cross) {
+	case 0:
+		panic("unreachable: empty disjunction already handled above")
+
+	case 1:
+		d := cross[0].node
+		n.node.BaseValue = d
+		n.defaultMode = cross[0].defaultMode
+
+	default:
+		// Append, rather than assign, to allow reusing the memory of
+		// a pre-existing slice.
+		n.disjuncts = append(n.disjuncts, cross...)
+	}
+
+	return nil
+}
+
+// crossProduct computes the cross product of the disjuncts of a disjunction
+// with an existing set of results.
+func (n *nodeContext) crossProduct(dst, cross []*nodeContext, dn *envDisjunct, mode runMode) []*nodeContext {
+	defer n.unmarkDepth(n.markDepth())
+	defer n.unmarkOptional(n.markOptional())
+
+	for _, p := range cross {
+		// TODO: use a partial unify instead
+		// p.completeNodeConjuncts()
+		initArcs(n.ctx, p.node)
+
+		for j, d := range dn.disjuncts {
+			c := MakeConjunct(dn.env, d.expr, dn.cloneID)
+			r, err := p.doDisjunct(c, d.mode, mode)
+
+			if err != nil {
+				// TODO: store more error context
+				dn.disjuncts[j].err = err
+				continue
+			}
+
+			// Unroll nested disjunctions.
+			switch len(r.disjuncts) {
+			case 0:
+				// r did not have a nested disjunction.
+				dst = appendDisjunct(n.ctx, dst, r)
+
+			case 1:
+				panic("unexpected number of disjuncts")
+
+			default:
+				for _, x := range r.disjuncts {
+					dst = appendDisjunct(n.ctx, dst, x)
+				}
+			}
+		}
+	}
+	return dst
+}
+
+// collectErrors collects errors from a failed disjunction.
+func (n *nodeContext) collectErrors(dn *envDisjunct) (errs *Bottom) {
+	for _, d := range dn.disjuncts {
+		if d.err != nil {
+			errs = CombineErrors(dn.src.Source(), errs, d.err)
+		}
+	}
+	return errs
+}
+
+func (n *nodeContext) doDisjunct(c Conjunct, m defaultMode, mode runMode) (*nodeContext, *Bottom) {
+	if c.CloseInfo.cc == nil {
+		panic("nil closeContext during init")
+	}
+	n.ctx.stats.Disjuncts++
+
+	oc := newOverlayContext(n.ctx)
+
+	var ccHole *closeContext
+
+	// TODO(perf): reuse a buffer, for instance by keeping a buffer handy in oc
+	// and then swapping it with disjunctCCs in the new nodeContext.
+	holes := make([]disjunctHole, 0, len(n.disjunctCCs))
+
+	// Clone the closeContexts of all open disjunctions and dependencies.
+	for _, d := range n.disjunctCCs {
+		// TODO: remove filled holes.
+
+		// Note that the root is already cloned as part of cloneVertex and that
+		// a closeContext corresponding to a disjunction always has a parent.
+		// We therefore do not need to check whether x.parent is nil.
+		o := oc.allocCC(d.cc)
+		if c.CloseInfo.cc == d.underlying {
+			ccHole = o
+		}
+		holes = append(holes, disjunctHole{o, d.underlying})
+	}
+
+	if ccHole == nil {
+		panic("expected non-nil overlay closeContext")
+	}
+
+	n.scheduler.blocking = n.scheduler.blocking[:0]
+
+	d := oc.cloneRoot(n)
+
+	d.defaultMode = combineDefault(m, n.defaultMode)
+
+	v := d.node
+
+	saved := n.node.BaseValue
+	n.node.BaseValue = v
+	defer func() { n.node.BaseValue = saved }()
+
+	// Clear relevant scheduler states.
+	// TODO: do something more principled: just ensure that a node that does
+	// not yet have all holes filled is not finalized. This may require
+	// a special mode, or evaluating more aggressively if finalize is not given.
+	v.status = unprocessed
+
+	d.overlays = n
+	d.disjunctCCs = append(d.disjunctCCs, holes...)
+	d.disjunct = c
+	c.CloseInfo.cc = ccHole
+	d.scheduleConjunct(c, c.CloseInfo)
+	ccHole.decDisjunct(n.ctx, DISJUNCT)
+
+	oc.unlinkOverlay()
+
+	v.unify(n.ctx, allKnown, mode)
+
+	if err := d.getError(); err != nil && !isCyclePlaceholder(err) {
+		d.free()
+		return nil, err
+	}
+
+	return d, nil
+}
+
+func (n *nodeContext) finalizeDisjunctions() {
+	if len(n.disjuncts) == 0 {
+		return
+	}
+
+	// TODO: we clear the Conjuncts to be compatible with the old evaluator.
+	// This is especially relevant for the API. Ideally, though, we should
+	// update Conjuncts to reflect the actual conjunct that went into the
+	// disjuncts.
+	for _, x := range n.disjuncts {
+		x.node.Conjuncts = nil
+	}
+
+	a := make([]Value, len(n.disjuncts))
+	p := 0
+	hasDefaults := false
+	for i, x := range n.disjuncts {
+		switch x.defaultMode {
+		case isDefault:
+			a[i] = a[p]
+			a[p] = x.node
+			p++
+			hasDefaults = true
+
+		case notDefault:
+			hasDefaults = true
+			fallthrough
+		case maybeDefault:
+			a[i] = x.node
+		}
+	}
+
+	d := &Disjunction{
+		Values:      a,
+		NumDefaults: p,
+		HasDefaults: hasDefaults,
+	}
+
+	v := n.node
+	v.BaseValue = d
+
+	// The conjuncts will have too much information. Better to have no
+	// information than incorrect information.
+	v.Arcs = nil
+	v.ChildErrors = nil
+}
+
+func (n *nodeContext) getError() *Bottom {
+	if b := n.node.Bottom(); b != nil && !isCyclePlaceholder(b) {
+		return b
+	}
+	if n.node.ChildErrors != nil {
+		return n.node.ChildErrors
+	}
+	if errs := n.errs; errs != nil {
+		return errs
+	}
+	if n.ctx.errs != nil {
+		return n.ctx.errs
+	}
+	return nil
+}
+
+// appendDisjunct appends a disjunct x to a, if it is not a duplicate.
+func appendDisjunct(ctx *OpContext, a []*nodeContext, x *nodeContext) []*nodeContext {
+	if x == nil {
+		return a
+	}
+
+	nv := x.node.DerefValue()
+	nx := nv.BaseValue
+	if nx == nil || isCyclePlaceholder(nx) {
+		nx = x.getValidators(finalized)
+	}
+
+	// Check uniqueness.
+	// TODO: if a node is not finalized, we could check that the parent
+	// (overlayed) closeContexts are identical.
+outer:
+	for _, xn := range a {
+		xv := xn.node.DerefValue()
+		if xv.status != finalized || nv.status != finalized {
+			// Partial node.
+
+			// TODO: we could consider supporting an option here to disable
+			// the filter. This way, if there is a bug, users could disable
+			// it, trading correctness for performance.
+			// If enabled, we would simply "continue" here.
+
+			for i, h := range xn.disjunctCCs { // TODO(perf): only iterate over completed
+				x, y := findIntersections(h.cc, x.disjunctCCs[i].cc)
+				if !equalPartialNode(xn.ctx, x, y) {
+					continue outer
+				}
+			}
+			if len(xn.tasks) != len(x.tasks) {
+				continue
+			}
+			for i, t := range xn.tasks {
+				s := x.tasks[i]
+				if s.x != t.x || s.id.cc != t.id.cc {
+					continue outer
+				}
+			}
+			vx, okx := nx.(Value)
+			ny := xv.BaseValue
+			if ny == nil || isCyclePlaceholder(ny) {
+				ny = x.getValidators(finalized)
+			}
+			vy, oky := ny.(Value)
+			if okx && oky && !Equal(ctx, vx, vy, CheckStructural) {
+				continue outer
+			}
+		} else {
+			// Complete nodes.
+			if !Equal(ctx, xn.node.DerefValue(), x.node.DerefValue(), CheckStructural) {
+				continue outer
+			}
+		}
+
+		// Free the vertex.
+		if x.defaultMode == isDefault {
+			xn.defaultMode = isDefault
+		}
+		x.free()
+		return a
+	}
+
+	return append(a, x)
+}
+
+// isPartialNode reports whether a node must be evaluated as a partial node.
+func isPartialNode(d *nodeContext) bool {
+	if d.node.status == finalized {
+		return true
+	}
+	// TODO: further optimizations
+	return false
+}
+
+// findIntersections reports the closeContext, relative to the two given
+// disjunction holes, that should be used in comparing the arc set.
+// x and y MUST both originate from the same disjunct hole. This ensures
+// that the depth of the parent chain is the same and that they have the
+// same underlying closeContext.
+//
+// Currently, we just take the parent. We should investigate if that is always
+// sufficient.
+//
+// Tradeoffs: if we do not go up enough, the two nodes may not be equal and we
+// miss the opportunity to filter. On the other hand, if we go up too far, we
+// end up comparing more arcs than potentially necessary.
+//
+// TODO: Add a unit test when this function is fully implemented.
+func findIntersections(x, y *closeContext) (cx, cy *closeContext) {
+	cx = x.parent
+	cy = y.parent
+
+	// TODO: why could this happen? Investigate. Note that it is okay to just
+	// return x and y. In the worst case we will just miss some possible
+	// deduplication.
+	if cx == nil || cy == nil {
+		return x, y
+	}
+
+	return cx, cy
+}
+
+func equalPartialNode(ctx *OpContext, x, y *closeContext) bool {
+	nx := x.src.getState(ctx)
+	ny := y.src.getState(ctx)
+
+	if nx == nil && ny == nil {
+		// Both nodes were finalized. 
We can compare them directly.
+		return Equal(ctx, x.src, y.src, CheckStructural)
+	}
+
+	// TODO: process the nodes with allKnown, attemptOnly.
+
+	if nx == nil || ny == nil {
+		return false
+	}
+
+	if !isEqualNodeValue(nx, ny) {
+		return false
+	}
+
+	if len(x.Patterns) != len(y.Patterns) {
+		return false
+	}
+	// Assume patterns are in the same order.
+	for i, p := range x.Patterns {
+		if !Equal(ctx, p, y.Patterns[i], 0) {
+			return false
+		}
+	}
+
+	if !Equal(ctx, x.Expr, y.Expr, 0) {
+		return false
+	}
+
+	if len(x.arcs) != len(y.arcs) {
+		return false
+	}
+
+	// TODO(perf): use merge sort
+outer:
+	for _, a := range x.arcs {
+		if a.kind != ARC {
+			continue outer
+		}
+		for _, b := range y.arcs {
+			if b.kind != ARC {
+				continue
+			}
+			if a.key.src.Label != b.key.src.Label {
+				continue
+			}
+			if !equalPartialNode(ctx, a.cc, b.cc) {
+				return false
+			}
+			continue outer
+		}
+		return false
+	}
+	return true
+}
+
+// isEqualNodeValue reports whether the two nodes are of the same type and have
+// the same value.
+//
+// TODO: this could be done much more cleanly if we are more diligent in early
+// evaluation.
+func isEqualNodeValue(x, y *nodeContext) bool {
+	xk := x.kind
+	yk := y.kind
+
+	// If a node is mid evaluation, the kind might not be final if the type is
+	// a struct, as whether a struct is a struct kind or an embedded type is
+	// determined later. This is just a limitation of the current
+	// implementation; we should update the kind more directly so that this
+	// code is not necessary.
+	// TODO: verify that this is still necessary and if so fix it so that this
+	// can be removed.
+	if x.aStruct != nil {
+		xk &= StructKind
+	}
+	if y.aStruct != nil {
+		yk &= StructKind
+	}
+
+	if xk != yk {
+		return false
+	}
+	if x.hasTop != y.hasTop {
+		return false
+	}
+	if !isEqualBaseValue(x.ctx, x.scalar, y.scalar) {
+		return false
+	}
+
+	// Do some quick checks first.
+	if len(x.checks) != len(y.checks) {
+		return false
+	}
+	if len(x.tasks) != len(y.tasks) {
+		return false
+	}
+
+	if !isEqualBaseValue(x.ctx, x.lowerBound, y.lowerBound) {
+		return false
+	}
+	if !isEqualBaseValue(x.ctx, x.upperBound, y.upperBound) {
+		return false
+	}
+
+	// Assume that checks are added in the same order.
+	for i, c := range x.checks {
+		d := y.checks[i]
+		if !Equal(x.ctx, c, d, CheckStructural) {
+			return false
+		}
+	}
+
+	for i, t := range x.tasks {
+		s := y.tasks[i]
+		if s.x != t.x {
+			return false
+		}
+		if s.id.cc != t.id.cc {
+			// FIXME: we should compare this too. For this to work we need to
+			// have access to the underlying closeContext, which we do not
+			// have at the moment.
+			// return false
+		}
+	}
+
+	return true
+}
+
+func isEqualBaseValue(ctx *OpContext, x, y BaseValue) bool {
+	if x == y {
+		return true
+	}
+	xv, _ := x.(Value)
+	yv, _ := y.(Value)
+	if xv == nil || yv == nil {
+		return false
+	}
+
+	return Equal(ctx, xv, yv, CheckStructural)
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/doc.go b/vendor/cuelang.org/go/internal/core/adt/doc.go
index 189b014f5b..84a0a14b9f 100644
--- a/vendor/cuelang.org/go/internal/core/adt/doc.go
+++ b/vendor/cuelang.org/go/internal/core/adt/doc.go
@@ -52,17 +52,17 @@
 //		e: f
 //	}
 //	f: 4
-//	g: a.b { // d.c points to inside the referred value, e.f, not.
+//	g: a.b { // d.c points to inside the referred value; e.f, not.
 //		c: 3
 //	}
 //
 // The implementation doesn't actually copy referred values, but rather resolves
 // references with the aid of an Environment. 
During compile time, each -// references is associated with the label and a number indicating in which +// reference is associated with the label and a number indicating in which // parent scope (offset from the current) this label needs to be looked up. An // Environment keeps track of the point at which a value was referenced, // providing enough information to look up the labeled value. This Environment -// is the identical for all references within a fields conjunct. Often, an +// is identical for all references within a fields conjunct. Often, an // Environment can even be shared among conjuncts. // // # Values diff --git a/vendor/cuelang.org/go/internal/core/adt/equality.go b/vendor/cuelang.org/go/internal/core/adt/equality.go index dcb8600a8f..cf2ff2ddeb 100644 --- a/vendor/cuelang.org/go/internal/core/adt/equality.go +++ b/vendor/cuelang.org/go/internal/core/adt/equality.go @@ -42,12 +42,24 @@ func equalVertex(ctx *OpContext, x *Vertex, v Value, flags Flag) bool { if !ok { return false } - if x == y { - return true - } + + // Note that the arc type of an originating node may be different than + // the one we are sharing. So do this check before dereferencing. + // For instance: + // + // a?: #B // ArcOptional + // #B: {} // ArcMember if x.ArcType != y.ArcType { return false } + + x = x.DerefValue() + y = y.DerefValue() + + if x == y { + return true + } + xk := x.Kind() yk := y.Kind() @@ -67,6 +79,9 @@ func equalVertex(ctx *OpContext, x *Vertex, v Value, flags Flag) bool { if x.IsClosedStruct() != y.IsClosedStruct() { return false } + if x.IsClosedList() != y.IsClosedList() { + return false + } if !equalClosed(ctx, x, y, flags) { return false } @@ -161,6 +176,10 @@ func equalTerminal(ctx *OpContext, v, w Value, flags Flag) bool { _, ok := w.(*Bottom) return ok + case *Top: + _, ok := w.(*Top) + return ok + case *Num, *String, *Bool, *Bytes, *Null: if b, ok := BinOp(ctx, EqualOp, v, w).(*Bool); ok { return b.B diff --git a/vendor/cuelang.org/go/internal/core/adt/errors.go b/vendor/cuelang.org/go/internal/core/adt/errors.go index 4de2a91c9e..581779fc42 100644 --- a/vendor/cuelang.org/go/internal/core/adt/errors.go +++ b/vendor/cuelang.org/go/internal/core/adt/errors.go @@ -133,7 +133,7 @@ func isIncomplete(v *Vertex) bool { if v == nil { return true } - if b, ok := v.BaseValue.(*Bottom); ok { + if b := v.Bottom(); b != nil { return b.IsIncomplete() } return false @@ -206,6 +206,22 @@ func CombineErrors(src ast.Node, x, y Value) *Bottom { } } +func addPositions(err *ValueError, c Conjunct) { + switch x := c.x.(type) { + case *Field: + // if x.ArcType == ArcRequired { + err.AddPosition(c.x) + // } + case *ConjunctGroup: + for _, c := range *x { + addPositions(err, c) + } + } + if c.CloseInfo.closeInfo != nil { + err.AddPosition(c.CloseInfo.location) + } +} + func NewRequiredNotPresentError(ctx *OpContext, v *Vertex) *Bottom { saved := ctx.PushArc(v) err := ctx.Newf("field is required but not present") @@ -230,9 +246,7 @@ func newRequiredFieldInComprehensionError(ctx *OpContext, x *ForClause, v *Verte err := ctx.Newf("missing required field in for comprehension: %v", v.Label) err.AddPosition(x.Src) for _, c := range v.Conjuncts { - if f, ok := c.x.(*Field); ok && f.ArcType == ArcRequired { - err.AddPosition(c.x) - } + addPositions(err, c) } return &Bottom{ Code: IncompleteError, @@ -240,6 +254,41 @@ func newRequiredFieldInComprehensionError(ctx *OpContext, x *ForClause, v *Verte } } +func (v *Vertex) reportFieldIndexError(c *OpContext, pos token.Pos, f Feature) { + v.reportFieldError(c, 
pos, f, + "index out of range [%d] with length %d", + "undefined field: %s") +} + +func (v *Vertex) reportFieldCycleError(c *OpContext, pos token.Pos, f Feature) *Bottom { + const msg = "cyclic reference to field %[1]v" + b := v.reportFieldError(c, pos, f, msg, msg) + return b +} + +func (v *Vertex) reportFieldError(c *OpContext, pos token.Pos, f Feature, intMsg, stringMsg string) *Bottom { + code := IncompleteError + if !v.Accept(c, f) { + code = EvalError + } + + label := f.SelectorString(c.Runtime) + + var err errors.Error + if f.IsInt() { + err = c.NewPosf(pos, intMsg, f.Index(), len(v.Elems())) + } else { + err = c.NewPosf(pos, stringMsg, label) + } + b := &Bottom{ + Code: code, + Err: err, + } + // TODO: yield failure + c.AddBottom(b) // TODO: unify error mechanism. + return b +} + // A ValueError is returned as a result of evaluating a value. type ValueError struct { r Runtime @@ -301,7 +350,15 @@ func appendNodePositions(a []token.Pos, n Node) []token.Pos { } if v, ok := n.(*Vertex); ok { for _, c := range v.Conjuncts { - a = appendNodePositions(a, c.Elem()) + switch x := c.x.(type) { + case *ConjunctGroup: + for _, c := range *x { + a = appendNodePositions(a, c.Elem()) + } + + default: + a = appendNodePositions(a, c.Elem()) + } } } return a @@ -321,6 +378,9 @@ func (c *OpContext) NewPosf(p token.Pos, format string, args ...interface{}) *Va a = appendNodePositions(a, x) args[i] = c.Str(x) case ast.Node: + // TODO: ideally the core evaluator should not depend on higher + // level packages. This will allow the debug packages to be used + // more widely. b, _ := cueformat.Node(x) if p := x.Pos(); p != token.NoPos { a = append(a, p) diff --git a/vendor/cuelang.org/go/internal/core/adt/eval.go b/vendor/cuelang.org/go/internal/core/adt/eval.go index 4f40363f6e..f26dd24ebb 100644 --- a/vendor/cuelang.org/go/internal/core/adt/eval.go +++ b/vendor/cuelang.org/go/internal/core/adt/eval.go @@ -73,7 +73,7 @@ var incompleteSentinel = &Bottom{ // error. // // TODO: return *Vertex -func (c *OpContext) evaluate(v *Vertex, r Resolver, state vertexStatus) Value { +func (c *OpContext) evaluate(v *Vertex, r Resolver, state combinedFlags) Value { if v.isUndefined() { // Use node itself to allow for cycle detection. c.unify(v, state) @@ -99,6 +99,8 @@ func (c *OpContext) evaluate(v *Vertex, r Resolver, state vertexStatus) Value { } if n := v.state; n != nil { + n.assertInitialized() + if n.errs != nil && !n.errs.IsIncomplete() { return n.errs } @@ -131,7 +133,7 @@ func (c *OpContext) evaluate(v *Vertex, r Resolver, state vertexStatus) Value { return nil } - if v.status < finalized && v.state != nil { + if v.status < finalized && v.state != nil && !c.isDevVersion() { // TODO: errors are slightly better if we always add addNotify, but // in this case it is less likely to cause a performance penalty. // See https://cuelang.org/issue/661. It may be possible to @@ -150,9 +152,15 @@ func (c *OpContext) evaluate(v *Vertex, r Resolver, state vertexStatus) Value { // state can be used to indicate to which extent processing should continue. // state == finalized means it is evaluated to completion. See vertexStatus // for more details. 
-func (c *OpContext) unify(v *Vertex, state vertexStatus) { +func (c *OpContext) unify(v *Vertex, flags combinedFlags) { + if c.isDevVersion() { + requires, mode := flags.conditions(), flags.runMode() + v.unify(c, requires, mode) + return + } + // defer c.PopVertex(c.PushVertex(v)) - if Debug { + if c.LogEval > 0 { c.nest++ c.Logf(v, "Unify") defer func() { @@ -166,6 +174,8 @@ func (c *OpContext) unify(v *Vertex, state vertexStatus) { n := v.getNodeContext(c, 1) defer v.freeNode(n) + state := flags.vertexStatus() + // TODO(cycle): verify this happens in all cases when we need it. if n != nil && v.Parent != nil && v.Parent.state != nil { n.depth = v.Parent.state.depth + 1 @@ -184,7 +194,7 @@ func (c *OpContext) unify(v *Vertex, state vertexStatus) { return case evaluatingArcs: - Assertf(v.status > 0, "unexpected status %d", v.status) + Assertf(c, v.status > 0, "unexpected status %d", v.status) return case 0: @@ -343,7 +353,7 @@ func (c *OpContext) unify(v *Vertex, state vertexStatus) { // completing all disjunctions. if !n.done() { if err := n.incompleteErrors(true); err != nil { - b, _ := n.node.BaseValue.(*Bottom) + b := n.node.Bottom() if b != err { err = CombineErrors(n.ctx.src, b, err) } @@ -370,6 +380,8 @@ func (c *OpContext) unify(v *Vertex, state vertexStatus) { // insertConjuncts inserts conjuncts previously not inserted. func (n *nodeContext) insertConjuncts(state vertexStatus) bool { + unreachableForDev(n.ctx) + // Exit early if we have a concrete value and only need partial results. if state == partial { for n.conjunctsPartialPos < len(n.conjuncts) { @@ -447,7 +459,7 @@ func (n *nodeContext) doNotify() { for _, rec := range n.notify { v := rec.v if v.state == nil { - if b, ok := v.BaseValue.(*Bottom); ok { + if b := v.Bottom(); b != nil { v.BaseValue = CombineErrors(nil, b, n.errs) } else { v.BaseValue = n.errs @@ -461,13 +473,14 @@ func (n *nodeContext) doNotify() { func (n *nodeContext) postDisjunct(state vertexStatus) { ctx := n.ctx + unreachableForDev(ctx) for { // Use maybeSetCache for cycle breaking for n.maybeSetCache(); n.expandOne(state); n.maybeSetCache() { } - if !n.addLists(state) { + if !n.addLists(oldOnly(state)) { break } } @@ -601,7 +614,7 @@ func (n *nodeContext) validateValue(state vertexStatus) { } else if len(n.node.Structs) > 0 { markStruct = n.kind&StructKind != 0 && !n.hasTop } - v := n.node.Value() + v := n.node.DerefValue().Value() if n.node.BaseValue == nil && markStruct { n.node.BaseValue = &StructMarker{} v = n.node @@ -671,6 +684,7 @@ func (n *nodeContext) incompleteErrors(final bool) *Bottom { // n := d.node.getNodeContext(ctx) // n.addBottom(err) if final && c.vertex != nil && c.vertex.status != finalized { + c.vertex.state.assertInitialized() c.vertex.state.addBottom(err) c.vertex = nil } @@ -735,6 +749,8 @@ func (n *nodeContext) incompleteErrors(final bool) *Bottom { // though, that any potential performance issues are eliminated for // Protobuf-like oneOf fields. 
 func (n *nodeContext) checkClosed(state vertexStatus) bool {
+	unreachableForDev(n.ctx)
+
 	ignore := state != finalized || n.skipNonMonotonicChecks()
 
 	v := n.node
@@ -754,6 +770,8 @@
 }
 
 func (n *nodeContext) completeArcs(state vertexStatus) {
+	unreachableForDev(n.ctx)
+
 	if DebugSort > 0 {
 		DebugSortArcs(n.ctx, n.node)
 	}
@@ -790,7 +808,7 @@
 
 		wasVoid := a.ArcType == ArcPending
 
-		ctx.unify(a, finalized)
+		ctx.unify(a, oldOnly(finalized))
 
 		if a.ArcType == ArcPending {
 			continue
@@ -804,7 +822,7 @@
 			state = conjuncts
 		}
 
-		if err, _ := a.BaseValue.(*Bottom); err != nil {
+		if err := a.Bottom(); err != nil {
 			n.node.AddChildError(err)
 		}
 	}
@@ -830,7 +848,7 @@
 			// faulty CUE using this mechanism, though. At most error
 			// messages are a bit unintuitive. This may change once we have
 			// functionality to reflect on types.
-			if _, ok := n.node.BaseValue.(*Bottom); !ok {
+			if !n.node.IsErr() {
 				n.node.BaseValue = &StructMarker{}
 				n.kind = StructKind
 			}
@@ -844,7 +862,7 @@
 			// TODO(errors): make Validate return bottom and generate
 			// optimized conflict message. Also track and inject IDs
 			// to determine origin locations.
-			v := ctx.evalState(c.expr, finalized)
+			v := ctx.evalState(c.expr, oldOnly(finalized))
 
 			v, _ = ctx.getDefault(v)
 			v = Unwrap(v)
@@ -902,6 +920,10 @@ var cycle = &Bottom{
 }
 
 func isCyclePlaceholder(v BaseValue) bool {
+	// TODO: do not mark cycle in BaseValue.
+	if a, _ := v.(*Vertex); a != nil {
+		v = a.DerefValue().BaseValue
+	}
 	return v == cycle
 }
@@ -952,17 +974,21 @@ type nodeContext struct {
 	nextFree *nodeContext
 	refCount int
 
-	// Keep these two out of the nodeContextState to make them more accessible
+	// Keep node out of the nodeContextState to make it more accessible
 	// for source-level debuggers.
-	ctx  *OpContext
 	node *Vertex
 
+	// underlying is the original Vertex that this node overlays. It should be
+	// set for all Vertex values that were cloned.
+	underlying *Vertex
+
+	// overlays is set if this node is the root of a disjunct created in
+	// doDisjunct. It points to the direct parent nodeContext.
+	overlays *nodeContext
+
 	nodeContextState
 
-	// rootCloseContext should not be cloned as clones need to get their own
-	// copies of this. For this reason it is not included in nodeContextState,
-	// as it prevents it from being set "by default".
-	rootCloseContext *closeContext
+	scheduler
 
 	// Below are slices that need to be managed when cloning and reclaiming
 	// nodeContexts for reuse. We want to ensure that, instead of setting
@@ -995,15 +1021,28 @@
 
 	// Disjunction handling
 	disjunctions []envDisjunct
+
+	// disjunctCCs holds the close contexts that represent "holes" in which
+	// pending disjuncts are to be inserted for the clone represented by this
+	// nodeContext. Holes that are not yet filled will always need to be cloned
+	// when a disjunction branches in doDisjunct.
+	//
+	// Holes may accumulate as nested disjunctions get added and filled holes
+	// may be removed. So the list of disjunctCCs may differ from the number
+	// of disjunctions.
+	disjunctCCs []disjunctHole
+
 	// usedDefault indicates for each of possibly multiple parent
 	// disjunctions whether it is unified with a default disjunct or not. 
 	// This is then later used to determine whether a disjunction should
 	// be treated as a marked disjunction.
 	usedDefault []defaultInfo
 
+	// disjuncts holds disjuncts that evaluated to a non-bottom value.
+	// TODO: come up with a better name.
 	disjuncts    []*nodeContext
 	buffer       []*nodeContext
 	disjunctErrs []*Bottom
+	disjunct     Conjunct
 
 	// snapshot holds the last value of the vertex before calling postDisjunct.
 	snapshot Vertex
@@ -1024,12 +1063,36 @@ type conjunct struct {
 }
 
 type nodeContextState struct {
+	// isInitialized indicates whether conjuncts have been inserted in the node.
+	isInitialized bool
+
+	// toComplete marks whether completeNodeTasks needs to be called on this
+	// node after a corresponding task has been completed.
+	toComplete bool
+
+	// isCompleting > 0 indicates whether a call to completeNodeTasks is in
+	// progress.
+	isCompleting int
+
+	// evalDepth is a number that is assigned when evaluating arcs and is set
+	// to detect structural cycles. This value may be temporarily altered when
+	// a node descends into evaluating a value that may be an error (pattern
+	// constraints, optional fields, etc.). A non-zero value always indicates
+	// that there are cyclic references, though.
+	evalDepth int
+
 	// State info
 	hasTop      bool
 	hasCycle    bool // has conjunct with structural cycle
 	hasNonCycle bool // has conjunct without structural cycle
 
+	isShared  bool      // set if we are currently structure sharing.
+	noSharing bool      // set if structure sharing is not allowed
+	shared    Conjunct  // the original conjunct that led to sharing
+	sharedID  CloseInfo // the original CloseInfo that led to sharing
+
+	origBaseValue BaseValue // the BaseValue that structure sharing replaces.
+
 	depth       int32
 	defaultMode defaultMode
@@ -1046,6 +1109,11 @@
 	aStruct   Expr
 	aStructID CloseInfo
 
+	// List fields
+	listIsClosed bool
+	maxListLen   int
+	maxNode      Expr
+
 	lowerBound *BoundValue // > or >=
 	upperBound *BoundValue // < or <=
 	errs       *Bottom
@@ -1059,6 +1127,8 @@
 	// conjunctsPartialPos is like conjunctsPos, but for the 'partial' phase
 	// of processing where conjuncts are only processed as concrete scalars.
 	conjunctsPartialPos int
+
+	arcPos int
 }
 
 // A receiver receives notifications.
@@ -1086,6 +1156,8 @@ type defaultInfo struct {
 }
 
 func (n *nodeContext) addNotify(v *Vertex, cc *closeContext) {
+	unreachableForDev(n.ctx)
+
 	if v != nil && !n.node.hasAllConjuncts {
 		n.notify = append(n.notify, receiver{v, cc})
 	}
@@ -1104,6 +1176,8 @@ func (n *nodeContext) clone() *nodeContext {
 	d.arcMap = append(d.arcMap, n.arcMap...)
 	d.notify = append(d.notify, n.notify...)
 
+	n.scheduler.cloneInto(&d.scheduler)
+
 	d.conjuncts = append(d.conjuncts, n.conjuncts...)
 	d.cyclicConjuncts = append(d.cyclicConjuncts, n.cyclicConjuncts...)
 	d.dynamicFields = append(d.dynamicFields, n.dynamicFields...)
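As an aside on the flag encoding that the hunks above and below keep converting to: the combinedFlags helpers shown at the top of this section (oldOnly, combineMode, attempt, require, and final) pack a vertexStatus into bits 0-7, a runMode into bits 8-15, and condition bits from bit 16 up. The standalone sketch below illustrates only that bit layout; every constant value and name in it is a hypothetical stand-in, not the adt package's actual unexported definition.

package main

import "fmt"

// Illustrative stand-ins for the adt package's unexported types.
type (
	vertexStatus  uint8
	runMode       uint8
	condition     uint16
	combinedFlags uint32
)

// Hypothetical constant values, chosen only for the demonstration.
const (
	partial vertexStatus = 1
)

const yield runMode = 2

const scalarKnown condition = 1 << 3

// require packs a target status, the yield run mode, and the required
// conditions, mirroring the layout of the helper of the same name above:
// status in the low byte, mode in the second byte, conditions above that.
func require(state vertexStatus, cond condition) combinedFlags {
	return combinedFlags(state) |
		combinedFlags(yield)<<8 |
		combinedFlags(cond)<<16
}

func main() {
	f := require(partial, scalarKnown)
	// Unpacking is the mirror image, as in the vertexStatus, runMode,
	// and conditions accessors defined earlier in this patch.
	fmt.Println(vertexStatus(f&0xff) == partial)  // true
	fmt.Println(runMode(f>>8)&0xff == yield)      // true
	fmt.Println(condition(f>>16) == scalarKnown)  // true
}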
@@ -1129,8 +1203,8 @@ func (c *OpContext) newNodeContext(node *Vertex) *nodeContext { c.freeListNode = n.nextFree *n = nodeContext{ - ctx: c, - node: node, + scheduler: scheduler{ctx: c}, + node: node, nodeContextState: nodeContextState{ kind: TopKind, }, @@ -1147,25 +1221,36 @@ func (c *OpContext) newNodeContext(node *Vertex) *nodeContext { vLists: n.vLists[:0], exprs: n.exprs[:0], disjunctions: n.disjunctions[:0], + disjunctCCs: n.disjunctCCs[:0], usedDefault: n.usedDefault[:0], disjunctErrs: n.disjunctErrs[:0], disjuncts: n.disjuncts[:0], buffer: n.buffer[:0], } + n.scheduler.clear() + n.scheduler.node = n + n.underlying = node return n } c.stats.Allocs++ - return &nodeContext{ - ctx: c, + n := &nodeContext{ + scheduler: scheduler{ + ctx: c, + }, node: node, nodeContextState: nodeContextState{kind: TopKind}, } + n.scheduler.node = n + n.underlying = node + return n } func (v *Vertex) getNodeContext(c *OpContext, ref int) *nodeContext { + unreachableForDev(c) + if v.state == nil { if v.status == finalized { return nil @@ -1219,6 +1304,7 @@ func (c *OpContext) freeNodeContext(n *nodeContext) { c.freeListNode = n n.node = nil n.refCount = 0 + n.scheduler.clear() } // TODO(perf): return a dedicated ConflictError that can track original @@ -1321,6 +1407,8 @@ func (n *nodeContext) updateNodeType(k Kind, v Expr, id CloseInfo) bool { } func (n *nodeContext) done() bool { + // TODO(v0.7): verify that done() is checking for the right conditions in + // the new evaluator implementation. return len(n.dynamicFields) == 0 && len(n.comprehensions) == 0 && len(n.exprs) == 0 @@ -1329,6 +1417,7 @@ func (n *nodeContext) done() bool { // finalDone is like done, but allows for cycle errors, which can be ignored // as they essentially indicate a = a & _. func (n *nodeContext) finalDone() bool { + // TODO(v0.7): update for new evaluator? for _, x := range n.exprs { if x.err.Code != CycleError { return false @@ -1342,6 +1431,8 @@ func (n *nodeContext) finalDone() bool { // hasErr is used to determine if an evaluation path, for instance a single // path after expanding all disjunctions, has an error. func (n *nodeContext) hasErr() bool { + n.assertInitialized() + if n.node.ChildErrors != nil { return true } @@ -1352,12 +1443,16 @@ func (n *nodeContext) hasErr() bool { } func (n *nodeContext) getErr() *Bottom { + n.assertInitialized() + n.errs = CombineErrors(nil, n.errs, n.ctx.Err()) return n.errs } // getValidators sets the vertex' Value in case there was no concrete value. func (n *nodeContext) getValidators(state vertexStatus) BaseValue { + n.assertInitialized() + ctx := n.ctx a := []Value{} @@ -1458,6 +1553,8 @@ type envCheck struct { } func (n *nodeContext) addBottom(b *Bottom) { + n.assertInitialized() + n.errs = CombineErrors(nil, n.errs, b) // TODO(errors): consider doing this // n.kindExpr = n.errs @@ -1465,6 +1562,8 @@ func (n *nodeContext) addBottom(b *Bottom) { } func (n *nodeContext) addErr(err errors.Error) { + n.assertInitialized() + if err != nil { n.addBottom(&Bottom{Err: err}) } @@ -1474,6 +1573,8 @@ func (n *nodeContext) addErr(err errors.Error) { // into the nodeContext if successful or queue it for later evaluation if it is // incomplete or is not value. func (n *nodeContext) addExprConjunct(v Conjunct, state vertexStatus) { + unreachableForDev(n.ctx) + env := v.Env id := v.CloseInfo @@ -1525,6 +1626,8 @@ func (n *nodeContext) addExprConjunct(v Conjunct, state vertexStatus) { // evalExpr is only called by addExprConjunct. If an error occurs, it records // the error in n and returns nil. 
func (n *nodeContext) evalExpr(v Conjunct, state vertexStatus) { + unreachableForDev(n.ctx) + // Require an Environment. ctx := n.ctx @@ -1539,7 +1642,7 @@ func (n *nodeContext) evalExpr(v Conjunct, state vertexStatus) { if state == finalized { state = conjuncts } - arc, err := ctx.resolveState(v, x, state) + arc, err := ctx.resolveState(v, x, oldOnly(state)) if err != nil && (!err.IsIncomplete() || err.Permanent) { n.addBottom(err) break @@ -1576,7 +1679,7 @@ func (n *nodeContext) evalExpr(v Conjunct, state vertexStatus) { case Evaluator: // Interpolation, UnaryExpr, BinaryExpr, CallExpr // Could be unify? - val := ctx.evaluateRec(v, partial) + val := ctx.evaluateRec(v, oldOnly(partial)) if b, ok := val.(*Bottom); ok && b.IsIncomplete() { n.exprs = append(n.exprs, envExpr{v, b}) @@ -1616,6 +1719,8 @@ func (n *nodeContext) evalExpr(v Conjunct, state vertexStatus) { } func (n *nodeContext) addVertexConjuncts(c Conjunct, arc *Vertex, inline bool) { + unreachableForDev(n.ctx) + closeInfo := c.CloseInfo // We need to ensure that each arc is only unified once (or at least) a @@ -1689,7 +1794,7 @@ func (n *nodeContext) addVertexConjuncts(c Conjunct, arc *Vertex, inline bool) { // is necessary to prevent lookups in unevaluated structs. // TODO(cycles): this can probably most easily be fixed with a // having a more recursive implementation. - n.ctx.unify(arc, partial) + n.ctx.unify(arc, oldOnly(partial)) } // Don't add conjuncts if a node is referring to itself. @@ -1945,7 +2050,7 @@ func (n *nodeContext) addStruct( Vertex: n.node, } - s.Init() + s.Init(n.ctx) if s.HasEmbed && !s.IsFile() { closeInfo = closeInfo.SpawnGroup(nil) @@ -2021,6 +2126,10 @@ func (n *nodeContext) addStruct( // disjunctions. func (n *nodeContext) insertField(f Feature, mode ArcType, x Conjunct) *Vertex { ctx := n.ctx + if ctx.isDevVersion() { + return n.insertArc(f, mode, x, x.CloseInfo, true) + } + arc, isNew := n.node.GetArc(ctx, f, mode) if f.IsLet() && !isNew { arc.MultiLet = true @@ -2050,6 +2159,10 @@ func (n *nodeContext) insertField(f Feature, mode ArcType, x Conjunct) *Vertex { func (n *nodeContext) insertFieldUnchecked(f Feature, mode ArcType, x Conjunct) *Vertex { ctx := n.ctx + if ctx.isDevVersion() { + return n.insertArc(f, mode, x, x.CloseInfo, false) + } + arc, isNew := n.node.GetArc(ctx, f, mode) if f.IsLet() && !isNew { arc.MultiLet = true @@ -2073,6 +2186,8 @@ func (n *nodeContext) insertFieldUnchecked(f Feature, mode ArcType, x Conjunct) // TODO(errors): detect when a field is added to a struct that is already used // in a for clause. func (n *nodeContext) expandOne(state vertexStatus) (done bool) { + unreachableForDev(n.ctx) + // Don't expand incomplete expressions if we detected a cycle. if n.done() || (n.hasCycle && !n.hasNonCycle) { return false @@ -2110,6 +2225,8 @@ func (n *nodeContext) expandOne(state vertexStatus) (done bool) { // injectDynamic evaluates and inserts dynamic declarations. func (n *nodeContext) injectDynamic() (progress bool) { + unreachableForDev(n.ctx) + ctx := n.ctx k := 0 @@ -2119,7 +2236,7 @@ func (n *nodeContext) injectDynamic() (progress bool) { x := d.field.Key // Push state to capture and remove errors. 
s := ctx.PushState(d.env, x.Source()) - v := ctx.evalState(x, finalized) + v := ctx.evalState(x, oldOnly(finalized)) b := ctx.PopState(s) if b != nil && b.IsIncomplete() { @@ -2160,7 +2277,7 @@ func (n *nodeContext) injectDynamic() (progress bool) { // // TODO(embeddedScalars): for embedded scalars, there should be another pass // of evaluation expressions after expanding lists. -func (n *nodeContext) addLists(state vertexStatus) (progress bool) { +func (n *nodeContext) addLists(state combinedFlags) (progress bool) { if len(n.lists) == 0 && len(n.vLists) == 0 { return false } @@ -2246,7 +2363,7 @@ outer: if err != nil { if err.ForCycle && !l.self { // The list has a comprehension that refers to the list - // itself. This means we should postpone evalauting this + // itself. This means we should postpone evaluating this // list until all other lists have been evaluated. n.lists[i].ignore = true l.self = true @@ -2324,7 +2441,7 @@ outer: s := l.list.info if s == nil { s = &StructLit{Decls: []Decl{l.elipsis}} - s.Init() + s.Init(n.ctx) l.list.info = s } info := n.node.AddStruct(s, l.env, l.id) diff --git a/vendor/cuelang.org/go/internal/core/adt/expr.go b/vendor/cuelang.org/go/internal/core/adt/expr.go index 8112e3d001..445ca925ec 100644 --- a/vendor/cuelang.org/go/internal/core/adt/expr.go +++ b/vendor/cuelang.org/go/internal/core/adt/expr.go @@ -27,6 +27,16 @@ import ( "cuelang.org/go/cue/token" ) +var _ Elem = &ConjunctGroup{} + +// A ConjunctGroup is an Elem that is used for internal grouping of Conjuncts +// only. +type ConjunctGroup []Conjunct + +func (g *ConjunctGroup) Source() ast.Node { + return nil +} + // A StructLit represents an unevaluated struct literal or file body. type StructLit struct { Src ast.Node // ast.File or ast.StructLit @@ -71,7 +81,7 @@ func (x *StructLit) HasOptional() bool { func (x *StructLit) Source() ast.Node { return x.Src } -func (x *StructLit) evaluate(c *OpContext, state vertexStatus) Value { +func (x *StructLit) evaluate(c *OpContext, state combinedFlags) Value { e := c.Env(0) v := c.newInlineVertex(e.Vertex, nil, Conjunct{e, x, c.ci}) // evaluate may not finalize a field, as the resulting value may be @@ -87,11 +97,16 @@ func (o *StructLit) MarkField(f Feature) { o.Fields = append(o.Fields, FieldInfo{Label: f}) } -func (o *StructLit) Init() { +func (o *StructLit) Init(ctx *OpContext) { if o.initialized { return } o.initialized = true + + if ctx.isDevVersion() { + return + } + for _, d := range o.Decls { switch x := d.(type) { case *Field: @@ -172,10 +187,6 @@ func (o *StructLit) OptionalTypes() OptionalType { // foo: bar // #foo: bar // _foo: bar -// -// Legacy: -// -// Foo :: bar type Field struct { Src *ast.Field @@ -281,7 +292,7 @@ func (x *ListLit) Source() ast.Node { return x.Src } -func (x *ListLit) evaluate(c *OpContext, state vertexStatus) Value { +func (x *ListLit) evaluate(c *OpContext, state combinedFlags) Value { e := c.Env(0) v := c.newInlineVertex(e.Vertex, nil, Conjunct{e, x, c.ci}) v.CompleteArcs(c) @@ -446,8 +457,10 @@ func (x *BoundExpr) Source() ast.Node { return x.Src } -func (x *BoundExpr) evaluate(ctx *OpContext, state vertexStatus) Value { - v := ctx.value(x.Expr, partial) +func (x *BoundExpr) evaluate(ctx *OpContext, state combinedFlags) Value { + // scalarKnown is used here to ensure we know the value. The result does + // not have to be concrete, though. 
+ v := ctx.value(x.Expr, require(partial, scalarKnown)) if isError(v) { return v } @@ -679,7 +692,7 @@ func (x *NodeLink) Kind() Kind { } func (x *NodeLink) Source() ast.Node { return x.Node.Source() } -func (x *NodeLink) resolve(c *OpContext, state vertexStatus) *Vertex { +func (x *NodeLink) resolve(c *OpContext, state combinedFlags) *Vertex { return x.Node } @@ -699,7 +712,7 @@ func (x *FieldReference) Source() ast.Node { return x.Src } -func (x *FieldReference) resolve(c *OpContext, state vertexStatus) *Vertex { +func (x *FieldReference) resolve(c *OpContext, state combinedFlags) *Vertex { n := c.relNode(x.UpCount) pos := pos(x) return c.lookup(n, pos, x.Label, state) @@ -723,7 +736,7 @@ func (x *ValueReference) Source() ast.Node { return x.Src } -func (x *ValueReference) resolve(c *OpContext, state vertexStatus) *Vertex { +func (x *ValueReference) resolve(c *OpContext, state combinedFlags) *Vertex { if x.UpCount == 0 { return c.vertex } @@ -750,7 +763,7 @@ func (x *LabelReference) Source() ast.Node { return x.Src } -func (x *LabelReference) evaluate(ctx *OpContext, state vertexStatus) Value { +func (x *LabelReference) evaluate(ctx *OpContext, state combinedFlags) Value { label := ctx.relLabel(x.UpCount) if label == 0 { // There is no label. This may happen if a LabelReference is evaluated @@ -794,15 +807,15 @@ func (x *DynamicReference) Source() ast.Node { func (x *DynamicReference) EvaluateLabel(ctx *OpContext, env *Environment) Feature { env = env.up(ctx, x.UpCount) frame := ctx.PushState(env, x.Src) - v := ctx.value(x.Label, partial) + v := ctx.value(x.Label, require(partial, scalarKnown)) ctx.PopState(frame) return ctx.Label(x, v) } -func (x *DynamicReference) resolve(ctx *OpContext, state vertexStatus) *Vertex { +func (x *DynamicReference) resolve(ctx *OpContext, state combinedFlags) *Vertex { e := ctx.Env(x.UpCount) frame := ctx.PushState(e, x.Src) - v := ctx.value(x.Label, partial) + v := ctx.value(x.Label, require(partial, scalarKnown)) ctx.PopState(frame) f := ctx.Label(x.Label, v) return ctx.lookup(e.Vertex, pos(x), f, state) @@ -828,7 +841,7 @@ func (x *ImportReference) Source() ast.Node { return x.Src } -func (x *ImportReference) resolve(ctx *OpContext, state vertexStatus) *Vertex { +func (x *ImportReference) resolve(ctx *OpContext, state combinedFlags) *Vertex { path := x.ImportPath.StringValue(ctx) v := ctx.Runtime.LoadImport(path) if v == nil { @@ -856,14 +869,14 @@ func (x *LetReference) Source() ast.Node { return x.Src } -func (x *LetReference) resolve(ctx *OpContext, state vertexStatus) *Vertex { +func (x *LetReference) resolve(ctx *OpContext, state combinedFlags) *Vertex { e := ctx.Env(x.UpCount) n := e.Vertex // No need to Unify n, as Let references can only result from evaluating // an expression within n, in which case evaluation must already have // started. - if n.status < evaluating { + if n.status < evaluating && !ctx.isDevVersion() { panic("unexpected node state < Evaluating") } @@ -897,8 +910,8 @@ func (x *LetReference) resolve(ctx *OpContext, state vertexStatus) *Vertex { // We can reevaluate this once we have redone some of the planned order of // evaluation work. arc.Finalize(ctx) - b, ok := arc.BaseValue.(*Bottom) - if !arc.MultiLet && !ok { + b := arc.Bottom() + if !arc.MultiLet && b == nil { return arc } @@ -907,6 +920,14 @@ func (x *LetReference) resolve(ctx *OpContext, state vertexStatus) *Vertex { // any other context. 
c := arc.Conjuncts[0] expr := c.Expr() + + // A let field always has a single expression and thus ConjunctGroups + // should always have been eliminated. This is critical, as we must + // ensure that Comprehensions, which may be wrapped in ConjunctGroups, + // are eliminated. + _, isGroup := expr.(*ConjunctGroup) + ctx.Assertf(pos(expr), !isGroup, "unexpected number of expressions") + key := cacheKey{expr, arc} v, ok := e.cache[key] if !ok { @@ -926,8 +947,22 @@ func (x *LetReference) resolve(ctx *OpContext, state vertexStatus) *Vertex { } v = n e.cache[key] = n - nc := n.getNodeContext(ctx, 0) - nc.hasNonCycle = true // Allow a first cycle to be skipped. + if ctx.isDevVersion() { + nc := n.getState(ctx) + // TODO: unlike with the old evaluator, we do not allow the first + // cycle to be skipped. Doing so can lead to hanging evaluation. + // As the cycle detection works slightly differently in the new + // evaluator (and is not entirely completed), this can happen. We + // should revisit this once we have completed the structural cycle + // detection. + // nc.hasNonCycle = true + // Allow a first cycle to be skipped. + nc.free() + n.unify(ctx, allKnown, finalize) + } else { + nc := n.getNodeContext(ctx, 0) + nc.hasNonCycle = true // Allow a first cycle to be skipped. + } // Parents cannot add more conjuncts to a let expression, so set of // conjuncts is always complete. @@ -959,16 +994,21 @@ func (x *SelectorExpr) Source() ast.Node { return x.Src } -func (x *SelectorExpr) resolve(c *OpContext, state vertexStatus) *Vertex { +func (x *SelectorExpr) resolve(c *OpContext, state combinedFlags) *Vertex { // TODO: the node should really be evaluated as AllConjunctsDone, but the // order of evaluation is slightly off, causing too much to be evaluated. // This may especially result in incorrect results when using embedded // scalars. - n := c.node(x, x.X, x.Sel.IsRegular(), partial) + // In the new evaluator, evaluation of the node is done in lookup. + // TODO: + // - attempt: if we ensure that errors are propagated in pending arcs. + // - require: if we want to ensure that all arcs + // are known now. + n := c.node(x, x.X, x.Sel.IsRegular(), attempt(partial, needFieldSetKnown)) if n == emptyNode { return n } - if n.status == partial { + if n.status == partial && !c.isDevVersion() { if b := n.state.incompleteErrors(false); b != nil && b.Code < CycleError { c.AddBottom(b) return n @@ -978,7 +1018,8 @@ func (x *SelectorExpr) resolve(c *OpContext, state vertexStatus) *Vertex { // will otherwise be discarded and there will be no other chance to check // the struct is valid. - return c.lookup(n, x.Src.Sel.Pos(), x.Sel, state) + pos := x.Src.Sel.Pos() + return c.lookup(n, pos, x.Sel, state) } // IndexExpr is like a selector, but selects an index. @@ -997,18 +1038,18 @@ func (x *IndexExpr) Source() ast.Node { return x.Src } -func (x *IndexExpr) resolve(ctx *OpContext, state vertexStatus) *Vertex { +func (x *IndexExpr) resolve(ctx *OpContext, state combinedFlags) *Vertex { // TODO: support byte index. // TODO: the node should really be evaluated as AllConjunctsDone, but the // order of evaluation is slightly off, causing too much to be evaluated. // This may especially result in incorrect results when using embedded // scalars. 
- n := ctx.node(x, x.X, true, partial) - i := ctx.value(x.Index, partial) + n := ctx.node(x, x.X, true, attempt(partial, needFieldSetKnown)) + i := ctx.value(x.Index, require(partial, scalarKnown)) if n == emptyNode { return n } - if n.status == partial { + if n.status == partial && !ctx.isDevVersion() { if b := n.state.incompleteErrors(false); b != nil && b.Code < CycleError { ctx.AddBottom(b) return n @@ -1019,7 +1060,17 @@ func (x *IndexExpr) resolve(ctx *OpContext, state vertexStatus) *Vertex { // the struct is valid. f := ctx.Label(x.Index, i) - return ctx.lookup(n, x.Src.Index.Pos(), f, state) + + // Within lookup, errors collected in ctx may be associated with n. This is + // correct if the error is generated within lookup, but not if it has + // already been generated at this point. We therefore bail out early here if + // we already have an error. + // TODO: this code can probably go once we have cleaned up error generation. + if ctx.errs != nil { + return nil + } + pos := x.Src.Index.Pos() + return ctx.lookup(n, pos, f, state) } // A SliceExpr represents a slice operation. (Not currently in spec.) @@ -1040,10 +1091,10 @@ func (x *SliceExpr) Source() ast.Node { return x.Src } -func (x *SliceExpr) evaluate(c *OpContext, state vertexStatus) Value { +func (x *SliceExpr) evaluate(c *OpContext, state combinedFlags) Value { // TODO: strides - v := c.value(x.X, partial) + v := c.value(x.X, require(partial, fieldSetKnown)) const as = "slice index" switch v := v.(type) { @@ -1060,10 +1111,10 @@ func (x *SliceExpr) evaluate(c *OpContext, state vertexStatus) Value { hi = uint64(len(v.Arcs)) ) if x.Lo != nil { - lo = c.uint64(c.value(x.Lo, partial), as) + lo = c.uint64(c.value(x.Lo, require(partial, scalarKnown)), as) } if x.Hi != nil { - hi = c.uint64(c.value(x.Hi, partial), as) + hi = c.uint64(c.value(x.Hi, require(partial, scalarKnown)), as) if hi > uint64(len(v.Arcs)) { return c.NewErrf("index %d out of range", hi) } @@ -1093,10 +1144,10 @@ func (x *SliceExpr) evaluate(c *OpContext, state vertexStatus) Value { hi = uint64(len(v.B)) ) if x.Lo != nil { - lo = c.uint64(c.value(x.Lo, partial), as) + lo = c.uint64(c.value(x.Lo, require(partial, scalarKnown)), as) } if x.Hi != nil { - hi = c.uint64(c.value(x.Hi, partial), as) + hi = c.uint64(c.value(x.Hi, require(partial, scalarKnown)), as) if hi > uint64(len(v.B)) { return c.NewErrf("index %d out of range", hi) } @@ -1129,10 +1180,10 @@ func (x *Interpolation) Source() ast.Node { return x.Src } -func (x *Interpolation) evaluate(c *OpContext, state vertexStatus) Value { +func (x *Interpolation) evaluate(c *OpContext, state combinedFlags) Value { buf := bytes.Buffer{} for _, e := range x.Parts { - v := c.value(e, partial) + v := c.value(e, require(partial, scalarKnown)) if x.K == BytesKind { buf.Write(c.ToBytes(v)) } else { @@ -1171,11 +1222,11 @@ func (x *UnaryExpr) Source() ast.Node { return x.Src } -func (x *UnaryExpr) evaluate(c *OpContext, state vertexStatus) Value { +func (x *UnaryExpr) evaluate(c *OpContext, state combinedFlags) Value { if !c.concreteIsPossible(x.Op, x.X) { return nil } - v := c.value(x.X, partial) + v := c.value(x.X, require(partial, scalarKnown)) if isError(v) { return v } @@ -1232,7 +1283,7 @@ func (x *BinaryExpr) Source() ast.Node { return x.Src } -func (x *BinaryExpr) evaluate(c *OpContext, state vertexStatus) Value { +func (x *BinaryExpr) evaluate(c *OpContext, state combinedFlags) Value { env := c.Env(0) if x.Op == AndOp { v := c.newInlineVertex(nil, nil, makeAnonymousConjunct(env, x, c.ci.Refs)) @@ -1284,14 
+1335,18 @@ func (x *BinaryExpr) evaluate(c *OpContext, state vertexStatus) Value { return BinOp(c, x.Op, left, right) } -func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, state vertexStatus) (r Value) { +func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, flags combinedFlags) (r Value) { + state := flags.vertexStatus() + s := c.PushState(env, src) match := op != EqualOp // non-error case // Like value(), but retain the original, unwrapped result. c.inValidator++ - v := c.evalState(x, state) + req := flags + req = final(state, needTasksDone) + v := c.evalState(x, req) c.inValidator-- u, _ := c.getDefault(v) u = Unwrap(u) @@ -1316,7 +1371,7 @@ func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, stat return nil case IncompleteError: - c.evalState(x, finalized) + c.evalState(x, oldOnly(finalized)) // We have a nonmonotonic use of a failure. Referenced fields should // not be added anymore. @@ -1381,7 +1436,7 @@ func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, stat match = op == EqualOp } - c.evalState(x, partial) + c.evalState(x, require(state, needTasksDone)) } c.PopState(s) @@ -1389,7 +1444,7 @@ func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, stat } func isFinalError(n *Vertex) bool { - n = n.Indirect() + n = n.DerefValue() if b, ok := Unwrap(n).(*Bottom); ok && b.Code < IncompleteError { return true } @@ -1428,15 +1483,15 @@ func (x *CallExpr) Source() ast.Node { return x.Src } -func (x *CallExpr) evaluate(c *OpContext, state vertexStatus) Value { - fun := c.value(x.Fun, partial) +func (x *CallExpr) evaluate(c *OpContext, state combinedFlags) Value { + fun := c.value(x.Fun, require(partial, concreteKnown)) var b *Builtin switch f := fun.(type) { case *Builtin: b = f case *BuiltinValidator: - // We allow a validator that takes no arguments accept the validated + // We allow a validator that takes no arguments except the validated // value to be called with zero arguments. switch { case f.Src != nil: @@ -1459,6 +1514,12 @@ func (x *CallExpr) evaluate(c *OpContext, state vertexStatus) Value { for i, a := range x.Args { saved := c.errs c.errs = nil + // XXX: XXX: clear id.closeContext per argument and remove from runTask? + + runMode := state.runMode() + cond := state.conditions() | allAncestorsProcessed | concreteKnown + state = combineMode(cond, runMode).withVertexStatus(state.vertexStatus()) + expr := c.value(a, state) switch v := expr.(type) { @@ -1489,7 +1550,7 @@ func (x *CallExpr) evaluate(c *OpContext, state vertexStatus) Value { if result == nil { return nil } - return c.evalState(result, partial) + return c.evalState(result, state.withVertexStatus(partial)) } // A Builtin is a value representing a native function call. @@ -1597,7 +1658,7 @@ func (x *Builtin) call(c *OpContext, p token.Pos, validate bool, args []Value) E x := &BinaryExpr{Op: AndOp, X: v, Y: a} n := c.newInlineVertex(nil, nil, Conjunct{env, x, c.ci}) n.Finalize(c) - if _, ok := n.BaseValue.(*Bottom); ok { + if n.IsErr() { c.addErrf(0, pos(a), "cannot use %s as %s in argument %d to %v", a, v, i+1, fun) @@ -1728,7 +1789,7 @@ func (x *DisjunctionExpr) Source() ast.Node { return x.Src } -func (x *DisjunctionExpr) evaluate(c *OpContext, state vertexStatus) Value { +func (x *DisjunctionExpr) evaluate(c *OpContext, state combinedFlags) Value { e := c.Env(0) v := c.newInlineVertex(nil, nil, Conjunct{e, x, c.ci}) v.Finalize(c) // TODO: also partial okay? 
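The hunks above change resolve, evaluate, and validate to take a combinedFlags argument instead of a bare vertexStatus, built with helpers such as attempt, require, final, and oldOnly, and decomposed again via vertexStatus(), conditions(), and runMode(). For orientation, the standalone Go sketch below illustrates the general idea of packing all three into one word; the stand-in types and bit layout are assumptions, not the vendored encoding.

package main

import "fmt"

// Stand-in types mirroring the shapes suggested by the patch; the real adt
// definitions and bit layout may differ.
type vertexStatus uint8
type condition uint16
type runMode uint8

const (
	partial   vertexStatus = 1
	finalized vertexStatus = 4
)

const (
	scalarKnown condition = 1 << iota
	fieldSetKnown
)

const (
	attemptOnly runMode = 1 + iota
	yield
	finalize
)

// combinedFlags packs a vertexStatus (low byte), a condition set (middle
// bits), and a runMode (high byte) into a single word, so one argument can
// say how far to evaluate, which properties are needed, and what to do if
// they cannot be met yet.
type combinedFlags uint32

func combine(s vertexStatus, c condition, m runMode) combinedFlags {
	return combinedFlags(s) | combinedFlags(c)<<8 | combinedFlags(m)<<24
}

// attempt asks for the conditions but continues even if they are not met;
// require yields until they hold, matching the helpers used in the hunks.
func attempt(s vertexStatus, c condition) combinedFlags { return combine(s, c, attemptOnly) }
func require(s vertexStatus, c condition) combinedFlags { return combine(s, c, yield) }

func (f combinedFlags) vertexStatus() vertexStatus { return vertexStatus(f) }
func (f combinedFlags) conditions() condition      { return condition(f >> 8) }
func (f combinedFlags) runMode() runMode           { return runMode(f >> 24) }

func main() {
	f := require(partial, scalarKnown)
	fmt.Println(f.vertexStatus() == partial)     // true
	fmt.Println(f.conditions()&scalarKnown != 0) // true
	fmt.Println(f.runMode() == yield)            // true
	_ = attempt(finalized, fieldSetKnown)
}

Under this reading, attempt(partial, needFieldSetKnown) asks for the field set but proceeds if it cannot be computed yet, while require(partial, scalarKnown) blocks the task until the scalar is known.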
@@ -1794,6 +1855,16 @@ type Comprehension struct { // The type of field as which the comprehension is added. arcType ArcType + // The closeContext into which the comprehension is added. Upon a successful + // completion of the comprehension, the arcType should be updated in this + // closeContext. After this is done, the corresponding parent closeContext + // must be closed. + arcCC *closeContext + + // This is incremented by the Comprehension upon creation, and decremented + // once it is known whether the comprehension succeeded. + cc *closeContext + // Only used for partial comprehensions. comp *envComprehension parent *Comprehension // comprehension from which this one was derived, if any @@ -1850,39 +1921,115 @@ func (x *ForClause) Source() ast.Node { return x.Syntax } +func (c *OpContext) forSource(x Expr) *Vertex { + state := require(conjuncts, needFieldSetKnown) + + // TODO: always get the vertex. This allows a whole bunch of trickery + // down the line. + v := c.unifyNode(x, state) + + node, ok := v.(*Vertex) + if ok && c.isDevVersion() { + node.unify(c, state.conditions(), yield) + } + + v, ok = c.getDefault(v) + + if !ok { + // Error already generated by getDefault. + return emptyNode + } + + // TODO: skip in new evaluator? Check once we introduce disjunctions. + if w := Unwrap(v); !isCyclePlaceholder(w) { + v = w + } + node, ok = v.(*Vertex) + if ok && !isCyclePlaceholder(node.BaseValue) { + v = node.Value() + } + + switch nv := v.(type) { + case nil: + c.addErrf(IncompleteError, pos(x), + "cannot range over %s (incomplete)", x) + return emptyNode + + case *Bottom: + // TODO: this is a bit messy. In some cases errors are already added + // and in some cases not. Not a huge deal, as errors will be uniqued + // down the line, but could be better. + c.AddBottom(nv) + return emptyNode + + case *Vertex: + if node == nil { + panic("unexpected markers with nil node") + } + + default: + if kind := v.Kind(); kind&StructKind != 0 { + c.addErrf(IncompleteError, pos(x), + "cannot range over %s (incomplete type %s)", x, kind) + return emptyNode + + } else if !ok { + c.addErrf(0, pos(x), // TODO(error): better message. 
+ "cannot range over %s (found %s, want list or struct)", + x.Source(), v.Kind()) + return emptyNode + } + } + + return node +} + func (x *ForClause) yield(s *compState) { c := s.ctx - n := c.node(x, x.Src, true, conjuncts) - if n.status == evaluating && !n.LockArcs { - c.AddBottom(&Bottom{ - Code: CycleError, - ForCycle: true, - Value: n, - Err: errors.Newf(pos(x.Src), "comprehension source references itself"), - }) - return - } - if c.HasErr() { - return + n := c.forSource(x.Src) + + if c.isDevVersion() { + if s := n.getState(c); s != nil { + s.freeze(fieldSetKnown) + } + } else { + if n.status == evaluating && !n.LockArcs { + c.AddBottom(&Bottom{ + Code: CycleError, + ForCycle: true, + Value: n, + Err: errors.Newf(pos(x.Src), "comprehension source references itself"), + }) + return + } + if c.HasErr() { + return + } + n.LockArcs = true } - n.LockArcs = true + for _, a := range n.Arcs { if !a.Label.IsRegular() { continue } - if !a.isDefined() { - a.Finalize(c) - switch a.ArcType { - case ArcMember: - case ArcRequired: - c.AddBottom(newRequiredFieldInComprehensionError(c, x, a)) - continue - default: + + if c.isDevVersion() { + c.require(a, arcTypeKnown) + } else { + if !a.isDefined() { + a.Finalize(c) + } + if !a.definitelyExists() { continue } } - if !a.definitelyExists() { + switch a.ArcType { + case ArcMember: + case ArcRequired: + c.AddBottom(newRequiredFieldInComprehensionError(c, x, a)) + continue + default: continue } @@ -1943,7 +2090,7 @@ func (x *IfClause) Source() ast.Node { func (x *IfClause) yield(s *compState) { ctx := s.ctx - if ctx.BoolValue(ctx.value(x.Condition, s.state)) { + if ctx.BoolValue(ctx.value(x.Condition, require(s.state, scalarKnown))) { s.yield(ctx.e) } } diff --git a/vendor/cuelang.org/go/internal/core/adt/feature.go b/vendor/cuelang.org/go/internal/core/adt/feature.go index dcfa99bc91..1bcabdb8e8 100644 --- a/vendor/cuelang.org/go/internal/core/adt/feature.go +++ b/vendor/cuelang.org/go/internal/core/adt/feature.go @@ -83,7 +83,7 @@ func (f Feature) SelectorString(index StringIndexer) string { if f == AnyString { return "_" } - return literal.String.Quote(s) + return literal.Label.Quote(s) default: return f.IdentString(index) } diff --git a/vendor/cuelang.org/go/internal/core/adt/fields.go b/vendor/cuelang.org/go/internal/core/adt/fields.go index 29e33a0958..ea40ec1cc8 100644 --- a/vendor/cuelang.org/go/internal/core/adt/fields.go +++ b/vendor/cuelang.org/go/internal/core/adt/fields.go @@ -14,6 +14,10 @@ package adt +import ( + "fmt" +) + // This file holds the logic for the insertion of fields and pattern // constraints, including tracking closedness. // @@ -27,7 +31,7 @@ package adt // // Keeping track of which fields are allowed means keeping provenance data on // whether certain conjuncts originate from embeddings or definitions, as well -// as how they group together with other conjuncts. These data structure should +// as how they group together with other conjuncts. These data structures should // allow for a "mark and unwind" approach to allow for backtracking when // computing disjunctions. // @@ -52,7 +56,7 @@ package adt // Conjuncts // // To keep track of conjunct provenance, each conjunct has a few flags that -// indicates whether it orignates from +// indicates whether it originates from // - an embedding // - a definition // - a reference (optional and unimplemented) @@ -153,10 +157,28 @@ type closeContext struct { // Used to recursively insert Vertices. 
parent *closeContext + // overlay is used to temporarily link a closeContext to its "overlay" copy, + // as it is used in a corresponding disjunction. + overlay *closeContext + // generation is used to track the current generation of the closeContext + // in disjunction overlays. This is mostly for debugging. + generation int + + dependencies []*ccDep // For testing only. See debug.go + + // externalDeps lists the closeContexts associated with a root node for + // which there are outstanding decrements (can only be NOTIFY or ARC). This + // is used to break counter cycles, if necessary. + // + // This is only used for the root closeContext and only for debugging. + // TODO: move to nodeContext. + externalDeps []ccArcRef + + // child links to a sequence which additional patterns need to be verified + // against (&&). If there are more than one, these additional nodes are + // linked with next. Only closed nodes with patterns are added. Arc sets are + // already merged during processing. + // A child is always done. This means it cannot be modified. child *closeContext // next holds a linked list of nodes to process. @@ -168,12 +190,33 @@ type closeContext struct { // be an additional increment at the start before any processing is done. conjunctCount int + // disjunctCount counts the number of disjunctions that contribute to + // conjunctCount. When a node is unfinished, for instance due to an error, + // we allow disjunctions to not be decremented. This count is then used + // to suppress errors about missing decrements. + disjunctCount int + src *Vertex + arcType ArcType + // isDef indicates whether the closeContext is created as part of a // definition. isDef bool + // hasEllipsis indicates whether the node contains an ellipsis. + hasEllipsis bool + + // hasTop indicates a node has at least one top conjunct. + hasTop bool + + // hasNonTop indicates a node has at least one conjunct that is not top. + hasNonTop bool + + // isClosedOnce is true if this closeContext is the result of calling the + // close builtin. + isClosedOnce bool + // isEmbed indicates whether the closeContext is created as part of an // embedding. isEmbed bool @@ -189,7 +232,36 @@ type closeContext struct { // values. isTotal bool - Arcs []*Vertex // TODO: also link to parent.src Vertex? + // done is true if all dependencies have been decremented. + done bool + + // isDecremented is used to keep track of whether the evaluator decremented + // a closeContext for the ROOT depKind. + isDecremented bool + + // needsCloseInSchedule is non-nil if a closeContext that was created + // as an arc still needs to be decremented. It points to the creating arc + // for reporting purposes. + needsCloseInSchedule *closeContext + + // parentConjuncts represents the parent of this embedding or definition. + // Any closeContext is represented by a ConjunctGroup in the parent of the + // expression tree. + parentConjuncts conjunctGrouper + // TODO: Only needed if more than one conjunct. + + // arcs represents closeContexts for sub fields and notification targets + // associated with this node that reflect the same point in the expression + // tree as this closeContext. In both cases they are keyed by Vertex. + arcs []ccArc + + // parentIndex is the position in the parent's arcs slice that corresponds + // to this closeContext. This is currently unused. The intention is to use + // this to allow groups with single elements (which will be the majority) + // to be represented in place in the parent.
+ parentIndex int + + group *ConjunctGroup // Patterns contains all patterns of the current closeContext. // It is used in the construction of Expr. @@ -202,19 +274,202 @@ type closeContext struct { Expr Value } +// Label is a convenience function to return the label of the associated Vertex. +func (c *closeContext) Label() Feature { + return c.src.Label +} + +// See also Vertex.updateArcType in composite.go. +func (c *closeContext) updateArcType(t ArcType) { + if t >= c.arcType { + return + } + if c.arcType == ArcNotPresent { + return + } + c.arcType = t +} + +type ccArc struct { + kind depKind + decremented bool + key *closeContext + cc *closeContext +} + +// A ccArcRef x refers to x.src.arcs[x.index]. +// We use this instead of pointers, because the address may change when +// growing a slice. We use this mechanism instead of pointers so +// that we do not need to maintain separate free buffers once we use pools of +// closeContext. +type ccArcRef struct { + src *closeContext + index int +} + +type conjunctGrouper interface { + assignConjunct(ctx *OpContext, root *closeContext, c Conjunct, mode ArcType, check, checkClosed bool) (arc *closeContext, pos int, added bool) +} + +func (n *nodeContext) getArc(f Feature, mode ArcType) (arc *Vertex, isNew bool) { + // TODO(disjunct,perf): CopyOnRead + v := n.node + for _, a := range v.Arcs { + if a.Label == f { + if f.IsLet() { + a.MultiLet = true + // TODO: add return here? + } + a.updateArcType(mode) + return a, false + } + } + + arc = &Vertex{ + Parent: v, + Label: f, + ArcType: mode, + nonRooted: v.IsDynamic || v.Label.IsLet() || v.nonRooted, + } + if n.scheduler.frozen&fieldSetKnown != 0 { + b := n.ctx.NewErrf("adding field %v not allowed as field set was already referenced", f) + n.ctx.AddBottom(b) + // This may panic for list arithmetic. Safer to leave out for now. + arc.ArcType = ArcNotPresent + } + v.Arcs = append(v.Arcs, arc) + return arc, true +} + +func (v *Vertex) assignConjunct(ctx *OpContext, root *closeContext, c Conjunct, mode ArcType, check, checkClosed bool) (a *closeContext, pos int, added bool) { + // TODO: consider clearing CloseInfo.cc. + // c.CloseInfo.cc = nil + + arc := root.src + arc.updateArcType(mode) // TODO: probably not necessary: consider removing. + + pos = len(arc.Conjuncts) + + added = !check || !arc.hasConjunct(c) + if added { + c.CloseInfo.cc = root + arc.addConjunctUnchecked(c) + } + + return root, pos, added +} + +func (cc *closeContext) getKeyedCC(ctx *OpContext, key *closeContext, c CycleInfo, mode ArcType, checkClosed bool) *closeContext { + for _, a := range cc.arcs { + if a.key == key { + a.cc.updateArcType(mode) + return a.cc + } + } + + group := &ConjunctGroup{} + + if cc.parentConjuncts == cc { + panic("parent is self") + } + + parent, pos, _ := cc.parentConjuncts.assignConjunct(ctx, key, Conjunct{ + CloseInfo: CloseInfo{ + FromDef: cc.isDef, + FromEmbed: cc.isEmbed, + CycleInfo: c, + }, + x: group, + }, mode, false, checkClosed) + + arc := &closeContext{ + generation: cc.generation, + parent: parent, + parentConjuncts: parent, + parentIndex: pos, + + src: key.src, + arcType: mode, + group: group, + + isDef: cc.isDef, + isEmbed: cc.isEmbed, + needsCloseInSchedule: cc, + } + + arc.parent.incDependent(ctx, PARENT, arc) + + // If the parent, w.r.t. the subfield relation, was already processed, + // there is no need to register the notification. + arc.incDependent(ctx, EVAL, cc) // matched in REF(decrement:nodeDone) + + // A let field never depends on its parent.
So it is okay to filter here. + if !arc.Label().IsLet() { + // prevent a dependency on self. + if key.src != cc.src { + cc.addDependency(ctx, ARC, key, arc, key) + } + } + + v := key.src + if checkClosed && v.Parent != nil && v.Parent.state != nil { + v.Parent.state.checkArc(cc, v) + } + + return arc +} + +func (cc *closeContext) linkNotify(ctx *OpContext, dst *Vertex, key *closeContext, c CycleInfo) bool { + for _, a := range cc.arcs { + if a.key == key { + return false + } + } + + cc.addDependency(ctx, NOTIFY, key, key, dst.cc) + return true +} + +func (cc *closeContext) assignConjunct(ctx *OpContext, root *closeContext, c Conjunct, mode ArcType, check, checkClosed bool) (arc *closeContext, pos int, added bool) { + arc = cc.getKeyedCC(ctx, root, c.CloseInfo.CycleInfo, mode, checkClosed) + + pos = len(*arc.group) + + c.CloseInfo.cc = nil + added = !check || !hasConjunct(*arc.group, c) + if added { + c.CloseInfo.cc = arc + + if c.CloseInfo.cc.src != arc.src { + panic("Inconsistent src") + } + *arc.group = append(*arc.group, c) + } + + return arc, pos, added +} + // spawnCloseContext wraps the closeContext in c with a new one and returns // this new context along with an updated CloseInfo. The new values reflect // that the set of fields represented by c are now, for instance, enclosed in // an embedding or a definition. // // This call is used when preparing ADT values for evaluation. -func (c CloseInfo) spawnCloseContext(t closeNodeType) (CloseInfo, *closeContext) { - c.cc.incDependent() +func (c CloseInfo) spawnCloseContext(ctx *OpContext, t closeNodeType) (CloseInfo, *closeContext) { + cc := c.cc + if cc == nil { + panic("nil closeContext") + } c.cc = &closeContext{ - parent: c.cc, + generation: cc.generation, + parent: cc, + src: cc.src, + parentConjuncts: cc, } + cc.incDependent(ctx, PARENT, c.cc) // REF(decrement: spawn) + switch t { case closeDef: c.cc.isDef = true @@ -225,34 +480,116 @@ func (c CloseInfo) spawnCloseContext(t closeNodeType) (CloseInfo, *closeContext) return c, c.cc } +// addDependency adds a dependent arc to c. If child is an arc, child.src == key +func (c *closeContext) addDependency(ctx *OpContext, kind depKind, key, child, root *closeContext) { + // NOTE: do not increment + // - either root closeContext or otherwise resulting from sub closeContext + // all conjuncts will be added now, notified, or scheduled as task. + + child.incDependent(ctx, kind, c) // matched in decDependent REF(arcs) + + for _, a := range c.arcs { + if a.key == key { + panic("addArc: Label already exists") + } + } + + // TODO: this test seems sensible, but panics. Investigate what could + // trigger this. + // if child.src.Parent != c.src { + // panic("addArc: inconsistent parent") + // } + if child.src.cc != root.src.cc { + panic("addArc: inconsistent root") + } + c.arcs = append(c.arcs, ccArc{ + kind: kind, + key: key, + cc: child, + }) + root.externalDeps = append(root.externalDeps, ccArcRef{ + src: c, + index: len(c.arcs) - 1, + }) +} + // incDependent needs to be called for any conjunct or child closeContext // scheduled for c that is queued for later processing and not scheduled // immediately. -func (c *closeContext) incDependent() { +func (c *closeContext) incDependent(ctx *OpContext, kind depKind, dependant *closeContext) (debug *ccDep) { + if c.src == nil { + panic("incDependent: unexpected nil src") + } + if dependant != nil && c.generation != dependant.generation { + // TODO: enable this check.
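addDependency above records each dependency both as a ccArc on the closeContext and as an index-based ccArcRef in root.externalDeps. The index form matters because appending to a slice may reallocate its backing array, invalidating element pointers. A standalone sketch of the difference, with stand-in types:

package main

import "fmt"

type dep struct{ name string }

// holder mimics closeContext: it owns a growable arcs slice.
type holder struct{ arcs []dep }

// arcRef mirrors ccArcRef: it names an element as (owner, index) rather than
// by address, so it stays valid when the slice is reallocated by append.
type arcRef struct {
	src   *holder
	index int
}

func (r arcRef) get() *dep { return &r.src.arcs[r.index] }

func main() {
	h := &holder{arcs: make([]dep, 0, 1)}
	h.arcs = append(h.arcs, dep{"a"})

	byPtr := &h.arcs[0]               // may dangle after growth
	byRef := arcRef{src: h, index: 0} // survives growth

	h.arcs = append(h.arcs, dep{"b"}) // forces reallocation (cap was 1)
	h.arcs[0].name = "a2"

	fmt.Println(byPtr.name)       // still "a": points into the old backing array
	fmt.Println(byRef.get().name) // "a2": resolved through the live slice
}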
+ + // panic(fmt.Sprintf("incDependent: inconsistent generation: %d %d", c.generation, dependant.generation)) + } + debug = c.addDependent(ctx, kind, dependant) + + if c.done { + openDebugGraph(ctx, c.src, "incDependent: already checked") + + panic(fmt.Sprintf("incDependent: already closed: %p", c)) + } + c.conjunctCount++ + return debug } // decDependent needs to be called for any conjunct or child closeContext for // which a corresponding incDependent was called after it has been successfully // processed. -func (c *closeContext) decDependent(n *nodeContext) { +func (c *closeContext) decDependent(ctx *OpContext, kind depKind, dependant *closeContext) { + v := c.src + + c.matchDecrement(ctx, v, kind, dependant) + + if c.conjunctCount == 0 { + panic(fmt.Sprintf("negative reference counter %d %p", c.conjunctCount, c)) + } + c.conjunctCount-- if c.conjunctCount > 0 { return } - c.finalizePattern(n) + c.done = true + + p := c.parent + + if c.isDef && !c.hasEllipsis && (!c.hasTop || c.hasNonTop) { + c.isClosed = true + if p != nil { + p.isDef = true + } + } - if c.isDef { + if c.isClosedOnce { c.isClosed = true + if p != nil { + p.isClosedOnce = true + } } - p := c.parent + for i, a := range c.arcs { + cc := a.cc + if a.decremented { + continue + } + c.arcs[i].decremented = true + cc.decDependent(ctx, a.kind, c) // REF(arcs) + } + + c.finalizePattern() + if p == nil { // Root pattern, set allowed patterns. - if pcs := n.node.PatternConstraints; pcs != nil { + if pcs := v.PatternConstraints; pcs != nil { if pcs.Allowed != nil { - panic("unexpected allowed set") + // This can happen for lists. + // TODO: unify the values. + // panic("unexpected allowed set") } pcs.Allowed = c.Expr return @@ -260,19 +597,76 @@ func (c *closeContext) decDependent(n *nodeContext) { return } - if !c.isEmbed && c.isClosed { + if c.hasEllipsis { + p.hasEllipsis = true + } + if c.hasTop { + p.hasTop = true + } + if c.hasNonTop { + p.hasNonTop = true + } + + switch { + case c.isTotal: + if !p.isClosed { + p.isTotal = true + } + case !c.isEmbed && c.isClosed: // Merge the two closeContexts and ensure that the patterns and fields // are mutually compatible according to the closedness rules. - injectClosed(n, c, p) + injectClosed(ctx, c, p) p.Expr = mergeConjunctions(p.Expr, c.Expr) - } else { + default: // Do not check closedness of fields for embeddings. // The pattern constraints of the embedding still need to be added // to the current context. p.linkPatterns(c) } - p.decDependent(n) + p.decDependent(ctx, PARENT, c) // REF(decrement: spawn) + + // If we have started decrementing a child closeContext, the parent started + // as well. If it is still marked as needing an EVAL decrement, which can + // happen if processing started before the node was added, it is safe to + // decrement it now. In this case the NOTIFY and ARC dependencies will keep + // the nodes alive until they can be completed. + if dep := p.needsCloseInSchedule; dep != nil { + p.needsCloseInSchedule = nil + p.decDependent(ctx, EVAL, dep) + } +} + +// incDisjunct increases disjunction-related counters. We require kind to be +// passed explicitly so that we can easily find the points where certain kinds +// are used. +func (c *closeContext) incDisjunct(ctx *OpContext, kind depKind) { + if kind != DISJUNCT { + panic("unexpected kind") + } + c.incDependent(ctx, DISJUNCT, nil) + + // TODO: the counters are only used in debug mode and we could skip this + // if debug is disabled. 
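incDependent and decDependent above implement a strict reference count: every increment carries a depKind and must be matched by exactly one decrement, reaching zero marks the closeContext done and releases the reference held on its parent (the REF(decrement: spawn) pairing). A minimal standalone sketch of that discipline, with stand-in names:

package main

import "fmt"

// node sketches the counting discipline of closeContext: every inc must be
// matched by exactly one dec, and reaching zero finalizes the node and
// releases the reference it holds on its parent.
type node struct {
	parent *node
	count  int
	done   bool
}

func (n *node) inc() {
	if n.done {
		panic("inc after done")
	}
	n.count++
}

func (n *node) dec() {
	if n.count == 0 {
		panic("negative reference counter")
	}
	n.count--
	if n.count > 0 {
		return
	}
	n.done = true
	if n.parent != nil {
		n.parent.dec() // matches the inc taken when this node was spawned
	}
}

func main() {
	root := &node{}
	root.inc() // taken on behalf of the child below
	child := &node{parent: root}
	child.inc()
	child.dec()
	fmt.Println(child.done, root.done) // true true
}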
+ for ; c != nil; c = c.parent { + c.disjunctCount++ + } +} + +// decDisjunct decreases disjunction-related counters. We require kind to be +// passed explicitly so that we can easily find the points where certain kinds +// are used. +func (c *closeContext) decDisjunct(ctx *OpContext, kind depKind) { + if kind != DISJUNCT { + panic("unexpected kind") + } + c.decDependent(ctx, DISJUNCT, nil) + + // TODO: the counters are only used in debug mode and we could skip this + // if debug is disabled. + for ; c != nil; c = c.parent { + c.disjunctCount++ + } } // linkPatterns merges the patterns of child into c, if needed. @@ -283,110 +677,193 @@ func (c *closeContext) linkPatterns(child *closeContext) { } } +// checkArc validates that the node corresponding to cc allows a field with +// label v.Label. +func (n *nodeContext) checkArc(cc *closeContext, v *Vertex) *Vertex { + n.assertInitialized() + + f := v.Label + ctx := n.ctx + + if f.IsHidden() || f.IsLet() { + return v + } + + if cc.isClosed && !matchPattern(ctx, cc.Expr, f) { + ctx.notAllowedError(n.node, v) + } + if n.scheduler.frozen&fieldSetKnown != 0 { + for _, a := range n.node.Arcs { + if a.Label == f { + return v + } + } + var b *Bottom + // TODO: include cycle data and improve error message. + if f.IsInt() { + b = ctx.NewErrf( + "element at index %v not allowed by earlier comprehension or reference cycle", f) + } else { + b = ctx.NewErrf( + "field %v not allowed by earlier comprehension or reference cycle", f) + } + v.SetValue(ctx, b) + } + + return v +} + +// insertConjunct inserts conjunct c into cc. +func (cc *closeContext) insertConjunct(ctx *OpContext, key *closeContext, c Conjunct, id CloseInfo, mode ArcType, check, checkClosed bool) (arc *closeContext, added bool) { + arc, _, added = cc.assignConjunct(ctx, key, c, mode, check, checkClosed) + if key.src != arc.src { + panic("inconsistent src") + } + + if !added { + return + } + + n := key.src.getBareState(ctx) + if n == nil { + // already done + return + } + + if key.src.isInProgress() { + c.CloseInfo.cc = nil + id.cc = arc + n.scheduleConjunct(c, id) + } + + for _, rec := range n.notify { + if mode == ArcPending { + panic("unexpected pending arc") + } + // TODO: we should probably only notify a conjunct once the root of the + // conjunct group is completed. This will make it easier to "stitch" the + // conjunct trees together, as its correctness will be guaranteed. + cc.insertConjunct(ctx, rec.cc, c, id, mode, check, checkClosed) + } + + return +} + +func (n *nodeContext) insertArc(f Feature, mode ArcType, c Conjunct, id CloseInfo, check bool) *Vertex { + v, _ := n.insertArcCC(f, mode, c, id, check) + return v +} + // insertArc inserts conjunct c into n. If check is true it will not add c if it // was already added. -func (n *nodeContext) insertArc(f Feature, mode ArcType, c Conjunct, check bool) { +// Returns the arc of n.node with label f. +func (n *nodeContext) insertArcCC(f Feature, mode ArcType, c Conjunct, id CloseInfo, check bool) (*Vertex, *closeContext) { + n.assertInitialized() + if n == nil { panic("nil nodeContext") } if n.node == nil { panic("nil node") } - cc := c.CloseInfo.cc + cc := id.cc if cc == nil { panic("nil closeContext") } - if _, isNew := n.insertArc1(f, mode, c, check); !isNew { - return // Patterns were already added. + v, insertedArc := n.getArc(f, mode) + + defer n.ctx.PopArc(n.ctx.PushArc(v)) + + // TODO: this block is not strictly needed. 
Removing it slightly changes the + // paths at which errors are reported, arguably, but not clearly, for the + // better. Investigate this once the new evaluator is done. + if v.ArcType == ArcNotPresent { + // It was already determined before that this arc may not be present. + // This case can only manifest itself if we have a cycle. + n.node.reportFieldCycleError(n.ctx, pos(c.x), f) + return v, nil + } + + if v.cc == nil { + v.cc = v.rootCloseContext(n.ctx) + v.cc.generation = n.node.cc.generation + } + + arc, added := cc.insertConjunct(n.ctx, v.cc, c, id, mode, check, true) + if !added { + return v, arc + } + + if !insertedArc { + return v, arc } // Match and insert patterns. if pcs := n.node.PatternConstraints; pcs != nil { for _, pc := range pcs.Pairs { - if matchPattern(n, pc.Pattern, f) { + if matchPattern(n.ctx, pc.Pattern, f) { for _, c := range pc.Constraint.Conjuncts { - n.insertArc1(f, mode, c, check) + // TODO: consider using the root cc, but probably does not + // matter. + // This is necessary if we defunct tasks, but otherwise not. + // It breaks the CloseContext tests, though. + // c.CloseInfo.cc = id.cc + n.addConstraint(v, mode, c, check) } } } } -} - -// insertArc1 inserts conjunct c into its associated closeContext. If the -// closeContext did not yet have a Vertex for f, it is created and it is ensured -// that the grouping is associated with all the parent closeContexts. If it is -// newly added to the root closeContext, the outer grouping is also added to -// n.node, the top-level Vertex itself. -// -// insertArc1 is exclusively used by insertArc to insert conjuncts for regular -// fields as well as pattern constraints. -func (n *nodeContext) insertArc1(f Feature, mode ArcType, c Conjunct, check bool) (v *Vertex, isNew bool) { - cc := c.CloseInfo.cc - - // Locate or create the arc in the current context. - v, isNew = cc.insertArc(n, f, mode, c, check) - if !isNew { - return v, false - } - - i := cc.parent - for prev := cc; i != nil && isNew; prev, i = i, i.parent { - vc := MakeRootConjunct(nil, v) - vc.CloseInfo.FromDef = prev.isDef - vc.CloseInfo.FromEmbed = prev.isEmbed - v, isNew = i.insertArc(n, f, mode, vc, check) - } - if isNew && i == nil { - n.node.Arcs = append(n.node.Arcs, v) - } - if cc.isClosed && !v.disallowedField && !matchPattern(n, cc.Expr, f) { - n.notAllowedError(f) - } - - return v, isNew + return v, arc } -// insertArc is exclusively called from nodeContext.insertArc1 and is used to -// insert a conjunct in closeContext, along with other conjuncts from the same -// origin. It does not recursively insert conjuncts into parent closeContexts, -// which is done by insertArc1. +// addConstraint adds a constraint to an arc of n. // -// If check is true it will not add c if it was already added. -func (cc *closeContext) insertArc( - n *nodeContext, f Feature, mode ArcType, c Conjunct, check bool) (v *Vertex, isNew bool) { - c.CloseInfo.cc = nil +// In order to resolve LabelReferences, it is not always possible to walk up +// the parent Vertex chain to determine the label, because a label reference +// may point past a point of referral. For instance, +// +// test: [ID=_]: name: ID +// test: A: {} +// B: test.A & {} // B.name should be "A", not "B". +// +// The arc must be the node arc to which the conjunct is added.
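As the example in the doc comment above shows, the constraint must bind the pattern's label variable (ID) to the label of the arc actually being constrained; the function below does this by cloning the Environment and setting DynamicLabel per target arc. A standalone sketch of that per-arc copy, with stand-in types rather than the vendored Environment:

package main

import "fmt"

// env mimics the relevant part of an evaluation environment: the label
// currently bound by a pattern constraint such as [ID=_].
type env struct {
	dynamicLabel string
}

type conjunct struct {
	env  *env
	expr string
}

// addConstraint mirrors the bulkEnv copy below: the environment is cloned for
// each target arc so each arc sees its own label, while the original
// environment (possibly shared by other conjuncts) stays untouched.
func addConstraint(c conjunct, label string) conjunct {
	bulkEnv := *c.env
	bulkEnv.dynamicLabel = label
	c.env = &bulkEnv
	return c
}

func main() {
	shared := &env{}
	a := addConstraint(conjunct{shared, "name: ID"}, "A")
	b := addConstraint(conjunct{shared, "name: ID"}, "B")
	// a and b resolve ID to their own labels; shared remains unset.
	fmt.Println(a.env.dynamicLabel, b.env.dynamicLabel, shared.dynamicLabel == "")
}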
+func (n *nodeContext) addConstraint(arc *Vertex, mode ArcType, c Conjunct, check bool) { + n.assertInitialized() - for _, a := range cc.Arcs { - if a.Label != f { - continue - } + // TODO(perf): avoid cloning the Environment, if: + // - the pattern constraint has no LabelReference + // (require compile-time support) + // - there are no references in the conjunct pointing to this node. + // - consider adding this value to the Conjunct struct + f := arc.Label + bulkEnv := *c.Env + bulkEnv.DynamicLabel = f + c.Env = &bulkEnv - if f.IsLet() { - a.MultiLet = true - return a, false - } - if check { - a.AddConjunct(c) - } else { - a.addConjunctUnchecked(c) - } - // TODO: possibly add positions to error. - return a, false - } + // TODO(constraintNode): this should ideally be + // cc := id.cc + // or + // cc := c.CloseInfo.cc.src.cc + // + // Where id is the closeContext corresponding to the field, or the root + // context. But it is a bit hard to figure out how to account for this, as + // either this information is not available or the root context results in + // errors for the other use of addConstraint. For this reason, we keep + // things symmetric for now and will keep things as is, just avoiding the + // closedness check. + cc := c.CloseInfo.cc - v = &Vertex{Parent: cc.src, Label: f, ArcType: mode} - if mode == ArcPending { - cc.src.hasPendingArc = true - } - v.Conjuncts = append(v.Conjuncts, c) - cc.Arcs = append(cc.Arcs, v) + arc, _ = n.getArc(f, mode) - return v, true + root := arc.rootCloseContext(n.ctx) + cc.insertConjunct(n.ctx, root, c, c.CloseInfo, mode, check, false) } func (n *nodeContext) insertPattern(pattern Value, c Conjunct) { + n.assertInitialized() + ctx := n.ctx cc := c.CloseInfo.cc @@ -405,9 +882,9 @@ func (n *nodeContext) insertPattern(pattern Value, c Conjunct) { // from arcs and patterns are grouped under the same vertex. // TODO: verify. See test Pattern 1b for _, a := range n.node.Arcs { - if matchPattern(n, pattern, a.Label) { + if matchPattern(n.ctx, pattern, a.Label) { // TODO: is it necessary to check for uniqueness here? - n.insertArc(a.Label, a.ArcType, c, true) + n.addConstraint(a, a.ArcType, c, true) } } @@ -448,21 +925,44 @@ func isTotal(p Value) bool { // It first ensures that the fields contained in dst are allowed by the fields // and patterns defined in closed. It reports an error in the nodeContext if // this is not the case. -func injectClosed(n *nodeContext, closed, dst *closeContext) { +func injectClosed(ctx *OpContext, closed, dst *closeContext) { // TODO: check that fields are not void arcs. outer: - for _, a := range dst.Arcs { - for _, b := range closed.Arcs { - if a.Label == b.Label { - if b.disallowedField { - // Error was already reported. - break - } + for _, a := range dst.arcs { + if a.kind != ARC { + continue + } + ca := a.cc + f := ca.Label() + switch ca.src.ArcType { + case ArcMember, ArcRequired: + case ArcOptional, ArcNotPresent: + // Without this continue, an evaluation error may be propagated to + // parent nodes that are otherwise allowed. + continue + case ArcPending: + // TODO: Need to evaluate? + default: + panic("unreachable") + } + // TODO: disallow new definitions in closed structs. + if f.IsHidden() || f.IsLet() || f.IsDef() { + continue + } + for _, b := range closed.arcs { + cb := b.cc + // TODO: we could potentially remove the check for ArcPending if we + // explicitly set the arcType to ArcNotPresent when a comprehension + // yields no results.
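injectClosed above allows a field of dst only if it is declared among the closed context's arcs or matches its pattern expression (matchPattern), reporting notAllowedError otherwise. A standalone sketch of that membership-or-pattern rule, with stand-in types:

package main

import "fmt"

// closedInfo sketches the closedness rule that injectClosed enforces: when a
// closed struct is injected into dst, every field of dst must either be
// declared in the closed struct or match one of its pattern constraints.
type closedInfo struct {
	fields   map[string]bool
	patterns []func(label string) bool
}

func (c closedInfo) allows(label string) bool {
	if c.fields[label] {
		return true
	}
	for _, match := range c.patterns {
		if match(label) {
			return true
		}
	}
	return false
}

func main() {
	closed := closedInfo{
		fields:   map[string]bool{"a": true},
		patterns: []func(string) bool{func(l string) bool { return len(l) == 1 }},
	}
	for _, f := range []string{"a", "b", "zz"} {
		if !closed.allows(f) {
			fmt.Printf("field %s not allowed\n", f) // only "zz"
		}
	}
}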
+ if cb.arcType == ArcNotPresent || cb.arcType == ArcPending { + continue + } + if f == cb.Label() { continue outer } } - if !matchPattern(n, closed.Expr, a.Label) { - n.notAllowedError(a.Label) + if !matchPattern(ctx, closed.Expr, ca.Label()) { + ctx.notAllowedError(closed.src, ca.src) continue } } @@ -480,16 +980,52 @@ outer: } } -// notAllowedError reports a "field not allowed" error in n and sets the value +func (ctx *OpContext) addPositions(c Conjunct) { + if x, ok := c.x.(*ConjunctGroup); ok { + for _, c := range *x { + ctx.addPositions(c) + } + } + if pos := c.Field(); pos != nil { + ctx.AddPosition(pos) + } +} + +// notAllowedError reports a field not allowed error in n and sets the value // for arc f to that error. -func (n *nodeContext) notAllowedError(f Feature) { - // Set the error on the same arc as the old implementation - // and using the same path. - arc := n.node.Lookup(f) - v := n.ctx.PushArc(arc) - n.node.SetValue(n.ctx, n.ctx.NewErrf("field not allowed")) - arc.disallowedField = true // Is this necessary? - n.ctx.PopArc(v) +func (ctx *OpContext) notAllowedError(v, arc *Vertex) { + defer ctx.PopArc(ctx.PushArc(arc)) + + defer ctx.ReleasePositions(ctx.MarkPositions()) + + for _, c := range arc.Conjuncts { + ctx.addPositions(c) + } + // TODO(0.7): Find another way to get this provenance information. Not + // currently stored in new evaluator. + // for _, s := range x.Structs { + // s.AddPositions(ctx) + // } + + // TODO: use the arcType from the closeContext. + if arc.ArcType == ArcPending { + // arc.ArcType = ArcNotPresent + // We do not know yet whether the arc will be present or not. Checking + // this will be deferred until this is known, after the comprehension + // has been evaluated. + return + } + // TODO: setting arc instead of n.node eliminates subfields. This may be + // desirable or not, but it differs, at least from <=v0.6 behavior. + arc.SetValue(ctx, ctx.NewErrf("field not allowed")) + if arc.state != nil { + arc.state.kind = 0 + } + + // TODO: remove? We are now setting it on both fields, which seems to be + // necessary for now. But we should remove this as it often results in + // a duplicate error. + // v.SetValue(ctx, ctx.NewErrf("field not allowed")) // TODO: create a special kind of error that gets the positions // of the relevant locations upon request from the arc. @@ -533,7 +1069,7 @@ func mergeConjunctions(a, b Value) Value { // children is closed, the result will be a conjunction of all these closed // values. Otherwise it will be a disjunction of all its children. A nil value // represents all values. -func (c *closeContext) finalizePattern(n *nodeContext) { +func (c *closeContext) finalizePattern() { switch { case c.Expr != nil: // Patterns and expression are already set. if !c.isClosed { diff --git a/vendor/cuelang.org/go/internal/core/adt/op.go b/vendor/cuelang.org/go/internal/core/adt/op.go index 6383290ab6..dfa83cbbfe 100644 --- a/vendor/cuelang.org/go/internal/core/adt/op.go +++ b/vendor/cuelang.org/go/internal/core/adt/op.go @@ -20,79 +20,46 @@ import "cuelang.org/go/cue/token" // use to evaluate a value. type Op int -func (o Op) String() string { - return opToString[o] -} +//go:generate go run golang.org/x/tools/cmd/stringer -type=Op -linecomment // Values of Op. 
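The //go:generate stringer -linecomment directive above replaces the hand-written opToString map: the trailing line comment on each constant below becomes its String() value, returned by the generated op_string.go further down. The generated code concatenates all strings into one constant and slices it via an index table; a standalone sketch of that lookup scheme (with a truncated table for illustration):

package main

import "fmt"

// How the generated op_string.go lookup works: all strings are concatenated
// into one constant, and an index table gives each value's [start, end) slice.
const opName = "NoOp&|." // first four entries of _Op_name

var opIndex = [...]uint8{0, 4, 5, 6, 7} // NoOp, &, |, .

func opString(i int) string {
	if i < 0 || i >= len(opIndex)-1 {
		return fmt.Sprintf("Op(%d)", i)
	}
	return opName[opIndex[i]:opIndex[i+1]]
}

func main() {
	fmt.Println(opString(1), opString(3)) // & .
}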
const ( NoOp Op = iota - AndOp - OrOp - - SelectorOp - IndexOp - SliceOp - CallOp - - BoolAndOp - BoolOrOp - - EqualOp - NotOp - NotEqualOp - LessThanOp - LessEqualOp - GreaterThanOp - GreaterEqualOp - - MatchOp - NotMatchOp - - AddOp - SubtractOp - MultiplyOp - FloatQuotientOp - IntQuotientOp - IntRemainderOp - IntDivideOp - IntModuloOp - - InterpolationOp + AndOp // & + OrOp // | + + SelectorOp // . + IndexOp // [] + SliceOp // [:] + CallOp // () + + BoolAndOp // && + BoolOrOp // || + + EqualOp // == + NotOp // ! + NotEqualOp // != + LessThanOp // < + LessEqualOp // <= + GreaterThanOp // > + GreaterEqualOp // >= + + MatchOp // =~ + NotMatchOp // !~ + + AddOp // + + SubtractOp // - + MultiplyOp // * + FloatQuotientOp // / + IntQuotientOp // quo + IntRemainderOp // rem + IntDivideOp // div + IntModuloOp // mod + + InterpolationOp // \() ) -var opToString = map[Op]string{ - AndOp: "&", - OrOp: "|", - BoolAndOp: "&&", - BoolOrOp: "||", - EqualOp: "==", - NotOp: "!", - NotEqualOp: "!=", - LessThanOp: "<", - LessEqualOp: "<=", - GreaterThanOp: ">", - GreaterEqualOp: ">=", - MatchOp: "=~", - NotMatchOp: "!~", - AddOp: "+", - SubtractOp: "-", - MultiplyOp: "*", - FloatQuotientOp: "/", - IntQuotientOp: "quo", - IntRemainderOp: "rem", - IntDivideOp: "div", - IntModuloOp: "mod", - - SelectorOp: ".", - IndexOp: "[]", - SliceOp: "[:]", - CallOp: "()", - - InterpolationOp: `\()`, -} - // OpFromToken converts a token.Token to an Op. func OpFromToken(t token.Token) Op { return tokenMap[t] diff --git a/vendor/cuelang.org/go/internal/core/adt/op_string.go b/vendor/cuelang.org/go/internal/core/adt/op_string.go new file mode 100644 index 0000000000..1fedcaed84 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/op_string.go @@ -0,0 +1,49 @@ +// Code generated by "stringer -type=Op -linecomment"; DO NOT EDIT. + +package adt + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[NoOp-0] + _ = x[AndOp-1] + _ = x[OrOp-2] + _ = x[SelectorOp-3] + _ = x[IndexOp-4] + _ = x[SliceOp-5] + _ = x[CallOp-6] + _ = x[BoolAndOp-7] + _ = x[BoolOrOp-8] + _ = x[EqualOp-9] + _ = x[NotOp-10] + _ = x[NotEqualOp-11] + _ = x[LessThanOp-12] + _ = x[LessEqualOp-13] + _ = x[GreaterThanOp-14] + _ = x[GreaterEqualOp-15] + _ = x[MatchOp-16] + _ = x[NotMatchOp-17] + _ = x[AddOp-18] + _ = x[SubtractOp-19] + _ = x[MultiplyOp-20] + _ = x[FloatQuotientOp-21] + _ = x[IntQuotientOp-22] + _ = x[IntRemainderOp-23] + _ = x[IntDivideOp-24] + _ = x[IntModuloOp-25] + _ = x[InterpolationOp-26] +} + +const _Op_name = "NoOp&|.[][:]()&&||==!!=<<=>>==~!~+-*/quoremdivmod\\()" + +var _Op_index = [...]uint8{0, 4, 5, 6, 7, 9, 12, 14, 16, 18, 20, 21, 23, 24, 26, 27, 29, 31, 33, 34, 35, 36, 37, 40, 43, 46, 49, 52} + +func (i Op) String() string { + if i < 0 || i >= Op(len(_Op_index)-1) { + return "Op(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Op_name[_Op_index[i]:_Op_index[i+1]] +} diff --git a/vendor/cuelang.org/go/internal/core/adt/overlay.go b/vendor/cuelang.org/go/internal/core/adt/overlay.go new file mode 100644 index 0000000000..93554c8465 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/overlay.go @@ -0,0 +1,515 @@ +// Copyright 2024 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +import "slices" + +// This file implements a Vertex overlay. This is used by the disjunction +// algorithm to fork an existing Vertex value without modifying the original. +// +// At the moment, the forked value is a complete copy of the original. +// The copy points to the original to keep track of pointer equivalence. +// Conversely, while a copy is evaluated, the value of which it is a copy +// references the copy. Dereferencing will then take care that the copy is used +// during evaluation. +// +// nodeContext (main) <- +// - deref \ +// | \ +// | nodeContext (d1) | <- +// \ - overlays -------/ \ +// \ \ +// -> nodeContext (d2) | +// - overlays --------/ +// +// TODO: implement dereferencing +// TODO(perf): implement copy on write: instead of copying the entire tree, we +// could get by with only copying arcs that are modified in the copy. + +var nextGeneration int + +func newOverlayContext(ctx *OpContext) *overlayContext { + nextGeneration++ + return &overlayContext{ctx: ctx, generation: nextGeneration} +} + +// An overlayContext keeps track of copied vertices, closeContexts, and tasks. +// This allows different passes to know which of each were created, without +// having to walk the entire tree. +type overlayContext struct { + ctx *OpContext + + // generation is used to identify the current overlayContext. All + // closeContexts created by this overlayContext will have this generation. + // Whenever a counter of a closeContext is changed, this may only cause + // a cascade of changes if the generation is the same. + generation int + + // closeContexts holds the allocated closeContexts created by allocCC. + // + // In the first pass, closeContexts are copied using allocCC. This also + // walks the parent tree, and allocates copies for ConjunctGroups. + // + // In the second pass, initCloneCC can be finalized by initializing each + // closeContext in this slice. + // + // Note that after the copy is completed, the overlay pointer should be + // deleted. + closeContexts []*closeContext + + // vertices holds the original, non-overlay vertices. The overlay for a + // vertex v can be obtained by looking up v.cc.overlay.src. + vertices []*Vertex +} + +// cloneRoot clones the Vertex in which disjunctions are defined to allow +// inserting selected disjuncts into a new Vertex. +func (ctx *overlayContext) cloneRoot(root *nodeContext) *nodeContext { + // Clone all vertices that need to be cloned to support the overlay. + v := ctx.cloneVertex(root.node) + v.IsDisjunct = true + + // TODO: patch notifications to any node that is within the disjunct to + // point to the new vertex instead. + + // Initialize closeContexts: at this point, all closeContexts that need to + // be cloned have been allocated and stored in closeContexts and can now be + // initialized. + for _, cc := range ctx.closeContexts { + ctx.initCloneCC(cc) + } + + // TODO: walk overlay vertices and decrement counters of non-disjunction + // running tasks? + // TODO: find a faster way to do this. Walking over vertices would + // probably be faster.
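cloneRoot above performs the copy in two phases: allocCC first allocates an overlay copy for every reachable closeContext, linking original to copy through the overlay pointer (which doubles as a memoization table preserving pointer equivalence), and initCloneCC then fills the copies in; unlinkOverlay finally clears the links. A standalone sketch of that pattern, with stand-in types:

package main

import "fmt"

// ctxNode sketches the two-phase overlay copy used by the disjunction
// algorithm: phase 1 allocates a copy for every reachable node and links
// original -> copy via the overlay pointer (which also memoizes shared
// nodes); phase 2 would initialize the copies; finally the links are removed.
type ctxNode struct {
	name    string
	parent  *ctxNode
	overlay *ctxNode
}

type overlayCtx struct{ allocated []*ctxNode }

func (o *overlayCtx) alloc(x *ctxNode) *ctxNode {
	if x.overlay != nil {
		return x.overlay // already copied: reuse, preserving pointer equivalence
	}
	c := &ctxNode{name: x.name + "'"}
	x.overlay = c
	if x.parent != nil {
		c.parent = o.alloc(x.parent)
	}
	o.allocated = append(o.allocated, x)
	return c
}

func (o *overlayCtx) unlink() {
	for _, x := range o.allocated {
		x.overlay = nil
	}
}

func main() {
	root := &ctxNode{name: "root"}
	a := &ctxNode{name: "a", parent: root}
	b := &ctxNode{name: "b", parent: root}
	o := &overlayCtx{}
	ca, cb := o.alloc(a), o.alloc(b)
	fmt.Println(ca.parent == cb.parent) // true: shared parent copied once
	o.unlink()
}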
+ for _, cc := range ctx.closeContexts { + for _, d := range cc.dependencies { + if d.task == nil { + // The test case that makes this necessary: + // #A: ["a" | "b"] | {} + // #A: ["a" | "b"] | {} + // b: #A & ["b"] + // + // TODO: invalidate task instead? + continue + } + if d.kind == TASK && d.task.state == taskRUNNING && !d.task.defunct { + cc.overlay.decDependent(ctx.ctx, TASK, nil) + } + } + } + + return v.state +} + +// unlinkOverlay unlinks helper pointers. This should be done after the +// evaluation of a disjunct is complete. Keeping the linked pointers around +// will allow for dereferencing a vertex to its overlay, which, in turn, +// allows a disjunct to refer to parent vertices of the disjunct that +// recurse into the disjunct. +// +// TODO(perf): consider using generation counters. +func (ctx *overlayContext) unlinkOverlay() { + for _, cc := range ctx.closeContexts { + cc.overlay = nil + } +} + +// cloneVertex copies the contents of x into a new Vertex. +// +// It copies all Arcs, Conjuncts, and Structs, recursively. +// +// TODO(perf): it would probably be faster to copy vertices on demand. But this +// is more complicated and it would be worth measuring how much of a performance +// benefit this gives. More importantly, we should first implement the filter +// to eliminate disjunctions pre-copy based on discriminator fields and what +// have you. This is not unlikely to eliminate +func (ctx *overlayContext) cloneVertex(x *Vertex) *Vertex { + xcc := x.rootCloseContext(ctx.ctx) // may be uninitialized for constraints. + if o := xcc.overlay; o != nil && o.src != nil { + // This path could happen with structure sharing or user-constructed + // values. + return o.src + } + + v := &Vertex{} + *v = *x + + ctx.vertices = append(ctx.vertices, v) + + v.cc = ctx.allocCC(x.cc) + + v.cc.src = v + v.cc.parentConjuncts = v + v.Conjuncts = *v.cc.group + + if a := x.Arcs; len(a) > 0 { + // TODO(perf): reuse buffer. + v.Arcs = make([]*Vertex, len(a)) + for i, arc := range a { + // TODO(perf): reuse when finalized. + arc := ctx.cloneVertex(arc) + v.Arcs[i] = arc + arc.Parent = v + } + } + + v.Structs = slices.Clone(v.Structs) + + if pc := v.PatternConstraints; pc != nil { + npc := &Constraints{Allowed: pc.Allowed} + v.PatternConstraints = npc + + npc.Pairs = make([]PatternConstraint, len(pc.Pairs)) + for i, p := range pc.Pairs { + npc.Pairs[i] = PatternConstraint{ + Pattern: p.Pattern, + Constraint: ctx.cloneVertex(p.Constraint), + } + } + } + + if v.state != nil { + v.state = ctx.cloneNodeContext(x.state) + v.state.node = v + + ctx.cloneScheduler(v.state, x.state) + } + + return v +} + +func (ctx *overlayContext) cloneNodeContext(n *nodeContext) *nodeContext { + if !n.isInitialized { + panic("unexpected uninitialized node") + + } + d := n.ctx.newNodeContext(n.node) + d.underlying = n.underlying + if n.underlying == nil { + panic("unexpected nil underlying") + } + + d.refCount++ + + d.ctx = n.ctx + d.node = n.node + + d.nodeContextState = n.nodeContextState + + d.arcMap = append(d.arcMap, n.arcMap...) + d.checks = append(d.checks, n.checks...) + + // TODO: do we need to add cyclicConjuncts? Typically, cyclicConjuncts + // gets cleared at the end of a unify call. There are cases, however, where + // this is possible. We should decide whether cyclicConjuncts should be + // forced to be processed in the parent node, or that we allow it to be + // copied to the disjunction. By taking no action here, we assume it is + // processed in the parent node.
Investigate whether this always will lead + // to correct results. + // d.cyclicConjuncts = append(d.cyclicConjuncts, n.cyclicConjuncts...) + + if len(n.disjunctions) > 0 { + for _, de := range n.disjunctions { + // Do not clone cc, as it is identified by underlying. We only need + // to clone the cc in disjunctCCs. + // de.cloneID.cc = ctx.allocCC(de.cloneID.cc) + d.disjunctions = append(d.disjunctions, de) + } + for _, h := range n.disjunctCCs { + h.cc = ctx.allocCC(h.cc) + d.disjunctCCs = append(d.disjunctCCs, h) + } + } + + return d +} + +// cloneConjunct prepares a tree of conjuncts for copying by first allocating +// a clone for each closeContext. +func (ctx *overlayContext) copyConjunct(c Conjunct) Conjunct { + cc := c.CloseInfo.cc + if cc == nil { + return c + } + // TODO: see if we can avoid this allocation. It seems that this should + // not be necessary, and evaluation attains correct results without it. + // Removing this, though, will cause some of the assertions to fail. These + // assertions are overly strict and could be relaxed, but keeping them as + // they are makes reasoning about them easier. + overlay := ctx.allocCC(cc) + c.CloseInfo.cc = overlay + return c +} + +// Phase 1: alloc +func (ctx *overlayContext) allocCC(cc *closeContext) *closeContext { + // TODO(perf): if the original is "done", it can no longer be modified and + // we can use the original, even if the values will not be correct. + if cc.overlay != nil { + return cc.overlay + } + + o := &closeContext{generation: ctx.generation} + cc.overlay = o + + if cc.parent != nil { + o.parent = ctx.allocCC(cc.parent) + } + + // Copy the conjunct group if it exists. + if cc.group != nil { + // Copy the group of conjuncts. + g := make([]Conjunct, len(*cc.group)) + o.group = (*ConjunctGroup)(&g) + for i, c := range *cc.group { + g[i] = ctx.copyConjunct(c) + } + + if o.parent != nil { + // validate invariants + ca := *cc.parent.group + if ca[cc.parentIndex].x != cc.group { + panic("group misaligned") + } + + (*o.parent.group)[cc.parentIndex].x = o.group + } + } + + // This must come after allocating the parent so that we can always read + // the src vertex from the parent during initialization. This assumes that + // src is set in the root closeContext when cloning a vertex. + ctx.closeContexts = append(ctx.closeContexts, cc) + + // needsCloseInSchedule is used as a boolean. The pointer to the original + // closeContext is just used for reporting purposes. + if cc.needsCloseInSchedule != nil { + o.needsCloseInSchedule = ctx.allocCC(cc.needsCloseInSchedule) + } + + // We only explicitly tag dependencies of type ARC. Notifications that + // point within the disjunct overlay will be tagged elsewhere. + for _, a := range cc.arcs { + if a.kind == ARC { + ctx.allocCC(a.cc) + } + } + + return o +} + +func (ctx *overlayContext) initCloneCC(x *closeContext) { + o := x.overlay + + if p := x.parent; p != nil { + o.parent = p.overlay + o.src = o.parent.src + } + + o.conjunctCount = x.conjunctCount + o.disjunctCount = x.disjunctCount + o.isDef = x.isDef + o.hasEllipsis = x.hasEllipsis + o.hasTop = x.hasTop + o.hasNonTop = x.hasNonTop + o.isClosedOnce = x.isClosedOnce + o.isEmbed = x.isEmbed + o.isClosed = x.isClosed + o.isTotal = x.isTotal + o.done = x.done + o.isDecremented = x.isDecremented + o.parentIndex = x.parentIndex + o.Expr = x.Expr + o.Patterns = append(o.Patterns, x.Patterns...) + + // child and next always point to completed closeContexts. Moreover, only + // fields that are immutable, such as Expr, are used. 
It is therefore not + // necessary to use overlays. + o.child = x.child + if x.child != nil && x.child.overlay != nil { + // TODO: there seem to be situations where this is possible after all. + // See if this is really true, and we should remove this panic, or if + // this underlies a bug of sorts. + // panic("unexpected overlay in child") + } + o.next = x.next + if x.next != nil && x.next.overlay != nil { + panic("unexpected overlay in next") + } + + for _, d := range x.dependencies { + if d.decremented { + continue + } + + if d.dependency.overlay == nil { + // This dependency is irrelevant for the current overlay. We can + // eliminate it as long as we decrement the accompanying counter. + if o.conjunctCount < 2 { + // This node can only be relevant if it has at least one other + // dependency. Check that we are not decrementing the counter + // to 0. + // TODO: this currently panics for some tests. Disabling does + // not seem to harm, though. Reconsider whether this is an issue. + // panic("unexpected conjunctCount: must be at least 2") + } + o.conjunctCount-- + continue + } + + dep := d.dependency + if dep.overlay != nil { + dep = dep.overlay + } + o.dependencies = append(o.dependencies, &ccDep{ + dependency: dep, + kind: d.kind, + decremented: false, + }) + } + + switch p := x.parentConjuncts.(type) { + case *closeContext: + if p.overlay == nil { + panic("expected overlay") + } + o.parentConjuncts = p.overlay + + case *Vertex: + o.parentConjuncts = o.src + } + + if o.src == nil { + // fall back to original vertex. + // FIXME: this is incorrect, as it may lead to evaluating nodes that + // are not part of the disjunction with values of the disjunction. + // TODO: try eliminating EVAL dependencies of arcs that are the parent + // of the disjunction root. + o.src = x.src + } + + if o.parentConjuncts == nil { + panic("expected parentConjuncts") + } + + for _, a := range x.arcs { + // If an arc does not have an overlay, we should not decrement the + // dependency counter. We simply remove the dependency in that case. + if a.cc.overlay == nil { + continue + } + if a.key.overlay != nil { + a.key = a.key.overlay // TODO: is this necessary? + } + a.cc = a.cc.overlay + o.arcs = append(o.arcs, a) + } + + // NOTE: copying externalDeps is hard and seems unnecessary, as it needs to + // be resolved in the base anyway. +} + +func (ctx *overlayContext) cloneScheduler(dst, src *nodeContext) { + ss := &src.scheduler + ds := &dst.scheduler + + ds.state = ss.state + ds.completed = ss.completed + ds.needs = ss.needs + ds.provided = ss.provided + ds.frozen = ss.frozen + ds.isFrozen = ss.isFrozen + ds.counters = ss.counters + + ss.blocking = ss.blocking[:0] + + for _, t := range ss.tasks { + switch t.state { + case taskWAITING: + // Do not unblock previously blocked tasks, unless they are + // associated with this node. + // TODO: an edge case is when a task is blocked on another node + // within the same disjunction. We could solve this by associating + // each nodeContext with a unique ID (like a generation counter) for + // the disjunction. + if t.node != src || t.blockedOn != ss { + break + } + t.defunct = true + t := ctx.cloneTask(t, ds, ss) + ds.tasks = append(ds.tasks, t) + ds.blocking = append(ds.blocking, t) + ctx.ctx.blocking = append(ctx.ctx.blocking, t) + + case taskREADY: + t.defunct = true + t := ctx.cloneTask(t, ds, ss) + ds.tasks = append(ds.tasks, t) + + case taskRUNNING: + if t.run != handleResolver { + // TODO: consider whether this is also necessary for other + // types of tasks. 
+ break + } + + t.defunct = true + t := ctx.cloneTask(t, ds, ss) + t.state = taskREADY + ds.tasks = append(ds.tasks, t) + } + } +} + +func (ctx *overlayContext) cloneTask(t *task, dst, src *scheduler) *task { + if t.node != src.node { + panic("misaligned node") + } + + id := t.id + if id.cc != nil { + id.cc = ctx.allocCC(t.id.cc) // TODO: may be nil for disjunctions. + } + + // TODO: alloc from buffer. + d := &task{ + run: t.run, + state: t.state, + completes: t.completes, + unblocked: t.unblocked, + blockCondition: t.blockCondition, + err: t.err, + env: t.env, + x: t.x, + id: id, + + node: dst.node, + + // TODO: need to copy closeContexts? + comp: t.comp, + leaf: t.leaf, + } + + if t.blockedOn != nil { + if t.blockedOn != src { + panic("invalid scheduler") + } + d.blockedOn = dst + } + + return d +} diff --git a/vendor/cuelang.org/go/internal/core/adt/prof.go b/vendor/cuelang.org/go/internal/core/adt/prof.go index 3036b273fd..9b41210108 100644 --- a/vendor/cuelang.org/go/internal/core/adt/prof.go +++ b/vendor/cuelang.org/go/internal/core/adt/prof.go @@ -28,6 +28,13 @@ var ( countsMu sync.Mutex ) +// ResetStats sets the global stats counters to zero. +func ResetStats() { + countsMu.Lock() + counts = stats.Counts{} + countsMu.Unlock() +} + // AddStats adds the stats of the given OpContext to the global // counters. func AddStats(ctx *OpContext) { diff --git a/vendor/cuelang.org/go/internal/core/adt/sched.go b/vendor/cuelang.org/go/internal/core/adt/sched.go new file mode 100644 index 0000000000..8b0f224ba0 --- /dev/null +++ b/vendor/cuelang.org/go/internal/core/adt/sched.go @@ -0,0 +1,752 @@ +// Copyright 2023 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +import ( + "math/bits" +) + +// The CUE scheduler schedules tasks for evaluation. +// +// A task is a computation unit associated with a single node. Each task may +// depend on knowing certain properties of one or more fields, namely: +// +// - whether the field exists +// - the scalar value of a field, if any +// - the set of all conjuncts +// - the set of all sub fields +// - the recursively evaluated value +// +// Each task, in turn, may mark itself as providing knowledge about one or more +// of these properties. If it is not known upfront whether a task may contribute +// to a certain property, it must mark itself as (potentially) contributing to +// this property. +// +// +// DEPENDENCY GRAPH +// +// A task may depend on zero or more fields, including the field for which it +// is defined. The graph of all dependencies is defined as follows: +// +// - Each task and each <field, property> pair is a node in the graph. +// - A task T for field F that (possibly) computes property P for F is +// represented by an edge from <F, P> to T. +// - A task T for field F that depends on property P of field G is represented +// by an edge from <G, P> to T. +// +// It is an evaluation cycle for a task T if there is a path from any task T to +// itself in the dependency graph. Processing will stop in the event of such a +// cycle.
In such a case, the scheduler will commence an unblocking mechanism.
+//
+// As a general rule, once a node is detected to be blocking, it may no longer
+// become more specific. In other words, it is "frozen".
+// The unblocking consists of two phases: the scheduler will first freeze and
+// unblock all blocked nodes for the properties marked as autoUnblock-ing in
+// taskContext. Subsequently all tasks that are unblocked by this will run.
+// In the next phase all remaining tasks are unblocked.
+// See taskContext.autoUnblock for more information.
+//
+// Note that some tasks, like references, may depend on other fields without
+// requiring a certain property. These do not count as dependencies.
+
+// A taskContext manages the task memory and task stack.
+// It is typically associated with an OpContext.
+type taskContext struct {
+	// stack tracks the current execution of tasks. This is a stack as tasks
+	// may trigger the evaluation of other tasks to complete.
+	stack []*task
+
+	// blocking lists all tasks that were blocked during a round of evaluation.
+	// Evaluation finalizes one node at a time, which includes the evaluation
+	// of all nodes necessary to evaluate that node. Any task that is blocked
+	// during such a round of evaluation is recorded here. Any mutual cycles
+	// will result in unresolved tasks. At the end of such a round, computation
+	// can be frozen and the tasks unblocked.
+	blocking []*task
+
+	// counterMask marks which conditions use counters. Other conditions are
+	// handled by signals only.
+	counterMask condition
+
+	// autoUnblock marks the flags that get unblocked automatically when there
+	// is a deadlock between nodes. These are properties that may become
+	// meaningful once it is known that a value may not become more specific.
+	// An example of this is the property "scalar". If something is not a scalar
+	// yet, and it is known that the value may never become more specific, it is
+	// known that this value will never become a scalar, thus effectively
+	// making it known.
+	autoUnblock condition
+
+	// This is called upon completion of states, allowing other states to be
+	// updated atomically.
+	complete func(s *scheduler) condition
+}
+
+func (p *taskContext) current() *task {
+	if len(p.stack) == 0 {
+		return nil
+	}
+	return p.stack[len(p.stack)-1]
+}
+
+func (p *taskContext) pushTask(t *task) {
+	p.stack = append(p.stack, t)
+}
+
+func (p *taskContext) popTask() {
+	p.stack = p.stack[:len(p.stack)-1]
+}
+
+func (p *taskContext) newTask() *task {
+	// TODO: allocate from pool.
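+	// A minimal sketch of such pooling (hypothetical taskPool variable and
+	// a "sync" import, neither present in this file):
+	//
+	//	var taskPool = sync.Pool{New: func() any { return new(task) }}
+	//
+	//	t := taskPool.Get().(*task)
+	//	*t = task{}
+	//	return t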
+	return &task{}
+}
+
+type taskState uint8
+
+const (
+	taskREADY taskState = iota
+
+	taskRUNNING // processing conjunct(s)
+	taskWAITING // task is blocked on a property of an arc to hold
+	taskSUCCESS
+	taskFAILED
+)
+
+type schedState uint8
+
+const (
+	schedREADY schedState = iota
+
+	schedRUNNING    // processing conjunct(s)
+	schedFINALIZING // all tasks completed, run new tasks immediately
+	schedSUCCESS
+	schedFAILED
+)
+
+func (s schedState) done() bool { return s >= schedSUCCESS }
+
+func (s taskState) String() string {
+	switch s {
+	case taskREADY:
+		return "READY"
+	case taskRUNNING:
+		return "RUNNING"
+	case taskWAITING:
+		return "WAITING"
+	case taskSUCCESS:
+		return "SUCCESS"
+	case taskFAILED:
+		return "FAILED"
+	default:
+		return "UNKNOWN"
+	}
+}
+
+func (s schedState) String() string {
+	switch s {
+	case schedREADY:
+		return "READY"
+	case schedRUNNING:
+		return "RUNNING"
+	case schedFINALIZING:
+		return "FINALIZING"
+	case schedSUCCESS:
+		return "SUCCESS"
+	case schedFAILED:
+		return "FAILED"
+	default:
+		return "UNKNOWN"
+	}
+}
+
+// runMode indicates how to proceed after a condition could not be met.
+type runMode uint8
+
+const (
+	// ignore indicates that the new evaluator should not do any processing.
+	// This is mostly used in the transition from old to new evaluator and
+	// should probably eventually be removed.
+	ignore runMode = 1 + iota
+
+	// attemptOnly indicates that execution should continue even if the
+	// condition is not met.
+	attemptOnly
+
+	// yield means that execution should be yielded if the condition is not met.
+	// That is, the task is marked as a dependency and control is returned to
+	// the runloop. The task will resume once the dependency is met.
+	yield
+
+	// finalize means that uncompleted tasks should be turned into errors to
+	// complete the evaluation of a Vertex.
+	finalize
)

+func (r runMode) String() string {
+	switch r {
+	case ignore:
+		return "ignore"
+	case attemptOnly:
+		return "attemptOnly"
+	case yield:
+		return "yield"
+	case finalize:
+		return "finalize"
+	}
+	return "unknown"
+}
+
+// condition is a bit mask of states that a task may depend on.
+//
+// There are generally two types of states: states that are met if all tasks
+// that contribute to that state are completed (counter states), and states that
+// are met if some global set of conditions is met.
+type condition uint16
+
+const (
+	// allKnown indicates that all possible states are completed.
+	allKnown condition = 0x7fff
+
+	// neverKnown is a special condition that is never met. It can be used to
+	// mark a task as impossible to complete.
+	neverKnown condition = 0x8000
+)
+
+func (c condition) meets(x condition) bool {
+	return c&x == x
+}
+
+const numCompletionStates = 10 // TODO: make this configurable
+
+// A scheduler represents the set of outstanding tasks for a node.
+type scheduler struct {
+	ctx  *OpContext
+	node *nodeContext
+
+	state schedState
+
+	// completed is a bit set of completed states.
+	completed condition
+
+	// needs specifies all the states needed to complete tasks in this scheduler.
+	needs condition
+
+	// provided specifies all the states that are provided by tasks added
+	// to this scheduler.
+	provided condition // TODO: rename to "provides"? To be consistent with "needs".
+
+	// frozen indicates all states that are frozen. These bits should be checked
+	// before making a node more specific.
+	// TODO: do we need a separate field for this, or can we use completed?
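+	// For illustration: if scalarKnown is frozen on a node that has no
+	// scalar yet, the node is committed to never becoming a scalar, and
+	// checking this bit prevents a later conjunct from turning it into one.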
+	frozen condition
+
+	// isFrozen indicates if freeze was called explicitly.
+	//
+	// TODO: rename to isExplicitlyFrozen if it turns out we need both frozen
+	// and isFrozen. We probably do not. Check once the implementation of the
+	// new evaluator is complete.
+	isFrozen bool
+
+	// counters keeps track of the number of uncompleted tasks that are
+	// outstanding for each of the possible conditions. A state is
+	// considered completed if the corresponding counter reaches zero.
+	counters [numCompletionStates]int
+
+	// tasks lists all tasks that were scheduled for this scheduler.
+	// The list only contains tasks that are associated with this node.
+	// TODO: rename to queue and taskPos to nextQueueIndex.
+	tasks   []*task
+	taskPos int
+
+	// blocking is a list of tasks that are blocked on the completion of
+	// the indicated conditions. This can hold tasks from other nodes or tasks
+	// originating from this node itself.
+	blocking []*task
+}
+
+func (s *scheduler) clear() {
+	// TODO(perf): free tasks into task pool
+
+	*s = scheduler{
+		ctx:      s.ctx,
+		tasks:    s.tasks[:0],
+		blocking: s.blocking[:0],
+	}
+}
+
+// cloneInto initializes the state of dst to be the same as s.
+//
+// NOTE: this is deliberately not a pointer receiver: this approach allows
+// cloning s into dst while preserving the buffers of dst and not having to
+// explicitly clone any non-buffer fields.
+func (s scheduler) cloneInto(dst *scheduler) {
+	s.tasks = append(dst.tasks, s.tasks...)
+	s.blocking = append(dst.blocking, s.blocking...)
+
+	*dst = s
+}
+
+// incrementCounts increments the counters for each condition.
+// See also decrementCounts.
+func (s *scheduler) incrementCounts(x condition) {
+	x &= s.ctx.counterMask
+
+	for {
+		n := bits.TrailingZeros16(uint16(x))
+		if n == 16 {
+			break
+		}
+		bit := condition(1 << n)
+		x &^= bit
+
+		s.counters[n]++
+	}
+}
+
+// decrementCounts decrements the counters for each condition. If a counter for
+// a condition reaches zero, it means that condition is met and all blocking
+// tasks depending on that state can be run.
+func (s *scheduler) decrementCounts(x condition) {
+	x &= s.ctx.counterMask
+
+	var completed condition
+	for {
+		n := bits.TrailingZeros16(uint16(x))
+		if n == 16 {
+			break
+		}
+		bit := condition(1 << n)
+		x &^= bit
+
+		s.counters[n]--
+		if s.counters[n] == 0 {
+			completed |= bit
+		}
+	}
+
+	s.signal(completed)
+}
+
+// finalize runs all tasks and signals that the scheduler is done upon
+// completion for the given conditions.
+func (s *scheduler) finalize(completed condition) {
+	// Do not panic on cycle detection. Instead, post-process the tasks
+	// by collecting and marking cycle errors.
+	s.process(allKnown, finalize)
+	s.signal(completed)
+	if s.state == schedRUNNING {
+		if s.meets(s.needs) {
+			s.state = schedSUCCESS
+		} else {
+			s.state = schedFAILED
+		}
+	}
+}
+
+// process advances a scheduler by executing tasks that are required.
+// Depending on mode, if the scheduler is blocked on a condition, it will
+// forcefully unblock the tasks.
+func (s *scheduler) process(needs condition, mode runMode) bool {
+	c := s.ctx
+
+	// Update completions, if necessary.
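+	// The complete hook derives additional conditions from ones already
+	// met; for the CUE evaluator this hook is stateCompletions in states.go.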
+	if f := c.taskContext.complete; f != nil {
+		s.signal(f(s))
+	}
+
+	if s.ctx.LogEval > 0 && len(s.tasks) > 0 {
+		if v := s.tasks[0].node.node; v != nil {
+			c.nest++
+			c.Logf(v, "START Process %v -- mode: %v", v.Label, mode)
+			defer func() {
+				c.Logf(v, "END Process")
+				c.nest--
+			}()
+		}
+	}
+
+	// hasRunning := false
+	s.state = schedRUNNING
+	// Use a variable instead of range, because s.tasks may grow during
+	// processing.
+
+processNextTask:
+	for s.taskPos < len(s.tasks) {
+		t := s.tasks[s.taskPos]
+		s.taskPos++
+
+		if t.state != taskREADY {
+			// TODO(perf): Figure out how it is possible to reach this and if we
+			// should optimize.
+			// panic("task not READY")
+		}
+
+		switch {
+		case t.defunct:
+			continue
+
+		case t.state == taskRUNNING:
+			// TODO: we could store the current referring node that caused
+			// the cycle and then proceed up the stack to mark all tasks
+			// that are involved in the cycle as well. Further, we could
+			// mark the cycle with a generation counter, instead of a boolean
+			// value, so that it will be trivial to reconstruct a detailed cycle
+			// report when generating an error message.
+
+		case t.state != taskREADY:
+
+		default:
+			runTask(t, mode)
+		}
+	}
+
+	switch mode {
+	default: // case attemptOnly:
+		return s.meets(needs)
+
+	case yield:
+		if s.meets(needs) {
+			return true
+		}
+		c.current().waitFor(s, needs)
+		s.yield()
+		panic("unreachable")
+
+	case finalize:
+		// remainder of function
+	}
+
+unblockTasks:
+	// Unblocking proceeds in three stages. Each of the stages may cause
+	// formerly blocked tasks to become unblocked. To ensure that unblocking
+	// tasks does not happen in an order-dependent way, we want to ensure that
+	// we have unblocked all tasks from one phase before commencing the next.
+
+	// The types of the node can no longer be altered. We can unblock the
+	// relevant states first to finish up any tasks that were just waiting for
+	// types, such as lists.
+	for _, t := range c.blocking {
+		if t.blockedOn != nil {
+			t.blockedOn.signal(s.ctx.autoUnblock)
+		}
+	}
+
+	// Mark all remaining conditions as "frozen" before actually running the
+	// tasks. Doing this before running the remaining tasks ensures that we get
+	// the same errors, regardless of the order in which tasks are unblocked.
+	for _, t := range c.blocking {
+		if t.blockedOn != nil {
+			t.blockedOn.freeze(t.blockCondition)
+			t.unblocked = true
+		}
+	}
+
+	// Run the remaining blocked tasks.
+	numBlocked := len(c.blocking)
+	for _, t := range c.blocking {
+		if t.blockedOn != nil && !t.defunct {
+			n, cond := t.blockedOn, t.blockCondition
+			t.blockedOn, t.blockCondition = nil, neverKnown
+			n.signal(cond)
+			runTask(t, attemptOnly) // Does this need to be final? Probably not if we do a fixed point computation.
+		}
+	}
+
+	// The running of tasks above may result in more tasks being added to the
+	// queue. Process these first before continuing.
+	if s.taskPos < len(s.tasks) {
+		goto processNextTask
+	}
+
+	// Similarly, the running of tasks may result in more tasks being blocked.
+	// Ensure we processed them all.
+	if numBlocked < len(c.blocking) {
+		goto unblockTasks
+	}
+
+	c.blocking = c.blocking[:0]
+
+	return true
+}
+
+// yield causes the current task to be suspended until the given conditions
+// are met.
+func (s *scheduler) yield() {
+	panic(s)
+}
+
+// meets reports whether all needed completion states in s are met.
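+// For example (illustrative values, not from this file): if completed is
+// scalarKnown|valueKnown, then meets(scalarKnown) holds, while
+// meets(fieldSetKnown) does not hold until that bit is signaled or frozen.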
+func (s *scheduler) meets(needs condition) bool {
+	s.node.assertInitialized()
+
+	if s.state != schedREADY {
+		// Automatically qualify for conditions that are not provided by this node.
+		// NOTE: in the evaluator this is generally not the case, as tasks may still
+		// be added during evaluation until all ancestor nodes are evaluated. This
+		// can be encoded by the scheduler by adding a state "ancestorsCompleted",
+		// which all other conditions depend on.
+		needs &= s.provided
+	}
+	return s.completed&needs == needs
+}
+
+// blockOn marks a state as uncompleted.
+func (s *scheduler) blockOn(cond condition) {
+	// TODO: should we allow this to be used for counter states?
+	// if s.ctx.counterMask&cond != 0 {
+	// 	panic("cannot block on counter states")
+	// }
+	s.provided |= cond
+}
+
+// signal causes tasks that are blocking on the given completion to be run
+// for this scheduler. Tasks are only run if the completion state was not
+// already reached before.
+func (s *scheduler) signal(completed condition) {
+	was := s.completed
+	s.completed |= completed
+	if was == s.completed {
+		s.frozen |= completed
+		return
+	}
+
+	s.completed |= s.ctx.complete(s)
+	s.frozen |= completed
+
+	// TODO: this could benefit from a linked list where tasks are removed
+	// from the list before being run.
+	for _, t := range s.blocking {
+		if t.blockCondition&s.completed == t.blockCondition {
+			// Prevent task from running again.
+			t.blockCondition = neverKnown
+			t.blockedOn = nil
+			runTask(t, attemptOnly) // TODO: does this ever need to be final?
+			// TODO: should only be run once for each blocking queue.
+		}
+	}
+}
+
+// freeze indicates no more tasks satisfying the given condition may be added.
+// It is also used to freeze certain elements of the task.
+func (s *scheduler) freeze(c condition) {
+	s.frozen |= c
+	s.completed |= c
+	s.ctx.complete(s)
+	s.isFrozen = true
+}
+
+// signalDoneAdding signals that no more tasks will be added to this scheduler.
+// This allows unblocking tasks that depend on states for which there are no
+// tasks in this scheduler.
+func (s *scheduler) signalDoneAdding() {
+	s.signal(s.needs &^ s.provided)
+}
+
+// runner defines properties of a type of task, including a function to run it.
+type runner struct {
+	name string
+
+	// The mode argument indicates whether the scheduler
+	// of this field is finalizing. It is passed as a component of the required
+	// state to various evaluation methods.
+	f func(ctx *OpContext, t *task, mode runMode)
+
+	// completes indicates which states this task contributes to.
+	completes condition
+
+	// needs indicates which states of the corresponding node need to be
+	// completed before this task can be run.
+	needs condition
+
+	// a lower priority indicates a preference to run a task before tasks
+	// of a higher priority.
+	priority int8
+}
+
+type task struct {
+	state taskState
+
+	completes condition // cycles may alter the completion mask. TODO: is this still true?
+
+	// defunct indicates that this task is no longer relevant. This is the case
+	// when it has not yet been run before it is copied into a disjunction.
+	defunct bool
+
+	// unblocked indicates this task was unblocked by force.
+	unblocked bool
+
+	// The following fields indicate what this task is blocked on, including
+	// the scheduler, which conditions it is blocking on, and the stack of
+	// tasks executed leading to the block.
+
+	// blockedOn cannot be needed in a clone for a disjunct, because as long
+	// as the disjunct is unresolved, its value cannot contribute to another
+	// scheduler.
+	blockedOn      *scheduler
+	blockCondition condition
+	blockStack     []*task // TODO: use; for error reporting.
+
+	err *Bottom
+
+	// The node from which this conjunct originates.
+	node *nodeContext
+
+	run *runner // TODO: use struct to make debugging easier?
+
+	// The Conjunct processed by this task.
+	env *Environment
+	id  CloseInfo // TODO: rename to closeInfo?
+	x   Node      // The conjunct Expression or Value.
+
+	// For Comprehensions:
+	comp *envComprehension
+	leaf *Comprehension
+}
+
+func (s *scheduler) insertTask(t *task) {
+	completes := t.run.completes
+	needs := t.run.needs
+
+	s.needs |= needs
+	s.provided |= completes
+
+	if needs&completes != 0 {
+		panic("task depends on its own completion")
+	}
+	t.completes = completes
+
+	if s.state == schedFINALIZING {
+		runTask(t, finalize)
+		return
+	}
+
+	s.incrementCounts(completes)
+	if cc := t.id.cc; cc != nil {
+		// may be nil for "group" tasks, such as processLists.
+		dep := cc.incDependent(t.node.ctx, TASK, nil)
+		if dep != nil {
+			dep.taskID = len(s.tasks)
+			dep.task = t
+		}
+	}
+	s.tasks = append(s.tasks, t)
+
+	// Sort by priority. This code is optimized for the case that there are
+	// very few tasks with higher priority. This loop will almost always
+	// terminate within 0 or 1 iterations.
+	for i := len(s.tasks) - 1; i > s.taskPos; i-- {
+		if s.tasks[i-1].run.priority <= s.tasks[i].run.priority {
+			break
+		}
+		s.tasks[i], s.tasks[i-1] = s.tasks[i-1], s.tasks[i]
+	}
+
+	if s.completed&needs != needs {
+		t.waitFor(s, needs)
+	}
+}
+
+func runTask(t *task, mode runMode) {
+	if t.defunct {
+		return
+	}
+	t.node.Logf("============ RUNTASK %v %v", t.run.name, t.x)
+	ctx := t.node.ctx
+
+	switch t.state {
+	case taskSUCCESS, taskFAILED:
+		return
+	case taskRUNNING:
+		// TODO: should we mark this as a cycle?
+	}
+
+	defer func() {
+		if n := t.node; n.toComplete {
+			n.toComplete = false
+			n.completeNodeTasks(attemptOnly)
+		}
+
+		switch r := recover().(type) {
+		case nil:
+		case *scheduler:
+			// Task must be WAITING.
+			if t.state == taskRUNNING {
+				t.state = taskSUCCESS // XXX: something else? Do we know the dependency?
+				if t.err != nil {
+					t.state = taskFAILED
+				}
+			}
+		default:
+			panic(r)
+		}
+	}()
+
+	defer ctx.PopArc(ctx.PushArc(t.node.node))
+
+	// TODO: merge these two mechanisms once we get rid of the old evaluator.
+	ctx.pushTask(t)
+	defer ctx.popTask()
+	if t.env != nil {
+		id := t.id
+		id.cc = nil // this is done to prevent struct args from passing fields up.
+		s := ctx.PushConjunct(MakeConjunct(t.env, t.x, id))
+		defer ctx.PopState(s)
+	}
+
+	t.state = taskRUNNING
+	// A task may have recorded an error on a previous try. Clear it.
+	t.err = nil
+
+	t.run.f(ctx, t, mode)
+
+	if t.state != taskWAITING {
+		t.blockedOn = nil
+		t.blockCondition = neverKnown
+
+		// TODO: always reporting errors in the current task would avoid us
+		// having to collect and assign errors here.
+		t.err = CombineErrors(nil, t.err, ctx.Err())
+		if t.err == nil {
+			t.state = taskSUCCESS
+		} else {
+			t.state = taskFAILED
+		}
+		t.node.addBottom(t.err) // TODO: replace with something more principled.
+
+		if t.id.cc != nil {
+			t.id.cc.decDependent(ctx, TASK, nil)
+		}
+		t.node.decrementCounts(t.completes)
+		t.completes = 0 // safety
+	}
+}
+
+// waitFor blocks task t until the needs for scheduler s are met.
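+// For example: a task that needs the scalar value of another field calls
+// waitFor with scalarKnown on that field's scheduler; signal reruns it once
+// the scalar is set, or the freeze path unblocks it if it never will be.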
+func (t *task) waitFor(s *scheduler, needs condition) {
+	if s.meets(needs) {
+		panic("waiting for condition that already completed")
+	}
+	// TODO: this line causes the scheduler state to fail if tasks are blocking
+	// on it. Is this desirable? At the very least we should then ensure that
+	// the scheduler where the tasks originate from will fail in that case.
+	s.needs |= needs
+
+	t.state = taskWAITING
+
+	t.blockCondition = needs
+	t.blockedOn = s
+	s.blocking = append(s.blocking, t)
+	s.ctx.blocking = append(s.ctx.blocking, t)
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/share.go b/vendor/cuelang.org/go/internal/core/adt/share.go
new file mode 100644
index 0000000000..09071a6c55
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/share.go
@@ -0,0 +1,183 @@
+// Copyright 2024 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+// This file contains logic regarding structure sharing.
+
+// Notes
+//
+// TODO:
+// We may want to consider tracking closedness in parallel to the Vertex
+// structure, for instance in a CloseInfo or in a cue.Value itself.
+//
+//	reg: {}
+//	#def: sub: reg
+//
+// By tracking closedness inside the CloseInfo, we can still share the
+// structure and only have to change
+//
+// Maybe this is okay, though, as #Def itself can be shared, at least.
+
+func (n *nodeContext) unshare() {
+	n.noSharing = true
+
+	if !n.isShared {
+		return
+	}
+	n.isShared = false
+	n.node.IsShared = false
+
+	v := n.node.BaseValue.(*Vertex)
+
+	// TODO: the use of cycle for BaseValue is getting increasingly outdated.
+	// Find another mechanism once we get rid of the old evaluator.
+	n.node.BaseValue = n.origBaseValue
+
+	n.scheduleVertexConjuncts(n.shared, v, n.sharedID)
+}
+
+func (n *nodeContext) share(c Conjunct, arc *Vertex, id CloseInfo) {
+	if n.isShared {
+		panic("already sharing")
+	}
+	n.origBaseValue = n.node.BaseValue
+	n.node.BaseValue = arc
+	n.node.IsShared = true
+	n.isShared = true
+	n.shared = c
+	n.sharedID = id
+}
+
+func (n *nodeContext) shareIfPossible(c Conjunct, arc *Vertex, id CloseInfo) bool {
+	// TODO: have an experiment here to enable or disable structure sharing.
+	// return false
+	if !n.ctx.Sharing {
+		return false
+	}
+
+	if n.noSharing || n.isShared || n.ctx.errs != nil {
+		return false
+	}
+
+	// This line is to deal with this case:
+	//
+	//	reg: {}
+	//	#def: sub: reg
+	//
+	// Ideally we find a different solution, like passing closedness
+	// down elsewhere. In fact, as we do this in closeContexts, it probably
+	// already works, it will just not be reflected in the debug output.
+	// We could fix that by not printing structure shared nodes, which is
+	// probably a good idea anyway.
+	//
+	// TODO: come up with a mechanism to allow this case.
+	if n.node.Closed && !arc.Closed {
+		return false
+	}
+
+	// Sharing let expressions is not supported and will result in unmarked
+	// structural cycles. Processing will still terminate, but printing the
+	// result will result in an infinite loop.
+	//
+	// TODO: allow this case.
+	if n.node.Label.IsLet() {
+		return false
+	}
+
+	// If an arc is a computed intermediate result and not part of a CUE output,
+	// it should not be shared.
+	if n.node.nonRooted || arc.nonRooted {
+		return false
+	}
+
+	n.share(c, arc, id)
+	return true
+}
+
+// Vertex values that are held in BaseValue will be wrapped in the following
+// order:
+//
+//	disjuncts -> (shared | computed | data)
+//
+// DerefDisjunct
+//   - get the current value under computation
+//
+// DerefValue
+//   - get the value the node ultimately represents.
+//
+
+// DerefValue unrolls indirections of Vertex values. These may be introduced,
+// for instance, by temporary bindings such as comprehension values.
+// It returns v itself if v does not point to another Vertex.
+func (v *Vertex) DerefValue() *Vertex {
+	for {
+		arc, ok := v.BaseValue.(*Vertex)
+		if !ok {
+			return v
+		}
+		v = arc
+	}
+}
+
+// DerefDisjunct indirects a node that points to a disjunction.
+func (v *Vertex) DerefDisjunct() *Vertex {
+	for {
+		arc, ok := v.BaseValue.(*Vertex)
+		if !ok || !arc.IsDisjunct {
+			return v
+		}
+		v = arc
+	}
+}
+
+// DerefNonDisjunct unrolls indirections of Vertex values, but stops at values
+// that are the result of a disjunction.
+func (v *Vertex) DerefNonDisjunct() *Vertex {
+	for {
+		arc, ok := v.BaseValue.(*Vertex)
+		if !ok || arc.IsDisjunct {
+			return v
+		}
+		v = arc
+	}
+}
+
+// DerefNonRooted indirects a node that points to a value that is not rooted.
+// This includes structure-shared nodes that point to a let field: let fields
+// may or may not be part of a struct, and thus should be treated as non-rooted.
+func (v *Vertex) DerefNonRooted() *Vertex {
+	for {
+		arc, ok := v.BaseValue.(*Vertex)
+		if !ok || arc.IsDisjunct || (v.IsShared && !arc.Label.IsLet()) {
+			return v
+		}
+		v = arc
+	}
+}
+
+// DerefNonShared finds the indirection of an arc that is not the result of
+// structure sharing. This is especially relevant when indirecting disjunction
+// values.
+func (v *Vertex) DerefNonShared() *Vertex {
+	if v.state != nil && v.state.isShared {
+		return v
+	}
+	for {
+		arc, ok := v.BaseValue.(*Vertex)
+		if !ok {
+			return v
+		}
+		v = arc
+	}
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/states.go b/vendor/cuelang.org/go/internal/core/adt/states.go
new file mode 100644
index 0000000000..09419291e2
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/states.go
@@ -0,0 +1,328 @@
+// Copyright 2023 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+// TODO: clean up following notes:
+
+// Used in expr.go:
+// - ctx.value (uses: non-concrete scalar allowed, concrete scalar, concrete composite)
+// - evalState
+// - ctx.node (need to know all fields)
+// - ctx.lookup
+// - ctx.concrete
+//
+// - ctx.relLabel
+//     OK: always exists
+// - ctx.relNode (upcount + unify(partial))
+//     OK: node always exists.
+//
+// - ctx.evalState (in validation of comparison against bottom)
+//
+// - arc.Finalize (finalized)
+// - CompleteArcs (conjuncts)
+//
+
+// lookup in p1
+// - process remaining field todos
+
+// lookup:
+// if node is currently processing, just look up directly and create
+// field with notification.
+//
+// if node has not been processed, process once.
+//
+// Any dynamic fields should have been triggered by the existence of a new
+// arc. This will either cascade the evaluation or not.
+
+// p1: {
+//	(p1.baz): "bar" // t1
+//	(p1.foo): "baz" // t2
+//	baz: "foo"
+// }
+//
+// -> t1 ->
+// -> t1
+// -> t2 ->
+
+// p2: {
+//	(p2[p2.baz]): "bar"
+//	(p2.foo): "baz"
+//	baz: "qux"
+//	qux: "foo"
+// }
+
+// b -> a -> b: detected cycle in b:
+//
+// xxx register expression (a-10) being processed as a post constraint.
+// add task to pending.
+// register value as waiting for scalar to be completed later.
+// return with cycle/incomplete error.
+//
+// - in b:
+// xxx register expression (b+10) as post constraint.
+// add task to pending
+// register value as waiting for scalar to be completed later.
+// 5 is processed and set
+// this completes the task in b
+// this sets a scalar in b
+// this completes the expression in a
+//
+// b: a - 10
+// a: b + 10
+// a: 5
+//
+// a: a
+// a: 5
+//
+
+// These are the condition types of the CUE evaluator. A scheduler
+// is associated with a single Vertex. So when these states refer to a Vertex,
+// it is the Vertex associated with the scheduler.
+//
+// There are core conditions and condition sets. The core conditions are
+// determined during operation as conditions are met. The condition sets are
+// used to indicate a set of required or provided conditions.
+//
+// Core conditions can be signal conditions or counter conditions. A counter
+// condition is triggered if all conjuncts that contribute to the computation
+// of this condition have been met. A signal condition is triggered as soon as
+// evidence is found that this condition is met. Unless otherwise specified,
+// conditions are counter conditions.
+const (
+	// allAncestorsProcessed indicates that all conjuncts that could be added
+	// to the Vertex by any of its ancestors have been added. In other words,
+	// all ancestor schedulers have reached the state fieldConjunctsKnown.
+	//
+	// This is a signal condition. It is explicitly set in unify when a
+	// parent meets fieldConjunctsKnown|allAncestorsProcessed.
+	allAncestorsProcessed condition = 1 << iota
+
+	// Really: all ancestor subfield tasks processed.
+
+	// arcTypeKnown means that the ArcType value of a Vertex is fully
+	// determined. The ArcType of all fields of a Vertex needs to be known
+	// before the complete set of fields of this Vertex can be known.
+	arcTypeKnown
+
+	// valueKnown means that it is known what the "type" of the value would be
+	// if present.
+	valueKnown
+
+	// scalarKnown indicates that a Vertex has either a concrete scalar value or
+	// that it is known that it will never have a scalar value.
+	//
+	// This is a signal condition that is reached when:
+	//   - a node is set to a concrete scalar value
+	//   - a node is set to an error
+	//   - or if ...state is reached.
+	//
+	// TODO: rename to something better?
+	scalarKnown
+
+	// listTypeKnown indicates that it is known that lists unified with this
+	// Vertex should be interpreted as integer indexed lists, as associative
+	// lists, or an error.
+	//
+	// This is a signal condition that is reached when:
+	//   - allFieldsKnown is reached (all expressions have )
+	//   - it is unified with an associative list type
+	listTypeKnown
+
+	// fieldConjunctsKnown means that all the conjuncts of all fields are
+	// known.
+	fieldConjunctsKnown
+
+	// fieldSetKnown means that all fields of this node are known. This is true
+	// if all tasks that can add a field have been processed and if
+	// all pending arcs have been resolved.
+	fieldSetKnown
+
+	// // allConjunctsKnown means that all conjuncts have been registered as a
+	// // task. allParentsProcessed must be true for this to be true.
+	// allConjunctsKnown
+
+	// allTasksCompleted means that all tasks of a Vertex have been completed
+	// with the exception of validation tasks. A Vertex may still not be
+	// finalized.
+	allTasksCompleted
+
+	// subFieldsProcessed means that all tasks of a Vertex, including those of
+	// its arcs, have been completed.
+	//
+	// This is a signal condition that is met if all arcs have reached the
+	// state finalStateKnown.
+	//
+	subFieldsProcessed
+
+	// disjunctionTask indicates that this task is a disjunction. This is
+	// used to trigger finalization of disjunctions.
+	disjunctionTask
+
+	leftOfMaxCoreCondition
+
+	finalStateKnown condition = leftOfMaxCoreCondition - 1
+
+	preValidation condition = finalStateKnown //&^ validationCompleted
+
+	conditionsUsingCounters = arcTypeKnown |
+		valueKnown |
+		fieldConjunctsKnown |
+		allTasksCompleted
+
+	// The xConjunct condition sets indicate a conjunct MAY contribute to the
+	// final result. For some conjuncts it may not be known what the
+	// contribution will be. In such cases the set that reflects all possible
+	// contributions should be used. For instance, an embedded reference may
+	// resolve to a scalar or struct.
+	//
+	// All conjunct states include allTasksCompleted.
+
+	// a genericConjunct is one for which the contributions to the states
+	// are not known in advance. For instance, an embedded reference can be
+	// anything. In such a case, all conditions are included.
+	genericConjunct = allTasksCompleted |
+		scalarKnown |
+		valueKnown |
+		fieldConjunctsKnown
+
+	// genericDisjunction is used to record processDisjunction tasks.
+	genericDisjunction = genericConjunct | disjunctionTask
+
+	// a fieldConjunct is one that only adds a new field to the struct.
+	fieldConjunct = allTasksCompleted |
+		fieldConjunctsKnown
+
+	// a scalarConjunct is one that is guaranteed to result in a scalar or
+	// list value.
+	scalarConjunct = allTasksCompleted |
+		scalarKnown |
+		valueKnown
+
+	// needsX condition sets are used to indicate which conditions need to be
+	// met.
+
+	needFieldConjunctsKnown = fieldConjunctsKnown |
+		allAncestorsProcessed
+
+	needFieldSetKnown = fieldSetKnown |
+		allAncestorsProcessed
+
+	needTasksDone = allAncestorsProcessed | allTasksCompleted
+
+	// concreteKnown means that we know whether a value is concrete or not.
+	// At the moment this is equal to 'scalarKnown'.
+	concreteKnown = scalarKnown
+)
+
+// schedConfig configures a taskContext with the states needed for the
+// CUE evaluator. It is used in OpContext.New as a template for creating
+// new taskContexts.
+var schedConfig = taskContext{
+	counterMask: conditionsUsingCounters,
+	autoUnblock: listTypeKnown | scalarKnown | arcTypeKnown,
+	complete:    stateCompletions,
+}
+
+// stateCompletions indicates the completion of conditions based on the
+// completions of other conditions.
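+// For instance, once allAncestorsProcessed is met, counter conditions that
+// this node does not itself provide are marked completed as well, and
+// valueKnown implies listTypeKnown.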
+func stateCompletions(s *scheduler) condition {
+	x := s.completed
+	v := s.node.node
+	s.node.Logf("=== stateCompletions: %v %v", v.Label, s.completed)
+	if x.meets(allAncestorsProcessed) {
+		x |= conditionsUsingCounters &^ s.provided
+		// If we have a pending arc, a sub arc may still cause the arc to
+		// become not pending. For instance, if 'a' is pending in the following
+		//
+		//	if x != _|_ {
+		//		a: b: 1
+		//	}
+		//
+		// it may still become not pending if 'b' becomes a regular arc.
+		if s.counters[arcTypeKnown] == 0 && x.meets(subFieldsProcessed) {
+			x |= arcTypeKnown
+		}
+	}
+	switch {
+	case v.ArcType == ArcMember, v.ArcType == ArcNotPresent:
+		x |= arcTypeKnown
+	case x&arcTypeKnown != 0 && v.ArcType == ArcPending:
+		v.ArcType = ArcNotPresent
+	}
+
+	if x.meets(valueKnown) {
+		// NOTE: in this case, scalarKnown is not the same as concreteKnown,
+		// especially if this arc is Pending, as it may still become concrete.
+		// We probably want to separate this out.
+		if v.ArcType == ArcMember || v.ArcType == ArcNotPresent {
+			x |= scalarKnown
+		}
+		x |= listTypeKnown
+	}
+
+	if x.meets(needFieldConjunctsKnown | needTasksDone) {
+		switch {
+		case x.meets(subFieldsProcessed):
+			x |= fieldSetKnown
+		default:
+			for _, a := range v.Arcs {
+				if a.ArcType == ArcPending {
+					return x
+				}
+			}
+			x |= fieldSetKnown
+		}
+	}
+	return x
+}
+
+// allChildConjunctsKnown indicates that all conjuncts have been added by
+// the parents and every conjunct that may add fields to subfields has been
+// processed.
+func (v *Vertex) allChildConjunctsKnown() bool {
+	if v == nil {
+		return true
+	}
+
+	return v.state.meets(fieldConjunctsKnown | allAncestorsProcessed)
+}
+
+func (n *nodeContext) scheduleTask(r *runner, env *Environment, x Node, ci CloseInfo) *task {
+	t := &task{
+		run:  r,
+		node: n,
+
+		env: env,
+		id:  ci,
+		x:   x,
+	}
+	n.insertTask(t)
+	return t
+}
+
+// require ensures that a given condition is met for the given Vertex by
+// evaluating it. It yields execution back to the scheduler if it cannot
+// be completed at this point.
+func (c *OpContext) require(v *Vertex, needs condition) {
+	state := v.getState(c)
+	if state == nil {
+		return
+	}
+	state.process(needs, yield)
+}
+
+// scalarValue evaluates the given expression and either returns a
+// concrete value or schedules the task for later evaluation.
+func (ctx *OpContext) scalarValue(t *task, x Expr) Value {
+	return ctx.value(x, require(0, scalarKnown))
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/tasks.go b/vendor/cuelang.org/go/internal/core/adt/tasks.go
new file mode 100644
index 0000000000..00412de8d9
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/tasks.go
@@ -0,0 +1,352 @@
+// Copyright 2023 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+	"fmt"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/token"
+)
+
+var (
+	handleExpr              *runner
+	handleResolver          *runner
+	handleDynamic           *runner
+	handlePatternConstraint *runner
+	handleComprehension     *runner
+	handleListLit           *runner
+	handleListVertex        *runner
+	handleDisjunctions      *runner
+)
+
+// Use init to avoid a (spurious?) cyclic dependency in Go.
+func init() {
+	handleExpr = &runner{
+		name:      "Expr",
+		f:         processExpr,
+		completes: genericConjunct,
+	}
+	handleResolver = &runner{
+		name:      "Resolver",
+		f:         processResolver,
+		completes: genericConjunct,
+	}
+	handleDynamic = &runner{
+		name:      "Dynamic",
+		f:         processDynamic,
+		completes: fieldConjunct,
+	}
+	handlePatternConstraint = &runner{
+		name:      "PatternConstraint",
+		f:         processPatternConstraint,
+		completes: allTasksCompleted | fieldConjunctsKnown,
+	}
+	handleComprehension = &runner{
+		name:      "Comprehension",
+		f:         processComprehension,
+		completes: valueKnown | allTasksCompleted | fieldConjunctsKnown,
+	}
+	handleListLit = &runner{
+		name:      "ListLit",
+		f:         processListLit,
+		completes: fieldConjunct,
+		needs:     listTypeKnown,
+	}
+	handleListVertex = &runner{
+		name:      "ListVertex",
+		f:         processListVertex,
+		completes: fieldConjunct,
+		needs:     listTypeKnown,
+	}
+	handleDisjunctions = &runner{
+		name:      "Disjunctions",
+		f:         processDisjunctions,
+		completes: genericDisjunction,
+		priority:  1,
+	}
+}
+
+// This file contains task runners (func(ctx *OpContext, t *task, mode runMode)).
+
+func processExpr(ctx *OpContext, t *task, mode runMode) {
+	x := t.x.(Expr)
+
+	state := combineMode(concreteKnown, mode)
+	v := ctx.evalState(x, state)
+	t.node.insertValueConjunct(t.env, v, t.id)
+}
+
+func processResolver(ctx *OpContext, t *task, mode runMode) {
+	r := t.x.(Resolver)
+
+	// TODO(perf): if we are resolving a value where we know a scalar value can
+	// be conclusive, we could avoid triggering the evaluation of disjunctions.
+	// This would be a pretty significant rework, though.
+
+	arc := r.resolve(ctx, oldOnly(0))
+	if arc == nil {
+		// TODO: yield instead?
+		return
+	}
+	arc = arc.DerefNonDisjunct()
+
+	ctx.Logf(t.node.node, "RESOLVED %v to %v %v", r, arc.Label, fmt.Sprintf("%p", arc))
+	// TODO: consider moving after markCycle or removing.
+	d := arc.DerefDisjunct()
+
+	// A reference that points to itself indicates equality. In that case
+	// we are done computing and we can return the arc as is.
+	ci, skip := t.node.markCycle(d, t.env, r, t.id)
+	if skip {
+		return
+	}
+
+	if t.defunct {
+		return
+	}
+
+	c := MakeConjunct(t.env, t.x, ci)
+	t.node.scheduleVertexConjuncts(c, arc, ci)
+}
+
+func processDynamic(ctx *OpContext, t *task, mode runMode) {
+	n := t.node
+
+	field := t.x.(*DynamicField)
+
+	v := ctx.scalarValue(t, field.Key)
+	if v == nil {
+		return
+	}
+
+	if v.Concreteness() != Concrete {
+		n.addBottom(&Bottom{
+			Code: IncompleteError,
+			Err: ctx.NewPosf(pos(field.Key),
+				"key value of dynamic field must be concrete, found %v", v),
+		})
+		return
+	}
+
+	f := ctx.Label(field.Key, v)
+	// TODO: remove this restriction.
+	if f.IsInt() {
+		n.addErr(ctx.NewPosf(pos(field.Key), "integer fields not supported"))
+		return
+	}
+
+	c := MakeConjunct(t.env, field, t.id)
+	c.CloseInfo.cc = nil
+	n.insertArc(f, field.ArcType, c, t.id, true)
+}
+
+func processPatternConstraint(ctx *OpContext, t *task, mode runMode) {
+	n := t.node
+
+	field := t.x.(*BulkOptionalField)
+
+	// Note that the result may be a disjunction. Be sure to not take the
+	// default value as we want to retain the options of the disjunction.
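+	// (Illustrative CUE, not from this file: in {[>"a"]: int} the Filter is
+	// the bound >"a"; were the Filter a disjunction of labels, taking its
+	// default would drop the other alternatives from the allowed label set.)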
+	v := ctx.evalState(field.Filter, require(0, scalarKnown))
+	if v == nil {
+		return
+	}
+
+	n.insertPattern(v, MakeConjunct(t.env, t.x, t.id))
+}
+
+func processComprehension(ctx *OpContext, t *task, mode runMode) {
+	n := t.node
+
+	y := &envYield{
+		envComprehension: t.comp,
+		leaf:             t.leaf,
+		env:              t.env,
+		id:               t.id,
+		expr:             t.x,
+	}
+
+	err := n.processComprehension(y, 0)
+	t.err = CombineErrors(nil, t.err, err)
+	t.comp.vertex.state.addBottom(err)
+}
+
+func processDisjunctions(c *OpContext, t *task, mode runMode) {
+	n := t.node
+	err := n.processDisjunctions()
+	t.err = CombineErrors(nil, t.err, err)
+}
+
+func processFinalizeDisjunctions(c *OpContext, t *task, mode runMode) {
+	n := t.node
+	n.finalizeDisjunctions()
+}
+
+func processListLit(c *OpContext, t *task, mode runMode) {
+	n := t.node
+
+	l := t.x.(*ListLit)
+
+	n.updateCyclicStatus(t.id)
+
+	var ellipsis Node
+
+	index := int64(0)
+	hasComprehension := false
+	for j, elem := range l.Elems {
+		// TODO: Terminate early in case of runaway comprehension.
+
+		switch x := elem.(type) {
+		case *Comprehension:
+			err := c.yield(nil, t.env, x, 0, func(e *Environment) {
+				label, err := MakeLabel(x.Source(), index, IntLabel)
+				n.addErr(err)
+				index++
+				c := MakeConjunct(e, x.Value, t.id)
+				n.insertArc(label, ArcMember, c, t.id, true)
+			})
+			hasComprehension = true
+			if err != nil {
+				n.addBottom(err)
+				return
+			}
+
+		case *Ellipsis:
+			if j != len(l.Elems)-1 {
+				n.addErr(c.Newf("ellipsis must be last element in list"))
+				return
+			}
+
+			elem := x.Value
+			if elem == nil {
+				elem = &Top{}
+			}
+
+			c := MakeConjunct(t.env, elem, t.id)
+			pat := &BoundValue{
+				Op:    GreaterEqualOp,
+				Value: n.ctx.NewInt64(index, x),
+			}
+			n.insertPattern(pat, c)
+			ellipsis = x
+
+		default:
+			label, err := MakeLabel(x.Source(), index, IntLabel)
+			n.addErr(err)
+			index++
+			c := MakeConjunct(t.env, x, t.id)
+			n.insertArc(label, ArcMember, c, t.id, true)
+		}
+
+		if max := n.maxListLen; n.listIsClosed && int(index) > max {
+			n.invalidListLength(max, len(l.Elems), n.maxNode, l)
+			return
+		}
+	}
+
+	isClosed := ellipsis == nil
+
+	switch max := n.maxListLen; {
+	case int(index) < max:
+		if isClosed {
+			n.invalidListLength(int(index), max, l, n.maxNode)
+			return
+		}
+
+	case int(index) > max,
+		isClosed && !n.listIsClosed,
+		(isClosed == n.listIsClosed) && !hasComprehension:
+		n.maxListLen = int(index)
+		n.maxNode = l
+		n.listIsClosed = isClosed
+	}
+
+	n.updateListType(l, t.id, isClosed, ellipsis)
+}
+
+func processListVertex(c *OpContext, t *task, mode runMode) {
+	n := t.node
+
+	l := t.x.(*Vertex)
+
+	elems := l.Elems()
+	isClosed := l.IsClosedList()
+
+	// TODO: Share with code above.
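+	// (Example of the checks below: unifying a closed list of length 2 with
+	// one of length 3 fails with an invalid list length error.)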
+	switch max := n.maxListLen; {
+	case len(elems) < max:
+		if isClosed {
+			n.invalidListLength(len(elems), max, l, n.maxNode)
+			return
+		}
+
+	case len(elems) > max:
+		if n.listIsClosed {
+			n.invalidListLength(max, len(elems), n.maxNode, l)
+			return
+		}
+		n.listIsClosed = isClosed
+		n.maxListLen = len(elems)
+		n.maxNode = l
+
+	case isClosed:
+		n.listIsClosed = true
+		n.maxNode = l
+	}
+
+	for _, a := range elems {
+		if a.Conjuncts == nil {
+			c := MakeRootConjunct(nil, a)
+			n.insertArc(a.Label, ArcMember, c, CloseInfo{}, true)
+			continue
+		}
+		for _, c := range a.Conjuncts {
+			c.CloseInfo.cc = t.id.cc
+			n.insertArc(a.Label, ArcMember, c, t.id, true)
+		}
+	}
+
+	n.updateListType(l, t.id, isClosed, nil)
+}
+
+func (n *nodeContext) updateListType(list Expr, id CloseInfo, isClosed bool, ellipsis Node) {
+	if n.kind == 0 {
+		n.node.updateStatus(finalized) // TODO(neweval): remove once transitioned.
+		return
+	}
+	m, ok := n.node.BaseValue.(*ListMarker)
+	if !ok {
+		m = &ListMarker{
+			IsOpen: true,
+		}
+		n.node.setValue(n.ctx, conjuncts, m)
+	}
+	m.IsOpen = m.IsOpen && !isClosed
+
+	if ellipsis != nil {
+		if src, _ := ellipsis.Source().(ast.Expr); src != nil {
+			if m.Src == nil {
+				m.Src = src
+			} else {
+				m.Src = ast.NewBinExpr(token.AND, m.Src, src)
+			}
+		}
+	}
+
+	if n.kind != ListKind {
+		n.updateNodeType(ListKind, list, id)
+	}
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/unify.go b/vendor/cuelang.org/go/internal/core/adt/unify.go
new file mode 100644
index 0000000000..5954630ff0
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/unify.go
@@ -0,0 +1,760 @@
+// Copyright 2023 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+	"fmt"
+
+	"cuelang.org/go/cue/token"
+)
+
+func (v *Vertex) isInitialized() bool {
+	return v.status == finalized || (v.state != nil && v.state.isInitialized)
+}
+
+func (n *nodeContext) assertInitialized() {
+	if n != nil && n.ctx.isDevVersion() {
+		if v := n.node; !v.isInitialized() {
+			panic(fmt.Sprintf("vertex %p not initialized", v))
+		}
+	}
+}
+
+// isInProgress reports whether v is in the midst of being evaluated. This means
+// that conjuncts have been scheduled, but that it has not been finalized.
+func (v *Vertex) isInProgress() bool {
+	return v.status != finalized && v.state != nil && v.state.isInitialized
+}
+
+func (v *Vertex) getBareState(c *OpContext) *nodeContext {
+	if v.status == finalized { // TODO: use BaseValue != nil
+		return nil
+	}
+	if v.state == nil {
+		v.state = c.newNodeContext(v)
+		v.state.initBare()
+		v.state.refCount = 1
+	}
+
+	// An additional refCount for the current user.
+	v.state.refCount += 1
+
+	// TODO: see if we can get rid of ref counting after new evaluator is done:
+	// the recursive nature of the new evaluator should make this unnecessary.
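+	// (The reference taken here is balanced by a call to free; see the
+	// deferred n.free() in Vertex.unify below.)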
+
+	return v.state
+}
+
+func (v *Vertex) getState(c *OpContext) *nodeContext {
+	s := v.getBareState(c)
+	if s != nil && !s.isInitialized {
+		s.scheduleConjuncts()
+	}
+	return s
+}
+
+// initBare initializes a nodeContext for the evaluation of the given Vertex.
+func (n *nodeContext) initBare() {
+	v := n.node
+	if v.Parent != nil && v.Parent.state != nil {
+		v.state.depth = v.Parent.state.depth + 1
+		n.blockOn(allAncestorsProcessed)
+	}
+
+	n.blockOn(scalarKnown | listTypeKnown | arcTypeKnown)
+
+	if v.Label.IsDef() {
+		v.Closed = true
+	}
+
+	if v.Parent != nil {
+		if v.Parent.Closed {
+			v.Closed = true
+		}
+	}
+}
+
+func (n *nodeContext) scheduleConjuncts() {
+	n.isInitialized = true
+
+	v := n.node
+	ctx := n.ctx
+
+	ctx.stats.Unifications++
+
+	// Set the cache to a cycle error to ensure a cyclic reference will result
+	// in an error if applicable. A cyclic error may be ignored for
+	// non-expression references. The cycle error may also be removed as soon
+	// as there is evidence what a correct value must be, but before all
+	// validation has taken place.
+	//
+	// TODO(cycle): having a more recursive algorithm would make this
+	// special cycle handling unnecessary.
+	v.BaseValue = cycle
+
+	defer ctx.PopArc(ctx.PushArc(v))
+
+	root := n.node.rootCloseContext(n.ctx)
+	root.incDependent(n.ctx, INIT, nil) // decremented below
+
+	for _, c := range v.Conjuncts {
+		ci := c.CloseInfo
+		ci.cc = root
+		n.scheduleConjunct(c, ci)
+	}
+
+	root.decDependent(ctx, INIT, nil)
+}
+
+func (v *Vertex) unify(c *OpContext, needs condition, mode runMode) bool {
+	if c.LogEval > 0 {
+		c.nest++
+		c.Logf(v, "Unify %v", fmt.Sprintf("%p", v))
+		defer func() {
+			c.Logf(v, "END Unify")
+			c.nest--
+		}()
+	}
+
+	if mode == ignore {
+		return false
+	}
+
+	n := v.getState(c)
+	if n == nil {
+		return true // already completed
+	}
+	defer n.free()
+
+	defer n.unmarkDepth(n.markDepth())
+
+	// Typically a node processes all conjuncts before processing its fields.
+	// So this condition is very likely to trigger. If for some reason the
+	// parent has not been processed yet, we could attempt to process more
+	// of its tasks to increase the chances of being able to find the
+	// information we are looking for here. For now we just continue as is.
+	//
+	// For dynamic nodes, the parent only exists to provide a path context.
+	//
+	// Note that if mode is finalize, we will guarantee that the conditions for
+	// this if clause are met down the line. So we assume this is already the
+	// case and set the signal accordingly if so.
+	if v.Label.IsLet() || v.IsDynamic || v.Parent.allChildConjunctsKnown() || mode == finalize {
+		n.signal(allAncestorsProcessed)
+	}
+
+	nodeOnlyNeeds := needs &^ (subFieldsProcessed)
+	n.process(nodeOnlyNeeds, mode)
+
+	defer c.PopArc(c.PushArc(v))
+
+	w := v.DerefDisjunct()
+	if w != v {
+		// Should resolve with dereference.
+		v.Closed = w.Closed
+		v.status = w.status
+		v.ChildErrors = CombineErrors(nil, v.ChildErrors, w.ChildErrors)
+		v.Arcs = nil
+		return w.state.meets(needs)
+	}
+	n.updateScalar()
+
+	if n.aStruct != nil {
+		n.updateNodeType(StructKind, n.aStruct, n.aStructID)
+	}
+
+	// First process all but the subfields.
+	switch {
+	case n.meets(nodeOnlyNeeds):
+		// pass through next phase.
+	case mode != finalize:
+		// TODO: disjunctions may benefit from being evaluated as prematurely
+		// as possible, as this increases the chances of premature failure.
+		// We should consider doing a recursive "attemptOnly" evaluation here.
+		return false
+	}
+
+	if n.isShared {
+		if isCyclePlaceholder(n.origBaseValue) {
+			n.origBaseValue = nil
+		}
+	} else if isCyclePlaceholder(n.node.BaseValue) {
+		n.node.BaseValue = nil
+	}
+	if !n.isShared {
+		// TODO(sharewithval): allow structure sharing if we only have
+		// validators and references.
+		// TODO: rewrite to use mode when we get rid of old evaluator.
+		state := finalized
+		n.validateValue(state)
+	}
+
+	if n.node.Label.IsLet() || n.meets(allAncestorsProcessed) {
+		if cc := v.rootCloseContext(n.ctx); !cc.isDecremented { // TODO: use v.cc
+			cc.decDependent(c, ROOT, nil) // REF(decrement:nodeDone)
+			cc.isDecremented = true
+		}
+	}
+
+	// At this point, no more conjuncts will be added, so we could decrement
+	// the notification counters.
+
+	switch {
+	case n.completed&subFieldsProcessed != 0:
+		// done
+
+	case needs&subFieldsProcessed != 0:
+		if DebugSort > 0 {
+			DebugSortArcs(n.ctx, n.node)
+		}
+
+		switch {
+		case assertStructuralCycle(n):
+			// TODO: consider bailing on error if n.errs != nil.
+		case n.completeAllArcs(needs, mode):
+		}
+
+		if mode == finalize {
+			n.signal(subFieldsProcessed)
+		}
+
+		if v.BaseValue == nil {
+			// TODO: this seems to not be possible. Possibly remove.
+			state := finalized
+			v.BaseValue = n.getValidators(state)
+		}
+		if v := n.node.Value(); v != nil && IsConcrete(v) {
+			// Ensure that checks are not run again when this value is used
+			// in a validator.
+			checks := n.checks
+			n.checks = n.checks[:0]
+			for _, v := range checks {
+				// TODO(errors): make Validate return bottom and generate
+				// optimized conflict message. Also track and inject IDs
+				// to determine origin locations.
+				if b := c.Validate(v, n.node); b != nil {
+					n.addBottom(b)
+				}
+			}
+		}
+
+	case needs&fieldSetKnown != 0:
+		n.evalArcTypes()
+	}
+
+	if err := n.getErr(); err != nil {
+		n.errs = nil
+		if b := n.node.Bottom(); b != nil {
+			err = CombineErrors(nil, b, err)
+		}
+		n.node.BaseValue = err
+	}
+
+	if mode == attemptOnly {
+		return n.meets(needs)
+	}
+
+	if mask := n.completed & needs; mask != 0 {
+		// TODO: phase3: validation
+		n.signal(mask)
+	}
+
+	n.finalizeDisjunctions()
+
+	w = v.DerefValue() // Dereference anything, including shared nodes.
+	if w != v {
+		// Clear value fields that are now referred to in the dereferenced
+		// value (w).
+		v.ChildErrors = nil
+		v.Arcs = nil
+
+		result := w.unify(c, needs, mode)
+
+		// Set control fields that are referenced without dereferencing.
+		if w.Closed {
+			v.Closed = true
+		}
+		if w.HasEllipsis {
+			v.HasEllipsis = true
+		}
+		v.status = w.status
+
+		return result
+	}
+
+	// TODO: adding this is wrong, but it should not cause the snippet below
+	// to hang. Investigate.
+	// v.Closed = v.cc.isClosed
+	//
+	// This hangs:
+	//
+	//	issue1940: {
+	//		#T: ["a", #T] | ["c", #T] | ["d", [...#T]]
+	//		#A: t: #T
+	//		#B: x: #A
+	//		#C: #B
+	//		#C: x: #A
+	//	}
+
+	// validationCompleted
+	if n.completed&(subFieldsProcessed) != 0 {
+		n.node.HasEllipsis = n.node.cc.hasEllipsis
+
+		n.node.updateStatus(finalized)
+
+		defer n.unmarkOptional(n.markOptional())
+
+		// The next piece of code addresses the following case,
+		// where order matters:
+		//
+		//	c1: c: [string]: f2
+		//	f2: c1
+		//
+		// Also: cycle/issue990
+		if pc := n.node.PatternConstraints; pc != nil {
+			for _, c := range pc.Pairs {
+				c.Constraint.Finalize(n.ctx)
+			}
+		}
+
+		if DebugDeps {
+			RecordDebugGraph(n.ctx, n.node, "Finalize")
+		}
+	}
+
+	return n.meets(needs)
+}
+
+// Once this function returns, all arcs and conjuncts that can be known are
+// known.
+//
+// Proof:
+//   - if there is a cycle, completeNodeTasks will be called repeatedly for
+//     all nodes in this cycle, and all tasks on the cycle will have run at
+//     least once.
+//   - any tasks that were blocking on values on this cycle to be completed
+//     will thus have to be completed at some point in time if they can.
+//   - any tasks that were blocking on values outside of this cycle will have
+//     initiated their own execution, which is either not cyclic, and thus
+//     completes, or is on a different cycle, in which case it completes as
+//     well.
+//
+// Goal:
+//   - complete notifications
+//   - decrement reference counts for root and notify.
+//
+// NOT:
+//   - complete value. That is reserved for Unify.
+func (n *nodeContext) completeNodeTasks(mode runMode) {
+	n.assertInitialized()
+
+	if n.isCompleting > 0 {
+		return
+	}
+	n.isCompleting++
+	defer func() {
+		n.isCompleting--
+	}()
+
+	v := n.node
+	c := n.ctx
+
+	if n.ctx.LogEval > 0 {
+		c.nest++
+		defer func() {
+			c.nest--
+		}()
+	}
+
+	if p := v.Parent; p != nil && p.state != nil {
+		if !v.IsDynamic && n.completed&allAncestorsProcessed == 0 {
+			p.state.completeNodeTasks(mode)
+		}
+	}
+
+	if v.IsDynamic || v.Parent.allChildConjunctsKnown() {
+		n.signal(allAncestorsProcessed)
+	}
+
+	if len(n.scheduler.tasks) != n.scheduler.taskPos {
+		// TODO: do we need any more requirements here?
+		const needs = valueKnown | fieldConjunctsKnown
+
+		n.process(needs, mode)
+		n.updateScalar()
+	}
+
+	// Check:
+	// - parents (done)
+	// - incoming notifications
+	// - pending arcs (or incoming COMPS)
+	// TODO: replace with something more principled that does not piggyback on
+	// debug information.
+	for _, r := range v.cc.externalDeps {
+		src := r.src
+		a := &src.arcs[r.index]
+		if a.decremented || a.kind != NOTIFY {
+			continue
+		}
+		if n := src.src.getState(n.ctx); n != nil {
+			n.completeNodeTasks(mode)
+		}
+	}
+
+	// As long as ancestors are not processed, it is still possible for
+	// conjuncts to be inserted. Until that time, it is not okay to decrement
+	// the root. It is not necessary to wait on tasks to complete, though,
+	// as pending tasks will have their own dependencies on root, meaning it
+	// is safe to decrement here.
+	if !n.meets(allAncestorsProcessed) && !n.node.Label.IsLet() && mode != finalize {
+		return
+	}
+
+	// At this point, no more conjuncts will be added, so we could decrement
+	// the notification counters.
+
+	if cc := v.rootCloseContext(n.ctx); !cc.isDecremented { // TODO: use v.cc
+		cc.isDecremented = true
+
+		cc.decDependent(n.ctx, ROOT, nil) // REF(decrement:nodeDone)
+	}
+
+	return
+}
+
+func (n *nodeContext) updateScalar() {
+	// Set BaseValue to scalar, but only if it was not set before. Most notably,
+	// errors should not be discarded.
+	if n.scalar != nil && (!n.node.IsErr() || isCyclePlaceholder(n.node.BaseValue)) {
+		n.node.BaseValue = n.scalar
+		n.signal(scalarKnown)
+	}
+}
+
+func (n *nodeContext) completeAllArcs(needs condition, mode runMode) bool {
+	if n.node.status == evaluatingArcs {
+		// NOTE: this was an "incomplete" error pre v0.6. If this is a problem
+		// we could make this a CycleError. Technically, this may be correct,
+		// as it is possible to make the values exactly as the inserted
+		// values. It seems more user friendly to just disallow this, though.
+		// TODO: make uniform error messages
+		// see compbottom2.cue:
+		n.ctx.addErrf(CycleError, pos(n.node), "mutual dependency")
+		n.node.IsCyclic = true
+		// Consider using this, although not all
+		// mutual dependencies are irrecoverable.
+		// n.reportCycleError()
+	}
+
+	// TODO: remove the use of updateStatus as a cycle detection mechanism.
+	n.node.updateStatus(evaluatingArcs)
+
+	if n.underlying != nil {
+		// References within the disjunct may end up referencing the layer that
+		// this node overlays. Also for these nodes we want to be able to detect
+		// structural cycles early. For this reason, we also set the
+		// evaluatingArcs status in the underlying layer.
+		//
+		// TODO: for now, this seems not necessary. Moreover, this will cause
+		// benchmarks/cycle to display a spurious structural cycle. But it
+		// shortens some of the structural cycle depths. So consider using this.
+		//
+		// status := n.underlying.status
+		// n.underlying.updateStatus(evaluatingArcs)
+		// defer func() { n.underlying.status = status }()
+	}
+
+	// TODO: this should only be done if n is not currently running tasks.
+	// Investigate how to work around this.
+	n.completeNodeTasks(finalize)
+
+	for _, r := range n.node.cc.externalDeps {
+		src := r.src
+		a := &src.arcs[r.index]
+		if a.decremented {
+			continue
+		}
+		a.decremented = true
+
+		// FIXME: we should be careful to not evaluate parent nodes if we
+		// are inside a disjunction, or at least ensure that there are no
+		// disjunction values leaked into non-disjunction nodes through
+		// evaluating externalDeps.
+		src.src.unify(n.ctx, needTasksDone, attemptOnly)
+		a.cc.decDependent(n.ctx, a.kind, src) // REF(arcs)
+	}
+
+	n.incDepth()
+
+	// XXX(0.7): only set success if needs complete arcs.
+	success := true
+	// Visit arcs recursively to validate and compute error.
+	for n.arcPos < len(n.node.Arcs) {
+		a := n.node.Arcs[n.arcPos]
+		n.arcPos++
+
+		if !a.unify(n.ctx, needs, mode) {
+			success = false
+		}
+
+		// At this point we need to ensure that all notification cycles
+		// for Arc a have been processed.
+
+		if a.ArcType == ArcPending {
+			// TODO: cancel tasks?
+			// TODO: is this ever run? Investigate once new evaluator work is
+			// complete.
+			a.ArcType = ArcNotPresent
+			continue
+		}
+
+		// Errors are allowed in let fields. Handle errors and failure to
+		// complete accordingly.
+		if !a.Label.IsLet() && a.ArcType <= ArcRequired {
+			a := a.DerefValue()
+			if err := a.Bottom(); err != nil {
+				n.node.AddChildError(err)
+			}
+			success = true // other arcs are irrelevant
+		}
+
+		// TODO: harmonize this error with "cannot combine"
+		switch {
+		case a.ArcType > ArcRequired, !a.Label.IsString():
+		case n.kind&StructKind == 0:
+			if !n.node.IsErr() {
+				n.reportFieldMismatch(pos(a.Value()), nil, a.Label, n.node.Value())
+			}
+		// case !wasVoid:
+		// case n.kind == TopKind:
+		// 	// Theoretically it may be possible that a "void" arc references
+		// 	// this top value where it really should have been a struct. One
+		// 	// way to solve this is to have two passes over the arcs, where
+		// 	// the first pass additionally analyzes whether comprehensions
+		// 	// will yield values and "un-voids" an arc ahead of the rest.
+		// 	//
+		// 	// At this moment, though, I fail to see a way to create faulty
+		// 	// CUE using this mechanism. At most error messages are a bit
+		// 	// unintuitive. This may change once we have functionality to
+		// 	// reflect on types.
+		// 	if _, ok := n.node.BaseValue.(*Bottom); !ok {
+		// 		n.node.BaseValue = &StructMarker{}
+		// 		n.kind = StructKind
+		// 	}
+		}
+	}
+
+	n.decDepth()
+
+	k := 0
+	for _, a := range n.node.Arcs {
+		if a.ArcType != ArcNotPresent {
+			n.node.Arcs[k] = a
+			k++
+		}
+	}
+	n.node.Arcs = n.node.Arcs[:k]
+
+	// Strip struct literals that were not initialized and are not part
+	// of the output.
+	//
+	// TODO(perf): we could keep track if any such structs exist and only
+	// do this removal if there is a chance of shrinking the list.
+	k = 0
+	for _, s := range n.node.Structs {
+		if s.initialized {
+			n.node.Structs[k] = s
+			k++
+		}
+	}
+	n.node.Structs = n.node.Structs[:k]
+
+	// TODO: This seems to be necessary, but enables structural cycles.
+	// Evaluate whether we still need this.
+	//
+	// pc := n.node.PatternConstraints
+	// if pc == nil {
+	// 	return success
+	// }
+	// for _, c := range pc.Pairs {
+	// 	c.Constraint.Finalize(n.ctx)
+	// }
+
+	return success
+}
+
+func (n *nodeContext) evalArcTypes() {
+	for _, a := range n.node.Arcs {
+		if a.ArcType != ArcPending {
+			continue
+		}
+		a.unify(n.ctx, arcTypeKnown, yield)
+		// Ensure the arc is processed up to the desired level.
+		if a.ArcType == ArcPending {
+			// TODO: cancel tasks?
+			a.ArcType = ArcNotPresent
+		}
+	}
+}
+
+func (v *Vertex) lookup(c *OpContext, pos token.Pos, f Feature, flags combinedFlags) *Vertex {
+	task := c.current()
+	needs := flags.conditions()
+	runMode := flags.runMode()
+
+	v = v.DerefValue()
+
+	c.Logf(c.vertex, "LOOKUP %v", f)
+
+	state := v.getState(c)
+	if state != nil {
+		// If the scheduler associated with this vertex was already running,
+		// it means we have encountered a cycle. In that case, we allow
+		// evaluation to proceed with partial data, in which case a "pending"
+		// arc will be created to be completed later.
+
+		// Report error for now.
+		if state.hasErr() {
+			c.AddBottom(state.getErr())
+		}
+		// TODO: ideally this should not be run at this point. Consider under
+		// which circumstances this is still necessary, and at least ensure
+		// this will not be run if node v currently has a running task.
+		state.completeNodeTasks(attemptOnly)
+	}
+
+	// TODO: remove because unnecessary?
+	if task != nil && task.state != taskRUNNING {
+		return nil // abort, task is blocked or terminated in a cycle.
+	}
+
+	// TODO: verify lookup types.
+
+	arc := v.LookupRaw(f)
+	// We leave further dereferencing to the caller, but we do dereference for
+	// the remainder of this function to be able to check the status.
+	arcReturn := arc
+	if arc != nil {
+		arc = arc.DerefNonRooted()
+		// TODO(perf): NonRooted is the minimum, but consider doing more.
+		// arc = arc.DerefValue()
+	}
+
+	// TODO: clean up this logic:
+	// - signal arcTypeKnown when ArcMember or ArcNotPresent is set,
+	//   similarly to scalarKnown.
+	// - make it clear we want to yield if it is now known whether a field exists.
+
+	var arcState *nodeContext
+	switch {
+	case arc != nil:
+		if arc.ArcType == ArcMember {
+			return arcReturn
+		}
+		arcState = arc.getState(c)
+
+	case state == nil || state.meets(needTasksDone):
+		// This arc cannot exist.
+		v.reportFieldIndexError(c, pos, f)
+		return nil
+
+	default:
+		arc = &Vertex{Parent: state.node, Label: f, ArcType: ArcPending}
+		v.Arcs = append(v.Arcs, arc)
+		arcState = arc.getState(c) // TODO: consider using getBareState.
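
The Arcs and Structs pruning above uses Go's in-place filter idiom: copy the keepers forward over the same backing array, then truncate. A small self-contained sketch, with slices.DeleteFunc (Go 1.21+) as the equivalent one-liner; the keep predicate here stands in for "ArcType != ArcNotPresent":

package main

import (
	"fmt"
	"slices"
)

func main() {
	arcs := []int{1, -2, 3, -4}

	// In-place compaction, as done for n.node.Arcs and n.node.Structs above.
	k := 0
	for _, a := range arcs {
		if a > 0 {
			arcs[k] = a
			k++
		}
	}
	arcs = arcs[:k]
	fmt.Println(arcs) // [1 3]

	// Equivalent one-liner since Go 1.21:
	arcs = slices.DeleteFunc(arcs, func(a int) bool { return a <= 0 })
	fmt.Println(arcs) // [1 3]
}
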
+ } + + if arcState != nil && (!arcState.meets(needTasksDone) || !arcState.meets(arcTypeKnown)) { + needs |= arcTypeKnown + // If this arc is not ArcMember, which it is not at this point, + // any pending arcs could influence the field set. + for _, a := range arc.Arcs { + if a.ArcType == ArcPending { + needs |= fieldSetKnown + break + } + } + arcState.completeNodeTasks(attemptOnly) + + // Child nodes, if pending and derived from a comprehension, may + // still cause this arc to become not pending. + if arc.ArcType != ArcMember { + for _, a := range arcState.node.Arcs { + if a.ArcType == ArcPending { + a.unify(c, arcTypeKnown, runMode) + } + } + } + + switch runMode { + case ignore, attemptOnly: + // TODO: should we avoid notifying ArcPending vertices here? + if task != nil { + arcState.addNotify2(task.node.node, task.id) + } + return arcReturn + + case yield: + arcState.process(needs, yield) + // continue processing, as successful processing may still result + // in an invalid field. + + case finalize: + // TODO: should we try to use finalize? Using it results in errors and this works. It would be more principled, though. + arcState.process(needs, yield) + } + } + + switch arc.ArcType { + case ArcMember: + return arcReturn + + case ArcOptional, ArcRequired: + label := f.SelectorString(c.Runtime) + b := &Bottom{ + Code: IncompleteError, + Err: c.NewPosf(pos, + "cannot reference optional field: %s", label), + } + c.AddBottom(b) + // TODO: yield failure + return nil + + case ArcNotPresent: + v.reportFieldIndexError(c, pos, f) + return nil + + case ArcPending: + // should not happen. + panic("unreachable") + } + + v.reportFieldIndexError(c, pos, f) + return nil +} + +// accept reports whether the given feature is allowed by the pattern +// constraints. +func (v *Vertex) accept(ctx *OpContext, f Feature) bool { + // TODO: this is already handled by callers at the moment, but it may be + // better design to move this here. + // if v.LookupRaw(f) != nil { + // return true, true + // } + + v = v.DerefValue() + + pc := v.PatternConstraints + if pc == nil { + return false + } + + return matchPattern(ctx, pc.Allowed, f) +} diff --git a/vendor/cuelang.org/go/internal/core/convert/go.go b/vendor/cuelang.org/go/internal/core/convert/go.go index 4d0686364e..0d567bedd4 100644 --- a/vendor/cuelang.org/go/internal/core/convert/go.go +++ b/vendor/cuelang.org/go/internal/core/convert/go.go @@ -21,7 +21,7 @@ import ( "fmt" "math/big" "reflect" - "sort" + "slices" "strconv" "strings" @@ -275,6 +275,7 @@ func convertRec(ctx *adt.OpContext, nilIsTop bool, x interface{}) adt.Value { res, _ := internal.BaseContext.RoundToIntegralExact(&d, v) if !res.Inexact() { kind = adt.IntKind + v = &d } n := &adt.Num{Src: ctx.Source(), K: kind} n.X = *v @@ -339,14 +340,15 @@ func convertRec(ctx *adt.OpContext, nilIsTop bool, x interface{}) adt.Value { return toUint(ctx, uint64(v)) case float64: n := &adt.Num{Src: src, K: adt.FloatKind} - _, _, err := n.X.SetString(fmt.Sprint(v)) + _, err := n.X.SetFloat64(v) if err != nil { return ctx.AddErr(errors.Promote(err, "invalid float")) } return n case float32: n := &adt.Num{Src: src, K: adt.FloatKind} - _, _, err := n.X.SetString(fmt.Sprint(v)) + // apd.Decimal has a SetFloat64 method, but no SetFloat32. 
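
For context on the float32 conversion just below: strconv.FormatFloat with bitSize 32 yields the shortest decimal string that round-trips as a float32, whereas formatting the widened float64 directly would expose the float32 representation error. A runnable illustration:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	v := float32(0.1)
	// bitSize 32 asks for the shortest decimal that parses back to v exactly.
	fmt.Println(strconv.FormatFloat(float64(v), 'E', -1, 32)) // 1E-01
	// bitSize 64 prints the full expansion of the widened value.
	fmt.Println(strconv.FormatFloat(float64(v), 'E', -1, 64)) // 1.0000000149011612E-01
}
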
+ _, _, err := n.X.SetString(strconv.FormatFloat(float64(v), 'E', -1, 32)) if err != nil { return ctx.AddErr(errors.Promote(err, "invalid float")) } @@ -478,12 +480,10 @@ func convertRec(ctx *adt.OpContext, nilIsTop bool, x interface{}) adt.Value { reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - keys := value.MapKeys() - sort.Slice(keys, func(i, j int) bool { - return fmt.Sprint(keys[i]) < fmt.Sprint(keys[j]) - }) - for _, k := range keys { - val := value.MapIndex(k) + iter := value.MapRange() + for iter.Next() { + k := iter.Key() + val := iter.Value() // if isNil(val) { // continue // } @@ -512,6 +512,9 @@ func convertRec(ctx *adt.OpContext, nilIsTop bool, x interface{}) adt.Value { } v.Arcs = append(v.Arcs, arc) } + slices.SortFunc(v.Arcs, func(a, b *adt.Vertex) int { + return strings.Compare(a.Label.IdentString(ctx), b.Label.IdentString(ctx)) + }) } return v diff --git a/vendor/cuelang.org/go/internal/core/debug/compact.go b/vendor/cuelang.org/go/internal/core/debug/compact.go index 20c695e879..87c31fca78 100644 --- a/vendor/cuelang.org/go/internal/core/debug/compact.go +++ b/vendor/cuelang.org/go/internal/core/debug/compact.go @@ -21,6 +21,7 @@ package debug import ( "fmt" + "strconv" "cuelang.org/go/cue/literal" "cuelang.org/go/internal/core/adt" @@ -30,6 +31,10 @@ type compactPrinter struct { printer } +func (w *compactPrinter) string(s string) { + w.dst = append(w.dst, s...) +} + func (w *compactPrinter) node(n adt.Node) { switch x := n.(type) { case *adt.Vertex: @@ -81,6 +86,11 @@ func (w *compactPrinter) node(n adt.Node) { } w.string("]") + case *adt.Vertex: + if v, ok := w.printShared(x); !ok { + w.node(v) + } + case adt.Value: w.node(v) } @@ -112,16 +122,14 @@ func (w *compactPrinter) node(n adt.Node) { w.string("]") case *adt.Field: - s := w.labelString(x.Label) - w.string(s) + w.label(x.Label) w.string(x.ArcType.Suffix()) w.string(":") w.node(x.Value) case *adt.LetField: w.string("let ") - s := w.labelString(x.Label) - w.string(s) + w.label(x.Label) if x.IsMulti { w.string("m") } @@ -158,29 +166,29 @@ func (w *compactPrinter) node(n adt.Node) { w.string("null") case *adt.Bool: - fmt.Fprint(w, x.B) + w.dst = strconv.AppendBool(w.dst, x.B) case *adt.Num: - fmt.Fprint(w, &x.X) + w.string(x.X.String()) case *adt.String: - w.string(literal.String.Quote(x.Str)) + w.dst = literal.String.Append(w.dst, x.Str) case *adt.Bytes: - w.string(literal.Bytes.Quote(string(x.B))) + w.dst = literal.Bytes.Append(w.dst, string(x.B)) case *adt.Top: w.string("_") case *adt.BasicType: - fmt.Fprint(w, x.K) + w.string(x.K.String()) case *adt.BoundExpr: - fmt.Fprint(w, x.Op) + w.string(x.Op.String()) w.node(x.Expr) case *adt.BoundValue: - fmt.Fprint(w, x.Op) + w.string(x.Op.String()) w.node(x.Value) case *adt.NodeLink: @@ -246,13 +254,15 @@ func (w *compactPrinter) node(n adt.Node) { w.interpolation(x) case *adt.UnaryExpr: - fmt.Fprint(w, x.Op) + w.string(x.Op.String()) w.node(x.X) case *adt.BinaryExpr: w.string("(") w.node(x.X) - fmt.Fprint(w, " ", x.Op, " ") + w.string(" ") + w.string(x.Op.String()) + w.string(" ") w.node(x.Y) w.string(")") @@ -307,6 +317,14 @@ func (w *compactPrinter) node(n adt.Node) { w.node(c) } + case *adt.ConjunctGroup: + for i, c := range *x { + if i > 0 { + w.string(" & ") + } + w.node(c.Expr()) + } + case *adt.Disjunction: for i, c := range x.Values { if i > 0 { diff --git a/vendor/cuelang.org/go/internal/core/debug/debug.go b/vendor/cuelang.org/go/internal/core/debug/debug.go index 717c3a38cb..ce37ee5869 100644 --- 
a/vendor/cuelang.org/go/internal/core/debug/debug.go
+++ b/vendor/cuelang.org/go/internal/core/debug/debug.go
@@ -21,7 +21,6 @@ package debug
 import (
 	"fmt"
-	"io"
 	"strconv"
 	"strings"
 
@@ -41,18 +40,19 @@ type Config struct {
 	Raw bool
 }
 
-// WriteNode writes a string representation of the node to w.
-func WriteNode(w io.Writer, i adt.StringIndexer, n adt.Node, config *Config) {
+// AppendNode appends a string representation of the node to dst.
+func AppendNode(dst []byte, i adt.StringIndexer, n adt.Node, config *Config) []byte {
 	if config == nil {
 		config = &Config{}
 	}
-	p := printer{Writer: w, index: i, cfg: config}
+	p := printer{dst: dst, index: i, cfg: config}
 	if config.Compact {
 		p := compactPrinter{p}
 		p.node(n)
-	} else {
-		p.node(n)
+		return p.dst
 	}
+	p.node(n)
+	return p.dst
 }
 
 // NodeString returns a string representation of the given node.
@@ -60,13 +60,12 @@ func WriteNode(w io.Writer, i adt.StringIndexer, n adt.Node, config *Config) {
 // Commonly available implementations of StringIndexer include *adt.OpContext
 // and *runtime.Runtime.
 func NodeString(i adt.StringIndexer, n adt.Node, config *Config) string {
-	b := &strings.Builder{}
-	WriteNode(b, i, n, config)
-	return b.String()
+	var buf [128]byte
+	return string(AppendNode(buf[:0], i, n, config))
 }
 
 type printer struct {
-	io.Writer
+	dst    []byte
 	index  adt.StringIndexer
 	indent string
 	cfg    *Config
@@ -79,42 +78,79 @@ type printer struct {
 }
 
 func (w *printer) string(s string) {
-	s = strings.Replace(s, "\n", "\n"+w.indent, -1)
-	_, _ = io.WriteString(w, s)
-}
-
-func (w *printer) label(f adt.Feature) {
-	w.string(w.labelString(f))
+	if len(w.indent) > 0 {
+		s = strings.Replace(s, "\n", "\n"+w.indent, -1)
+	}
+	w.dst = append(w.dst, s...)
 }
 
-func (w *printer) ident(f adt.Feature) {
-	w.string(f.IdentString(w.index))
+func (w *printer) int(i int64) {
+	w.dst = strconv.AppendInt(w.dst, i, 10)
 }
 
-// TODO: fold into label once :: is no longer supported.
-func (w *printer) labelString(f adt.Feature) string {
+func (w *printer) label(f adt.Feature) {
 	switch {
 	case f.IsHidden():
 		ident := f.IdentString(w.index)
 		if pkgName := f.PkgID(w.index); pkgName != "_" {
 			ident = fmt.Sprintf("%s(%s)", ident, pkgName)
 		}
-		return ident
+		w.string(ident)
 
 	case f.IsLet():
 		ident := f.RawString(w.index)
 		ident = strings.Replace(ident, "\x00", "#", 1)
-		return ident
+		w.string(ident)
 
 	default:
-		return f.SelectorString(w.index)
+		w.string(f.SelectorString(w.index))
 	}
 }
 
+func (w *printer) ident(f adt.Feature) {
+	w.string(f.IdentString(w.index))
+}
+
+func (w *printer) path(v *adt.Vertex) {
+	if p := v.Parent; p != nil && p.Label != 0 {
+		w.path(v.Parent)
+		w.string(".")
+	}
+	w.label(v.Label)
+}
+
+func (w *printer) shared(v *adt.Vertex) {
+	w.string("~(")
+	w.path(v)
+	w.string(")")
+}
+
+// printShared prints a reference to a structure-shared node that is a value
+// of v, if it is a shared node. It reports the dereferenced node and whether
+// the node was printed.
+func (w *printer) printShared(v *adt.Vertex) (x *adt.Vertex, ok bool) {
+	// Handle cyclic shared nodes differently. If a shared node was part of
+	// a disjunction, it will still be wrapped in a disjunct Vertex.
+	// Similarly, a shared node should never point to a disjunct directly,
+	// but rather to the original arc that subsequently points to a
+	// disjunct.
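
The printer rewrite above swaps io.Writer for an append-style []byte destination, so hot paths can use strconv.AppendInt and friends and NodeString can start from a small stack buffer. A minimal sketch of the same style; appendPair is a hypothetical helper, not part of the package:

package main

import (
	"fmt"
	"strconv"
)

// appendPair shows the append-style contract: take a destination slice and
// return the extended slice, instead of writing through an io.Writer.
func appendPair(dst []byte, b bool, n int64) []byte {
	dst = strconv.AppendBool(dst, b)
	dst = append(dst, ' ')
	dst = strconv.AppendInt(dst, n, 10)
	return dst
}

func main() {
	var buf [128]byte // small stack buffer, as in the new NodeString
	out := appendPair(buf[:0], true, 42)
	fmt.Println(string(out)) // true 42
}
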
+ v = v.DerefDisjunct() + useReference := v.IsShared + isCyclic := v.IsCyclic + s, ok := v.BaseValue.(*adt.Vertex) + v = v.DerefValue() + isCyclic = isCyclic || v.IsCyclic + if useReference && isCyclic && ok && len(v.Arcs) > 0 { + w.shared(s) + return v, true + } + return v, false +} + func (w *printer) shortError(errs errors.Error) { for { msg, args := errs.Msg() - fmt.Fprintf(w, msg, args...) + w.dst = fmt.Appendf(w.dst, msg, args...) err := errors.Unwrap(errs) if err == nil { @@ -144,7 +180,7 @@ func (w *printer) interpolation(x *adt.Interpolation) { } case adt.BytesKind: if s, ok := x.Parts[i].(*adt.Bytes); ok { - _, _ = w.Write(s.B) + w.dst = append(w.dst, s.B...) } else { w.string("") } @@ -161,6 +197,11 @@ func (w *printer) interpolation(x *adt.Interpolation) { func (w *printer) node(n adt.Node) { switch x := n.(type) { case *adt.Vertex: + x, ok := w.printShared(x) + if ok { + return + } + var kind adt.Kind if x.BaseValue != nil { kind = x.BaseValue.Kind() @@ -175,7 +216,7 @@ func (w *printer) node(n adt.Node) { } } - fmt.Fprintf(w, "(%s){", kindStr) + w.dst = fmt.Appendf(w.dst, "(%s){", kindStr) saved := w.indent w.indent += " " @@ -188,7 +229,7 @@ func (w *printer) node(n adt.Node) { saved := w.indent w.indent += "// " w.string("\n") - fmt.Fprintf(w, "[%v]", v.Code) + w.dst = fmt.Appendf(w.dst, "[%v]", v.Code) if !v.ChildError { msg := errors.Details(v.Err, &errors.Config{ Cwd: w.cfg.Cwd, @@ -199,6 +240,15 @@ func (w *printer) node(n adt.Node) { w.string(" ") w.string(msg) } + + // TODO: we could consider removing CycleError here. It does + // seem safer, however, as sometimes structural cycles are + // detected as regular cycles. + // Alternatively, we could consider to never report arcs if + // there is any error. + if v.Code == adt.CycleError || v.Code == adt.StructuralCycleError { + goto endVertex + } } w.indent = saved @@ -249,6 +299,16 @@ func (w *printer) node(n adt.Node) { w.indent += "// " w.string("// ") for i, c := range x.Conjuncts { + if c.CloseInfo.FromDef || c.CloseInfo.FromEmbed { + w.string("[") + if c.CloseInfo.FromDef { + w.string("d") + } + if c.CloseInfo.FromEmbed { + w.string("e") + } + w.string("]") + } if i > 0 { w.string(" & ") } @@ -256,6 +316,8 @@ func (w *printer) node(n adt.Node) { } } + endVertex: + w.indent = saved w.string("\n") w.string("}") @@ -296,8 +358,7 @@ func (w *printer) node(n adt.Node) { w.string("\n]") case *adt.Field: - s := w.labelString(x.Label) - w.string(s) + w.label(x.Label) w.string(x.ArcType.Suffix()) w.string(":") w.string(" ") @@ -305,8 +366,7 @@ func (w *printer) node(n adt.Node) { case *adt.LetField: w.string("let ") - s := w.labelString(x.Label) - w.string(s) + w.label(x.Label) if x.IsMulti { w.string("multi") } @@ -343,29 +403,29 @@ func (w *printer) node(n adt.Node) { w.string("null") case *adt.Bool: - fmt.Fprint(w, x.B) + w.dst = strconv.AppendBool(w.dst, x.B) case *adt.Num: - fmt.Fprint(w, &x.X) + w.string(x.X.String()) case *adt.String: - w.string(literal.String.Quote(x.Str)) + w.dst = literal.String.Append(w.dst, x.Str) case *adt.Bytes: - w.string(literal.Bytes.Quote(string(x.B))) + w.dst = literal.Bytes.Append(w.dst, string(x.B)) case *adt.Top: w.string("_") case *adt.BasicType: - fmt.Fprint(w, x.K) + w.string(x.K.String()) case *adt.BoundExpr: - fmt.Fprint(w, x.Op) + w.string(x.Op.String()) w.node(x.Expr) case *adt.BoundValue: - fmt.Fprint(w, x.Op) + w.string(x.Op.String()) w.node(x.Value) case *adt.NodeLink: @@ -380,25 +440,25 @@ func (w *printer) node(n adt.Node) { case *adt.FieldReference: w.string(openTuple) - 
w.string(strconv.Itoa(int(x.UpCount))) + w.int(int64(x.UpCount)) w.string(";") w.label(x.Label) w.string(closeTuple) case *adt.ValueReference: w.string(openTuple) - w.string(strconv.Itoa(int(x.UpCount))) + w.int(int64(x.UpCount)) w.string(closeTuple) case *adt.LabelReference: w.string(openTuple) - w.string(strconv.Itoa(int(x.UpCount))) + w.int(int64(x.UpCount)) w.string(";-") w.string(closeTuple) case *adt.DynamicReference: w.string(openTuple) - w.string(strconv.Itoa(int(x.UpCount))) + w.int(int64(x.UpCount)) w.string(";(") w.node(x.Label) w.string(")") @@ -411,7 +471,7 @@ func (w *printer) node(n adt.Node) { case *adt.LetReference: w.string(openTuple) - w.string(strconv.Itoa(int(x.UpCount))) + w.int(int64(x.UpCount)) w.string(";let ") w.label(x.Label) w.string(closeTuple) @@ -447,13 +507,15 @@ func (w *printer) node(n adt.Node) { w.interpolation(x) case *adt.UnaryExpr: - fmt.Fprint(w, x.Op) + w.string(x.Op.String()) w.node(x.X) case *adt.BinaryExpr: w.string("(") w.node(x.X) - fmt.Fprint(w, " ", x.Op, " ") + w.string(" ") + w.string(x.Op.String()) + w.string(" ") w.node(x.Y) w.string(")") @@ -510,6 +572,16 @@ func (w *printer) node(n adt.Node) { } w.string(")") + case *adt.ConjunctGroup: + w.string("&[") + for i, c := range *x { + if i > 0 { + w.string(", ") + } + w.node(c.Expr()) + } + w.string("]") + case *adt.Disjunction: w.string("|(") for i, c := range x.Values { diff --git a/vendor/cuelang.org/go/internal/core/dep/dep.go b/vendor/cuelang.org/go/internal/core/dep/dep.go index f9de8e2121..ebdc8ca0b2 100644 --- a/vendor/cuelang.org/go/internal/core/dep/dep.go +++ b/vendor/cuelang.org/go/internal/core/dep/dep.go @@ -232,9 +232,10 @@ func (v *visitor) visit(n *adt.Vertex, top bool) (err error) { } }() - for _, x := range n.Conjuncts { + n.VisitLeafConjuncts(func(x adt.Conjunct) bool { v.markExpr(x.Env, x.Elem()) - } + return true + }) return nil } @@ -485,11 +486,12 @@ func hasLetParent(v *adt.Vertex) bool { // markConjuncts transitively marks all reference of the current node. func (c *visitor) markConjuncts(v *adt.Vertex) { - for _, x := range v.Conjuncts { + v.VisitLeafConjuncts(func(x adt.Conjunct) bool { // Use Elem instead of Expr to preserve the Comprehension to, in turn, // ensure an Environment is inserted for the Value clause. c.markExpr(x.Env, x.Elem()) - } + return true + }) } // markInternalResolvers marks dependencies for rootless nodes. As these @@ -506,9 +508,10 @@ func (c *visitor) markInternalResolvers(env *adt.Environment, r adt.Resolver, v // As lets have no path and we otherwise will not process them, we set // processing all to true. if c.marked != nil && hasLetParent(v) { - for _, x := range v.Conjuncts { + v.VisitLeafConjuncts(func(x adt.Conjunct) bool { c.marked.markExpr(x.Expr()) - } + return true + }) } c.markConjuncts(v) @@ -621,6 +624,7 @@ func (c *visitor) markComprehension(env *adt.Environment, y *adt.Comprehension) for i := y.Nest(); i > 0; i-- { env = &adt.Environment{Up: env, Vertex: empty} } + // TODO: consider using adt.EnvExpr and remove the above loop. c.markExpr(env, adt.ToExpr(y.Value)) } diff --git a/vendor/cuelang.org/go/internal/core/dep/mixed.go b/vendor/cuelang.org/go/internal/core/dep/mixed.go index 05c25912c0..51ab7dbe28 100644 --- a/vendor/cuelang.org/go/internal/core/dep/mixed.go +++ b/vendor/cuelang.org/go/internal/core/dep/mixed.go @@ -29,12 +29,13 @@ import ( // and comprehension sources. 
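
The dep changes in this hunk and the next replace direct iteration over Conjuncts with VisitLeafConjuncts, whose callback returns false to stop the walk early, as the found-search in dynamic below does. A generic sketch of that contract; visitAll is a hypothetical stand-in, not the adt API:

package main

import "fmt"

// visitAll mimics the VisitLeafConjuncts contract: the callback returns
// false to stop the walk early.
func visitAll[T any](xs []T, f func(T) bool) {
	for _, x := range xs {
		if !f(x) {
			return
		}
	}
}

func main() {
	found := false
	visitAll([]int{1, 2, 3}, func(n int) bool {
		if n == 2 {
			found = true
			return false // stop, like the early break this replaces
		}
		return true
	})
	fmt.Println(found) // true
}
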
func (v *visitor) dynamic(n *adt.Vertex, top bool) { found := false - for _, c := range n.Conjuncts { + n.VisitLeafConjuncts(func(c adt.Conjunct) bool { if v.marked[c.Expr()] { found = true - break + return false } - } + return true + }) if !found { return @@ -66,9 +67,10 @@ func (m marked) markExpr(x adt.Expr) { case nil: case *adt.Vertex: - for _, c := range x.Conjuncts { + x.VisitLeafConjuncts(func(c adt.Conjunct) bool { m.markExpr(c.Expr()) - } + return true + }) case *adt.BinaryExpr: if x.Op == adt.AndOp { diff --git a/vendor/cuelang.org/go/internal/core/eval/eval.go b/vendor/cuelang.org/go/internal/core/eval/eval.go index 8266bdf04b..dc0f9bf400 100644 --- a/vendor/cuelang.org/go/internal/core/eval/eval.go +++ b/vendor/cuelang.org/go/internal/core/eval/eval.go @@ -21,18 +21,18 @@ import ( ) func Evaluate(r adt.Runtime, v *adt.Vertex) { - format := func(n adt.Node) string { - return debug.NodeString(r, n, printConfig) - } c := adt.New(v, &adt.Config{ Runtime: r, - Format: format, + Format: nodeFormat, }) v.Finalize(c) } func New(r adt.Runtime) *Unifier { - return &Unifier{r: r, e: NewContext(r, nil)} + ctx := NewContext(r, nil) + // TODO: we could access these directly if we can use runtime.Runtime directly. + ctx.Version, ctx.Config = r.Settings() + return &Unifier{r: r, e: ctx} } type Unifier struct { @@ -47,12 +47,9 @@ func (e *Unifier) Stats() *stats.Counts { // TODO: Note: NewContext takes essentially a cue.Value. By making this // type more central, we can perhaps avoid context creation. func NewContext(r adt.Runtime, v *adt.Vertex) *adt.OpContext { - format := func(n adt.Node) string { - return debug.NodeString(r, n, printConfig) - } return adt.New(v, &adt.Config{ Runtime: r, - Format: format, + Format: nodeFormat, }) } @@ -61,3 +58,7 @@ func (e *Unifier) NewContext(v *adt.Vertex) *adt.OpContext { } var printConfig = &debug.Config{Compact: true} + +func nodeFormat(r adt.Runtime, n adt.Node) string { + return debug.NodeString(r, n, printConfig) +} diff --git a/vendor/cuelang.org/go/internal/core/export/adt.go b/vendor/cuelang.org/go/internal/core/export/adt.go index 26fcfc0d2e..2d2a14c493 100644 --- a/vendor/cuelang.org/go/internal/core/export/adt.go +++ b/vendor/cuelang.org/go/internal/core/export/adt.go @@ -260,6 +260,14 @@ func (e *exporter) adt(env *adt.Environment, expr adt.Elem) ast.Expr { } return ast.NewBinExpr(token.OR, a...) + case *adt.ConjunctGroup: + a := []ast.Expr{} + for _, c := range *x { + v := e.expr(c.EnvExpr()) + a = append(a, v) + } + return ast.NewBinExpr(token.AND, a...) + case *adt.Comprehension: if !x.DidResolve() { return dummyTop @@ -285,6 +293,7 @@ func (e *exporter) adt(env *adt.Environment, expr adt.Elem) ast.Expr { env = &adt.Environment{Up: env, Vertex: empty} } + // TODO: consider using adt.EnvExpr. return e.adt(env, adt.ToExpr(x.Value)) default: @@ -696,6 +705,7 @@ func (e *exporter) comprehension(env *adt.Environment, comp *adt.Comprehension) env = &adt.Environment{Up: env, Vertex: empty} } + // TODO: consider using adt.EnvExpr. v := e.expr(env, adt.ToExpr(comp.Value)) if _, ok := v.(*ast.StructLit); !ok { v = ast.NewStruct(ast.Embed(v)) diff --git a/vendor/cuelang.org/go/internal/core/export/export.go b/vendor/cuelang.org/go/internal/core/export/export.go index c7f44cd7d6..5b294331fe 100644 --- a/vendor/cuelang.org/go/internal/core/export/export.go +++ b/vendor/cuelang.org/go/internal/core/export/export.go @@ -60,6 +60,11 @@ type Profile struct { // SelfContained exports a schema such that it does not rely on any imports. 
SelfContained bool + // Fragment disables printing a value as self contained. To successfully + // parse a fragment, the compiler needs to be given a scope with the value + // from which the fragment was extracted. + Fragment bool + // AddPackage causes a package clause to be added. AddPackage bool @@ -135,7 +140,7 @@ func (p *Profile) Def(r adt.Runtime, pkgID string, v *adt.Vertex) (f *ast.File, // TODO: embed an empty definition instead once we verify that this // preserves semantics. - if v.Kind() == adt.StructKind { + if v.Kind() == adt.StructKind && !p.Fragment { expr = ast.NewStruct( ast.Embed(ast.NewIdent("_#def")), ast.NewIdent("_#def"), expr, @@ -183,13 +188,13 @@ func (e *exporter) toFile(v *adt.Vertex, x ast.Expr) *ast.File { if e.cfg.AddPackage { pkgName := "" pkg := &ast.Package{} - for _, c := range v.Conjuncts { + v.VisitLeafConjuncts(func(c adt.Conjunct) bool { f, _ := c.Source().(*ast.File) if f == nil { - continue + return true } - if _, name, _ := internal.PackageInfo(f); name != "" { + if name := f.PackageName(); name != "" { pkgName = name } @@ -198,7 +203,8 @@ func (e *exporter) toFile(v *adt.Vertex, x ast.Expr) *ast.File { ast.AddComment(pkg, doc) } } - } + return true + }) if pkgName != "" { pkg.Name = ast.NewIdent(pkgName) @@ -357,12 +363,12 @@ func newExporter(p *Profile, r adt.Runtime, pkgID string, v adt.Value) *exporter // initPivot initializes the pivotter to allow aligning a configuration around // a new root, if needed. func (e *exporter) initPivot(n *adt.Vertex) { - if !e.cfg.InlineImports && - !e.cfg.SelfContained && - n.Parent == nil { + switch { + case e.cfg.SelfContained, e.cfg.InlineImports: + // Explicitly enabled. + case n.Parent == nil, e.cfg.Fragment: return } - e.initPivotter(n) } @@ -389,9 +395,10 @@ func (e *exporter) markUsedFeatures(x adt.Expr) { switch x := n.(type) { case *adt.Vertex: if !x.IsData() { - for _, c := range x.Conjuncts { + x.VisitLeafConjuncts(func(c adt.Conjunct) bool { w.Elem(c.Elem()) - } + return true + }) } case *adt.DynamicReference: @@ -571,7 +578,8 @@ func (e *exporter) resolveLet(env *adt.Environment, x *adt.LetReference) ast.Exp return e.expr(env, x.X) } - return e.expr(env, ref.Conjuncts[0].Expr()) + c, _ := ref.SingleConjunct() + return e.expr(c.EnvExpr()) case let.Expr == nil: label := e.uniqueLetIdent(x.Label, x.X) @@ -633,7 +641,7 @@ func (e *exporter) makeFeature(s string) (f adt.Feature, ok bool) { // uniqueFeature returns a name for an identifier that uniquely identifies // the given expression. If the preferred name is already taken, a new globally -// unique name of the form base_X ... base_XXXXXXXXXXXXXX is generated. +// unique name of the form base_N ... base_NNNNNNNNNNNNNN is generated. // // It prefers short extensions over large ones, while ensuring the likelihood of // fast termination is high. There are at least two digits to make it visually diff --git a/vendor/cuelang.org/go/internal/core/export/expr.go b/vendor/cuelang.org/go/internal/core/export/expr.go index 20106c21be..19bf1c3d1c 100644 --- a/vendor/cuelang.org/go/internal/core/export/expr.go +++ b/vendor/cuelang.org/go/internal/core/export/expr.go @@ -79,12 +79,13 @@ func (e *exporter) expr(env *adt.Environment, v adt.Elem) (result ast.Expr) { } // Should this be the arcs label? 
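
Several export paths in this patch, including the *adt.ConjunctGroup case earlier, fold a list of expressions into a single conjunction with ast.NewBinExpr(token.AND, ...). A small sketch using the public cue/ast and cue/format packages; the identifiers a, b, c are illustrative only:

package main

import (
	"fmt"

	"cuelang.org/go/cue/ast"
	"cuelang.org/go/cue/format"
	"cuelang.org/go/cue/token"
)

func main() {
	// Fold a group of expressions into one conjunction, as the exporter
	// does for a conjunct group.
	e := ast.NewBinExpr(token.AND,
		ast.NewIdent("a"), ast.NewIdent("b"), ast.NewIdent("c"))
	out, err := format.Node(e)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // a & b & c
}
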
a := []conjunct{} - for _, c := range x.Conjuncts { + x.VisitLeafConjuncts(func(c adt.Conjunct) bool { if c, ok := c.Elem().(*adt.Comprehension); ok && !c.DidResolve() { - continue + return true } a = append(a, conjunct{c, 0}) - } + return true + }) return e.mergeValues(adt.InvalidLabel, x, a, x.Conjuncts...) @@ -267,7 +268,8 @@ func (x *exporter) mergeValues(label adt.Feature, src *adt.Vertex, a []conjunct, internal.SetConstraint(d, field.arcType.Token()) if x.cfg.ShowDocs { - docs := extractDocs(src, a) + v := &adt.Vertex{Conjuncts: a} + docs := extractDocs(v) ast.SetComments(d, docs) } if x.cfg.ShowAttributes { @@ -416,7 +418,7 @@ func (e *conjuncts) addExpr(env *adt.Environment, src *adt.Vertex, x adt.Elem, i e.addValueConjunct(src, env, x) case *adt.Vertex: - if b, ok := v.BaseValue.(*adt.Bottom); ok { + if b := v.Bottom(); b != nil { if !b.IsIncomplete() || e.cfg.Final { e.addExpr(env, v, b, false) return @@ -425,9 +427,10 @@ func (e *conjuncts) addExpr(env *adt.Environment, src *adt.Vertex, x adt.Elem, i switch { default: - for _, c := range v.Conjuncts { + v.VisitLeafConjuncts(func(c adt.Conjunct) bool { e.addExpr(c.Env, v, c.Elem(), false) - } + return true + }) case v.IsData(): e.structs = append(e.structs, v.Structs...) diff --git a/vendor/cuelang.org/go/internal/core/export/extract.go b/vendor/cuelang.org/go/internal/core/export/extract.go index 087da045fb..7c399c7d25 100644 --- a/vendor/cuelang.org/go/internal/core/export/extract.go +++ b/vendor/cuelang.org/go/internal/core/export/extract.go @@ -29,24 +29,24 @@ import ( // // comment // foo: bar: 2 func ExtractDoc(v *adt.Vertex) (docs []*ast.CommentGroup) { - return extractDocs(v, v.Conjuncts) + return extractDocs(v) } -func extractDocs(v *adt.Vertex, a []adt.Conjunct) (docs []*ast.CommentGroup) { +func extractDocs(v *adt.Vertex) (docs []*ast.CommentGroup) { fields := []*ast.Field{} // Collect docs directly related to this Vertex. - for _, x := range a { + v.VisitLeafConjuncts(func(x adt.Conjunct) bool { // TODO: Is this still being used? if v, ok := x.Elem().(*adt.Vertex); ok { - docs = append(docs, extractDocs(v, v.Conjuncts)...) - continue + docs = append(docs, extractDocs(v)...) + return true } switch f := x.Field().Source().(type) { case *ast.Field: if hasShorthandValue(f) { - continue + return true } fields = append(fields, f) for _, cg := range f.Comments() { @@ -60,21 +60,19 @@ func extractDocs(v *adt.Vertex, a []adt.Conjunct) (docs []*ast.CommentGroup) { docs = append(docs, c) } } - } - if v == nil { - return docs - } + return true + }) // Collect docs from parent scopes in collapsed fields. 
for p := v.Parent; p != nil; p = p.Parent { newFields := []*ast.Field{} - for _, x := range p.Conjuncts { + p.VisitLeafConjuncts(func(x adt.Conjunct) bool { f, ok := x.Source().(*ast.Field) if !ok || !hasShorthandValue(f) { - continue + return true } nested := nestedField(f) @@ -88,7 +86,8 @@ func extractDocs(v *adt.Vertex, a []adt.Conjunct) (docs []*ast.CommentGroup) { } } } - } + return true + }) fields = newFields } @@ -144,9 +143,10 @@ func containsDoc(a []*ast.CommentGroup, cg *ast.CommentGroup) bool { } func ExtractFieldAttrs(v *adt.Vertex) (attrs []*ast.Attribute) { - for _, x := range v.Conjuncts { + v.VisitLeafConjuncts(func(x adt.Conjunct) bool { attrs = extractFieldAttrs(attrs, x.Field()) - } + return true + }) return attrs } diff --git a/vendor/cuelang.org/go/internal/core/export/self.go b/vendor/cuelang.org/go/internal/core/export/self.go index 267b98f626..11a633ce09 100644 --- a/vendor/cuelang.org/go/internal/core/export/self.go +++ b/vendor/cuelang.org/go/internal/core/export/self.go @@ -447,7 +447,7 @@ func (p *pivotter) addExternal(d *depData) { ast.SetRelPos(let, token.NewSection) - path := p.x.ctx.PathToString(p.x.ctx, d.node().Path()) + path := p.x.ctx.PathToString(d.node().Path()) var msg string if d.dstImport == nil { msg = fmt.Sprintf("//cue:path: %s", path) diff --git a/vendor/cuelang.org/go/internal/core/export/toposort.go b/vendor/cuelang.org/go/internal/core/export/toposort.go index b7c52bb0bd..bda694a99c 100644 --- a/vendor/cuelang.org/go/internal/core/export/toposort.go +++ b/vendor/cuelang.org/go/internal/core/export/toposort.go @@ -52,9 +52,10 @@ func VertexFeatures(c *adt.OpContext, v *adt.Vertex) []adt.Feature { } func extractFeatures(in []*adt.StructInfo) (a [][]adt.Feature) { + a = make([][]adt.Feature, 0, len(in)) for _, s := range in { - sorted := []adt.Feature{} - for _, e := range s.StructLit.Decls { + sorted := make([]adt.Feature, 0, len(s.Decls)) + for _, e := range s.Decls { switch x := e.(type) { case *adt.Field: sorted = append(sorted, x.Label) diff --git a/vendor/cuelang.org/go/internal/core/export/value.go b/vendor/cuelang.org/go/internal/core/export/value.go index e54aefcbc7..8d2507c1d8 100644 --- a/vendor/cuelang.org/go/internal/core/export/value.go +++ b/vendor/cuelang.org/go/internal/core/export/value.go @@ -102,11 +102,12 @@ func (e *exporter) vertex(n *adt.Vertex) (result ast.Expr) { if result == nil { // fall back to expression mode a := []ast.Expr{} - for _, c := range n.Conjuncts { + n.VisitLeafConjuncts(func(c adt.Conjunct) bool { if x := e.expr(c.Env, c.Elem()); x != dummyTop { a = append(a, x) } - } + return true + }) result = ast.NewBinExpr(token.AND, a...) 
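
The toposort change above sizes both slice levels with make(..., 0, cap) so the subsequent appends never reallocate. A tiny sketch of the same shape; gather is a hypothetical helper:

package main

import "fmt"

// gather mirrors the extractFeatures change: the outer and inner slices are
// allocated with their final capacity up front, so appends never grow them.
func gather(in [][]string) [][]string {
	out := make([][]string, 0, len(in))
	for _, s := range in {
		sorted := make([]string, 0, len(s))
		sorted = append(sorted, s...)
		out = append(out, sorted)
	}
	return out
}

func main() {
	fmt.Println(gather([][]string{{"b", "a"}, {"c"}}))
}
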
} @@ -431,7 +432,7 @@ func (e *exporter) structComposite(v *adt.Vertex, attrs []*ast.Attribute) ast.Ex e.inDefinition++ } - arc := v.Lookup(label) + arc := v.LookupRaw(label) if arc == nil { continue } @@ -445,7 +446,7 @@ func (e *exporter) structComposite(v *adt.Vertex, attrs []*ast.Attribute) ast.Ex internal.SetConstraint(f, arc.ArcType.Token()) - f.Value = e.vertex(arc) + f.Value = e.vertex(arc.DerefValue()) if label.IsDef() { e.inDefinition-- diff --git a/vendor/cuelang.org/go/internal/core/runtime/build.go b/vendor/cuelang.org/go/internal/core/runtime/build.go index 7d35a4321f..2f21effbb9 100644 --- a/vendor/cuelang.org/go/internal/core/runtime/build.go +++ b/vendor/cuelang.org/go/internal/core/runtime/build.go @@ -23,7 +23,6 @@ import ( "cuelang.org/go/cue/errors" "cuelang.org/go/cue/stats" "cuelang.org/go/cue/token" - "cuelang.org/go/internal" "cuelang.org/go/internal/core/adt" "cuelang.org/go/internal/core/compile" ) @@ -119,7 +118,7 @@ func (r *Runtime) CompileFile(cfg *Config, file *ast.File) (*adt.Vertex, *build. if err != nil { return nil, p } - _, p.PkgName, _ = internal.PackageInfo(file) + p.PkgName = file.PackageName() v, _ := r.Build(cfg, p) return v, p } diff --git a/vendor/cuelang.org/go/internal/core/runtime/extern.go b/vendor/cuelang.org/go/internal/core/runtime/extern.go index d286d4ee38..423df562ad 100644 --- a/vendor/cuelang.org/go/internal/core/runtime/extern.go +++ b/vendor/cuelang.org/go/internal/core/runtime/extern.go @@ -40,19 +40,22 @@ func (r *Runtime) SetInterpreter(i Interpreter) { // Interpreter defines an entrypoint for creating per-package interpreters. type Interpreter interface { // NewCompiler creates a compiler for b and reports any errors. - NewCompiler(b *build.Instance) (Compiler, errors.Error) + NewCompiler(b *build.Instance, r *Runtime) (Compiler, errors.Error) // Kind returns the string to be used in the file-level @extern attribute. Kind() string } -// A Compiler composes an adt.Builtin for an external function implementation. +// A Compiler fills in an adt.Expr for fields marked with `@extern(kind)`. type Compiler interface { - // Compile creates a builtin for the given function name and attribute. - // funcName is the name of the function to compile, taken from altName in - // @extern(name=altName), or from the field name if that's not defined. - // Other than "name", the fields in a are implementation specific. - Compile(funcName string, a *internal.Attr) (*adt.Builtin, errors.Error) + // Compile creates an adt.Expr (usually a builtin) for the + // given external named resource (usually a function). name + // is the name of the resource to compile, taken from altName + // in `@extern(name=altName)`, or from the field name if that's + // not defined. Scope is the struct that contains the field. + // Other than "name", the fields in a are implementation + // specific. 
+ Compile(name string, scope adt.Value, a *internal.Attr) (adt.Expr, errors.Error) } // injectImplementations modifies v to include implementations of functions @@ -71,9 +74,10 @@ func (r *Runtime) injectImplementations(b *build.Instance, v *adt.Vertex) (errs d.errs = errors.Append(d.errs, d.addFile(f)) } - for _, c := range v.Conjuncts { - d.decorateConjunct(c.Elem()) - } + v.VisitLeafConjuncts(func(c adt.Conjunct) bool { + d.decorateConjunct(c.Elem(), v) + return true + }) return d.errs } @@ -96,7 +100,6 @@ type externDecorator struct { } type fieldInfo struct { - file *ast.File extern string funcName string attrBody string @@ -215,7 +218,7 @@ func (d *externDecorator) initCompiler(kind string, pos token.Pos) (ok bool, err return false, errors.Newf(pos, "no interpreter defined for %q", kind) } - c, err := x.NewCompiler(d.pkg) + c, err := x.NewCompiler(d.pkg, d.runtime) if err != nil { return false, err } @@ -289,14 +292,16 @@ func (d *externDecorator) markExternFieldAttr(kind string, decls []ast.Decl) (er return errs } -func (d *externDecorator) decorateConjunct(e adt.Elem) { - w := walk.Visitor{Before: d.processADTNode} +func (d *externDecorator) decorateConjunct(e adt.Elem, scope *adt.Vertex) { + w := walk.Visitor{Before: func(n adt.Node) bool { + return d.processADTNode(n, scope) + }} w.Elem(e) } // processADTNode injects a builtin conjunct into n if n is an adt.Field and // has a marked ast.Field associated with it. -func (d *externDecorator) processADTNode(n adt.Node) bool { +func (d *externDecorator) processADTNode(n adt.Node, scope *adt.Vertex) bool { f, ok := n.(*adt.Field) if !ok { return true @@ -324,7 +329,7 @@ func (d *externDecorator) processADTNode(n adt.Node) bool { name = str } - b, err := c.Compile(name, &attr) + b, err := c.Compile(name, scope, &attr) if err != nil { err = errors.Newf(info.attr.Pos(), "can't load from external module: %v", err) d.errs = errors.Append(d.errs, err) diff --git a/vendor/cuelang.org/go/internal/core/runtime/runtime.go b/vendor/cuelang.org/go/internal/core/runtime/runtime.go index 0ab92e85d5..5b54015f02 100644 --- a/vendor/cuelang.org/go/internal/core/runtime/runtime.go +++ b/vendor/cuelang.org/go/internal/core/runtime/runtime.go @@ -16,6 +16,8 @@ package runtime import ( "cuelang.org/go/cue/build" + "cuelang.org/go/internal" + "cuelang.org/go/internal/cuedebug" ) // A Runtime maintains data structures for indexing and reuse for evaluation. @@ -27,6 +29,14 @@ type Runtime struct { // interpreters implement extern functionality. The map key corresponds to // the kind in a file-level @extern(kind) attribute. interpreters map[string]Interpreter + + version internal.EvaluatorVersion + + flags cuedebug.Config +} + +func (r *Runtime) Settings() (internal.EvaluatorVersion, cuedebug.Config) { + return r.version, r.flags } func (r *Runtime) SetBuildData(b *build.Instance, x interface{}) { @@ -38,14 +48,34 @@ func (r *Runtime) BuildData(b *build.Instance) (x interface{}, ok bool) { return x, ok } -// New creates a new Runtime. The builtins registered with RegisterBuiltin -// are available for +// New is a wrapper for NewVersioned(internal.DefaultVersion). func New() *Runtime { r := &Runtime{} r.Init() return r } +// NewWithSettings creates a new Runtime using the given runtime version and +// debug flags. The builtins registered with RegisterBuiltin are available for +// evaluation. 
+func NewWithSettings(v internal.EvaluatorVersion, flags cuedebug.Config) *Runtime { + r := &Runtime{version: v, flags: flags} + r.Init() + return r +} + +// SetVersion sets the version to use for the Runtime. This should only be set +// before first use. +func (r *Runtime) SetVersion(v internal.EvaluatorVersion) { + r.version = v +} + +// SetDebugOptions sets the debug flags to use for the Runtime. This should only +// be set before first use. +func (r *Runtime) SetDebugOptions(flags *cuedebug.Config) { + r.flags = *flags +} + // IsInitialized reports whether the runtime has been initialized. func (r *Runtime) IsInitialized() bool { return r.index != nil diff --git a/vendor/cuelang.org/go/internal/core/subsume/value.go b/vendor/cuelang.org/go/internal/core/subsume/value.go index bea61af90e..cdb5fdfcd8 100644 --- a/vendor/cuelang.org/go/internal/core/subsume/value.go +++ b/vendor/cuelang.org/go/internal/core/subsume/value.go @@ -102,11 +102,16 @@ func (s *subsumer) values(a, b adt.Value) (result bool) { return x == b case *adt.BuiltinValidator: - if y := s.ctx.Validate(x, b); y != nil { - s.errs = errors.Append(s.errs, y.Err) - return false + state := s.ctx.PushState(s.ctx.Env(0), b.Source()) + b1 := s.ctx.Validate(x, b) + if b1 != nil { + s.errs = errors.Append(s.errs, b1.Err) } - return true + b2 := s.ctx.PopState(state) + if b2 != nil { + s.errs = errors.Append(s.errs, b2.Err) + } + return b1 == nil && b2 == nil case *adt.Null: return b.Kind() == adt.NullKind diff --git a/vendor/cuelang.org/go/internal/core/subsume/vertex.go b/vendor/cuelang.org/go/internal/core/subsume/vertex.go index 49d9453689..f4465fd4d7 100644 --- a/vendor/cuelang.org/go/internal/core/subsume/vertex.go +++ b/vendor/cuelang.org/go/internal/core/subsume/vertex.go @@ -17,6 +17,7 @@ package subsume import ( "fmt" + "cuelang.org/go/internal" "cuelang.org/go/internal/core/adt" "cuelang.org/go/internal/core/export" ) @@ -28,6 +29,9 @@ import ( // // TODO(perf): use merge sort where possible. func (s *subsumer) vertices(x, y *adt.Vertex) bool { + if s.ctx.Version == internal.DevVersion { + return s.verticesDev(x, y) + } if x == y { return true } @@ -39,7 +43,7 @@ func (s *subsumer) vertices(x, y *adt.Vertex) bool { y = y.Default() } - if b, _ := y.BaseValue.(*adt.Bottom); b != nil { + if b := y.Bottom(); b != nil { // If the value is incomplete, the error is not final. So either check // structural equivalence or return an error. return !b.IsIncomplete() @@ -233,6 +237,240 @@ outer: return true } +// verticesDev replaces vertices with the implementation of the new evaluator. +func (s *subsumer) verticesDev(x, y *adt.Vertex) bool { + if x == y { + return true + } + if a, b := x.ArcType, y.ArcType; a < b { + return false + } + + if s.Defaults { + y = y.Default() + } + + if b := y.Bottom(); b != nil { + // If the value is incomplete, the error is not final. So either check + // structural equivalence or return an error. + return !b.IsIncomplete() + } + + ctx := s.ctx + + final := y.IsData() || s.Final + + switch v := x.BaseValue.(type) { + case *adt.Bottom: + return false + + case *adt.ListMarker: + if !y.IsList() { + s.errf("list does not subsume %v (type %s)", y, y.Kind()) + return false + } + if !s.listVertices(x, y) { + return false + } + // TODO: allow other arcs alongside list arc. + return true + + case *adt.StructMarker: + _, ok := y.BaseValue.(*adt.StructMarker) + if !ok { + return false + } + + case adt.Value: + if !s.values(v, y.Value()) { + return false + } + + // Embedded scalars could still have arcs. 
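
NewWithSettings and Settings together thread the evaluator version and debug flags from the Runtime into the evaluation context, which is what checks like the internal.DevVersion comparison above consult. A sketch of how an in-module caller might construct such a runtime; these packages are internal, so this only compiles inside cuelang.org/go, and the surrounding test package is hypothetical:

package eval_test

import (
	"cuelang.org/go/internal"
	"cuelang.org/go/internal/core/runtime"
	"cuelang.org/go/internal/cuedebug"
)

// newDevRuntime pins the runtime to the new evaluator with eval logging on;
// Settings is what the evaluator reads back when building its context.
func newDevRuntime() *runtime.Runtime {
	return runtime.NewWithSettings(internal.DevVersion, cuedebug.Config{LogEval: 1})
}
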
+	if final {
+		return true
+	}
+
+	default:
+		panic(fmt.Sprintf("unexpected type %T", v))
+	}
+
+	xClosed := x.IsClosedStruct() && !s.IgnoreClosedness
+	// TODO: this should not close for taking defaults. Do a more principled
+	// makeover of this package before making it public, though.
+	yClosed := s.Final || s.Defaults ||
+		(y.IsClosedStruct() && !s.IgnoreClosedness)
+
+	if xClosed && !yClosed && !final {
+		return false
+	}
+
+	// From here, verticesDev differs significantly from vertices.
+
+	for _, a := range x.Arcs {
+		f := a.Label
+		if s.Final && !f.IsRegular() {
+			continue
+		}
+
+		isConstraint := false
+		switch a.ArcType {
+		case adt.ArcOptional:
+			if s.IgnoreOptional {
+				continue
+			}
+
+			if a.Kind() == adt.TopKind {
+				continue
+			}
+
+			isConstraint = true
+
+		case adt.ArcRequired:
+			// TODO: what to do with required fields. Logically they should be
+			// ignored if subsuming at the value level. OTOH, they represent an
+			// (incomplete) error at the value level.
+			// Mimic the old evaluator for now.
+			if s.IgnoreOptional {
+				continue
+			}
+			// If field a is optional and has value top, neither the
+			// omission of the field nor the field defined with any value
+			// may cause unification to fail.
+			if a.Kind() == adt.TopKind {
+				continue
+			}
+
+			isConstraint = true
+		}
+
+		b := y.Lookup(f)
+		if b == nil {
+			if !isConstraint {
+				s.errf("regular field is constraint in subsumed value: %v", f)
+				return false
+			}
+
+			// If f is undefined for y and if y is closed, the field is
+			// implicitly defined as _|_ and thus subsumed. Technically, this is
+			// even true if a is not optional, but in that case it means that y
+			// is invalid, so return false regardless.
+			if !y.Accept(ctx, f) || y.IsData() || s.Final {
+				continue
+			}
+
+			// There is no explicit field, but the values of pattern constraints
+			// may still be relevant.
+			b = &adt.Vertex{Label: f}
+			y.MatchAndInsert(ctx, b)
+			b.Finalize(ctx)
+		}
+
+		if s.values(a, b) {
+			continue
+		}
+
+		s.missing = f
+		s.gt = a
+		s.lt = y
+
+		s.errf("field %v not present in %v", f, y)
+		return false
+	}
+
+	if xClosed && !yClosed && !s.Final {
+		s.errf("closed struct does not subsume open struct")
+		return false
+	}
+
+outer:
+	for _, b := range y.Arcs {
+		f := b.Label
+
+		if s.Final && !f.IsRegular() {
+			continue
+		}
+
+		if b.IsConstraint() && (s.IgnoreOptional || s.Final) {
+			continue
+		}
+
+		for _, a := range x.Arcs {
+			g := a.Label
+			if g == f {
+				// already validated
+				continue outer
+			}
+		}
+
+		if !x.Accept(ctx, f) {
+			if s.Profile.IgnoreClosedness {
+				continue
+			}
+			s.errf("field not allowed in closed struct: %v", f)
+			return false
+		}
+
+		a := &adt.Vertex{Label: f}
+		x.MatchAndInsert(ctx, a)
+		if len(a.Conjuncts) == 0 {
+			// It is accepted and has no further constraints, so all good.
+			continue
+		}
+
+		a.Finalize(ctx)
+
+		if !s.vertices(a, b) {
+			return false
+		}
+	}
+
+	// Now compare pattern constraints.
+	apc := x.PatternConstraints
+	bpc := y.PatternConstraints
+	if bpc == nil {
+		if apc == nil {
+			return true
+		}
+		if y.IsClosedStruct() || y.IsClosedList() || final {
+			// This is a special case where we know that any allowed optional field
+			// in a must be bottom in y, which is strictly more specific.
+			return true
+		}
+		return false
+	}
+	if apc == nil {
+		return true
+	}
+	if len(apc.Pairs) > len(bpc.Pairs) {
+		// Theoretically it is still possible for a to subsume b, but it would
+		// be somewhat tricky and expensive to compute and it is probably not
+		// worth it.
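
The arc and pattern comparisons above lean on labeled continue to move to the next outer element once a match is found. A minimal runnable illustration of that control flow, with plain strings standing in for labels:

package main

import "fmt"

func main() {
	xs := []string{"a", "b"}
	ys := []string{"b", "c"}
outer:
	for _, y := range ys {
		for _, x := range xs {
			if x == y {
				fmt.Println(y, "already validated")
				continue outer // same shape as the arc-matching loop above
			}
		}
		fmt.Println(y, "needs a separate check")
	}
}
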
+ s.inexact = true + return false + } + +outerConstraint: + for _, p := range apc.Pairs { + for _, q := range bpc.Pairs { + if adt.Equal(s.ctx, p.Pattern, q.Pattern, 0) { + if !s.values(p.Constraint, q.Constraint) { + return false + } + continue outerConstraint + } + } + // We have a pattern in a that does not exist in b. Theoretically a + // could still subsume b if the values of the patterns in b combined + // subsume this value. + // TODO: consider whether it is worth computing this. + s.inexact = true + return false + } + + return true +} + func (s *subsumer) listVertices(x, y *adt.Vertex) bool { ctx := s.ctx diff --git a/vendor/cuelang.org/go/internal/core/validate/validate.go b/vendor/cuelang.org/go/internal/core/validate/validate.go index 66ac7d5330..08ee960aa4 100644 --- a/vendor/cuelang.org/go/internal/core/validate/validate.go +++ b/vendor/cuelang.org/go/internal/core/validate/validate.go @@ -70,7 +70,11 @@ func (v *validator) add(b *adt.Bottom) { func (v *validator) validate(x *adt.Vertex) { defer v.ctx.PopArc(v.ctx.PushArc(x)) - if b, _ := x.BaseValue.(*adt.Bottom); b != nil { + // Dereference values, but only those that are non-rooted. This includes let + // values. This prevents us from processing structure-shared nodes more than + // once and prevents potential cycles. + x = x.DerefNonRooted() + if b := x.Bottom(); b != nil { switch b.Code { case adt.CycleError: if v.checkConcrete() || v.DisallowCycles { diff --git a/vendor/cuelang.org/go/internal/core/walk/walk.go b/vendor/cuelang.org/go/internal/core/walk/walk.go index 20002fdbac..88b7ef2d21 100644 --- a/vendor/cuelang.org/go/internal/core/walk/walk.go +++ b/vendor/cuelang.org/go/internal/core/walk/walk.go @@ -62,6 +62,11 @@ func (w *Visitor) node(n adt.Node) { // TODO: special-case Vertex? case adt.Value: + case *adt.ConjunctGroup: + for _, x := range *x { + w.Elem(x.Elem()) + } + case *adt.ListLit: for _, x := range x.Elems { w.node(x) diff --git a/vendor/cuelang.org/go/internal/cueconfig/config.go b/vendor/cuelang.org/go/internal/cueconfig/config.go new file mode 100644 index 0000000000..93587c5b05 --- /dev/null +++ b/vendor/cuelang.org/go/internal/cueconfig/config.go @@ -0,0 +1,220 @@ +// Package cueconfig holds internal API relating to CUE configuration. +package cueconfig + +import ( + "encoding/json" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "time" + + "cuelang.org/go/internal/golangorgx/tools/robustio" + "cuelang.org/go/internal/mod/modresolve" + "github.com/rogpeppe/go-internal/lockedfile" + "golang.org/x/oauth2" +) + +// Logins holds the login information as stored in $CUE_CONFIG_DIR/logins.cue. +type Logins struct { + // TODO: perhaps add a version string to simplify making changes in the future + + // TODO: Sooner or later we will likely need more than one token per registry, + // such as when our central registry starts using scopes. + + Registries map[string]RegistryLogin `json:"registries"` +} + +// RegistryLogin holds the login information for one registry. +type RegistryLogin struct { + // These fields mirror [oauth2.Token]. + // We don't directly reference the type so we can be in control of our file format. + // Note that Expiry is a pointer, so omitempty can work as intended. 
+
+	AccessToken string `json:"access_token"`
+
+	TokenType string `json:"token_type,omitempty"`
+
+	RefreshToken string `json:"refresh_token,omitempty"`
+
+	Expiry *time.Time `json:"expiry,omitempty"`
+}
+
+func LoginConfigPath(getenv func(string) string) (string, error) {
+	configDir, err := ConfigDir(getenv)
+	if err != nil {
+		return "", err
+	}
+	return filepath.Join(configDir, "logins.json"), nil
+}
+
+func ConfigDir(getenv func(string) string) (string, error) {
+	if dir := getenv("CUE_CONFIG_DIR"); dir != "" {
+		return dir, nil
+	}
+	dir, err := os.UserConfigDir()
+	if err != nil {
+		return "", fmt.Errorf("cannot determine system config directory: %v", err)
+	}
+	return filepath.Join(dir, "cue"), nil
+}
+
+func CacheDir(getenv func(string) string) (string, error) {
+	if dir := getenv("CUE_CACHE_DIR"); dir != "" {
+		return dir, nil
+	}
+	dir, err := os.UserCacheDir()
+	if err != nil {
+		return "", fmt.Errorf("cannot determine system cache directory: %v", err)
+	}
+	return filepath.Join(dir, "cue"), nil
+}
+
+func ReadLogins(path string) (*Logins, error) {
+	// Note that we read logins.json without holding a file lock,
+	// as the file lock is only held for writes. Reading via robustio
+	// prevents ephemeral errors on Windows.
+	body, err := robustio.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+	logins := &Logins{
+		// Initialize the map so we can insert entries.
+		Registries: map[string]RegistryLogin{},
+	}
+	if err := json.Unmarshal(body, logins); err != nil {
+		return nil, err
+	}
+	// Sanity-check the read data.
+	for regName, regLogin := range logins.Registries {
+		if regLogin.AccessToken == "" {
+			return nil, fmt.Errorf("invalid %s: missing access_token for registry %s", path, regName)
+		}
+	}
+	return logins, nil
+}
+
+func WriteLogins(path string, logins *Logins) error {
+	if err := os.MkdirAll(filepath.Dir(path), 0o777); err != nil {
+		return err
+	}
+
+	unlock, err := lockedfile.MutexAt(path + ".lock").Lock()
+	if err != nil {
+		return err
+	}
+	defer unlock()
+
+	return writeLoginsUnlocked(path, logins)
+}
+
+func writeLoginsUnlocked(path string, logins *Logins) error {
+	// Indenting and a trailing newline are not necessary, but nicer to humans.
+	body, err := json.MarshalIndent(logins, "", "\t")
+	if err != nil {
+		return err
+	}
+	body = append(body, '\n')
+
+	// Write to a temp file and then try to atomically rename to avoid races
+	// with parallel reading since we don't lock at FS level in ReadLogins.
+	if err := os.WriteFile(path+".tmp", body, 0o600); err != nil {
+		return err
+	}
+	// TODO: on non-POSIX platforms os.Rename might not be atomic. Might need to
+	// find another solution. Note that renames on Windows NTFS are also atomic
+	// in practice.
+	if err := robustio.Rename(path+".tmp", path); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UpdateRegistryLogin atomically updates a single registry token in the logins.json file.
+func UpdateRegistryLogin(path string, key string, new *oauth2.Token) (*Logins, error) {
+	if err := os.MkdirAll(filepath.Dir(path), 0o777); err != nil {
+		return nil, err
+	}
+
+	unlock, err := lockedfile.MutexAt(path + ".lock").Lock()
+	if err != nil {
+		return nil, err
+	}
+	defer unlock()
+
+	logins, err := ReadLogins(path)
+	if errors.Is(err, fs.ErrNotExist) {
+		// No config file yet; create an empty one.
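
writeLoginsUnlocked above relies on the classic write-to-temp-then-rename pattern, so concurrent readers see either the old logins.json or the complete new one, never a partial write. A stripped-down sketch; atomicWrite is a hypothetical helper, and the real code retries the rename via robustio to cope with Windows:

package main

import (
	"log"
	"os"
)

// atomicWrite sketches the write-then-rename step: the temp file absorbs
// partial writes, and the rename publishes the file in one step.
func atomicWrite(path string, body []byte) error {
	tmp := path + ".tmp"
	if err := os.WriteFile(tmp, body, 0o600); err != nil {
		return err
	}
	return os.Rename(tmp, path) // atomic on POSIX filesystems
}

func main() {
	if err := atomicWrite("logins.json", []byte("{}\n")); err != nil {
		log.Fatal(err)
	}
}
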
+ logins = &Logins{Registries: make(map[string]RegistryLogin)} + } else if err != nil { + return nil, err + } + + logins.Registries[key] = LoginFromToken(new) + + err = writeLoginsUnlocked(path, logins) + if err != nil { + return nil, err + } + + return logins, nil +} + +// RegistryOAuthConfig returns the oauth2 configuration +// suitable for talking to the central registry. +func RegistryOAuthConfig(host modresolve.Host) oauth2.Config { + // For now, we use the OAuth endpoints as implemented by registry.cue.works, + // but other OCI registries may support the OAuth device flow with different ones. + // + // TODO: Query /.well-known/oauth-authorization-server to obtain + // token_endpoint and device_authorization_endpoint per the Oauth RFCs: + // * https://datatracker.ietf.org/doc/html/rfc8414#section-3 + // * https://datatracker.ietf.org/doc/html/rfc8628#section-4 + scheme := "https://" + if host.Insecure { + scheme = "http://" + } + return oauth2.Config{ + Endpoint: oauth2.Endpoint{ + DeviceAuthURL: scheme + host.Name + "/login/device/code", + TokenURL: scheme + host.Name + "/login/oauth/token", + }, + } +} + +// TODO: Encrypt the JSON file if the system has a secret store available, +// such as libsecret on Linux. Such secret stores tend to have low size limits, +// so rather than store the entire JSON blob there, store an encryption key. +// There are a number of Go packages which integrate with multiple OS keychains. +// +// The encrypted form of logins.json can be logins.json.enc, for example. +// If a user has an existing logins.json file and encryption is available, +// we should replace the file with logins.json.enc transparently. + +// TODO: When running "cue login", try to prevent overwriting concurrent changes +// when writing to the file on disk. For example, grab a lock, or check if the size +// changed between reading and writing the file. + +func TokenFromLogin(login RegistryLogin) *oauth2.Token { + tok := &oauth2.Token{ + AccessToken: login.AccessToken, + TokenType: login.TokenType, + RefreshToken: login.RefreshToken, + } + if login.Expiry != nil { + tok.Expiry = *login.Expiry + } + return tok +} + +func LoginFromToken(tok *oauth2.Token) RegistryLogin { + login := RegistryLogin{ + AccessToken: tok.AccessToken, + TokenType: tok.TokenType, + RefreshToken: tok.RefreshToken, + } + if !tok.Expiry.IsZero() { + login.Expiry = &tok.Expiry + } + return login +} diff --git a/vendor/cuelang.org/go/internal/cuedebug/debug.go b/vendor/cuelang.org/go/internal/cuedebug/debug.go new file mode 100644 index 0000000000..23751575ba --- /dev/null +++ b/vendor/cuelang.org/go/internal/cuedebug/debug.go @@ -0,0 +1,47 @@ +package cuedebug + +import ( + "sync" + + "cuelang.org/go/internal/envflag" +) + +// Flags holds the set of CUE_DEBUG flags. It is initialized by Init. +var Flags Config + +type Config struct { + HTTP bool + + // TODO: consider moving these evaluator-related options into a separate + // struct, so that it can be used in an API. We should use embedding, + // or some other mechanism, in that case to allow for the full set of + // allowed environment variables to be known. + + // Strict sets whether extra aggressive checking should be done. + // This should typically default to true for pre-releases and default to + // false otherwise. + Strict bool + + // LogEval sets the log level for the evaluator. + // There are currently only two levels: + // + // 0: no logging + // 1: logging + LogEval int + + // Sharing enables structure sharing. 
+ Sharing bool `envflag:"default:true"` +} + +// Init initializes Flags. Note: this isn't named "init" because we +// don't always want it to be called (for example we don't want it to be +// called when running "cue help"), and also because we want the failure +// mode to be one of error not panic, which would be the only option if +// it was a top level init function. +func Init() error { + return initOnce() +} + +var initOnce = sync.OnceValue(func() error { + return envflag.Init(&Flags, "CUE_DEBUG") +}) diff --git a/vendor/cuelang.org/go/internal/cueexperiment/exp.go b/vendor/cuelang.org/go/internal/cueexperiment/exp.go new file mode 100644 index 0000000000..71396a797b --- /dev/null +++ b/vendor/cuelang.org/go/internal/cueexperiment/exp.go @@ -0,0 +1,38 @@ +package cueexperiment + +import ( + "sync" + + "cuelang.org/go/internal/envflag" +) + +// Flags holds the set of CUE_EXPERIMENT flags. It is initialized by Init. +// +// When adding, deleting, or modifying entries below, +// update cmd/cue/cmd/help.go as well for `cue help environment`. +var Flags struct { + Modules bool `envflag:"default:true"` + + // YAMLV3Decoder swaps the old internal/third_party/yaml decoder with the new + // decoder implemented in internal/encoding/yaml on top of yaml.v3. + YAMLV3Decoder bool `envflag:"default:true"` + + // EvalV3 enables the new evaluator. The new evaluator addresses various + // performance concerns. + EvalV3 bool +} + +// Init initializes Flags. Note: this isn't named "init" because we +// don't always want it to be called (for example we don't want it to be +// called when running "cue help"), and also because we want the failure +// mode to be one of error not panic, which would be the only option if +// it was a top level init function. +func Init() error { + return initOnce() +} + +var initOnce = sync.OnceValue(initAlways) + +func initAlways() error { + return envflag.Init(&Flags, "CUE_EXPERIMENT") +} diff --git a/vendor/cuelang.org/go/cue/load/read.go b/vendor/cuelang.org/go/internal/cueimports/read.go similarity index 79% rename from vendor/cuelang.org/go/cue/load/read.go rename to vendor/cuelang.org/go/internal/cueimports/read.go index 7548f42b60..af0ed94567 100644 --- a/vendor/cuelang.org/go/cue/load/read.go +++ b/vendor/cuelang.org/go/internal/cueimports/read.go @@ -1,4 +1,4 @@ -// Copyright 2018 The CUE Authors +// Copyright 2023 The CUE Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -package load +// Package cueimports provides support for reading the import +// section of a CUE file without needing to read the rest of it. +package cueimports import ( "bufio" @@ -74,7 +76,7 @@ func (r *importReader) readByte() byte { func (r *importReader) peekByte(skipSpace bool) byte { if r.err != nil { if r.nerr++; r.nerr > 10000 { - panic("go/build: import reader looping") + panic("import reader looping") } return 0 } @@ -88,10 +90,10 @@ func (r *importReader) peekByte(skipSpace bool) byte { } for r.err == nil && !r.eof { if skipSpace { - // For the purposes of this reader, semicolons are never necessary to + // For the purposes of this reader, commas are never necessary to // understand the input and are treated as spaces. 
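
Both cuedebug and cueexperiment gate their envflag parsing behind sync.OnceValue, so the environment is read at most once and every Init call observes the same error. A self-contained sketch of that pattern, with a printed stand-in for envflag.Init:

package main

import (
	"fmt"
	"sync"
)

// Flags is a stand-in for the cuedebug/cueexperiment flag structs.
var Flags struct {
	LogEval int
}

// initOnce mirrors the pattern above: the function body runs once, and its
// error is cached and returned to every subsequent caller.
var initOnce = sync.OnceValue(func() error {
	fmt.Println("parsing CUE_DEBUG ...") // stand-in for envflag.Init(&Flags, "CUE_DEBUG")
	return nil
})

func Init() error { return initOnce() }

func main() {
	_ = Init()
	_ = Init() // the parse message prints only once
}
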
switch c { - case ' ', '\f', '\t', '\r', '\n', ';': + case ' ', '\f', '\t', '\r', '\n', ',': c = r.readByte() continue @@ -101,14 +103,6 @@ func (r *importReader) peekByte(skipSpace bool) byte { for c != '\n' && r.err == nil && !r.eof { c = r.readByte() } - } else if c == '*' { - var c1 byte - for (c != '*' || c1 != '/') && r.err == nil { - if r.eof { - r.syntaxError() - } - c, c1 = c1, r.readByte() - } } else { r.syntaxError() } @@ -159,29 +153,15 @@ func (r *importReader) readIdent() { // readString reads a quoted string literal from the input. // If an identifier is not present, readString records a syntax error. -func (r *importReader) readString(save *[]string) { +func (r *importReader) readString() { switch r.nextByte(true) { - case '`': - start := len(r.buf) - 1 - for r.err == nil { - if r.nextByte(false) == '`' { - if save != nil { - *save = append(*save, string(r.buf[start:])) - } - break - } - if r.eof { - r.syntaxError() - } - } case '"': - start := len(r.buf) - 1 + // Note: although the syntax in the specification only allows + // a simple string literal here, the cue/parser package also + // allows #"..."# and """ literals, so there's some impedance-mismatch here. for r.err == nil { c := r.nextByte(false) if c == '"' { - if save != nil { - *save = append(*save, string(r.buf[start:])) - } break } if r.eof || c == '\n' { @@ -198,19 +178,17 @@ func (r *importReader) readString(save *[]string) { // readImport reads an import clause - optional identifier followed by quoted string - // from the input. -func (r *importReader) readImport(imports *[]string) { +func (r *importReader) readImport() { c := r.peekByte(true) - if c == '.' { - r.peek = 0 - } else if isIdent(c) { + if isIdent(c) { r.readIdent() } - r.readString(imports) + r.readString() } -// readImports is like io.ReadAll, except that it expects a CUE file as +// Read is like io.ReadAll, except that it expects a CUE file as // input and stops reading the input once the imports have completed. -func readImports(f io.Reader, reportSyntaxError bool, imports *[]string) ([]byte, errors.Error) { +func Read(f io.Reader) ([]byte, errors.Error) { r := &importReader{b: bufio.NewReader(f)} r.readKeyword("package") @@ -220,11 +198,11 @@ func readImports(f io.Reader, reportSyntaxError bool, imports *[]string) ([]byte if r.peekByte(true) == '(' { r.nextByte(false) for r.peekByte(true) != ')' && r.err == nil { - r.readImport(imports) + r.readImport() } r.nextByte(false) } else { - r.readImport(imports) + r.readImport() } } @@ -235,8 +213,8 @@ func readImports(f io.Reader, reportSyntaxError bool, imports *[]string) ([]byte } // If we stopped for a syntax error, consume the whole file so that - // we are sure we don't change the errors that go/parser returns. - if r.err == errSyntax && !reportSyntaxError { + // we are sure we don't change the errors that the cue/parser returns. + if r.err == errSyntax { r.err = nil for r.err == nil && !r.eof { r.readByte() diff --git a/vendor/cuelang.org/go/internal/cueversion/transport.go b/vendor/cuelang.org/go/internal/cueversion/transport.go new file mode 100644 index 0000000000..d11c13c834 --- /dev/null +++ b/vendor/cuelang.org/go/internal/cueversion/transport.go @@ -0,0 +1,34 @@ +package cueversion + +import ( + "maps" + "net/http" +) + +// NewTransport returns an [http.RoundTripper] implementation +// that wraps next and adds a "User-Agent" header to every +// HTTP request containing the result of UserAgent(clientType). +// If next is nil, [http.DefaultTransport] will be used. 
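+//
+// A minimal usage sketch, assuming an illustrative clientType of
+// "cmd/cue" and an arbitrary registry URL:
+//
+//	client := &http.Client{
+//		Transport: cueversion.NewTransport("cmd/cue", nil),
+//	}
+//	resp, err := client.Get("https://registry.cue.works/v2/")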
+func NewTransport(clientType string, next http.RoundTripper) http.RoundTripper { + if next == nil { + next = http.DefaultTransport + } + return &userAgentTransport{ + next: next, + userAgent: UserAgent(clientType), + } +} + +type userAgentTransport struct { + next http.RoundTripper + userAgent string +} + +func (t *userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) { + // RoundTrip isn't allowed to modify the request, but we + // can avoid doing a full clone. + req1 := *req + req1.Header = maps.Clone(req.Header) + req1.Header.Set("User-Agent", t.userAgent) + return t.next.RoundTrip(&req1) +} diff --git a/vendor/cuelang.org/go/internal/cueversion/version.go b/vendor/cuelang.org/go/internal/cueversion/version.go new file mode 100644 index 0000000000..4e9541275e --- /dev/null +++ b/vendor/cuelang.org/go/internal/cueversion/version.go @@ -0,0 +1,107 @@ +// Package cueversion provides access to the version of the +// cuelang.org/go module. +package cueversion + +import ( + "fmt" + "runtime" + "runtime/debug" + "strings" + "sync" + "time" + + "golang.org/x/mod/module" +) + +// LanguageVersion returns the CUE language version. +// This determines the latest version of CUE that +// is accepted by the module. +func LanguageVersion() string { + return "v0.9.2" +} + +// ModuleVersion returns the version of the cuelang.org/go module as best as can +// reasonably be determined. This is provided for informational +// and debugging purposes and should not be used to predicate +// version-specific behavior. +func ModuleVersion() string { + return moduleVersionOnce() +} + +const cueModule = "cuelang.org/go" + +var moduleVersionOnce = sync.OnceValue(func() string { + bi, ok := debug.ReadBuildInfo() + if !ok { + // This might happen if the binary was not built with module support + // or with an alternative toolchain. + return "(no-build-info)" + } + cueMod := findCUEModule(bi) + if cueMod == nil { + // Could happen if someone has forked CUE under a different + // module name; it also happens when running the cue tests. + return "(no-cue-module)" + } + version := cueMod.Version + if version != "(devel)" { + return version + } + // A specific version was not provided by the buildInfo + // so attempt to make our own. + var vcsTime time.Time + var vcsRevision string + for _, s := range bi.Settings { + switch s.Key { + case "vcs.time": + // If the format is invalid, we'll print a zero timestamp. + vcsTime, _ = time.Parse(time.RFC3339Nano, s.Value) + case "vcs.revision": + vcsRevision = s.Value + // module.PseudoVersion recommends the revision to be a 12-byte + // commit hash prefix, which is what cmd/go uses as well. + if len(vcsRevision) > 12 { + vcsRevision = vcsRevision[:12] + } + } + } + if vcsRevision != "" { + version = module.PseudoVersion("", "", vcsTime, vcsRevision) + } + return version +}) + +func findCUEModule(bi *debug.BuildInfo) *debug.Module { + if bi.Main.Path == cueModule { + return &bi.Main + } + for _, m := range bi.Deps { + if m.Replace != nil && m.Replace.Path == cueModule { + return m.Replace + } + if m.Path == cueModule { + return m + } + } + return nil +} + +// UserAgent returns a string suitable for adding as the User-Agent +// header in an HTTP agent. The clientType argument specifies +// how CUE is being used: if this is empty it defaults to "cuelang.org/go". 
+// +// Example: +// +// Cue/v0.8.0 (cuelang.org/go; vxXXX) Go/go1.22.0 (linux/amd64) +func UserAgent(clientType string) string { + if clientType == "" { + clientType = "cuelang.org/go" + } + // The Go version can contain spaces, but we don't want spaces inside + // Component/Version pair, so replace them with underscores. + // As the runtime version won't contain underscores itself, this + // is reversible. + goVersion := strings.ReplaceAll(runtime.Version(), " ", "_") + + return fmt.Sprintf("Cue/%s (%s; lang %s) Go/%s (%s/%s)", ModuleVersion(), clientType, LanguageVersion(), goVersion, runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/cuelang.org/go/internal/encoding/detect.go b/vendor/cuelang.org/go/internal/encoding/detect.go index b125a3fca9..b39653702f 100644 --- a/vendor/cuelang.org/go/internal/encoding/detect.go +++ b/vendor/cuelang.org/go/internal/encoding/detect.go @@ -35,21 +35,21 @@ func Detect(v cue.Value) (i build.Interpretation) { } func isOpenAPI(v cue.Value) bool { - s, _ := v.Lookup("openapi").String() + s, _ := v.LookupPath(cue.MakePath(cue.Str("openapi"))).String() if !strings.HasPrefix(s, "3.") { return false } - if _, err := v.Lookup("info", "title").String(); err != nil { + if _, err := v.LookupPath(cue.MakePath(cue.Str("info"), cue.Str("title"))).String(); err != nil { return false } - if _, err := v.Lookup("info", "version").String(); err != nil { + if _, err := v.LookupPath(cue.MakePath(cue.Str("info"), cue.Str("version"))).String(); err != nil { return false } return true } func isJSONSchema(v cue.Value) bool { - s, err := v.Lookup("$schema").String() + s, err := v.LookupPath(cue.MakePath(cue.Str("$schema"))).String() if err != nil { return false } diff --git a/vendor/cuelang.org/go/internal/encoding/encoder.go b/vendor/cuelang.org/go/internal/encoding/encoder.go index 9d151e34f7..00aa7ef953 100644 --- a/vendor/cuelang.org/go/internal/encoding/encoder.go +++ b/vendor/cuelang.org/go/internal/encoding/encoder.go @@ -40,6 +40,7 @@ import ( // An Encoder converts CUE to various file formats, including CUE itself. // An Encoder allows type Encoder struct { + ctx *cue.Context cfg *Config close func() error interpret func(cue.Value) (*ast.File, error) @@ -47,7 +48,6 @@ type Encoder struct { encValue func(cue.Value) error autoSimplify bool concrete bool - instance *cue.Instance } // IsConcrete reports whether the output is required to be concrete. @@ -67,12 +67,13 @@ func (e Encoder) Close() error { } // NewEncoder writes content to the file with the given specification. 
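The change below threads a *cue.Context through the encoder in place of the removed *cue.Instance field. A minimal sketch of the new call shape, assuming the internal encoding package were importable (it is internal to cuelang.org/go, so this is illustrative only):

	package main

	import (
		"log"

		"cuelang.org/go/cue/build"
		"cuelang.org/go/cue/cuecontext"
		"cuelang.org/go/internal/encoding"
	)

	func main() {
		ctx := cuecontext.New()
		out := &build.File{Filename: "out.yaml", Encoding: build.YAML}
		enc, err := encoding.NewEncoder(ctx, out, &encoding.Config{})
		if err != nil {
			log.Fatal(err)
		}
		defer enc.Close()
		// Encode takes a cue.Value directly; no *cue.Instance is involved.
		if err := enc.Encode(ctx.CompileString(`msg: "hello"`)); err != nil {
			log.Fatal(err)
		}
	}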
-func NewEncoder(f *build.File, cfg *Config) (*Encoder, error) { +func NewEncoder(ctx *cue.Context, f *build.File, cfg *Config) (*Encoder, error) { w, close, err := writer(f, cfg) if err != nil { return nil, err } e := &Encoder{ + ctx: ctx, cfg: cfg, close: close, } @@ -83,15 +84,11 @@ func NewEncoder(f *build.File, cfg *Config) (*Encoder, error) { // TODO: get encoding options cfg := &openapi.Config{} e.interpret = func(v cue.Value) (*ast.File, error) { - i := e.instance - if i == nil { - i = internal.MakeInstance(v).(*cue.Instance) - } - return openapi.Generate(i, cfg) + return openapi.Generate(v, cfg) } case build.ProtobufJSON: e.interpret = func(v cue.Value) (*ast.File, error) { - f := valueToFile(v) + f := internal.ToFile(v.Syntax()) return f, jsonpb.NewEncoder(v).RewriteFile(f) } @@ -124,7 +121,6 @@ func NewEncoder(f *build.File, cfg *Config) (*Encoder, error) { cue.Optional(fi.Optional), cue.Concrete(!fi.Incomplete), cue.Definitions(fi.Definitions), - cue.ResolveReferences(!fi.References), cue.DisallowCycles(!fi.Cycles), cue.InlineImports(cfg.InlineImports), ) @@ -149,7 +145,15 @@ func NewEncoder(f *build.File, cfg *Config) (*Encoder, error) { // Casting an ast.Expr to an ast.File ensures that it always ends // with a newline. - b, err := format.Node(internal.ToFile(n), opts...) + f := internal.ToFile(n) + if e.cfg.PkgName != "" && f.PackageName() == "" { + f.Decls = append([]ast.Decl{ + &ast.Package{ + Name: ast.NewIdent(e.cfg.PkgName), + }, + }, f.Decls...) + } + b, err := format.Node(f, opts...) if err != nil { return err } @@ -243,15 +247,6 @@ func (e *Encoder) EncodeFile(f *ast.File) error { return e.encodeFile(f, e.interpret) } -// EncodeInstance is as Encode, but stores instance information. This should -// all be retrievable from the value itself. 
-func (e *Encoder) EncodeInstance(v *cue.Instance) error { - e.instance = v - err := e.Encode(v.Value()) - e.instance = nil - return err -} - func (e *Encoder) Encode(v cue.Value) error { e.autoSimplify = true if err := v.Validate(cue.Concrete(e.concrete)); err != nil { @@ -267,7 +262,7 @@ func (e *Encoder) Encode(v cue.Value) error { if e.encValue != nil { return e.encValue(v) } - return e.encFile(valueToFile(v)) + return e.encFile(internal.ToFile(v.Syntax())) } func (e *Encoder) encodeFile(f *ast.File, interpret func(cue.Value) (*ast.File, error)) error { @@ -275,15 +270,13 @@ func (e *Encoder) encodeFile(f *ast.File, interpret func(cue.Value) (*ast.File, return e.encFile(f) } e.autoSimplify = true - var r cue.Runtime - inst, err := r.CompileFile(f) - if err != nil { + v := e.ctx.BuildFile(f) + if err := v.Err(); err != nil { return err } if interpret != nil { - return e.Encode(inst.Value()) + return e.Encode(v) } - v := inst.Value() if err := v.Validate(cue.Concrete(e.concrete)); err != nil { return err } diff --git a/vendor/cuelang.org/go/internal/encoding/encoding.go b/vendor/cuelang.org/go/internal/encoding/encoding.go index 53020fac0c..2fb7e817e1 100644 --- a/vendor/cuelang.org/go/internal/encoding/encoding.go +++ b/vendor/cuelang.org/go/internal/encoding/encoding.go @@ -18,11 +18,9 @@ package encoding import ( - "bytes" "fmt" "io" "net/url" - "os" "strings" "cuelang.org/go/cue" @@ -40,13 +38,15 @@ import ( "cuelang.org/go/encoding/protobuf/jsonpb" "cuelang.org/go/encoding/protobuf/textproto" "cuelang.org/go/internal" + "cuelang.org/go/internal/encoding/yaml" "cuelang.org/go/internal/filetypes" - "cuelang.org/go/internal/third_party/yaml" + "cuelang.org/go/internal/source" "golang.org/x/text/encoding/unicode" "golang.org/x/text/transform" ) type Decoder struct { + ctx *cue.Context cfg *Config closer io.Closer next func() (ast.Expr, error) @@ -61,7 +61,7 @@ type Decoder struct { err error } -type interpretFunc func(*cue.Instance) (file *ast.File, id string, err error) +type interpretFunc func(cue.Value) (file *ast.File, id string, err error) type rewriteFunc func(*ast.File) (file *ast.File, err error) // ID returns a canonical identifier for the decoded object or "" if no such @@ -103,30 +103,21 @@ func (i *Decoder) doInterpret() { } } if i.interpretFunc != nil { - var r cue.Runtime i.file = i.File() - inst, err := r.CompileFile(i.file) - if err != nil { + v := i.ctx.BuildFile(i.file) + if err := v.Err(); err != nil { i.err = err return } - i.file, i.id, i.err = i.interpretFunc(inst) + i.file, i.id, i.err = i.interpretFunc(v) } } -func toFile(x ast.Expr) *ast.File { - return internal.ToFile(x) -} - -func valueToFile(v cue.Value) *ast.File { - return internal.ToFile(v.Syntax()) -} - func (i *Decoder) File() *ast.File { if i.file != nil { return i.file } - return toFile(i.expr) + return internal.ToFile(i.expr) } func (i *Decoder) Err() error { @@ -137,7 +128,9 @@ func (i *Decoder) Err() error { } func (i *Decoder) Close() { - i.closer.Close() + if i.closer != nil { + i.closer.Close() + } } type Config struct { @@ -167,11 +160,11 @@ type Config struct { // NewDecoder returns a stream of non-rooted data expressions. The encoding // type of f must be a data type, but does not have to be an encoding that // can stream. stdin is used in case the file is "-". 
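The decoder gains the same *cue.Context parameter in the change below. A minimal sketch of consuming a multi-document YAML stream, under the same importability assumption; the Done/Next iteration is an assumption based on how the decoder is driven elsewhere in the CUE tool, as those methods are not shown in this hunk:

	package main

	import (
		"log"

		"cuelang.org/go/cue/build"
		"cuelang.org/go/cue/cuecontext"
		"cuelang.org/go/internal/encoding"
	)

	func main() {
		ctx := cuecontext.New()
		in := &build.File{
			Filename: "in.yaml",
			Encoding: build.YAML,
			Source:   []byte("a: 1\n---\na: 2\n"),
		}
		dec := encoding.NewDecoder(ctx, in, &encoding.Config{})
		defer dec.Close()
		for ; !dec.Done(); dec.Next() {
			_ = dec.File() // one *ast.File per YAML document
		}
		if err := dec.Err(); err != nil {
			log.Fatal(err)
		}
	}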
-func NewDecoder(f *build.File, cfg *Config) *Decoder { +func NewDecoder(ctx *cue.Context, f *build.File, cfg *Config) *Decoder { if cfg == nil { cfg = &Config{} } - i := &Decoder{filename: f.Filename, cfg: cfg} + i := &Decoder{filename: f.Filename, ctx: ctx, cfg: cfg} i.next = func() (ast.Expr, error) { if i.err != nil { return nil, i.err @@ -181,16 +174,22 @@ func NewDecoder(f *build.File, cfg *Config) *Decoder { if file, ok := f.Source.(*ast.File); ok { i.file = file - i.closer = io.NopCloser(strings.NewReader("")) i.validate(file, f) return i } - rc, err := reader(f, cfg.Stdin) - i.closer = rc - i.err = err - if err != nil { - return i + var srcr io.Reader + if f.Source == nil && f.Filename == "-" { + // TODO: should we allow this? + srcr = cfg.Stdin + } else { + rc, err := source.Open(f.Filename, f.Source) + i.closer = rc + i.err = err + if i.err != nil { + return i + } + srcr = rc } // For now we assume that all encodings require UTF-8. This will not be the @@ -199,19 +198,19 @@ func NewDecoder(f *build.File, cfg *Config) *Decoder { // TODO: this code also allows UTF16, which is too permissive for some // encodings. Switch to unicode.UTF8Sig once available. t := unicode.BOMOverride(unicode.UTF8.NewDecoder()) - r := transform.NewReader(rc, t) + r := transform.NewReader(srcr, t) switch f.Interpretation { case "": case build.Auto: openAPI := openAPIFunc(cfg, f) jsonSchema := jsonSchemaFunc(cfg, f) - i.interpretFunc = func(inst *cue.Instance) (file *ast.File, id string, err error) { - switch i.interpretation = Detect(inst.Value()); i.interpretation { + i.interpretFunc = func(v cue.Value) (file *ast.File, id string, err error) { + switch i.interpretation = Detect(v); i.interpretation { case build.JSONSchema: - return jsonSchema(inst) + return jsonSchema(v) case build.OpenAPI: - return openAPI(inst) + return openAPI(v) } return i.file, "", i.err } @@ -244,9 +243,9 @@ func NewDecoder(f *build.File, cfg *Config) *Decoder { i.next = json.NewDecoder(nil, path, r).Extract i.Next() case build.YAML: - d, err := yaml.NewDecoder(path, r) + b, err := io.ReadAll(r) i.err = err - i.next = d.Decode + i.next = yaml.NewDecoder(path, b).Decode i.Next() case build.Text: b, err := io.ReadAll(r) @@ -278,10 +277,10 @@ func NewDecoder(f *build.File, cfg *Config) *Decoder { } func jsonSchemaFunc(cfg *Config, f *build.File) interpretFunc { - return func(i *cue.Instance) (file *ast.File, id string, err error) { + return func(v cue.Value) (file *ast.File, id string, err error) { id = f.Tags["id"] if id == "" { - id, _ = i.Lookup("$id").String() + id, _ = v.LookupPath(cue.MakePath(cue.Str("$id"))).String() } if id != "" { u, err := url.Parse(id) @@ -297,7 +296,7 @@ func jsonSchemaFunc(cfg *Config, f *build.File) interpretFunc { Strict: cfg.Strict, } - file, err = jsonschema.Extract(i, cfg) + file, err = jsonschema.Extract(v, cfg) // TODO: simplify currently erases file line info. Reintroduce after fix. // file, err = simplify(file, err) return file, id, err @@ -306,8 +305,8 @@ func jsonSchemaFunc(cfg *Config, f *build.File) interpretFunc { func openAPIFunc(c *Config, f *build.File) interpretFunc { cfg := &openapi.Config{PkgName: c.PkgName} - return func(i *cue.Instance) (file *ast.File, id string, err error) { - file, err = openapi.Extract(i, cfg) + return func(v cue.Value) (file *ast.File, id string, err error) { + file, err = openapi.Extract(v, cfg) // TODO: simplify currently erases file line info. Reintroduce after fix. 
// file, err = simplify(file, err) return file, "", err @@ -324,29 +323,6 @@ func protobufJSONFunc(cfg *Config, file *build.File) rewriteFunc { } } -func reader(f *build.File, stdin io.Reader) (io.ReadCloser, error) { - switch s := f.Source.(type) { - case nil: - // Use the file name. - case string: - return io.NopCloser(strings.NewReader(s)), nil - case []byte: - return io.NopCloser(bytes.NewReader(s)), nil - case *bytes.Buffer: - // is io.Reader, but it needs to be readable repeatedly - if s != nil { - return io.NopCloser(bytes.NewReader(s.Bytes())), nil - } - default: - return nil, fmt.Errorf("invalid source type %T", f.Source) - } - // TODO: should we allow this? - if f.Filename == "-" { - return io.NopCloser(stdin), nil - } - return os.Open(f.Filename) -} - func shouldValidate(i *filetypes.FileInfo) bool { // TODO: We ignore attributes for now. They should be enabled by default. return false || diff --git a/vendor/cuelang.org/go/internal/encoding/yaml/decode.go b/vendor/cuelang.org/go/internal/encoding/yaml/decode.go new file mode 100644 index 0000000000..327b0908c4 --- /dev/null +++ b/vendor/cuelang.org/go/internal/encoding/yaml/decode.go @@ -0,0 +1,658 @@ +package yaml + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "io" + "regexp" + "strconv" + "strings" + + "gopkg.in/yaml.v3" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" + "cuelang.org/go/internal/cueexperiment" + tpyaml "cuelang.org/go/internal/third_party/yaml" +) + +// TODO(mvdan): we should sanity check that the decoder always produces valid CUE, +// as it is possible to construct a cue/ast syntax tree with invalid literals +// or with expressions that will always error, such as `float & 123`. +// +// One option would be to do this as part of the tests; a more general approach +// may be fuzzing, which would find more bugs and work for any decoder, +// although it may be slow as we need to involve the evaluator. + +// Decoder is a temporary interface compatible with both the old and new yaml decoders. +type Decoder interface { + // Decode consumes a YAML value and returns it in CUE syntax tree node. + Decode() (ast.Expr, error) +} + +// NewDecoder is a temporary constructor compatible with both the old and new yaml decoders. +// Note that the signature matches the new yaml decoder, as the old signature can only error +// when reading a source that isn't []byte. +func NewDecoder(filename string, b []byte) Decoder { + if cueexperiment.Flags.YAMLV3Decoder { + return newDecoder(filename, b) + } + dec, err := tpyaml.NewDecoder(filename, b) + if err != nil { + panic(err) // should never happen as we give it []byte + } + return dec +} + +// decoder wraps a [yaml.Decoder] to extract CUE syntax tree nodes. +type decoder struct { + yamlDecoder yaml.Decoder + + // yamlNonEmpty is true once yamlDecoder tells us the input YAML wasn't empty. + // Useful so that we can extract "null" when the input is empty. + yamlNonEmpty bool + + // decodeErr is returned by any further calls to Decode when not nil. + decodeErr error + + tokFile *token.File + tokLines []int + + // pendingHeadComments collects the head (preceding) comments + // from the YAML nodes we are extracting. + // We can't add comments to a CUE syntax tree node until we've created it, + // but we need to extract these comments first since they have earlier positions. + pendingHeadComments []*ast.Comment + + // extractingAliases ensures we don't loop forever when expanding YAML anchors. 
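+	// For example, in `a: &x {b: *x}` the alias refers to its own
+	// anchor; without this guard, extract would recurse forever rather
+	// than report the "value contains itself" error below.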
+ extractingAliases map[*yaml.Node]bool + + // lastPos is the last YAML node position that we decoded, + // used for working out relative positions such as token.NewSection. + // This position can only increase, moving forward in the file. + lastPos token.Position + + // forceNewline ensures that the next position will be on a new line. + forceNewline bool +} + +// TODO(mvdan): this can be io.Reader really, except that token.Pos is offset-based, +// so the only way to really have true Offset+Line+Col numbers is to know +// the size of the entire YAML node upfront. +// With json we can use RawMessage to know the size of the input +// before we extract into ast.Expr, but unfortunately, yaml.Node has no size. + +// newDecoder creates a decoder for YAML values to extract CUE syntax tree nodes. +// +// The filename is used for position information in CUE syntax tree nodes +// as well as any errors encountered while decoding YAML. +func newDecoder(filename string, b []byte) *decoder { + // Note that yaml.v3 can insert a null node just past the end of the input + // in some edge cases, so we pretend that there's an extra newline + // so that we don't panic when handling such a position. + tokFile := token.NewFile(filename, 0, len(b)+1) + tokFile.SetLinesForContent(b) + return &decoder{ + tokFile: tokFile, + tokLines: append(tokFile.Lines(), len(b)), + yamlDecoder: *yaml.NewDecoder(bytes.NewReader(b)), + } +} + +// Decode consumes a YAML value and returns it in CUE syntax tree node. +// +// A nil node with an io.EOF error is returned once no more YAML values +// are available for decoding. +func (d *decoder) Decode() (ast.Expr, error) { + if err := d.decodeErr; err != nil { + return nil, err + } + var yn yaml.Node + if err := d.yamlDecoder.Decode(&yn); err != nil { + if err == io.EOF { + // Any further Decode calls must return EOF to avoid an endless loop. + d.decodeErr = io.EOF + + // If the input is empty, we produce a single null literal with EOF. + // Note that when the input contains "---", we get an empty document + // with a null scalar value inside instead. + // + // TODO(mvdan): the old decoder seemingly intended to do this, + // but returned a "null" literal with io.EOF, which consumers ignored. + if false && !d.yamlNonEmpty { + return &ast.BasicLit{ + Kind: token.NULL, + Value: "null", + }, nil + } + // If the input wasn't empty, we already decoded some CUE syntax nodes, + // so here we should just return io.EOF to stop. + return nil, io.EOF + } + // Unfortunately, yaml.v3's syntax errors are opaque strings, + // and they only include line numbers in some but not all cases. + // TODO(mvdan): improve upstream's errors so they are structured + // and always contain some position information. + e := err.Error() + if s, ok := strings.CutPrefix(e, "yaml: line "); ok { + // From "yaml: line 3: some issue" to "foo.yaml:3: some issue". + e = d.tokFile.Name() + ":" + s + } else if s, ok := strings.CutPrefix(e, "yaml:"); ok { + // From "yaml: some issue" to "foo.yaml: some issue". + e = d.tokFile.Name() + ":" + s + } else { + return nil, err + } + err = errors.New(e) + // Any further Decode calls repeat this error. + d.decodeErr = err + return nil, err + } + d.yamlNonEmpty = true + return d.extract(&yn) +} + +// Unmarshal parses a single YAML value to a CUE expression. 
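+//
+// A minimal usage sketch, with an illustrative filename:
+//
+//	expr, err := yaml.Unmarshal("config.yaml", []byte("a: 1"))
+//	if err != nil {
+//		// handle error
+//	}
+//	// expr is an ast.Expr, here a *ast.StructLit for the mapping.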
+func Unmarshal(filename string, data []byte) (ast.Expr, error) { + d := NewDecoder(filename, data) + n, err := d.Decode() + if err != nil { + if err == io.EOF { + return nil, nil // empty input + } + return nil, err + } + // TODO(mvdan): decoding the entire next value is unnecessary; + // consider either a "More" or "Done" method to tell if we are at EOF, + // or splitting the Decode method into two variants. + // This should use proper error values with positions as well. + if n2, err := d.Decode(); err == nil { + return nil, fmt.Errorf("%s: expected a single YAML document", n2.Pos()) + } else if err != io.EOF { + return nil, fmt.Errorf("expected a single YAML document: %v", err) + } + return n, nil +} + +func (d *decoder) extract(yn *yaml.Node) (ast.Expr, error) { + d.addHeadCommentsToPending(yn) + var expr ast.Expr + var err error + switch yn.Kind { + case yaml.DocumentNode: + expr, err = d.document(yn) + case yaml.SequenceNode: + expr, err = d.sequence(yn) + case yaml.MappingNode: + expr, err = d.mapping(yn) + case yaml.ScalarNode: + expr, err = d.scalar(yn) + case yaml.AliasNode: + expr, err = d.alias(yn) + default: + return nil, d.posErrorf(yn, "unknown yaml node kind: %d", yn.Kind) + } + if err != nil { + return nil, err + } + d.addCommentsToNode(expr, yn, 1) + return expr, nil +} + +// comments parses a newline-delimited list of YAML "#" comments +// and turns them into a list of cue/ast comments. +func (d *decoder) comments(src string) []*ast.Comment { + if src == "" { + return nil + } + var comments []*ast.Comment + for _, line := range strings.Split(src, "\n") { + if line == "" { + continue // yaml.v3 comments have a trailing newline at times + } + comments = append(comments, &ast.Comment{ + // Trim the leading "#". + // Note that yaml.v3 does not give us comment positions. + Text: "//" + line[1:], + }) + } + return comments +} + +// addHeadCommentsToPending parses a node's head comments and adds them to a pending list, +// to be used later by addComments once a cue/ast node is constructed. +func (d *decoder) addHeadCommentsToPending(yn *yaml.Node) { + comments := d.comments(yn.HeadComment) + // TODO(mvdan): once yaml.v3 records comment positions, + // we can better ensure that sections separated by empty lines are kept that way. + // For now, all we can do is approximate by counting lines, + // and assuming that head comments are not separated from their node. + // This will be wrong in some cases, moving empty lines, but is better than nothing. + if len(d.pendingHeadComments) == 0 && len(comments) > 0 { + c := comments[0] + if d.lastPos.IsValid() && (yn.Line-len(comments))-d.lastPos.Line >= 2 { + c.Slash = c.Slash.WithRel(token.NewSection) + } + } + d.pendingHeadComments = append(d.pendingHeadComments, comments...) +} + +// addCommentsToNode adds any pending head comments, plus a YAML node's line +// and foot comments, to a cue/ast node. +func (d *decoder) addCommentsToNode(n ast.Node, yn *yaml.Node, linePos int8) { + // cue/ast and cue/format are not able to attach a comment to a node + // when the comment immediately follows the node. + // For some nodes like fields, the best we can do is move the comments up. + // For the root-level struct, we do want to leave comments + // at the end of the document to be left at the very end. + // + // TODO(mvdan): can we do better? for example, support attaching trailing comments to a cue/ast.Node? 
+ footComments := d.comments(yn.FootComment) + if _, ok := n.(*ast.StructLit); !ok { + d.pendingHeadComments = append(d.pendingHeadComments, footComments...) + footComments = nil + } + if comments := d.pendingHeadComments; len(comments) > 0 { + ast.AddComment(n, &ast.CommentGroup{ + Doc: true, + Position: 0, + List: comments, + }) + } + if comments := d.comments(yn.LineComment); len(comments) > 0 { + ast.AddComment(n, &ast.CommentGroup{ + Line: true, + Position: linePos, + List: comments, + }) + } + if comments := footComments; len(comments) > 0 { + ast.AddComment(n, &ast.CommentGroup{ + // After 100 tokens, so that the comment goes after the entire node. + // TODO(mvdan): this is hacky, can the cue/ast API support trailing comments better? + Position: 100, + List: comments, + }) + } + d.pendingHeadComments = nil +} + +func (d *decoder) posErrorf(yn *yaml.Node, format string, args ...any) error { + // TODO(mvdan): use columns as well; for now they are left out to avoid test churn + // return fmt.Errorf(d.pos(n).String()+" "+format, args...) + return fmt.Errorf(d.tokFile.Name()+":"+strconv.Itoa(yn.Line)+": "+format, args...) +} + +// pos converts a YAML node position to a cue/ast position. +// Note that this method uses and updates the last position in lastPos, +// so it should be called on YAML nodes in increasing position order. +func (d *decoder) pos(yn *yaml.Node) token.Pos { + // Calculate the position's offset via the line and column numbers. + offset := d.tokLines[yn.Line-1] + (yn.Column - 1) + pos := d.tokFile.Pos(offset, token.NoRelPos) + + if d.forceNewline { + d.forceNewline = false + pos = pos.WithRel(token.Newline) + } else if d.lastPos.IsValid() { + switch { + case yn.Line-d.lastPos.Line >= 2: + pos = pos.WithRel(token.NewSection) + case yn.Line-d.lastPos.Line == 1: + pos = pos.WithRel(token.Newline) + case yn.Column-d.lastPos.Column > 0: + pos = pos.WithRel(token.Blank) + default: + pos = pos.WithRel(token.NoSpace) + } + // If for any reason the node's position is before the last position, + // give up and return an empty position. Akin to: yn.Pos().Before(d.lastPos) + // + // TODO(mvdan): Brought over from the old decoder; when does this happen? + // Can we get rid of those edge cases and this bit of logic? + if yn.Line < d.lastPos.Line || (yn.Line == d.lastPos.Line && yn.Column < d.lastPos.Column) { + return token.NoPos + } + } + d.lastPos = token.Position{Line: yn.Line, Column: yn.Column} + return pos +} + +func (d *decoder) document(yn *yaml.Node) (ast.Expr, error) { + if n := len(yn.Content); n != 1 { + return nil, d.posErrorf(yn, "yaml document nodes are meant to have one content node but have %d", n) + } + return d.extract(yn.Content[0]) +} + +func (d *decoder) sequence(yn *yaml.Node) (ast.Expr, error) { + list := &ast.ListLit{ + Lbrack: d.pos(yn).WithRel(token.Blank), + } + multiline := false + if len(yn.Content) > 0 { + multiline = yn.Line < yn.Content[len(yn.Content)-1].Line + } + + // If a list is empty, or ends with a struct, the closing `]` is on the same line. + closeSameLine := true + for _, c := range yn.Content { + d.forceNewline = multiline + elem, err := d.extract(c) + if err != nil { + return nil, err + } + list.Elts = append(list.Elts, elem) + // A list of structs begins with `[{`, so let it end with `}]`. 
+ _, closeSameLine = elem.(*ast.StructLit) + } + if multiline && !closeSameLine { + list.Rbrack = list.Rbrack.WithRel(token.Newline) + } + return list, nil +} + +func (d *decoder) mapping(yn *yaml.Node) (ast.Expr, error) { + strct := &ast.StructLit{} + multiline := false + if len(yn.Content) > 0 { + multiline = yn.Line < yn.Content[len(yn.Content)-1].Line + } + + if err := d.insertMap(yn, strct, multiline, false); err != nil { + return nil, err + } + // TODO(mvdan): moving these positions above insertMap breaks a few tests, why? + strct.Lbrace = d.pos(yn).WithRel(token.Blank) + if multiline { + strct.Rbrace = strct.Lbrace.WithRel(token.Newline) + } else { + strct.Rbrace = strct.Lbrace + } + return strct, nil +} + +func (d *decoder) insertMap(yn *yaml.Node, m *ast.StructLit, multiline, mergeValues bool) error { + l := len(yn.Content) +outer: + for i := 0; i < l; i += 2 { + if multiline { + d.forceNewline = true + } + yk, yv := yn.Content[i], yn.Content[i+1] + d.addHeadCommentsToPending(yk) + if isMerge(yk) { + mergeValues = true + if err := d.merge(yv, m, multiline); err != nil { + return err + } + continue + } + if yk.Kind != yaml.ScalarNode { + return d.posErrorf(yn, "invalid map key: %v", yk.ShortTag()) + } + + field := &ast.Field{} + label, err := d.label(yk) + if err != nil { + return err + } + d.addCommentsToNode(label, yk, 1) + field.Label = label + + if mergeValues { + key := labelStr(label) + for _, decl := range m.Elts { + f := decl.(*ast.Field) + name, _, err := ast.LabelName(f.Label) + if err == nil && name == key { + f.Value, err = d.extract(yv) + if err != nil { + return err + } + continue outer + } + } + } + + value, err := d.extract(yv) + if err != nil { + return err + } + field.Value = value + + m.Elts = append(m.Elts, field) + } + return nil +} + +func (d *decoder) merge(yn *yaml.Node, m *ast.StructLit, multiline bool) error { + switch yn.Kind { + case yaml.MappingNode: + return d.insertMap(yn, m, multiline, true) + case yaml.AliasNode: + return d.insertMap(yn.Alias, m, multiline, true) + case yaml.SequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(yn.Content) - 1; i >= 0; i-- { + if err := d.merge(yn.Content[i], m, multiline); err != nil { + return err + } + } + return nil + default: + return d.posErrorf(yn, "map merge requires map or sequence of maps as the value") + } +} + +func (d *decoder) label(yn *yaml.Node) (ast.Label, error) { + pos := d.pos(yn) + + expr, err := d.scalar(yn) + if err != nil { + return nil, err + } + switch expr := expr.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + if ast.IsValidIdent(yn.Value) && !internal.IsDefOrHidden(yn.Value) { + return &ast.Ident{ + NamePos: pos, + Name: yn.Value, + }, nil + } + ast.SetPos(expr, pos) + return expr, nil + } + + return &ast.BasicLit{ + ValuePos: pos, + Kind: token.STRING, + Value: literal.Label.Quote(expr.Value), + }, nil + + default: + return nil, d.posErrorf(yn, "invalid label "+yn.Value) + } +} + +const ( + // TODO(mvdan): The strings below are from yaml.v3; should we be relying on upstream somehow? + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +// rxAnyOctalYaml11 uses the implicit tag resolution regular expression for base-8 integers +// from YAML's 1.1 spec, but including the 8 and 9 digits which aren't valid for octal integers. 
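+//
+// For example, the pattern matches both "0777", a valid octal integer,
+// and "01289", which is not one as it contains the digits 8 and 9;
+// scalar() below consults it only when yaml.v3 inferred a float tag,
+// which is what happens for values like "01289", and then
+// reinterprets such values as strings.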
+var rxAnyOctalYaml11 = regexp.MustCompile(`^[-+]?0[0-9_]+$`) + +func (d *decoder) scalar(yn *yaml.Node) (ast.Expr, error) { + tag := yn.ShortTag() + // If the YAML scalar has no explicit tag, yaml.v3 infers a float tag, + // and the value looks like a YAML 1.1 octal literal, + // that means the input value was like `01289` and not a valid octal integer. + // The safest thing to do, and what most YAML decoders do, is to interpret as a string. + if yn.Style&yaml.TaggedStyle == 0 && tag == floatTag && rxAnyOctalYaml11.MatchString(yn.Value) { + tag = strTag + } + switch tag { + // TODO: use parse literal or parse expression instead. + case timestampTag: + return &ast.BasicLit{ + ValuePos: d.pos(yn), + Kind: token.STRING, + Value: literal.String.Quote(yn.Value), + }, nil + case strTag: + return &ast.BasicLit{ + ValuePos: d.pos(yn), + Kind: token.STRING, + Value: literal.String.WithOptionalTabIndent(1).Quote(yn.Value), + }, nil + + case binaryTag: + data, err := base64.StdEncoding.DecodeString(yn.Value) + if err != nil { + return nil, d.posErrorf(yn, "!!binary value contains invalid base64 data") + } + return &ast.BasicLit{ + ValuePos: d.pos(yn), + Kind: token.STRING, + Value: literal.Bytes.Quote(string(data)), + }, nil + + case boolTag: + t := false + switch yn.Value { + // TODO(mvdan): The strings below are from yaml.v3; should we be relying on upstream somehow? + case "true", "True", "TRUE": + t = true + } + lit := ast.NewBool(t) + lit.ValuePos = d.pos(yn) + return lit, nil + + case intTag: + // Convert YAML octal to CUE octal. If YAML accepted an invalid + // integer, just convert it as well to ensure CUE will fail. + value := yn.Value + if len(value) > 1 && value[0] == '0' && value[1] <= '9' { + value = "0o" + value[1:] + } + var info literal.NumInfo + // We make the assumption that any valid YAML integer literal will be a valid + // CUE integer literal as well, with the only exception of octal numbers above. + // Note that `!!int 123.456` is not allowed. + if err := literal.ParseNum(value, &info); err != nil { + return nil, d.posErrorf(yn, "cannot decode %q as %s: %v", value, tag, err) + } else if !info.IsInt() { + return nil, d.posErrorf(yn, "cannot decode %q as %s: not a literal number", value, tag) + } + return d.makeNum(yn, value, token.INT), nil + + case floatTag: + value := yn.Value + // TODO(mvdan): The strings below are from yaml.v3; should we be relying on upstream somehow? + switch value { + case ".inf", ".Inf", ".INF", "+.inf", "+.Inf", "+.INF": + value = "+Inf" + case "-.inf", "-.Inf", "-.INF": + value = "-Inf" + case ".nan", ".NaN", ".NAN": + value = "NaN" + default: + var info literal.NumInfo + // We make the assumption that any valid YAML float literal will be a valid + // CUE float literal as well, with the only exception of Inf/NaN above. + // Note that `!!float 123` is allowed. + if err := literal.ParseNum(value, &info); err != nil { + return nil, d.posErrorf(yn, "cannot decode %q as %s: %v", value, tag, err) + } + // If the decoded YAML scalar was explicitly or implicitly a float, + // and the scalar literal looks like an integer, + // unify it with "number" to record the fact that it was represented as a float. + // Don't unify with float, as `float & 123` is invalid, and there's no need + // to forbid representing the number as an integer either. 
+ if yn.Tag != "" { + if p := strings.IndexAny(value, ".eEiInN"); p == -1 { + // TODO: number(v) when we have conversions + // TODO(mvdan): don't shove the unification inside a BasicLit.Value string + // + // TODO(mvdan): would it be better to do turn `!!float 123` into `123.0` + // rather than `number & 123`? Note that `float & 123` is an error. + value = fmt.Sprintf("number & %s", value) + } + } + } + return d.makeNum(yn, value, token.FLOAT), nil + + case nullTag: + return &ast.BasicLit{ + ValuePos: d.pos(yn).WithRel(token.Blank), + Kind: token.NULL, + Value: "null", + }, nil + default: + return nil, d.posErrorf(yn, "cannot unmarshal tag %q", tag) + } +} + +func (d *decoder) makeNum(yn *yaml.Node, val string, kind token.Token) (expr ast.Expr) { + val, negative := strings.CutPrefix(val, "-") + expr = &ast.BasicLit{ + ValuePos: d.pos(yn), + Kind: kind, + Value: val, + } + if negative { + expr = &ast.UnaryExpr{ + OpPos: d.pos(yn), + Op: token.SUB, + X: expr, + } + } + return expr +} + +func (d *decoder) alias(yn *yaml.Node) (ast.Expr, error) { + if d.extractingAliases[yn] { + // TODO this could actually be allowed in some circumstances. + return nil, d.posErrorf(yn, "anchor %q value contains itself", yn.Value) + } + if d.extractingAliases == nil { + d.extractingAliases = make(map[*yaml.Node]bool) + } + d.extractingAliases[yn] = true + var node ast.Expr + node, err := d.extract(yn.Alias) + delete(d.extractingAliases, yn) + return node, err +} + +func labelStr(l ast.Label) string { + switch l := l.(type) { + case *ast.Ident: + return l.Name + case *ast.BasicLit: + s, _ := literal.Unquote(l.Value) + return s + } + return "" +} + +func isMerge(yn *yaml.Node) bool { + // TODO(mvdan): The boolean logic below is from yaml.v3; should we be relying on upstream somehow? + return yn.Kind == yaml.ScalarNode && yn.Value == "<<" && (yn.Tag == "" || yn.Tag == "!" || yn.ShortTag() == mergeTag) +} diff --git a/vendor/cuelang.org/go/internal/encoding/yaml/encode.go b/vendor/cuelang.org/go/internal/encoding/yaml/encode.go index 55871dab31..87aec6e417 100644 --- a/vendor/cuelang.org/go/internal/encoding/yaml/encode.go +++ b/vendor/cuelang.org/go/internal/encoding/yaml/encode.go @@ -164,7 +164,7 @@ func shouldQuote(str string) bool { // This regular expression conservatively matches any date, time string, // or base60 float. -var useQuote = regexp.MustCompile(`^[\-+0-9:\. \t]+([-:]|[tT])[\-+0-9:\. \t]+[zZ]?$`) +var useQuote = regexp.MustCompile(`^[\-+0-9:\. \t]+([-:]|[tT])[\-+0-9:\. \t]+[zZ]?$|^0x[a-fA-F0-9]+$`) // legacyStrings contains a map of fixed strings with special meaning for any // type in the YAML Tag registry (https://yaml.org/type/index.html) as used diff --git a/vendor/cuelang.org/go/internal/envflag/flag.go b/vendor/cuelang.org/go/internal/envflag/flag.go new file mode 100644 index 0000000000..4c640ffafd --- /dev/null +++ b/vendor/cuelang.org/go/internal/envflag/flag.go @@ -0,0 +1,95 @@ +package envflag + +import ( + "errors" + "fmt" + "os" + "reflect" + "strconv" + "strings" +) + +// Init uses Parse with the contents of the given environment variable as input. +func Init[T any](flags *T, envVar string) error { + err := Parse(flags, os.Getenv(envVar)) + if err != nil { + return fmt.Errorf("cannot parse %s: %w", envVar, err) + } + return nil +} + +// Parse initializes the fields in flags from the attached struct field tags as +// well as the contents of the given string. 
+//
+// The struct field tag may contain a default value other than the zero value,
+// such as `envflag:"default:true"` to set a boolean field to true by default.
+//
+// The string may contain a comma-separated list of name=value pairs
+// representing the boolean fields in the struct type T. If the value is omitted
+// entirely, the value is assumed to be name=true.
+//
+// Names are treated case-insensitively. Value strings are parsed as Go booleans
+// via [strconv.ParseBool], meaning that they accept "true" and "false" but also
+// the shorter "1" and "0".
+func Parse[T any](flags *T, env string) error {
+	// Collect the field indices and set the default values.
+	indexByName := make(map[string]int)
+	fv := reflect.ValueOf(flags).Elem()
+	ft := fv.Type()
+	for i := 0; i < ft.NumField(); i++ {
+		field := ft.Field(i)
+		defaultValue := false
+		if tagStr, ok := field.Tag.Lookup("envflag"); ok {
+			defaultStr, ok := strings.CutPrefix(tagStr, "default:")
+			// TODO: consider panicking for these error types.
+			if !ok {
+				return fmt.Errorf("expected tag like `envflag:\"default:true\"`: %s", tagStr)
+			}
+			v, err := strconv.ParseBool(defaultStr)
+			if err != nil {
+				return fmt.Errorf("invalid default bool value for %s: %v", field.Name, err)
+			}
+			defaultValue = v
+			fv.Field(i).SetBool(defaultValue)
+		}
+		indexByName[strings.ToLower(field.Name)] = i
+	}
+
+	if env == "" {
+		return nil
+	}
+	var errs []error
+	for _, elem := range strings.Split(env, ",") {
+		name, valueStr, ok := strings.Cut(elem, "=")
+		// "somename" is short for "somename=true" or "somename=1".
+		value := true
+		if ok {
+			v, err := strconv.ParseBool(valueStr)
+			if err != nil {
+				// Invalid format, return an error immediately.
+				return invalidError{
+					fmt.Errorf("invalid bool value for %s: %v", name, err),
+				}
+			}
+			value = v
+		}
+		index, ok := indexByName[name]
+		if !ok {
+			// Unknown option, proceed processing options as long as the format
+			// is valid.
+			errs = append(errs, fmt.Errorf("unknown %s", elem))
+			continue
+		}
+		fv.Field(index).SetBool(value)
+	}
+	return errors.Join(errs...)
+}
+
+// An InvalidError indicates a malformed input string.
+var InvalidError = errors.New("invalid value")
+
+type invalidError struct{ error }
+
+func (invalidError) Is(err error) bool {
+	return err == InvalidError
+}
diff --git a/vendor/cuelang.org/go/internal/filetypes/filetypes.go b/vendor/cuelang.org/go/internal/filetypes/filetypes.go
index 27e90b5928..2d3139b58d 100644
--- a/vendor/cuelang.org/go/internal/filetypes/filetypes.go
+++ b/vendor/cuelang.org/go/internal/filetypes/filetypes.go
@@ -12,8 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//go:generate go run gen.go
-
 package filetypes
 
 import (
@@ -68,6 +66,10 @@ type FileInfo struct {
 	Attributes bool `json:"attributes"` // include/allow attributes
 }
 
+// TODO(mvdan): the funcs below make use of typesValue concurrently,
+// even though we clearly document that cue.Values are not safe for concurrent use.
+// It seems to be OK in practice, as otherwise we would run into `go test -race` failures.
+
 // FromFile returns detailed file info for a given build file.
 // Encoding must be specified.
 // TODO: mode should probably not be necessary here.
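Stepping back to the envflag package above, its Parse semantics are compact enough to show end to end. A minimal sketch, assuming the internal package were importable and using an invented flag struct:

	package main

	import (
		"fmt"
		"log"

		"cuelang.org/go/internal/envflag"
	)

	type testFlags struct {
		Modules bool `envflag:"default:true"`
		EvalV3  bool
	}

	func main() {
		var f testFlags
		// "evalv3" alone is shorthand for "evalv3=true";
		// "modules=0" overrides the tag-declared default of true.
		if err := envflag.Parse(&f, "evalv3,modules=0"); err != nil {
			log.Fatal(err)
		}
		fmt.Println(f.Modules, f.EvalV3) // prints: false true
	}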
@@ -96,47 +98,50 @@ func FromFile(b *build.File, mode Mode) (*FileInfo, error) { }, nil } - i, errs := update(nil, cuegenValue, cuegenValue, "modes", mode.String()) - v := i.LookupDef("FileInfo") - v = v.Fill(b) + typesInit() + modeVal := typesValue.LookupPath(cue.MakePath(cue.Str("modes"), cue.Str(mode.String()))) + fileVal := modeVal.LookupPath(cue.MakePath(cue.Str("FileInfo"))) + fileVal = fileVal.FillPath(cue.Path{}, b) if b.Encoding == "" { - ext := i.Lookup("extensions", fileExt(b.Filename)) + ext := modeVal.LookupPath(cue.MakePath(cue.Str("extensions"), cue.Str(fileExt(b.Filename)))) if ext.Exists() { - v = v.Unify(ext) + fileVal = fileVal.Unify(ext) } } + var errs errors.Error - interpretation, _ := v.Lookup("interpretation").String() + interpretation, _ := fileVal.LookupPath(cue.MakePath(cue.Str("interpretation"))).String() if b.Form != "" { - v, errs = update(errs, v, i, "forms", string(b.Form)) + fileVal, errs = unifyWith(errs, fileVal, typesValue, "forms", string(b.Form)) // may leave some encoding-dependent options open in data mode. } else if interpretation != "" { // always sets schema form. - v, errs = update(errs, v, i, "interpretations", interpretation) + fileVal, errs = unifyWith(errs, fileVal, typesValue, "interpretations", interpretation) } if interpretation == "" { - s, err := v.Lookup("encoding").String() + s, err := fileVal.LookupPath(cue.MakePath(cue.Str("encoding"))).String() if err != nil { return nil, err } - v, errs = update(errs, v, i, "encodings", s) + fileVal, errs = unifyWith(errs, fileVal, modeVal, "encodings", s) } fi := &FileInfo{} - if err := v.Decode(fi); err != nil { + if err := fileVal.Decode(fi); err != nil { return nil, errors.Wrapf(err, token.NoPos, "could not parse arguments") } return fi, errs } -func update(errs errors.Error, v, i cue.Value, field, value string) (cue.Value, errors.Error) { - v = v.Unify(i.Lookup(field, value)) - if err := v.Err(); err != nil { +// unifyWith returns the equivalent of `v1 & v2[field][value]`. +func unifyWith(errs errors.Error, v1, v2 cue.Value, field, value string) (cue.Value, errors.Error) { + v1 = v1.Unify(v2.LookupPath(cue.MakePath(cue.Str(field), cue.Str(value)))) + if err := v1.Err(); err != nil { errs = errors.Append(errs, errors.Newf(token.NoPos, "unknown %s %s", field, value)) } - return v, errs + return v1, errs } // ParseArgs converts a sequence of command line arguments representing @@ -157,7 +162,8 @@ func update(errs errors.Error, v, i cue.Value, field, value string) (cue.Value, // // json: foo.data bar.data json+schema: bar.schema func ParseArgs(args []string) (files []*build.File, err error) { - var inst, v cue.Value + typesInit() + var modeVal, fileVal cue.Value qualifier := "" hasFiles := false @@ -166,7 +172,7 @@ func ParseArgs(args []string) (files []*build.File, err error) { a := strings.Split(s, ":") switch { case len(a) == 1 || len(a[0]) == 1: // filename - if !v.Exists() { + if !fileVal.Exists() { if len(a) == 1 && strings.HasSuffix(a[0], ".cue") { // Handle majority case. files = append(files, &build.File{ @@ -177,12 +183,26 @@ func ParseArgs(args []string) (files []*build.File, err error) { hasFiles = true continue } - inst, v, err = parseType("", Input) + + // The CUE command works just fine without this (how?), + // but the API tests require this for some reason. + // + // This is almost certainly wrong, and in the wrong place. + // + // TODO(aram): why do we need this here? 
+ if len(a) == 1 && strings.HasSuffix(a[0], ".wasm") { + continue + } + + modeVal, fileVal, err = parseType("", Input) if err != nil { return nil, err } } - f, err := toFile(inst, v, s) + if s == "" { + return nil, errors.Newf(token.NoPos, "empty file name") + } + f, err := toFile(modeVal, fileVal, s) if err != nil { return nil, err } @@ -204,7 +224,7 @@ func ParseArgs(args []string) (files []*build.File, err error) { case qualifier != "" && !hasFiles: return nil, errors.Newf(token.NoPos, "scoped qualifier %q without file", qualifier+":") } - inst, v, err = parseType(a[0], Input) + modeVal, fileVal, err = parseType(a[0], Input) if err != nil { return nil, err } @@ -235,73 +255,82 @@ func ParseFile(s string, mode Mode) (*build.File, error) { } if file == "" { - return nil, errors.Newf(token.NoPos, "empty file name in %q", s) + if s != "" { + return nil, errors.Newf(token.NoPos, "empty file name in %q", s) + } + return nil, errors.Newf(token.NoPos, "empty file name") } - - inst, val, err := parseType(scope, mode) + // Quickly discard files which we aren't interested in. + // These cases are very common when loading `./...` in a large repository. + typesInit() + if scope == "" { + ext := fileExt(file) + if file == "-" { + // not handled here + } else if ext == "" { + return nil, errors.Newf(token.NoPos, "no encoding specified for file %q", file) + } else if !knownExtensions[ext] { + return nil, errors.Newf(token.NoPos, "unknown file extension %s", ext) + } + } + modeVal, fileVal, err := parseType(scope, mode) if err != nil { return nil, err } - return toFile(inst, val, file) + return toFile(modeVal, fileVal, file) } -func hasEncoding(v cue.Value) (concrete, hasDefault bool) { - enc := v.Lookup("encoding") +func hasEncoding(v cue.Value) bool { + enc := v.LookupPath(cue.MakePath(cue.Str("encoding"))) d, _ := enc.Default() - return enc.IsConcrete(), d.IsConcrete() + return d.IsConcrete() } -func toFile(i, v cue.Value, filename string) (*build.File, error) { - v = v.Fill(filename, "filename") - - if concrete, hasDefault := hasEncoding(v); !concrete { +func toFile(modeVal, fileVal cue.Value, filename string) (*build.File, error) { + if !hasEncoding(fileVal) { if filename == "-" { - if !hasDefault { - v = v.Unify(i.LookupDef("Default")) - } + fileVal = fileVal.Unify(modeVal.LookupPath(cue.MakePath(cue.Str("Default")))) } else if ext := fileExt(filename); ext != "" { - if x := i.Lookup("extensions", ext); x.Exists() || !hasDefault { - v = v.Unify(x) - if err := v.Err(); err != nil { - return nil, errors.Newf(token.NoPos, - "unknown file extension %s", ext) - } + extFile := modeVal.LookupPath(cue.MakePath(cue.Str("extensions"), cue.Str(ext))) + fileVal = fileVal.Unify(extFile) + if err := fileVal.Err(); err != nil { + return nil, errors.Newf(token.NoPos, "unknown file extension %s", ext) } - } else if !hasDefault { - return nil, errors.Newf(token.NoPos, - "no encoding specified for file %q", filename) + } else { + return nil, errors.Newf(token.NoPos, "no encoding specified for file %q", filename) } } - f := &build.File{} - if err := v.Decode(&f); err != nil { + // Note that the filename is only filled in the Go value, and not the CUE value. + // This makes no difference to the logic, but saves a non-trivial amount of evaluator work. 
+ f := &build.File{Filename: filename} + if err := fileVal.Decode(&f); err != nil { return nil, errors.Wrapf(err, token.NoPos, "could not determine file type") } return f, nil } -func parseType(s string, mode Mode) (inst, val cue.Value, err error) { - i := cuegenValue - i = i.Unify(i.Lookup("modes", mode.String())) - v := i.LookupDef("File") +func parseType(scope string, mode Mode) (modeVal, fileVal cue.Value, _ error) { + modeVal = typesValue.LookupPath(cue.MakePath(cue.Str("modes"), cue.Str(mode.String()))) + fileVal = modeVal.LookupPath(cue.MakePath(cue.Str("File"))) - if s != "" { - for _, t := range strings.Split(s, "+") { - if p := strings.IndexByte(t, '='); p >= 0 { - v = v.Fill(t[p+1:], "tags", t[:p]) + if scope != "" { + for _, tag := range strings.Split(scope, "+") { + tagName, tagVal, ok := strings.Cut(tag, "=") + if ok { + fileVal = fileVal.FillPath(cue.MakePath(cue.Str("tags"), cue.Str(tagName)), tagVal) } else { - info := i.Lookup("tags", t) + info := typesValue.LookupPath(cue.MakePath(cue.Str("tags"), cue.Str(tag))) if !info.Exists() { - return inst, val, errors.Newf(token.NoPos, - "unknown filetype %s", t) + return cue.Value{}, cue.Value{}, errors.Newf(token.NoPos, "unknown filetype %s", tag) } - v = v.Unify(info) + fileVal = fileVal.Unify(info) } } } - return i, v, nil + return modeVal, fileVal, nil } // fileExt is like filepath.Ext except we don't treat file names starting with "." as having an extension diff --git a/vendor/cuelang.org/go/internal/filetypes/test.cue b/vendor/cuelang.org/go/internal/filetypes/test.cue deleted file mode 100644 index a348041ace..0000000000 --- a/vendor/cuelang.org/go/internal/filetypes/test.cue +++ /dev/null @@ -1,19 +0,0 @@ -file: { - - filename: "foo.json" - encoding: "json" - form: string | *"" - - // extensions[".json"] - - form: "schema" -} & json - -// tags maps command line tags to file properties. -json: { - encoding: "json" - form: _ - if form == "schema" { - interpretations: *"jsonschema" | _ - } -} diff --git a/vendor/cuelang.org/go/internal/filetypes/types.cue b/vendor/cuelang.org/go/internal/filetypes/types.cue index 6bc5d2b3f9..9ebd42397b 100644 --- a/vendor/cuelang.org/go/internal/filetypes/types.cue +++ b/vendor/cuelang.org/go/internal/filetypes/types.cue @@ -17,15 +17,13 @@ package build // This file describes how various cross-cutting modes influence default // settings. // -// It is used by gen.go to compile the instance into Go data, which is then +// It is used by types.go to compile a cue.Value, which is then // used by the rest of the package to determine settings. -// -// There // A File corresponds to a Go build.File. #File: { - filename: string - encoding: #Encoding + filename?: string // only filled in FromFile, but not in ParseFile + encoding!: #Encoding interpretation?: #Interpretation form?: #Form tags?: [string]: string @@ -44,35 +42,47 @@ package build // For each of these fields it is explained what a true value means // for encoding/decoding. 
- data: *true | false // include/allow regular fields - references: *true | false // don't resolve/allow references - cycles: *true | false // cycles are permitted - definitions: bool // include/allow definition fields - optional: bool // include/allow definition fields - constraints: bool // include/allow constraints - keepDefaults: bool // select/allow default values - incomplete: bool // permit incomplete values - imports: bool // don't expand/allow imports - stream: bool // permit streaming - docs: bool // show/allow docs - attributes: bool // include/allow attributes + data: *true | false // include/allow regular fields + references: *true | false // don't resolve/allow references + cycles: *true | false // cycles are permitted + definitions?: bool // include/allow definition fields + optional?: bool // include/allow definition fields + constraints?: bool // include/allow constraints + keepDefaults?: bool // select/allow default values + incomplete?: bool // permit incomplete values + imports?: bool // don't expand/allow imports + stream?: bool // permit streaming + docs?: bool // show/allow docs + attributes?: bool // include/allow attributes +} + +// knownExtensions derives all the known file extensions +// from those that are mentioned in modes, +// allowing us to quickly discard files with unknown extensions. +knownExtensions: { + for mode in modes + for ext, _ in mode.extensions { + (ext): true + } } // modes sets defaults for different operational modes. -// -// These templates are intended to be unified in at the root of this -// configuration. -modes: _ +modes: [string]: { + // TODO(mvdan): Document these once they are better understood. + // Perhaps make them required as well. + File: #File + FileInfo: #FileInfo + Default: #Default +} // input defines modes for input, such as import, eval, vet or def. // In input mode, settings flags are interpreted as what is allowed to occur // in the input. The default settings, therefore, tend to be permissive. modes: input: { - #Default: { + Default: { encoding: *"cue" | _ - ... } - #FileInfo: x, let x = { + FileInfo: { docs: *true | false attributes: *true | false } @@ -85,11 +95,10 @@ modes: input: { } modes: export: { - #Default: { + Default: { encoding: *"json" | _ - ... } - #FileInfo: x, let x = { + FileInfo: { docs: true | *false attributes: true | *false } @@ -98,8 +107,12 @@ modes: export: { } } -modes: ouptut: { - #FileInfo: x, let x = { +// TODO(mvdan): this "output" mode appears to be unused at the moment. +modes: output: { + Default: { + encoding: *"cue" | _ + } + FileInfo: { docs: true | *false attributes: true | *false } @@ -110,11 +123,10 @@ modes: ouptut: { // eval is a legacy mode modes: eval: { - #Default: { + Default: { encoding: *"cue" | _ - ... } - #FileInfo: x, let x = { + FileInfo: { docs: true | *false attributes: true | *false } @@ -124,11 +136,10 @@ modes: eval: { } modes: def: { - #Default: { + Default: { encoding: *"cue" | _ - ... } - #FileInfo: x, let x = { + FileInfo: { docs: *true | false attributes: *true | false } @@ -137,26 +148,6 @@ modes: def: { } } -// Extension maps file extensions to default file properties. 
-extensions: { - "": _ - ".cue": tags.cue - ".json": tags.json - ".jsonl": tags.jsonl - ".ldjson": tags.jsonl - ".ndjson": tags.jsonl - ".yaml": tags.yaml - ".yml": tags.yaml - ".txt": tags.text - ".go": tags.go - ".proto": tags.proto - ".textproto": tags.textproto - ".textpb": tags.textproto // perhaps also pbtxt - - // TODO: jsonseq, - // ".pb": tags.binpb // binarypb -} - // A Encoding indicates a file format for representing a program. #Encoding: !="" // | error("no encoding specified") @@ -166,75 +157,96 @@ extensions: { #Interpretation: string #Form: string -file: #FileInfo & { +modes: [string]: { + // extensions maps a file extension to its associated default file properties. + extensions: { + // "": _ + ".cue": tags.cue + ".json": tags.json + ".jsonl": tags.jsonl + ".ldjson": tags.jsonl + ".ndjson": tags.jsonl + ".yaml": tags.yaml + ".yml": tags.yaml + ".txt": tags.text + ".go": tags.go + ".proto": tags.proto + ".textproto": tags.textproto + ".textpb": tags.textproto // perhaps also pbtxt + + // TODO: jsonseq, + // ".pb": tags.binpb // binarypb + } - filename: "foo.json" - form: "schema" -} + // encodings: "": error("no encoding specified") -// tags maps command line tags to file properties. -tags: { - schema: form: "schema" - graph: form: "graph" - dag: form: "dag" - data: form: "data" + encodings: cue: { + stream: false + } - cue: encoding: "cue" + encodings: json: { + forms.data + stream: *false | true + docs: false + attributes: false + } - json: encoding: "json" - jsonl: encoding: "jsonl" - yaml: encoding: "yaml" - proto: encoding: "proto" - textproto: encoding: "textproto" - // "binpb": encodings.binproto + encodings: yaml: { + forms.graph + stream: false | *true + } - // pb is used either to indicate binary encoding, or to indicate - pb: *{ - encoding: "binarypb" - interpretation: "" - } | { - encoding: !="binarypb" - interpretation: "pb" + encodings: jsonl: { + forms.data + stream: true } - text: { - encoding: "text" - form: "data" + encodings: text: { + forms.data + stream: false } - binary: { - encoding: "binary" - form: "data" + + encodings: binary: { + forms.data + stream: false } - go: { - encoding: "code" - interpretation: "" - tags: lang: "go" + + encodings: toml: { + forms.data + stream: false } - code: { - encoding: "code" - interpretation: "" - tags: lang: string + + encodings: proto: { + forms.schema + encoding: "proto" } - auto: { - interpretation: "auto" - encoding: *"json" | _ + encodings: textproto: { + forms.data + encoding: "textproto" + stream: false } - jsonschema: { - interpretation: "jsonschema" - encoding: *"json" | _ + + encodings: binarypb: { + forms.data + encoding: "binarypb" + stream: false } - openapi: { - interpretation: "openapi" - encoding: *"json" | _ + + // encodings: binproto: { + // forms.DataEncoding + // encoding: "binproto" + // } + + encodings: code: { + forms.schema + stream: false } } // forms defines schema for all forms. It does not include the form ID. 
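The encodings entries above use plain CUE embedding: writing forms.data inside encodings: json unifies that form's fields into the encoding's entry, and the sibling fields then refine the result. A standalone sketch of the same mechanism, with toy field names rather than the real schema:

package main

import (
	"fmt"

	"cuelang.org/go/cue"
	"cuelang.org/go/cue/cuecontext"
)

func main() {
	ctx := cuecontext.New()
	v := ctx.CompileString(`
		forms: data: {definitions: false, stream: bool}
		encodings: json: {
			forms.data // embedding: unify the form's fields into this entry
			stream: *false | true
		}
	`)
	// The embedded "stream: bool" unifies with *false | true, keeping the default.
	stream := v.LookupPath(cue.ParsePath("encodings.json.stream"))
	d, _ := stream.Default()
	fmt.Println(d) // false
}
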
forms: [Name=string]: #FileInfo -forms: "": _ - forms: schema: { form: *"schema" | "final" | "graph" stream: true | *false @@ -286,75 +298,8 @@ forms: data: { optional: false } -// encodings: "": error("no encoding specified") - -encodings: cue: { - stream: false -} - -encodings: json: { - forms.data - stream: *false | true - docs: false - attributes: false -} - -encodings: yaml: { - forms.graph - stream: false | *true -} - -encodings: jsonl: { - forms.data - stream: true -} - -encodings: text: { - forms.data - stream: false -} - -encodings: binary: { - forms.data - stream: false -} - -encodings: toml: { - forms.data - stream: false -} - -encodings: proto: { - forms.schema - encoding: "proto" -} - -encodings: textproto: { - forms.data - encoding: "textproto" - stream: false -} - -encodings: binarypb: { - forms.data - encoding: "binarypb" - stream: false -} - -// encodings: binproto: { -// forms.DataEncoding -// encoding: "binproto" -// } - -encodings: code: { - forms.schema - stream: false -} - interpretations: [Name=string]: #FileInfo -interpretations: "": _ - interpretations: auto: { forms.schema } @@ -373,3 +318,61 @@ interpretations: pb: { forms.data stream: true } + +// tags maps command line tags to file properties. +tags: { + schema: form: "schema" + graph: form: "graph" + dag: form: "dag" + data: form: "data" + + cue: encoding: "cue" + + json: encoding: "json" + jsonl: encoding: "jsonl" + yaml: encoding: "yaml" + proto: encoding: "proto" + textproto: encoding: "textproto" + // "binpb": encodings.binproto + + // pb is used either to indicate binary encoding, or to indicate + pb: *{ + encoding: "binarypb" + interpretation: "" + } | { + encoding: !="binarypb" + interpretation: "pb" + } + + text: { + encoding: "text" + form: "data" + } + binary: { + encoding: "binary" + form: "data" + } + go: { + encoding: "code" + interpretation: "" + tags: lang: "go" + } + code: { + encoding: "code" + interpretation: "" + tags: lang: *"" | string + } + + auto: { + interpretation: "auto" + encoding: *"json" | _ + } + jsonschema: { + interpretation: "jsonschema" + encoding: *"json" | _ + } + openapi: { + interpretation: "openapi" + encoding: *"json" | _ + } +} diff --git a/vendor/cuelang.org/go/internal/filetypes/types.go b/vendor/cuelang.org/go/internal/filetypes/types.go index 5eb7307b13..21981b7eb0 100644 --- a/vendor/cuelang.org/go/internal/filetypes/types.go +++ b/vendor/cuelang.org/go/internal/filetypes/types.go @@ -1,48 +1,40 @@ -// Code generated by gocode.Generate; DO NOT EDIT. +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
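The tags table above is what parseType (near the start of this patch) matches against: a scope argument such as "yaml+lang=go" is split on "+", and each element is either a name=value pair filled into the file's tags or a bare tag unified into the file value. A standalone sketch of just that decomposition step; the scope string is made up:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirror of parseType's scope handling: split on "+", then use
	// strings.Cut to distinguish "name=value" pairs from bare tags.
	scope := "yaml+lang=go+schema"
	for _, tag := range strings.Split(scope, "+") {
		if name, val, ok := strings.Cut(tag, "="); ok {
			fmt.Printf("tag %q = %q\n", name, val)
		} else {
			fmt.Printf("filetype %q\n", tag)
		}
	}
}
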
package filetypes import ( - "fmt" + _ "embed" + "sync" "cuelang.org/go/cue" - "cuelang.org/go/encoding/gocode/gocodec" - _ "cuelang.org/go/pkg" + "cuelang.org/go/cue/cuecontext" ) -var cuegenCodec, cuegenInstance_, cuegenValue = func() (*gocodec.Codec, *cue.Instance, cue.Value) { - var r *cue.Runtime - r = &cue.Runtime{} - instances, err := r.Unmarshal(cuegenInstanceData) - if err != nil { - panic(err) - } - if len(instances) != 1 { - panic("expected encoding of exactly one instance") - } - return gocodec.New(r, nil), instances[0], instances[0].Value() -}() +//go:embed types.cue +var typesCUE string -// Deprecated: cue.Instance is deprecated. Use cuegenValue instead. -var cuegenInstance = cuegenInstance_ +var typesValue cue.Value +var knownExtensions map[string]bool -// cuegenMake is called in the init phase to initialize CUE values for -// validation functions. -func cuegenMake(name string, x interface{}) cue.Value { - f, err := cuegenValue.FieldByName(name, true) - if err != nil { - panic(fmt.Errorf("could not find type %q in instance", name)) +var typesInit = sync.OnceFunc(func() { + ctx := cuecontext.New() + typesValue = ctx.CompileString(typesCUE, cue.Filename("types.cue")) + if err := typesValue.Err(); err != nil { + panic(err) } - v := f.Value - if x != nil { - w, err := cuegenCodec.ExtractType(x) - if err != nil { - panic(err) - } - v = v.Unify(w) + if err := typesValue.LookupPath(cue.MakePath(cue.Str("knownExtensions"))).Decode(&knownExtensions); err != nil { + panic(err) } - return v -} - -// Data size: 1703 bytes. -var cuegenInstanceData = []byte("\x01\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xc4X\u074b\xe4\xc6\x11\x97\xf6.\x105N\x1e\ry\b\xd4\xe9\xc08\xcbE\x8b?\xc8\xc3\xc0r\x84\xdc]\xb8\x978\x04\xe7\u0258\xa1G*\xcdt,u+\xdd-{\x17\xef\x90\xc4q\xf2g{Cu\xb7\u0512F\xbb{\v\x0e\u0797\x9d\xa9_WuUu}\xce/n\xffs\x96\x9e\xdd\xfe7Io\xff\x99$\xbf\xfb\u01d34}OHc\xb9,\xf1\x15\xb7\x9c\xc8\xe9\x93\xf4\xe9_\x94\xb2\xe9Y\x92>\xfd3\xb7\x87\xf4\xbd$\xfd\xd9\x1b\u0460Io\xbfO\x92\xe4\u05f7\xff>K\xd3_~\xf1e\xd9cQ\x8b&p~\x9f\xa4\xb7\xdf%\u0247\xb7\xffz\x92\xa6?\x8f\xf4\xef\x92\xf4,}\xfa'\xde\"\tz\xea\x88,I\x92\x1f\xde\xff\x15)\x92\xa6gi\x9a\xd9\xeb\x0eMQ\xf6\x98\xfe\xf0~\xd2\xf1\xf2+\xbeG\xd8\xf5\xa2\xa9\x18\xbb\xb8\x80\xdf\x03\xdd\x0f\xa5\xd2\x1aM\xa7de\xc0*\xe0\xf0G\xe5\x0f\x15\x04\x17\xec9\xfd\xdb\xc0\xb7,\xa3\xeb%oq\x03\xe1\xcfX-\xe4\x9ee(KU\t\xb9\x1f\x81\xe7\xaf\x03\x85eBZ\u051dF\u02edP\xf2\xe5\x06\x9e\xbf\x9dQXV+\u077e\x1cY\x89\xfb\x8d\xd2-\xcb,\u07db\x97\xee\xe2\xec\v\x7f\u04d7\x9b\xf1\xca#;:#^a\xcd\xfb\u01820`\x0f\b\xa4\"\xf4\x06+\xa8\x95\x06c+!\x81\u02ca>\xa9\xde\x16\xf0\xf9\x01\xc1\xa0\xb5B\xee\rT\u0621\xacH\x8a\x92\x91\xbbU\x15Y\x1d\x04o\xc0\xd9\x0f\x1f\xcc\x1dp\x9e\xff6\x87\x9bA\x9b\xe3\u011foe\xad\xa0\xc2ZH4pP\xdf\x00\xf7b\x85\x01\xe7&\xac\x9cB\xa3[\xb0\n.&Fg\xad\xfb\u01b2\x8a[\x1e\xbdrnu\x8fp\x035o\f\xb2Lc\x8d\x1ae\x89fs\n\x96\xd7e\xe3\x81\x15N\xa7\x9a \xcf\u04c9\x9dR\r\xcbTG\xdfy\xe3Y<\xadT\xd2X\u0345\xb4\xf1\xdcW\x88]\xf0\x8b\xd9\x04\x9a\x90\xa5j\xbb\x06\xad\v\x8b@k;\xa5\xed\xa0\x81\xa7\x19\xab\x91\xb7\x83R\x9eV\xa9\xd2D\x13=\x8d[\xab\u016e\xb7\xde\x00G\xf3\xee\xa5w1\xf4x\xf4p^\a\xf7\u0215\xa8\x9d/,\xa8\x0e5\xf7\x96\xf8\xd3\x05\xbb\xb8 \xd6\xcf\x0fh\x10,\xb6]\xc3-\x1a\xe0\x1a\xdd\x03Hz\r\xab`\x87\xd0KQ\v\xa4w\x01n]0h\xa5,\xa8\x1a\xecA\x18\x12R*Y\x8b}\xefo(\x98\xbb\xc0\xbd\x97\x90]o}\x9c6h\xe1\n.\xdd\xe7\x99u\x8bG\xc8ff.\xc1#\u02f2\x18\x7fNV\u0330\xf3\xbc\xec\x91boK\xf4\xa2(\x06\x86\x18CW,2\x98 \xa0\xec)j)\xd5La\xca\x03\xb6<\x88 
^\xbc\xb2(\x8d\x0f\tw:/\xfef\x94\xcc\u00f7E\x0e\x93\x0e\xbc\xb7jT\xe2\xe8Y\xaey\xdb<\x96\xe5q\x1cG\xca\xfb\f\xaf(\xba&\x0e\xdf~\xb4\xe6\xf2\xe0\xd4\xf3U\x97/\xc1\a\\\xee\xbcq\xbf\u03f7\x1f=\xe0u\xca\xe7\xe8\xf3#\xcbT\xdf\xd9Y\xe0l?\xfeq\xec\x98j\xf5\xf1c\xb5\u00af\xa9\x0eD\x9d>\xf9\x7f\xfb\xf6\xe1p\xde~\xf2\x80\x11\xb5\xa0\x94\x9fZQa=5\xe2\u04df>'\xb7\x9f>2+\x87\x0e\xf7zHNhyg|3\x89\tK\xe5+\x94C\x0fu\x9a\u02a0\x15T\xfd\x16y\x9d\xe7\xd3.\xbbeYN\xc3\xc1H\xa4~K\x04\x16\xd3?\u04890\x00M@F\xa0!\xa4\xa9\"\xd3\x1c\x91w\"\xa1dDiD`caX\x01\uc55d\x03\x16\xaf,\x01{\x15\xads\xc0^\x11\xb9\xd3\u02aa\xa9\xbe\x8e\xe0$\xe1\x95\x1d\xd0Q\xd2\x1c\xddMt\x8e(\u02e8\xa5|\xf6\xea\xb3\r\x90!\x06\xff\xfe\u0091\xf2b`\x18\x99vBv;\xb8\xb8\x80\x9d\x90\\_w\xbbqT\x18\x06$\x10\xb2\x12\xa5\xefJ\xfe\x01)\x1a\xb8u\xadMc\xa7\u0460\xa4q\x058=\xed^\xf3\xb6`\xe3x\xb5\x81g\x97y\xeeEJ\x98\x0fVP\xa1E\xddN\xe6\x90\x12\xb5\xe5B\x0er\xc0\x1cT\xdfT\xd4\xfdf\xd3\xc8\xc5\x05\xbcQ\x1a\x86\x11\xf6\x05\xb8\x1a\xd1\xf2\xeb\xc5I\xe0\u0509M\xa9\xc5\xce\xeb\xe7#\xf8\x05|s\x10\xe5\x01\x845\xd8\u052esrI\xac\xa5\x92_\xa3\xb6\xbe\xe5r\xf8\xc3__\a\x8e\x82-f\xc2q\xccs\x93\xe04h\x03\xbdv#\xe9\x98^\xcb\xe9,\xaf\x95\xf2!\xec\xa7K/!\xf7\xb7\xe5\xe1\r\xe8\x81|J\x95\xaami&k\x84DO\xb6\xea4\x99\bpi\xe4\xc5\xf8\f\xf6\xd2G\u0254\xb7{\u037b\xc3\fu\x14\x0fV|?\x83*\xbe\x1f\x00\xcb\x17\x88\r\x02]\x91\xf8\x96M\v\x8e\xab7\x0e$+O\xd0`z\x80\x9bU\xbc\xf1\a(\xafNp\x97\x96\x0ev\x11\x7f\x82\xfb\xb4q\a\u01b489\x14\xf3\x8b\x0e\xba\f\xe9v4\x06\xbb\xf1\x1c\x85=\xa0&G\x0f\t\x10r\x04\x06\x11/@\xcdp\x96u\xbb\r\x9c\xcfo\xf1\x7f\xf9\x90^9;\x9d#r\xba\x1fn`\x8d\xf1\xd9\xe5\xfd\xac\x8e\x1c\xac\\50\x1f\x1f\xcc\xe9\x11\x1f\u034b=\xe1\xf1\xe4;\xb9\xf6'n\f\x06\xd2\xe2p\x97q\xd9\x18\x99Y\xd6pw\u035e\x9c\x1eZ!\xb1\xfe(R\x87\xd5+\u0225\xe9\xcc\xe3'\xecnp[\xb9p6H\x85\xe8\x9cf\u04c9\xa0x\xe0]\u0129\x0e%\xef\xc4\x1d\xb2\x02\xfa\x0e\x82|}p]y\xdc\xe4Bw\xa6\xaa\u031b\u0183\x05\xbc\xb5P)4 \x95\x05!\u02e6\xaf\xd0/\x92J\xb7\xf0\xf6U\xc1\xdc9\xa7\x90[cia\xbf\x1cw\u0671~9\xed\xa9;o\u05ea\v\x8cZ\x06W\xc0\r\xe4n\xe4q\x9f\x86\xea\xb2\u0630\x96S\xd8|O[\x8e7\xf3\xadp\x89\xce\xf7\xc3\x0fg\xf0o\xe0\x83%\x85e\x8b\xedq)o\xbeG.\xd1\xf9\xf6\xb8@\x8fT\xe7\xe50\xa2N'\xa7\x13\x7f\x05\x1f\x9d\u0737nU\x94\x7fR\xc0\xe3\x03x_\x93\u05e9p\xfb\xff.w\x17\xdb:\xe9|\xe2\xf3u_\u07eb\xcd\u008f\xeb\xfe[\xf7[\xb4g\xd6sL\xe1l\x98\xd8\xf6\xec2\x86\xd0\xf0\xcb\xc1\x94y\u0697h_\xd8/\xfd\xf2\xec2\xb4\xb1\xb9\xb6\x83Z\xb3\x9f*F\xbb\xa6?Q\xac\x1a\xb0\xea\x97Q\xaf#\x9b\x8f\xd2c\x8f\x1c\x92 Z\x10;d\xdcx\x16\xd9\xe2\x93\x04n\x86w\x9bn\t\x83\x1e\xd3\xe5 \n\x8f\xeds\xee\u0719\x1a\x94\x86^\xf2\xbc#\xaf\xea3\x1e\x8c=g\xf5\\\xd4a\xdaj\x1e8jU{\xdf\xdd\xf1\u0925/r\xec\x9d\u0180\x99\xf4;f\x82\xc9\v,m\xa1F\x7f\x9f\x98i\xcf^\x93\x12[\xdeB\xf9\x13C\x8fl\xde'\x1eQ\xab\xdd\x06\xe5\x9b\xe0\xfc\x96eW\xbb\u04c1\xf7\xf6\xafw\xe6Zu\xd62\x9a\x8e,I\xfe\x17\x00\x00\xff\xffj\xd7\xe3\u00e8\x16\x00\x00") +}) diff --git a/vendor/cuelang.org/go/internal/filetypes/util.go b/vendor/cuelang.org/go/internal/filetypes/util.go index bfca2dbe44..e40becfd4b 100644 --- a/vendor/cuelang.org/go/internal/filetypes/util.go +++ b/vendor/cuelang.org/go/internal/filetypes/util.go @@ -23,14 +23,14 @@ import ( // IsPackage reports whether a command-line argument is a package based on its // lexical representation alone. func IsPackage(s string) bool { - if s == "." || s == ".." 
{ + switch s { + case ".", "..": return true - } - if s == "-" { + case "", "-": return false }
- // This goes of the assumption that file names may not have a `:` in their
+ // This goes off the assumption that file names may not have a `:` in their
 // name in cue.
 // A filename must have an extension or be preceded by a qualifier argument.
 // So strings of the form foo/bar:baz, where bar is a valid identifier and
diff --git a/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/gopls_windows.go b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/gopls_windows.go
new file mode 100644
index 0000000000..81c02d1332
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/gopls_windows.go
@@ -0,0 +1,14 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package robustio
+
+import "syscall"
+
+// The robustio package is copied from cmd/go/internal/robustio, a package used
+// by the go command to retry known flaky operations on certain operating systems.
+
+// Since the gopls module cannot access internal/syscall/windows, copy a
+// necessary constant.
+const ERROR_SHARING_VIOLATION syscall.Errno = 32
diff --git a/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio.go b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio.go
new file mode 100644
index 0000000000..0a559fc9b8
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio.go
@@ -0,0 +1,65 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package robustio wraps I/O functions that are prone to failure on Windows,
+// transparently retrying errors up to an arbitrary timeout.
+//
+// Errors are classified heuristically and retries are bounded, so the functions
+// in this package do not completely eliminate spurious errors. However, they do
+// significantly reduce the rate of failure in practice.
+package robustio
+
+import "time"
+
+// Rename is like os.Rename, but on Windows retries errors that may occur if the
+// file is concurrently read or overwritten.
+//
+// (See golang.org/issue/31247 and golang.org/issue/32188.)
+func Rename(oldpath, newpath string) error {
+	return rename(oldpath, newpath)
+}
+
+// ReadFile is like os.ReadFile, but on Windows retries errors that may
+// occur if the file is concurrently replaced.
+//
+// (See golang.org/issue/31247 and golang.org/issue/32188.)
+func ReadFile(filename string) ([]byte, error) {
+	return readFile(filename)
+}
+
+// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur
+// if an executable file in the directory has recently been executed.
+//
+// (See golang.org/issue/19491.)
+func RemoveAll(path string) error {
+	return removeAll(path)
+}
+
+// IsEphemeralError reports whether err is one of the errors that the functions
+// in this package attempt to mitigate.
+//
+// Errors considered ephemeral include:
+// - syscall.ERROR_ACCESS_DENIED
+// - syscall.ERROR_FILE_NOT_FOUND
+// - internal/syscall/windows.ERROR_SHARING_VIOLATION
+//
+// This set may be expanded in the future; programs must not rely on the
+// non-ephemerality of any given error.
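A possible usage sketch for the exported API above. The caller code is hypothetical, and since this vendored copy lives under internal/ it is importable only from within the cuelang.org/go module:

package main

import (
	"fmt"

	"cuelang.org/go/internal/golangorgx/tools/robustio"
)

func main() {
	// Prefer robustio over os.Rename in paths that replace files another
	// process (e.g. an editor or virus scanner on Windows) may hold open.
	if err := robustio.Rename("module.cue.tmp", "module.cue"); err != nil {
		// Once the retry budget is exhausted the last error is returned;
		// IsEphemeralError reports whether it was one of the retried kinds.
		fmt.Println(err, robustio.IsEphemeralError(err))
	}
}
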
+func IsEphemeralError(err error) bool { + return isEphemeralError(err) +} + +// A FileID uniquely identifies a file in the file system. +// +// If GetFileID(name1) returns the same ID as GetFileID(name2), the two file +// names denote the same file. +// A FileID is comparable, and thus suitable for use as a map key. +type FileID struct { + device, inode uint64 +} + +// GetFileID returns the file system's identifier for the file, and its +// modification time. +// Like os.Stat, it reads through symbolic links. +func GetFileID(filename string) (FileID, time.Time, error) { return getFileID(filename) } diff --git a/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_darwin.go b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_darwin.go new file mode 100644 index 0000000000..99fd8ebc2f --- /dev/null +++ b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_darwin.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "errors" + "syscall" +) + +const errFileNotFound = syscall.ENOENT + +// isEphemeralError returns true if err may be resolved by waiting. +func isEphemeralError(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + return errno == errFileNotFound + } + return false +} diff --git a/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_flaky.go b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_flaky.go new file mode 100644 index 0000000000..d5c241857b --- /dev/null +++ b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_flaky.go @@ -0,0 +1,92 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows || darwin +// +build windows darwin + +package robustio + +import ( + "errors" + "math/rand" + "os" + "syscall" + "time" +) + +const arbitraryTimeout = 2000 * time.Millisecond + +// retry retries ephemeral errors from f up to an arbitrary timeout +// to work around filesystem flakiness on Windows and Darwin. +func retry(f func() (err error, mayRetry bool)) error { + var ( + bestErr error + lowestErrno syscall.Errno + start time.Time + nextSleep time.Duration = 1 * time.Millisecond + ) + for { + err, mayRetry := f() + if err == nil || !mayRetry { + return err + } + + var errno syscall.Errno + if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) { + bestErr = err + lowestErrno = errno + } else if bestErr == nil { + bestErr = err + } + + if start.IsZero() { + start = time.Now() + } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout { + break + } + time.Sleep(nextSleep) + nextSleep += time.Duration(rand.Int63n(int64(nextSleep))) + } + + return bestErr +} + +// rename is like os.Rename, but retries ephemeral errors. +// +// On Windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with +// MOVEFILE_REPLACE_EXISTING. +// +// Windows also provides a different system call, ReplaceFile, +// that provides similar semantics, but perhaps preserves more metadata. (The +// documentation on the differences between the two is very sparse.) +// +// Empirical error rates with MoveFileEx are lower under modest concurrency, so +// for now we're sticking with what the os package already provides. 
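The retry helper above backs off with jitter: each round sleeps nextSleep, then grows it by a random amount in [0, nextSleep), i.e. between 1x and 2x per round, giving up once the projected total reaches arbitraryTimeout. A standalone sketch of just that schedule:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Reproduce retry's sleep schedule without the I/O: growth is 1x-2x
	// per round, so the budget of 2s is typically spent within a dozen or
	// two rounds.
	const arbitraryTimeout = 2000 * time.Millisecond
	nextSleep := 1 * time.Millisecond
	var elapsed time.Duration
	for elapsed+nextSleep < arbitraryTimeout {
		elapsed += nextSleep
		fmt.Printf("elapsed %v, next sleep %v\n", elapsed, nextSleep)
		nextSleep += time.Duration(rand.Int63n(int64(nextSleep)))
	}
}
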
+func rename(oldpath, newpath string) (err error) { + return retry(func() (err error, mayRetry bool) { + err = os.Rename(oldpath, newpath) + return err, isEphemeralError(err) + }) +} + +// readFile is like os.ReadFile, but retries ephemeral errors. +func readFile(filename string) ([]byte, error) { + var b []byte + err := retry(func() (err error, mayRetry bool) { + b, err = os.ReadFile(filename) + + // Unlike in rename, we do not retry errFileNotFound here: it can occur + // as a spurious error, but the file may also genuinely not exist, so the + // increase in robustness is probably not worth the extra latency. + return err, isEphemeralError(err) && !errors.Is(err, errFileNotFound) + }) + return b, err +} + +func removeAll(path string) error { + return retry(func() (err error, mayRetry bool) { + err = os.RemoveAll(path) + return err, isEphemeralError(err) + }) +} diff --git a/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_other.go b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_other.go new file mode 100644 index 0000000000..3a20cac6cf --- /dev/null +++ b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_other.go @@ -0,0 +1,28 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows && !darwin +// +build !windows,!darwin + +package robustio + +import ( + "os" +) + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func readFile(filename string) ([]byte, error) { + return os.ReadFile(filename) +} + +func removeAll(path string) error { + return os.RemoveAll(path) +} + +func isEphemeralError(err error) bool { + return false +} diff --git a/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_plan9.go b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_plan9.go new file mode 100644 index 0000000000..9fa4cacb5a --- /dev/null +++ b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_plan9.go @@ -0,0 +1,26 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build plan9 +// +build plan9 + +package robustio + +import ( + "os" + "syscall" + "time" +) + +func getFileID(filename string) (FileID, time.Time, error) { + fi, err := os.Stat(filename) + if err != nil { + return FileID{}, time.Time{}, err + } + dir := fi.Sys().(*syscall.Dir) + return FileID{ + device: uint64(dir.Type)<<32 | uint64(dir.Dev), + inode: dir.Qid.Path, + }, fi.ModTime(), nil +} diff --git a/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_posix.go b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_posix.go new file mode 100644 index 0000000000..8aa13d0278 --- /dev/null +++ b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_posix.go @@ -0,0 +1,28 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows && !plan9 +// +build !windows,!plan9 + +// TODO(adonovan): use 'unix' tag when go1.19 can be assumed. 
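GetFileID (declared in robustio.go above, with the per-platform getFileID implementations nearby) yields a comparable identity for a file that is independent of the name used to reach it. A hypothetical check that two paths alias the same file:

package main

import (
	"fmt"

	"cuelang.org/go/internal/golangorgx/tools/robustio"
)

func main() {
	// Two names resolve to the same FileID when they denote the same file,
	// e.g. through a symlink, since GetFileID stats through links.
	a, _, err1 := robustio.GetFileID("cue.mod/module.cue")
	b, _, err2 := robustio.GetFileID("./cue.mod/module.cue")
	if err1 == nil && err2 == nil {
		fmt.Println("same file:", a == b)
	}
}
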
+ +package robustio + +import ( + "os" + "syscall" + "time" +) + +func getFileID(filename string) (FileID, time.Time, error) { + fi, err := os.Stat(filename) + if err != nil { + return FileID{}, time.Time{}, err + } + stat := fi.Sys().(*syscall.Stat_t) + return FileID{ + device: uint64(stat.Dev), // (int32 on darwin, uint64 on linux) + inode: stat.Ino, + }, fi.ModTime(), nil +} diff --git a/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_windows.go b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_windows.go new file mode 100644 index 0000000000..616c32883d --- /dev/null +++ b/vendor/cuelang.org/go/internal/golangorgx/tools/robustio/robustio_windows.go @@ -0,0 +1,51 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "errors" + "syscall" + "time" +) + +const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND + +// isEphemeralError returns true if err may be resolved by waiting. +func isEphemeralError(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + switch errno { + case syscall.ERROR_ACCESS_DENIED, + syscall.ERROR_FILE_NOT_FOUND, + ERROR_SHARING_VIOLATION: + return true + } + } + return false +} + +// Note: it may be convenient to have this helper return fs.FileInfo, but +// implementing this is actually quite involved on Windows. Since we only +// currently use mtime, keep it simple. +func getFileID(filename string) (FileID, time.Time, error) { + filename16, err := syscall.UTF16PtrFromString(filename) + if err != nil { + return FileID{}, time.Time{}, err + } + h, err := syscall.CreateFile(filename16, 0, 0, nil, syscall.OPEN_EXISTING, uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS), 0) + if err != nil { + return FileID{}, time.Time{}, err + } + defer syscall.CloseHandle(h) + var i syscall.ByHandleFileInformation + if err := syscall.GetFileInformationByHandle(h, &i); err != nil { + return FileID{}, time.Time{}, err + } + mtime := time.Unix(0, i.LastWriteTime.Nanoseconds()) + return FileID{ + device: uint64(i.VolumeSerialNumber), + inode: uint64(i.FileIndexHigh)<<32 | uint64(i.FileIndexLow), + }, mtime, nil +} diff --git a/vendor/cuelang.org/go/internal/internal.go b/vendor/cuelang.org/go/internal/internal.go index 9c85d0f717..bb459deade 100644 --- a/vendor/cuelang.org/go/internal/internal.go +++ b/vendor/cuelang.org/go/internal/internal.go @@ -15,14 +15,13 @@ // Package internal exposes some cue internals to other packages. // // A better name for this package would be technicaldebt. -package internal // import "cuelang.org/go/internal" +package internal // TODO: refactor packages as to make this package unnecessary. import ( "bufio" "fmt" - "os" "path/filepath" "strings" @@ -91,9 +90,6 @@ func (c Context) Sqrt(d, x *apd.Decimal) (apd.Condition, error) { // incomplete. var ErrIncomplete = errors.New("incomplete value") -// MakeInstance makes a new instance from a value. -var MakeInstance func(value interface{}) (instance interface{}) - // BaseContext is used as CUE's default context for arbitrary-precision decimals. var BaseContext = Context{*apd.BaseContext.WithPrecision(34)} @@ -111,6 +107,16 @@ func Version(minor, patch int) int { return -1000 + 100*minor + patch } +type EvaluatorVersion int + +const ( + DefaultVersion EvaluatorVersion = iota + + // The DevVersion is used for new implementations of the evaluator that + // do not cover all features of the CUE language yet. 
+ DevVersion +) + // ListEllipsis reports the list type and remaining elements of a list. If we // ever relax the usage of ellipsis, this function will likely change. Using // this function will ensure keeping correct behavior or causing a compiler @@ -144,31 +150,23 @@ func GetPackageInfo(f *ast.File) PkgInfo { case *ast.Attribute: case *ast.Package: if x.Name == nil { - break + return PkgInfo{} } return PkgInfo{x, i, x.Name.Name} + default: + return PkgInfo{} } } return PkgInfo{} } -// Deprecated: use GetPackageInfo -func PackageInfo(f *ast.File) (p *ast.Package, name string, tok token.Pos) { - x := GetPackageInfo(f) - if p := x.Package; p != nil { - return p, x.Name, p.Name.Pos() - } - return nil, "", f.Pos() -} - func SetPackage(f *ast.File, name string, overwrite bool) { - p, str, _ := PackageInfo(f) - if p != nil { - if !overwrite || str == name { + if pi := GetPackageInfo(f); pi.Package != nil { + if !overwrite || pi.Name == name { return } ident := ast.NewIdent(name) - astutil.CopyMeta(ident, p.Name) + astutil.CopyMeta(ident, pi.Package.Name) return } @@ -229,9 +227,8 @@ func NewComment(isDoc bool, s string) *ast.CommentGroup { } func FileComment(f *ast.File) *ast.CommentGroup { - pkg, _, _ := PackageInfo(f) var cgs []*ast.CommentGroup - if pkg != nil { + if pkg := GetPackageInfo(f).Package; pkg != nil { cgs = pkg.Comments() } else if cgs = f.Comments(); len(cgs) > 0 { // Use file comment. @@ -309,18 +306,22 @@ func ToExpr(n ast.Node) ast.Expr { // // Adjusts the spacing of x when needed. func ToFile(n ast.Node) *ast.File { - switch x := n.(type) { - case nil: + if n == nil { return nil + } + switch n := n.(type) { case *ast.StructLit: - return &ast.File{Decls: x.Elts} + f := &ast.File{Decls: n.Elts} + // Ensure that the comments attached to the struct literal are not lost. + ast.SetComments(f, ast.Comments(n)) + return f case ast.Expr: - ast.SetRelPos(x, token.NoSpace) - return &ast.File{Decls: []ast.Decl{&ast.EmbedDecl{Expr: x}}} + ast.SetRelPos(n, token.NoSpace) + return &ast.File{Decls: []ast.Decl{&ast.EmbedDecl{Expr: n}}} case *ast.File: - return x + return n default: - panic(fmt.Sprintf("Unsupported node type %T", x)) + panic(fmt.Sprintf("Unsupported node type %T", n)) } } @@ -435,17 +436,6 @@ func IsEllipsis(x ast.Decl) bool { // GenPath reports the directory in which to store generated files. func GenPath(root string) string { - info, err := os.Stat(filepath.Join(root, "cue.mod")) - if os.IsNotExist(err) || !info.IsDir() { - // Try legacy pkgDir mode - pkgDir := filepath.Join(root, "pkg") - if err == nil && !info.IsDir() { - return pkgDir - } - if info, err := os.Stat(pkgDir); err == nil && info.IsDir() { - return pkgDir - } - } return filepath.Join(root, "cue.mod", "gen") } diff --git a/vendor/cuelang.org/go/internal/mod/modfile/modfile.go b/vendor/cuelang.org/go/internal/mod/modfile/modfile.go deleted file mode 100644 index 353b92dc7c..0000000000 --- a/vendor/cuelang.org/go/internal/mod/modfile/modfile.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2023 CUE Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package modfile - -import ( - _ "embed" - "fmt" - "sync" - - "cuelang.org/go/internal/mod/semver" - - "cuelang.org/go/cue" - "cuelang.org/go/cue/cuecontext" - "cuelang.org/go/cue/errors" - "cuelang.org/go/cue/parser" - "cuelang.org/go/cue/token" - "cuelang.org/go/internal/mod/module" -) - -//go:embed schema.cue -var moduleSchemaData []byte - -type File struct { - Module string `json:"module"` - Language Language `json:"language"` - Deps map[string]*Dep `json:"deps,omitempty"` - versions []module.Version -} - -type Language struct { - Version string `json:"version"` -} - -type Dep struct { - Version string `json:"v"` - Default bool `json:"default,omitempty"` -} - -type noDepsFile struct { - Module string `json:"module"` -} - -var ( - moduleSchemaOnce sync.Once - _moduleSchema cue.Value -) - -func moduleSchema() cue.Value { - moduleSchemaOnce.Do(func() { - ctx := cuecontext.New() - schemav := ctx.CompileBytes(moduleSchemaData, cue.Filename("cuelang.org/go/internal/mod/modfile/schema.cue")) - schemav = lookup(schemav, cue.Def("#File")) - //schemav = schemav.Unify(lookup(schemav, cue.Hid("#Strict", "_"))) - if err := schemav.Validate(); err != nil { - panic(fmt.Errorf("internal error: invalid CUE module.cue schema: %v", errors.Details(err, nil))) - } - _moduleSchema = schemav - }) - return _moduleSchema -} - -func lookup(v cue.Value, sels ...cue.Selector) cue.Value { - return v.LookupPath(cue.MakePath(sels...)) -} - -// Parse verifies that the module file has correct syntax. -// The file name is used for error messages. -// All dependencies must be specified correctly: with major -// versions in the module paths and canonical dependency -// versions. -func Parse(modfile []byte, filename string) (*File, error) { - return parse(modfile, filename, true) -} - -// ParseLegacy parses the legacy version of the module file -// that only supports the single field "module" and ignores all other -// fields. -func ParseLegacy(modfile []byte, filename string) (*File, error) { - v := moduleSchema().Context().CompileBytes(modfile, cue.Filename(filename)) - if err := v.Err(); err != nil { - return nil, errors.Wrapf(err, token.NoPos, "invalid module.cue file") - } - var f noDepsFile - if err := v.Decode(&f); err != nil { - return nil, newCUEError(err, filename) - } - return &File{ - Module: f.Module, - }, nil -} - -// ParseNonStrict is like Parse but allows some laxity in the parsing: -// - if a module path lacks a version, it's taken from the version. -// - if a non-canonical version is used, it will be canonicalized. -// -// The file name is used for error messages. -func ParseNonStrict(modfile []byte, filename string) (*File, error) { - return parse(modfile, filename, false) -} - -func parse(modfile []byte, filename string, strict bool) (*File, error) { - file, err := parser.ParseFile(filename, modfile) - if err != nil { - return nil, errors.Wrapf(err, token.NoPos, "invalid module.cue file syntax") - } - // TODO disallow non-data-mode CUE. 
- - v := moduleSchema().Context().BuildFile(file) - if err := v.Validate(cue.Concrete(true)); err != nil { - return nil, errors.Wrapf(err, token.NoPos, "invalid module.cue file value") - } - v = v.Unify(moduleSchema()) - if err := v.Validate(); err != nil { - return nil, newCUEError(err, filename) - } - var mf File - if err := v.Decode(&mf); err != nil { - return nil, errors.Wrapf(err, token.NoPos, "internal error: cannot decode into modFile struct") - } - if strict { - _, v, ok := module.SplitPathVersion(mf.Module) - if !ok { - return nil, fmt.Errorf("module path %q in %s does not contain major version", mf.Module, filename) - } - if semver.Major(v) != v { - return nil, fmt.Errorf("module path %s in %q should contain the major version only", mf.Module, filename) - } - } - if v := mf.Language.Version; v != "" && !semver.IsValid(v) { - return nil, fmt.Errorf("language version %q in %s is not well formed", v, filename) - } - var versions []module.Version - // Check that major versions match dependency versions. - for m, dep := range mf.Deps { - v, err := module.NewVersion(m, dep.Version) - if err != nil { - return nil, fmt.Errorf("invalid module.cue file %s: cannot make version from module %q, version %q: %v", filename, m, dep.Version, err) - } - versions = append(versions, v) - if strict && v.Path() != m { - return nil, fmt.Errorf("invalid module.cue file %s: no major version in %q", filename, m) - } - } - - mf.versions = versions[:len(versions):len(versions)] - module.Sort(mf.versions) - return &mf, nil -} - -func newCUEError(err error, filename string) error { - // TODO we have some potential to improve error messages here. - return err -} - -// DepVersions returns the versions of all the modules depended on by the -// file. The caller should not modify the returned slice. -func (f *File) DepVersions() []module.Version { - return f.versions -} diff --git a/vendor/cuelang.org/go/internal/mod/modfile/schema.cue b/vendor/cuelang.org/go/internal/mod/modfile/schema.cue deleted file mode 100644 index 506f2b47f4..0000000000 --- a/vendor/cuelang.org/go/internal/mod/modfile/schema.cue +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2023 CUE Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This schema constrains a module.cue file. This form constrains module.cue files -// outside of the main module. For the module.cue file in a main module, the schema -// is less restrictive, because wherever #Semver is used, a less specific version may be -// used instead, which will be rewritten to the canonical form by the cue command tooling. -// To check against that form, - -// Note: we're using 1&2 rather than _|_ because -// use of _|_ causes the source location of the errors -// to be lost. See https://github.com/cue-lang/cue/issues/2319. -let unimplemented = 1&2 - - -// #File represents the overarching module file schema. -#File: { - // Reserve fields that are unimplemented for now. 
- { - deprecated?: unimplemented - retract?: unimplemented - publish?: unimplemented - #Dep: { - exclude?: unimplemented - replace?: unimplemented - replaceAll?: unimplemented - } - } - // module indicates the module's path. - module?: #Module | "" - - // version indicates the language version used by the code - // in this module - the minimum version of CUE required - // to evaluate the code in this module. When a later version of CUE - // is evaluating code in this module, this will be used to - // choose version-specific behavior. If an earlier version of CUE - // is used, an error will be given. - language?: version?: #Semver - - // description describes the purpose of this module. - description?: string - - // When present, deprecated indicates that the module - // is deprecated and includes information about that deprecation, ideally - // mentioning an alternative that can be used instead. - // TODO implement this. - deprecated?: string - - // deps holds dependency information for modules, keyed by module path. - deps?: [#Module]: #Dep - - #Dep: { - // TODO use the below when mustexist is implemented. - // replace and replaceAll are mutually exclusive. - //mustexist(<=1, replace, replaceAll) - // There must be at least one field specified for a given module. - //mustexist(>=1, v, exclude, replace, replaceAll) - - // v indicates the minimum required version of the module. - // This can be null if the version is unknown and the module - // entry is only present to be replaced. - v!: #Semver | null - - // default indicates this module is used as a default in case - // more than one major version is specified for the same module - // path. Imports must specify the exact major version for a - // module path if there is more than one major version for that - // path and default is not set for exactly one of them. - // TODO implement this. - default?: bool - - // exclude excludes a set of versions of the module - // TODO implement this. - exclude?: [#Semver]: true - - // replace specifies replacements for specific versions of - // the module. This field is exclusive with replaceAll. - // TODO implement this. - replace?: [#Semver]: #Replacement - - // replaceAll specifies a replacement for all versions of the module. - // This field is exclusive with replace. - // TODO implement this. - replaceAll?: #Replacement - } - - // retract specifies a set of previously published versions to retract. - // TODO implement this. - retract?: [... #RetractedVersion] - - // The publish section can be used to restrict the scope of a module to prevent - // accidental publishing. - // TODO complete the definition of this and implement it. - publish?: _ - - // #RetractedVersion specifies either a single version - // to retract, or an inclusive range of versions to retract. - #RetractedVersion: #Semver | { - from!: #Semver - // TODO constrain to to be after from? - to!: #Semver - } - - // #Replacement specifies a replacement for a module. It can either - // be a reference to a local directory or an alternative module with associated - // version. - #Replacement: string | { - m!: #Module - v!: #Semver - } - - // #Module constrains a module path. - // The major version indicator is optional, but should always be present - // in a normalized module.cue file. - #Module: string - - // #Semver constrains a semantic version. - #Semver: =~"." -} - -// _#Strict can be unified with the top level schema to enforce the strict version -// of the schema required by the registry. 
-#Strict: #File & {
-	// This regular expression is taken from https://semver.org/spec/v2.0.0.html
-	#Semver: =~#"^v(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$"#
-
-	// WIP: (([\-_~a-zA-Z0-9][.\-_~a-zA-Z0-9]*[\-_~a-zA-Z0-9])|([\-_~a-zA-Z0-9]))(/([\-_~a-zA-Z0-9][.\-_~a-zA-Z0-9]*[\-_~a-zA-Z0-9])|([\-_~a-zA-Z0-9]))*
-	#Module: =~#"^[^@]+(@v(0|[1-9]\d*))$"#
-
-	// We don't yet implement local file replacement (specified as a string)
-	// so require a struct, thus allowing only replacement by some other module.
-	#Replacement: {...}
-
-	// The module declaration is required.
-	module!: #Module
-
-	// No null versions, because no replacements yet.
-	#Dep: v!: #Semver
-
-	// TODO require the CUE version?
-	// language!: version!: _
-}
diff --git a/vendor/cuelang.org/go/internal/mod/modimports/modimports.go b/vendor/cuelang.org/go/internal/mod/modimports/modimports.go
new file mode 100644
index 0000000000..49f04708ee
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/mod/modimports/modimports.go
@@ -0,0 +1,244 @@
+package modimports
+
+import (
+	"fmt"
+	"io/fs"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/parser"
+	"cuelang.org/go/internal/cueimports"
+	"cuelang.org/go/mod/module"
+)
+
+type ModuleFile struct {
+	// FilePath holds the path of the module file
+	// relative to the root of the fs. This will be
+	// valid even if there's an associated error.
+	//
+	// If there's an error, it might not be a CUE file.
+	FilePath string
+
+	// Syntax includes only the portion of the file up to and including
+	// the imports. It will be nil if there was an error reading the file.
+	Syntax *ast.File
+}
+
+// AllImports returns a sorted list of all the package paths
+// imported by the module files produced by modFilesIter
+// in canonical form.
+func AllImports(modFilesIter func(func(ModuleFile, error) bool)) (_ []string, retErr error) {
+	pkgPaths := make(map[string]bool)
+	modFilesIter(func(mf ModuleFile, err error) bool {
+		if err != nil {
+			retErr = fmt.Errorf("cannot read %q: %v", mf.FilePath, err)
+			return false
+		}
+		// TODO look at build tags and omit files with "ignore" tags.
+		for _, imp := range mf.Syntax.Imports {
+			pkgPath, err := strconv.Unquote(imp.Path.Value)
+			if err != nil {
+				// TODO location formatting
+				retErr = fmt.Errorf("invalid import path %q in %s", imp.Path.Value, mf.FilePath)
+				return false
+			}
+			// Canonicalize the path.
+			pkgPath = module.ParseImportPath(pkgPath).Canonical().String()
+			pkgPaths[pkgPath] = true
+		}
+		return true
+	})
+	if retErr != nil {
+		return nil, retErr
+	}
+	// TODO use maps.Keys when we can.
+	pkgPathSlice := make([]string, 0, len(pkgPaths))
+	for p := range pkgPaths {
+		pkgPathSlice = append(pkgPathSlice, p)
+	}
+	sort.Strings(pkgPathSlice)
+	return pkgPathSlice, nil
+}
+
+// PackageFiles returns an iterator that produces all the CUE files
+// inside the package with the given name at the given location.
+// If pkgQualifier is "*", files from all packages in the directory will be produced.
+//
+// TODO(mvdan): this should now be called InstanceFiles, to follow the naming from
+// https://cuelang.org/docs/concept/modules-packages-instances/#instances.
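A sketch of how these iterator-style APIs compose; the caller code is hypothetical and, as an internal package, modimports is importable only from within cuelang.org/go. AllModuleFiles walks the module tree and AllImports collects the canonical import paths from the files it yields:

package main

import (
	"fmt"
	"log"
	"os"

	"cuelang.org/go/internal/mod/modimports"
)

func main() {
	// List every package imported by CUE files in the module rooted at ".".
	fsys := os.DirFS(".")
	imports, err := modimports.AllImports(modimports.AllModuleFiles(fsys, "."))
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range imports {
		fmt.Println(p)
	}
}
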
+func PackageFiles(fsys fs.FS, dir string, pkgQualifier string) func(func(ModuleFile, error) bool) { + return func(yield func(ModuleFile, error) bool) { + // Start at the target directory, but also include package files + // from packages with the same name(s) in parent directories. + // Stop the iteration when we find a cue.mod entry, signifying + // the module root. If the location is inside a `cue.mod` directory + // already, do not look at parent directories - this mimics historic + // behavior. + selectPackage := func(pkg string) bool { + if pkgQualifier == "*" { + return true + } + return pkg == pkgQualifier + } + inCUEMod := false + if before, after, ok := strings.Cut(dir, "cue.mod"); ok { + // We're underneath a cue.mod directory if some parent + // element is cue.mod. + inCUEMod = + (before == "" || strings.HasSuffix(before, "/")) && + (after == "" || strings.HasPrefix(after, "/")) + } + var matchedPackages map[string]bool + for { + entries, err := fs.ReadDir(fsys, dir) + if err != nil { + yield(ModuleFile{ + FilePath: dir, + }, err) + return + } + inModRoot := false + for _, e := range entries { + if e.Name() == "cue.mod" { + inModRoot = true + } + pkgName, cont := yieldPackageFile(fsys, path.Join(dir, e.Name()), selectPackage, yield) + if !cont { + return + } + if pkgName != "" { + if matchedPackages == nil { + matchedPackages = make(map[string]bool) + } + matchedPackages[pkgName] = true + } + } + if inModRoot || inCUEMod { + // We're at the module root or we're inside the cue.mod + // directory. Don't go any further up the hierarchy. + return + } + if matchedPackages == nil { + // No packages possible in parent directories if there are + // no matching package files in the package directory itself. + return + } + selectPackage = func(pkgName string) bool { + return matchedPackages[pkgName] + } + parent := path.Dir(dir) + if len(parent) >= len(dir) { + // No more parent directories. + return + } + dir = parent + } + } +} + +// AllModuleFiles returns an iterator that produces all the CUE files inside the +// module at the given root. +// +// The caller may assume that files from the same package are always adjacent. +func AllModuleFiles(fsys fs.FS, root string) func(func(ModuleFile, error) bool) { + return func(yield func(ModuleFile, error) bool) { + yieldAllModFiles(fsys, root, true, yield) + } +} + +// yieldAllModFiles implements AllModuleFiles by recursing into directories. +// +// Note that we avoid [fs.WalkDir]; it yields directory entries in lexical order, +// so we would walk `foo/bar.cue` before walking `foo/cue.mod/` and realizing +// that `foo/` is a nested module that we should be ignoring entirely. +// That could be avoided via extra `fs.Stat` calls, but those are extra fs calls. +// Using [fs.ReadDir] avoids this issue entirely, as we can loop twice. +func yieldAllModFiles(fsys fs.FS, fpath string, topDir bool, yield func(ModuleFile, error) bool) bool { + entries, err := fs.ReadDir(fsys, fpath) + if err != nil { + if !yield(ModuleFile{ + FilePath: fpath, + }, err) { + return false + } + } + // Skip nested submodules entirely. + if !topDir { + for _, entry := range entries { + if entry.Name() == "cue.mod" { + return true + } + } + } + // Generate all entries for the package before moving onto packages + // in subdirectories. 
+ for _, entry := range entries { + if entry.IsDir() { + continue + } + fpath := path.Join(fpath, entry.Name()) + if _, ok := yieldPackageFile(fsys, fpath, func(string) bool { return true }, yield); !ok { + return false + } + } + + for _, entry := range entries { + name := entry.Name() + if !entry.IsDir() { + continue + } + if name == "cue.mod" || strings.HasPrefix(name, ".") || strings.HasPrefix(name, "_") { + continue + } + fpath := path.Join(fpath, name) + if !yieldAllModFiles(fsys, fpath, false, yield) { + return false + } + } + return true +} + +// yieldPackageFile invokes yield with the contents of the package file +// at the given path if selectPackage returns true for the file's +// package name. +// +// It returns the yielded package name (if any) and reports whether +// the iteration should continue. +func yieldPackageFile(fsys fs.FS, fpath string, selectPackage func(pkgName string) bool, yield func(ModuleFile, error) bool) (pkgName string, cont bool) { + if !strings.HasSuffix(fpath, ".cue") { + return "", true + } + pf := ModuleFile{ + FilePath: fpath, + } + f, err := fsys.Open(fpath) + if err != nil { + return "", yield(pf, err) + } + defer f.Close() + + // Note that we use cueimports.Read before parser.ParseFile as cue/parser + // will always consume the whole input reader, which is often wasteful. + // + // TODO(mvdan): the need for cueimports.Read can go once cue/parser can work + // on a reader in a streaming manner. + data, err := cueimports.Read(f) + if err != nil { + return "", yield(pf, err) + } + // Add a leading "./" so that a parse error filename is consistent + // with the other error filenames created elsewhere in the codebase. + syntax, err := parser.ParseFile("./"+fpath, data, parser.ImportsOnly) + if err != nil { + return "", yield(pf, err) + } + + if !selectPackage(syntax.PackageName()) { + return "", true + } + pf.Syntax = syntax + return syntax.PackageName(), yield(pf, nil) +} diff --git a/vendor/cuelang.org/go/internal/mod/modload/query.go b/vendor/cuelang.org/go/internal/mod/modload/query.go new file mode 100644 index 0000000000..45a73c1023 --- /dev/null +++ b/vendor/cuelang.org/go/internal/mod/modload/query.go @@ -0,0 +1,135 @@ +package modload + +import ( + "context" + "fmt" + "path" + "runtime" + "sync" + + "cuelang.org/go/internal/mod/modpkgload" + "cuelang.org/go/internal/mod/modrequirements" + "cuelang.org/go/internal/mod/semver" + "cuelang.org/go/internal/par" + "cuelang.org/go/mod/module" +) + +// queryImport attempts to locate a module that can be added to the +// current build list to provide the package with the given import path. +// It also reports whether a default major version will be required +// to select the candidates (this will be true if pkgPath lacks +// a major version). +// +// It avoids results that are already in the given requirements. +func (ld *loader) queryImport(ctx context.Context, pkgPath string, rs *modrequirements.Requirements) (candidates []module.Version, needsDefault bool, err error) { + if modpkgload.IsStdlibPackage(pkgPath) { + // This package isn't in the standard library and isn't in any module already + // in the build list. + // + // Moreover, the import path is reserved for the standard library, so + // QueryPattern cannot possibly find a module containing this package. + // + // Instead of trying QueryPattern, report an ImportMissingError immediately. + return nil, false, &modpkgload.ImportMissingError{Path: pkgPath} + } + + // Look up module containing the package, for addition to the build list. 
+ // Goal is to determine the module, download it to dir, + // and return m, dir, ImportMissingError. + + // TODO this should probably be a non-debug log message. + logf("cue: finding module for package %s", pkgPath) + + candidates, needsDefault, err = ld.queryLatestModules(ctx, pkgPath, rs) + if err != nil { + return nil, false, err + } + if len(candidates) == 0 { + return nil, false, fmt.Errorf("%v", &modpkgload.ImportMissingError{Path: pkgPath}) + } + return candidates, needsDefault, nil +} + +// queryLatestModules looks for potential modules that might contain the given +// package by looking for the latest module version of all viable prefixes of pkgPath. +// It does not return modules that are already present in the given requirements. +// It also reports whether a default major version will be required. +func (ld *loader) queryLatestModules(ctx context.Context, pkgPath string, rs *modrequirements.Requirements) ([]module.Version, bool, error) { + parts := module.ParseImportPath(pkgPath) + latestModuleForPrefix := func(prefix string) (module.Version, error) { + mv := parts.Version + if mv == "" { + var status modrequirements.MajorVersionDefaultStatus + mv, status = rs.DefaultMajorVersion(prefix) + if status == modrequirements.AmbiguousDefault { + // There are already multiple possibilities and + // we don't have any way of choosing one. + return module.Version{}, nil + } + } + mpath := prefix + if mv != "" { + mpath = prefix + "@" + mv + if _, ok := rs.RootSelected(mpath); ok { + // Already present in current requirements. + return module.Version{}, nil + } + } + + versions, err := ld.registry.ModuleVersions(ctx, mpath) + logf("getting module versions for %q (prefix %q) -> %q, %v", mpath, prefix, versions, err) + if err != nil { + return module.Version{}, err + } + logf("-> %q", versions) + if v := latestVersion(versions); v != "" { + return module.NewVersion(prefix, v) + } + return module.Version{}, nil + } + work := par.NewQueue(runtime.GOMAXPROCS(0)) + var ( + mu sync.Mutex + candidates []module.Version + queryErr error + ) + logf("initial module path %q", parts.Path) + for prefix := parts.Path; prefix != "."; prefix = path.Dir(prefix) { + prefix := prefix + work.Add(func() { + v, err := latestModuleForPrefix(prefix) + mu.Lock() + defer mu.Unlock() + if err != nil { + if queryErr == nil { + queryErr = err + } + return + } + if v.IsValid() { + candidates = append(candidates, v) + } + }) + } + <-work.Idle() + return candidates, parts.Version == "", queryErr +} + +// latestVersion returns the latest of any of the given versions, +// ignoring prerelease versions if there is any stable version. 
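queryLatestModules above enumerates candidate module paths by walking every parent prefix of the package path and querying each concurrently via the par queue. The prefix enumeration itself is a plain path.Dir loop; a standalone sketch with a made-up package path:

package main

import (
	"fmt"
	"path"
)

func main() {
	// The same walk as the loop over prefixes above: for a package path
	// like foo.com/bar/baz, candidate module paths are tried longest first.
	for prefix := "foo.com/bar/baz"; prefix != "."; prefix = path.Dir(prefix) {
		fmt.Println(prefix)
	}
	// Output:
	// foo.com/bar/baz
	// foo.com/bar
	// foo.com
}
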
+func latestVersion(versions []string) string { + maxStable := "" + maxAny := "" + for _, v := range versions { + if semver.Prerelease(v) == "" && (maxStable == "" || semver.Compare(v, maxStable) > 0) { + maxStable = v + } + if maxAny == "" || semver.Compare(v, maxAny) > 0 { + maxAny = v + } + } + if maxStable != "" { + return maxStable + } + return maxAny +} diff --git a/vendor/cuelang.org/go/internal/mod/modload/tidy.go b/vendor/cuelang.org/go/internal/mod/modload/tidy.go new file mode 100644 index 0000000000..d5af2dccee --- /dev/null +++ b/vendor/cuelang.org/go/internal/mod/modload/tidy.go @@ -0,0 +1,693 @@ +package modload + +import ( + "context" + "errors" + "fmt" + "io/fs" + "log" + "maps" + "path" + "runtime" + "slices" + + "cuelang.org/go/internal/mod/modimports" + "cuelang.org/go/internal/mod/modpkgload" + "cuelang.org/go/internal/mod/modrequirements" + "cuelang.org/go/internal/mod/semver" + "cuelang.org/go/internal/par" + "cuelang.org/go/mod/modfile" + "cuelang.org/go/mod/module" +) + +const logging = false // TODO hook this up to CUE_DEBUG + +// Registry is modload's view of a module registry. +type Registry interface { + modrequirements.Registry + modpkgload.Registry + // ModuleVersions returns all the versions for the module with the given path + // sorted in semver order. + // If mpath has a major version suffix, only versions with that major version will + // be returned. + ModuleVersions(ctx context.Context, mpath string) ([]string, error) +} + +type loader struct { + mainModule module.Version + mainModuleLoc module.SourceLoc + registry Registry + checkTidy bool +} + +// CheckTidy checks that the module file in the given main module is considered tidy. +// A module file is considered tidy when: +// - it can be parsed OK by [modfile.ParseStrict]. +// - it contains a language version in canonical semver form +// - it includes valid modules for all of its dependencies +// - it does not include any unnecessary dependencies. +func CheckTidy(ctx context.Context, fsys fs.FS, modRoot string, reg Registry) error { + _, err := tidy(ctx, fsys, modRoot, reg, true) + return err +} + +// Tidy evaluates all the requirements of the given main module, using the given +// registry to download requirements and returns a resolved and tidied module file. +func Tidy(ctx context.Context, fsys fs.FS, modRoot string, reg Registry) (*modfile.File, error) { + return tidy(ctx, fsys, modRoot, reg, false) +} + +func tidy(ctx context.Context, fsys fs.FS, modRoot string, reg Registry, checkTidy bool) (*modfile.File, error) { + mainModuleVersion, mf, err := readModuleFile(fsys, modRoot) + if err != nil { + return nil, err + } + // TODO check that module path is well formed etc + origRs := modrequirements.NewRequirements(mf.QualifiedModule(), reg, mf.DepVersions(), mf.DefaultMajorVersions()) + rootPkgPaths, err := modimports.AllImports(modimports.AllModuleFiles(fsys, modRoot)) + if err != nil { + return nil, err + } + ld := &loader{ + mainModule: mainModuleVersion, + registry: reg, + mainModuleLoc: module.SourceLoc{ + FS: fsys, + Dir: modRoot, + }, + checkTidy: checkTidy, + } + + rs, pkgs, err := ld.resolveDependencies(ctx, rootPkgPaths, origRs) + if err != nil { + return nil, err + } + for _, pkg := range pkgs.All() { + if pkg.Error() != nil { + return nil, fmt.Errorf("failed to resolve %q: %v", pkg.ImportPath(), pkg.Error()) + } + } + // TODO check whether it's changed or not. 
+ rs, err = ld.tidyRoots(ctx, rs, pkgs) + if err != nil { + return nil, fmt.Errorf("cannot tidy requirements: %v", err) + } + if ld.checkTidy && !equalRequirements(origRs, rs) { + // TODO: provide a reason, perhaps in structured form rather than a string + return nil, &ErrModuleNotTidy{} + } + return modfileFromRequirements(mf, rs), nil +} + +// ErrModuleNotTidy is returned by CheckTidy when a module is not tidy, +// such as when there are missing or unnecessary dependencies listed. +type ErrModuleNotTidy struct { + // Reason summarizes why the module is not tidy. + Reason string +} + +func (e ErrModuleNotTidy) Error() string { + if e.Reason == "" { + return "module is not tidy" + } + return "module is not tidy: " + e.Reason +} + +func equalRequirements(rs0, rs1 *modrequirements.Requirements) bool { + // Note that rs1.RootModules may include the unversioned local module + // if the current module imports any packages under cue.mod/*/. + // In such a case we want to skip over the local module when comparing, + // just like modfileFromRequirements does when filling [modfile.File.Deps]. + // Note that we clone the slice to not modify rs1's internal slice in-place. + rs1RootMods := slices.DeleteFunc(slices.Clone(rs1.RootModules()), module.Version.IsLocal) + return slices.Equal(rs0.RootModules(), rs1RootMods) && + maps.Equal(rs0.DefaultMajorVersions(), rs1.DefaultMajorVersions()) +} + +func readModuleFile(fsys fs.FS, modRoot string) (module.Version, *modfile.File, error) { + modFilePath := path.Join(modRoot, "cue.mod/module.cue") + data, err := fs.ReadFile(fsys, modFilePath) + if err != nil { + return module.Version{}, nil, fmt.Errorf("cannot read cue.mod file: %v", err) + } + mf, err := modfile.ParseNonStrict(data, modFilePath) + if err != nil { + return module.Version{}, nil, err + } + mainModuleVersion, err := module.NewVersion(mf.QualifiedModule(), "") + if err != nil { + return module.Version{}, nil, fmt.Errorf("invalid module path %q: %v", mf.QualifiedModule(), err) + } + return mainModuleVersion, mf, nil +} + +func modfileFromRequirements(old *modfile.File, rs *modrequirements.Requirements) *modfile.File { + // TODO it would be nice to have some way of automatically including new + // fields by default when they're added to modfile.File, but we don't + // want to just copy the entirety of old because that includes + // private fields too. + mf := &modfile.File{ + Module: old.Module, + Language: old.Language, + Deps: make(map[string]*modfile.Dep), + Source: old.Source, + } + defaults := rs.DefaultMajorVersions() + for _, v := range rs.RootModules() { + if v.IsLocal() { + continue + } + mf.Deps[v.Path()] = &modfile.Dep{ + Version: v.Version(), + Default: defaults[v.BasePath()] == semver.Major(v.Version()), + } + } + return mf +} + +func (ld *loader) resolveDependencies(ctx context.Context, rootPkgPaths []string, rs *modrequirements.Requirements) (*modrequirements.Requirements, *modpkgload.Packages, error) { + for { + logf("---- LOADING from requirements %q", rs.RootModules()) + pkgs := modpkgload.LoadPackages(ctx, ld.mainModule.Path(), ld.mainModuleLoc, rs, ld.registry, rootPkgPaths) + if ld.checkTidy { + for _, pkg := range pkgs.All() { + err := pkg.Error() + if err == nil { + continue + } + missingErr := new(modpkgload.ImportMissingError) + // "cannot find module providing package P" is confusing here, + // as checkTidy simply points out missing dependencies without fetching them. 
+ if errors.As(err, &missingErr) {
+ err = &ErrModuleNotTidy{Reason: fmt.Sprintf(
+ "missing dependency providing package %s", missingErr.Path)}
+ }
+ return nil, nil, err
+ }
+ // All packages could be loaded OK so there are no new
+ // dependencies to be resolved and nothing to do.
+ // Specifically, if there are no packages in error, then
+ // resolveMissingImports will never return any entries
+ // in modAddedBy and the default major versions won't
+ // change.
+ return rs, pkgs, nil
+ }
+
+ // TODO the original code calls updateRequirements at this point.
+ // /home/rogpeppe/go/src/cmd/go/internal/modload/load.go:1124
+
+ modAddedBy, defaultMajorVersions := ld.resolveMissingImports(ctx, pkgs, rs)
+ if !maps.Equal(defaultMajorVersions, rs.DefaultMajorVersions()) {
+ rs = rs.WithDefaultMajorVersions(defaultMajorVersions)
+ }
+ if len(modAddedBy) == 0 {
+ // The roots are stable, and we've resolved all of the missing packages
+ // that we can.
+ logf("dependencies are stable at %q", rs.RootModules())
+ return rs, pkgs, nil
+ }
+ toAdd := make([]module.Version, 0, len(modAddedBy))
+ // TODO use maps.Keys when we can.
+ for m, p := range modAddedBy {
+ logf("added: %v (by %v)", modAddedBy, p.ImportPath())
+ toAdd = append(toAdd, m)
+ }
+ module.Sort(toAdd) // to make errors deterministic
+ oldRs := rs
+ var err error
+ rs, err = ld.updateRoots(ctx, rs, pkgs, toAdd)
+ if err != nil {
+ return nil, nil, err
+ }
+ if slices.Equal(rs.RootModules(), oldRs.RootModules()) {
+ // Something is deeply wrong. resolveMissingImports gave us a non-empty
+ // set of modules to add to the graph, but adding those modules had no
+ // effect — either they were already in the graph, or updateRoots did not
+ // add them as requested.
+ panic(fmt.Sprintf("internal error: adding %v to module graph had no effect on root requirements (%v)", toAdd, rs.RootModules()))
+ }
+ logf("after loading, requirements: %v", rs.RootModules())
+ }
+}
+
+// updateRoots returns a set of root requirements that maintains the
+// invariants of the cue.mod/module.cue file needed to support graph pruning:
+//
+// 1. The selected version of the module providing each package marked with
+// either pkgInAll or pkgIsRoot is included as a root.
+// Note that certain root patterns (such as '...') may explode the root set
+// to contain every module that provides any package imported (or merely
+// required) by any other module.
+// 2. Each root appears only once, at the selected version of its path
+// (if rs.graph is non-nil) or at the highest version otherwise present as a
+// root (otherwise).
+// 3. Every module path that appears as a root in rs remains a root.
+// 4. Every version in add is selected at its given version unless upgraded by
+// (the dependencies of) an existing root or another module in add.
+//
+// The packages in pkgs are assumed to have been loaded from either the roots of
+// rs or the modules selected in the graph of rs.
+//
+// The above invariants together imply the graph-pruning invariants for the
+// cue.mod/module.cue file:
+//
+// 1. (The import invariant.) Every module that provides a package transitively
+// imported by any package or test in the main module is included as a root.
+// This follows by induction from (1) and (3) above. Transitively-imported
+// packages loaded during this invocation are marked with pkgInAll (1),
+// and by hypothesis any transitively-imported packages loaded in previous
+// invocations were already roots in rs (3).
+//
+// 2. (The argument invariant.) Every module that provides a package matching
+// an explicit package pattern is included as a root. This follows directly
+// from (1): packages matching explicit package patterns are marked with
+// pkgIsRoot.
+//
+// 3. (The completeness invariant.) Every module that contributed any package
+// to the build is required by either the main module or one of the modules
+// it requires explicitly. This invariant is left up to the caller, who must
+// not load packages from outside the module graph but may add roots to the
+// graph; it is facilitated by (3). If the caller adds roots to the graph in
+// order to resolve missing packages, then updateRoots will retain them,
+// the selected versions of those roots cannot regress, and they will
+// eventually be written back to the main module's cue.mod/module.cue file.
+//
+// (See https://golang.org/design/36460-lazy-module-loading#invariants for more
+// detail.)
+func (ld *loader) updateRoots(ctx context.Context, rs *modrequirements.Requirements, pkgs *modpkgload.Packages, add []module.Version) (*modrequirements.Requirements, error) {
+ roots := rs.RootModules()
+ rootsUpgraded := false
+
+ spotCheckRoot := map[module.Version]bool{}
+
+ // “The selected version of the module providing each package marked with
+ // either pkgInAll or pkgIsRoot is included as a root.”
+ needSort := false
+ for _, pkg := range pkgs.All() {
+ if !pkg.Mod().IsValid() || !pkg.FromExternalModule() {
+ // pkg was not loaded from a module dependency, so we don't need
+ // to do anything special to maintain that dependency.
+ continue
+ }
+
+ switch {
+ case pkg.HasFlags(modpkgload.PkgInAll):
+ // pkg is transitively imported by a package or test in the main module.
+ // We need to promote the module that maintains it to a root: if some
+ // other module depends on the main module, and that other module also
+ // uses a pruned module graph, it will expect to find all of our
+ // transitive dependencies by reading just our go.mod file, not the go.mod
+ // files of everything we depend on.
+ //
+ // (This is the “import invariant” that makes graph pruning possible.)
+
+ case pkg.HasFlags(modpkgload.PkgIsRoot):
+ // pkg is a root of the package-import graph. (Generally this means that
+ // it matches a command-line argument.) We want future invocations of the
+ // 'go' command — such as 'go test' on the same package — to continue to
+ // use the same versions of its dependencies that we are using right now.
+ // So we need to bring this package's dependencies inside the pruned
+ // module graph.
+ //
+ // Making the module containing this package a root of the module graph
+ // does exactly that: if the module containing the package supports graph
+ // pruning then it should satisfy the import invariant itself, so all of
+ // its dependencies should be in its go.mod file, and if the module
+ // containing the package does not support pruning then if we make it a
+ // root we will load all of its (unpruned) transitive dependencies into
+ // the module graph.
+ //
+ // (This is the “argument invariant”, and is important for
+ // reproducibility.)
+
+ default:
+ // pkg is a dependency of some other package outside of the main module.
+ // As far as we know it's not relevant to the main module (and thus not
+ // relevant to consumers of the main module either), and its dependencies
+ // should already be in the module graph — included in the dependencies of
+ // the package that imported it.
+ continue + } + if _, ok := rs.RootSelected(pkg.Mod().Path()); ok { + // It is possible that the main module's go.mod file is incomplete or + // otherwise erroneous — for example, perhaps the author forgot to 'git + // add' their updated go.mod file after adding a new package import, or + // perhaps they made an edit to the go.mod file using a third-party tool + // ('git merge'?) that doesn't maintain consistency for module + // dependencies. If that happens, ideally we want to detect the missing + // requirements and fix them up here. + // + // However, we also need to be careful not to be too aggressive. For + // transitive dependencies of external tests, the go.mod file for the + // module containing the test itself is expected to provide all of the + // relevant dependencies, and we explicitly don't want to pull in + // requirements on *irrelevant* requirements that happen to occur in the + // go.mod files for these transitive-test-only dependencies. (See the test + // in mod_lazy_test_horizon.txt for a concrete example). + // + // The “goldilocks zone” seems to be to spot-check exactly the same + // modules that we promote to explicit roots: namely, those that provide + // packages transitively imported by the main module, and those that + // provide roots of the package-import graph. That will catch erroneous + // edits to the main module's go.mod file and inconsistent requirements in + // dependencies that provide imported packages, but will ignore erroneous + // or misleading requirements in dependencies that aren't obviously + // relevant to the packages in the main module. + spotCheckRoot[pkg.Mod()] = true + } else { + roots = append(roots, pkg.Mod()) + rootsUpgraded = true + // The roots slice was initially sorted because rs.rootModules was sorted, + // but the root we just added could be out of order. + needSort = true + } + } + + for _, m := range add { + if !m.IsValid() { + panic("add contains invalid module") + } + if v, ok := rs.RootSelected(m.Path()); !ok || semver.Compare(v, m.Version()) < 0 { + roots = append(roots, m) + rootsUpgraded = true + needSort = true + } + } + if needSort { + module.Sort(roots) + } + + // "Each root appears only once, at the selected version of its path ….” + for { + var mg *modrequirements.ModuleGraph + if rootsUpgraded { + // We've added or upgraded one or more roots, so load the full module + // graph so that we can update those roots to be consistent with other + // requirements. + + rs = modrequirements.NewRequirements(ld.mainModule.Path(), ld.registry, roots, rs.DefaultMajorVersions()) + var err error + mg, err = rs.Graph(ctx) + if err != nil { + return rs, err + } + } else { + // Since none of the roots have been upgraded, we have no reason to + // suspect that they are inconsistent with the requirements of any other + // roots. Only look at the full module graph if we've already loaded it; + // otherwise, just spot-check the explicit requirements of the roots from + // which we loaded packages. + if rs.GraphIsLoaded() { + // We've already loaded the full module graph, which includes the + // requirements of all of the root modules — even the transitive + // requirements, if they are unpruned! + mg, _ = rs.Graph(ctx) + } else if !ld.spotCheckRoots(ctx, rs, spotCheckRoot) { + // We spot-checked the explicit requirements of the roots that are + // relevant to the packages we've loaded. Unfortunately, they're + // inconsistent in some way; we need to load the full module graph + // so that we can fix the roots properly. 
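+ // Falling through here is the slow path: we only load the full
+ // module graph because the spot-check found an inconsistency.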
+ var err error
+ mg, err = rs.Graph(ctx)
+ if err != nil {
+ return rs, err
+ }
+ }
+ }
+
+ roots = make([]module.Version, 0, len(rs.RootModules()))
+ rootsUpgraded = false
+ inRootPaths := map[string]bool{
+ ld.mainModule.Path(): true,
+ }
+ for _, m := range rs.RootModules() {
+ if inRootPaths[m.Path()] {
+ // This root specifies a redundant path. We already retained the
+ // selected version of this path when we saw it before, so omit the
+ // redundant copy regardless of its version.
+ //
+ // When we read the full module graph, we include the dependencies of
+ // every root even if that root is redundant. That better preserves
+ // reproducibility if, say, some automated tool adds a redundant
+ // 'require' line and then runs 'go mod tidy' to try to make everything
+ // consistent, since the requirements of the older version are carried
+ // over.
+ //
+ // So omitting a root that was previously present may *reduce* the
+ // selected versions of non-roots, but merely removing a requirement
+ // cannot *increase* the selected versions of other roots as a result —
+ // we don't need to mark this change as an upgrade. (This particular
+ // change cannot invalidate any other roots.)
+ continue
+ }
+
+ var v string
+ if mg == nil {
+ v, _ = rs.RootSelected(m.Path())
+ } else {
+ v = mg.Selected(m.Path())
+ }
+ mv, err := module.NewVersion(m.Path(), v)
+ if err != nil {
+ return nil, fmt.Errorf("internal error: cannot form module version from %q@%q", m.Path(), v)
+ }
+ roots = append(roots, mv)
+ inRootPaths[m.Path()] = true
+ if v != m.Version() {
+ rootsUpgraded = true
+ }
+ }
+ // Note that rs.rootModules was already sorted by module path and version,
+ // and we appended to the roots slice in the same order and guaranteed that
+ // each path has only one version, so roots is also sorted by module path
+ // and (trivially) version.
+
+ if !rootsUpgraded {
+ // The root set has converged: every root going into this iteration was
+ // already at its selected version, although we have removed other
+ // (redundant) roots for the same path.
+ break
+ }
+ }
+
+ if slices.Equal(roots, rs.RootModules()) {
+ // The root set is unchanged and rs was already pruned, so keep rs to
+ // preserve its cached ModuleGraph (if any).
+ return rs, nil
+ }
+ return modrequirements.NewRequirements(ld.mainModule.Path(), ld.registry, roots, rs.DefaultMajorVersions()), nil
+}
+
+// resolveMissingImports returns a set of modules that could be added as
+// dependencies in order to resolve missing packages from pkgs.
+//
+// It returns a map from each new module version to
+// the first missing package that module would resolve.
+func (ld *loader) resolveMissingImports(ctx context.Context, pkgs *modpkgload.Packages, rs *modrequirements.Requirements) (modAddedBy map[module.Version]*modpkgload.Package, defaultMajorVersions map[string]string) {
+ type pkgMod struct {
+ pkg *modpkgload.Package
+ needsDefault *bool
+ mods *[]module.Version
+ }
+ var pkgMods []pkgMod
+ work := par.NewQueue(runtime.GOMAXPROCS(0))
+ for _, pkg := range pkgs.All() {
+ pkg := pkg
+ if pkg.Error() == nil {
+ continue
+ }
+ if !errors.As(pkg.Error(), new(*modpkgload.ImportMissingError)) {
+ // Leave other errors to be reported outside of the module resolution logic.
+ continue
+ }
+ logf("querying %q", pkg.ImportPath())
+ var mods []module.Version // updated asynchronously.
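+ // mods and needsDefault (below) are written only by the queued
+ // function and read only after <-work.Idle(), so they need no
+ // further synchronization.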
+ var needsDefault bool + work.Add(func() { + var err error + mods, needsDefault, err = ld.queryImport(ctx, pkg.ImportPath(), rs) + if err != nil { + // pkg.err was already non-nil, so we can reasonably attribute the error + // for pkg to either the original error or the one returned by + // queryImport. The existing error indicates only that we couldn't find + // the package, whereas the query error also explains why we didn't fix + // the problem — so we prefer the latter. + pkg.SetError(err) + } + + // err is nil, but we intentionally leave pkg.err non-nil: we still haven't satisfied other invariants of a + // successfully-loaded package, such as scanning and loading the imports + // of that package. If we succeed in resolving the new dependency graph, + // the caller can reload pkg and update the error at that point. + // + // Even then, the package might not be loaded from the version we've + // identified here. The module may be upgraded by some other dependency, + // or by a transitive dependency of mod itself, or — less likely — the + // package may be rejected by an AllowPackage hook or rendered ambiguous + // by some other newly-added or newly-upgraded dependency. + }) + + pkgMods = append(pkgMods, pkgMod{pkg: pkg, mods: &mods, needsDefault: &needsDefault}) + } + <-work.Idle() + + modAddedBy = map[module.Version]*modpkgload.Package{} + defaultMajorVersions = make(map[string]string) + for m, v := range rs.DefaultMajorVersions() { + defaultMajorVersions[m] = v + } + for _, pm := range pkgMods { + pkg, mods, needsDefault := pm.pkg, *pm.mods, *pm.needsDefault + for _, mod := range mods { + // TODO support logging progress messages like this but without printing to stderr? + logf("cue: found potential %s in %v", pkg.ImportPath(), mod) + if modAddedBy[mod] == nil { + modAddedBy[mod] = pkg + } + if needsDefault { + defaultMajorVersions[mod.BasePath()] = semver.Major(mod.Version()) + } + } + } + + return modAddedBy, defaultMajorVersions +} + +// tidyRoots returns a minimal set of root requirements that maintains the +// invariants of the cue.mod/module.cue file needed to support graph pruning for the given +// packages: +// +// 1. For each package marked with PkgInAll, the module path that provided that +// package is included as a root. +// 2. For all packages, the module that provided that package either remains +// selected at the same version or is upgraded by the dependencies of a +// root. +// +// If any module that provided a package has been upgraded above its previous +// version, the caller may need to reload and recompute the package graph. +// +// To ensure that the loading process eventually converges, the caller should +// add any needed roots from the tidy root set (without removing existing untidy +// roots) until the set of roots has converged. +func (ld *loader) tidyRoots(ctx context.Context, old *modrequirements.Requirements, pkgs *modpkgload.Packages) (*modrequirements.Requirements, error) { + var ( + roots []module.Version + pathIsRoot = map[string]bool{ld.mainModule.Path(): true} + ) + // We start by adding roots for every package in "all". + // + // Once that is done, we may still need to add more roots to cover upgraded or + // otherwise-missing test dependencies for packages in "all". For those test + // dependencies, we prefer to add roots for packages with shorter import + // stacks first, on the theory that the module requirements for those will + // tend to fill in the requirements for their transitive imports (which have + // deeper import stacks). 
So we add the missing dependencies for one depth at + // a time, starting with the packages actually in "all" and expanding outwards + // until we have scanned every package that was loaded. + var ( + queue []*modpkgload.Package + queued = map[*modpkgload.Package]bool{} + ) + for _, pkg := range pkgs.All() { + if !pkg.HasFlags(modpkgload.PkgInAll) { + continue + } + if pkg.FromExternalModule() && !pathIsRoot[pkg.Mod().Path()] { + roots = append(roots, pkg.Mod()) + pathIsRoot[pkg.Mod().Path()] = true + } + queue = append(queue, pkg) + queued[pkg] = true + } + module.Sort(roots) + tidy := modrequirements.NewRequirements(ld.mainModule.Path(), ld.registry, roots, old.DefaultMajorVersions()) + + for len(queue) > 0 { + roots = tidy.RootModules() + mg, err := tidy.Graph(ctx) + if err != nil { + return nil, err + } + + prevQueue := queue + queue = nil + for _, pkg := range prevQueue { + m := pkg.Mod() + if m.Path() == "" { + continue + } + for _, dep := range pkg.Imports() { + if !queued[dep] { + queue = append(queue, dep) + queued[dep] = true + } + } + if !pathIsRoot[m.Path()] { + if s := mg.Selected(m.Path()); semver.Compare(s, m.Version()) < 0 { + roots = append(roots, m) + pathIsRoot[m.Path()] = true + } + } + } + + if tidyRoots := tidy.RootModules(); len(roots) > len(tidyRoots) { + module.Sort(roots) + tidy = modrequirements.NewRequirements(ld.mainModule.Path(), ld.registry, roots, tidy.DefaultMajorVersions()) + } + } + + if _, err := tidy.Graph(ctx); err != nil { + return nil, err + } + + // TODO the original code had some logic I don't properly understand, + // related to https://go.dev/issue/60313, that _may_ be relevant only + // to test-only dependencies, which we don't have, so leave it out for now. + + return tidy, nil +} + +// spotCheckRoots reports whether the versions of the roots in rs satisfy the +// explicit requirements of the modules in mods. +func (ld *loader) spotCheckRoots(ctx context.Context, rs *modrequirements.Requirements, mods map[module.Version]bool) bool { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + work := par.NewQueue(runtime.GOMAXPROCS(0)) + for m := range mods { + m := m + work.Add(func() { + if ctx.Err() != nil { + return + } + + require, err := ld.registry.Requirements(ctx, m) + if err != nil { + cancel() + return + } + + for _, r := range require { + if v, ok := rs.RootSelected(r.Path()); ok && semver.Compare(v, r.Version()) < 0 { + cancel() + return + } + } + }) + } + <-work.Idle() + + if ctx.Err() != nil { + // Either we failed a spot-check, or the caller no longer cares about our + // answer anyway. + return false + } + + return true +} + +func logf(f string, a ...any) { + if logging { + log.Printf(f, a...) + } +} diff --git a/vendor/cuelang.org/go/internal/mod/modload/update.go b/vendor/cuelang.org/go/internal/mod/modload/update.go new file mode 100644 index 0000000000..9ab8c1944c --- /dev/null +++ b/vendor/cuelang.org/go/internal/mod/modload/update.go @@ -0,0 +1,175 @@ +package modload + +import ( + "context" + "fmt" + "io/fs" + "runtime" + "strings" + "sync/atomic" + + "cuelang.org/go/internal/mod/modrequirements" + "cuelang.org/go/internal/mod/semver" + "cuelang.org/go/internal/par" + "cuelang.org/go/mod/modfile" + "cuelang.org/go/mod/module" +) + +// UpdateVersions returns the main module's module file with the specified module versions +// updated if possible and added if not already present. It returns an error if asked +// to downgrade a module below a version already required by an external dependency. 
+//
+// A module in the versions slice can be specified as one of the following:
+// - $module@$fullVersion: a specific exact version
+// - $module@$partialVersion: a non-canonical version; specifies the latest
+// version that has the same major/minor numbers.
+// - $module@latest: the latest non-prerelease version, or latest prerelease version if
+// there is no non-prerelease version
+// - $module: equivalent to $module@latest if $module doesn't have a default major
+// version or $module@$majorVersion if it does, where $majorVersion is the
+// default major version for $module.
+func UpdateVersions(ctx context.Context, fsys fs.FS, modRoot string, reg Registry, versions []string) (*modfile.File, error) {
+ mainModuleVersion, mf, err := readModuleFile(fsys, modRoot)
+ if err != nil {
+ return nil, err
+ }
+ rs := modrequirements.NewRequirements(mf.QualifiedModule(), reg, mf.DepVersions(), mf.DefaultMajorVersions())
+ mversions, err := resolveUpdateVersions(ctx, reg, rs, mainModuleVersion, versions)
+ if err != nil {
+ return nil, err
+ }
+ // Now that we know what versions we want to update to, make a new set of
+ // requirements with these versions in place.
+
+ mversionsMap := make(map[string]module.Version)
+ for _, v := range mversions {
+ // Check for an existing entry in the map: if the same module has been
+ // specified twice, the two versions must agree.
+ if v1, ok := mversionsMap[v.Path()]; ok && v1.Version() != v.Version() {
+ // The same module has been specified twice with different requirements.
+ // Treat it as an error (an alternative approach might be to choose the greater
+ // version, but making it an error seems more appropriate to the "choose exact
+ // version" semantics of UpdateVersions).
+ return nil, fmt.Errorf("conflicting version update requirements %v vs %v", v1, v)
+ }
+ mversionsMap[v.Path()] = v
+ }
+ g, err := rs.Graph(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("cannot determine module graph: %v", err)
+ }
+ var newVersions []module.Version
+ for _, v := range g.BuildList() {
+ if v.Path() == mainModuleVersion.Path() {
+ continue
+ }
+ if newv, ok := mversionsMap[v.Path()]; ok {
+ newVersions = append(newVersions, newv)
+ delete(mversionsMap, v.Path())
+ } else {
+ newVersions = append(newVersions, v)
+ }
+ }
+ for _, v := range mversionsMap {
+ newVersions = append(newVersions, v)
+ }
+ rs = modrequirements.NewRequirements(mf.QualifiedModule(), reg, newVersions, mf.DefaultMajorVersions())
+ g, err = rs.Graph(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("cannot determine new module graph: %v", err)
+ }
+ // Now check that the resulting versions are the ones we wanted.
+ for _, v := range mversions {
+ actualVers := g.Selected(v.Path())
+ if actualVers != v.Version() {
+ return nil, fmt.Errorf("other requirements prevent changing module %v to version %v (actual selected version: %v)", v.Path(), v.Version(), actualVers)
+ }
+ }
+ // Make a new requirements with the selected versions of the above as roots.
+ var finalVersions []module.Version
+ for _, v := range g.BuildList() {
+ if v.Path() != mainModuleVersion.Path() {
+ finalVersions = append(finalVersions, v)
+ }
+ }
+ rs = modrequirements.NewRequirements(mf.QualifiedModule(), reg, finalVersions, mf.DefaultMajorVersions())
+ return modfileFromRequirements(mf, rs), nil
+}
+
+// resolveUpdateVersions resolves a set of version strings as accepted by [UpdateVersions]
+// into the actual module versions they represent.
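+//
+// For example (hypothetical module and versions): if the registry lists
+// v1.2.3, v1.2.6-alpha and v1.3.0 for foo.com/bar, then "foo.com/bar@v1.2"
+// resolves to v1.2.3 (the latest stable v1.2.x), while both "foo.com/bar@v1"
+// and "foo.com/bar@latest" resolve to v1.3.0.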
+func resolveUpdateVersions(ctx context.Context, reg Registry, rs *modrequirements.Requirements, mainModuleVersion module.Version, versions []string) ([]module.Version, error) { + work := par.NewQueue(runtime.GOMAXPROCS(0)) + mversions := make([]module.Version, len(versions)) + var queryErr atomic.Pointer[error] + setError := func(err error) { + queryErr.CompareAndSwap(nil, &err) + } + for i, v := range versions { + i, v := i, v + if mv, err := module.ParseVersion(v); err == nil { + // It's already canonical: nothing more to do. + mversions[i] = mv + continue + } + mpath, vers, ok := strings.Cut(v, "@") + if !ok { + if major, status := rs.DefaultMajorVersion(mpath); status == modrequirements.ExplicitDefault { + // TODO allow a non-explicit default too? + vers = major + } else { + vers = "latest" + } + } + if err := module.CheckPathWithoutVersion(mpath); err != nil { + return nil, fmt.Errorf("invalid module path in %q", v) + } + versionPrefix := "" + if vers != "latest" { + if !semver.IsValid(vers) { + return nil, fmt.Errorf("%q does not specify a valid semantic version", v) + } + if semver.Build(vers) != "" { + return nil, fmt.Errorf("build version suffixes not supported (%v)", v) + } + // It's a valid version but has no build suffix and it's not canonical, + // which means it must be either a major-only or major-minor, so + // the conforming canonical versions must have it as a prefix, with + // a dot separating the last component and the next. + versionPrefix = vers + "." + } + work.Add(func() { + allVersions, err := reg.ModuleVersions(ctx, mpath) + if err != nil { + setError(err) + return + } + possibleVersions := make([]string, 0, len(allVersions)) + for _, v := range allVersions { + if strings.HasPrefix(v, versionPrefix) { + possibleVersions = append(possibleVersions, v) + } + } + chosen := latestVersion(possibleVersions) + mv, err := module.NewVersion(mpath, chosen) + if err != nil { + // Should never happen, because we've checked that + // mpath is valid and ModuleVersions + // should always return valid semver versions. + setError(err) + return + } + mversions[i] = mv + }) + } + <-work.Idle() + if errPtr := queryErr.Load(); errPtr != nil { + return nil, *errPtr + } + for _, v := range mversions { + if v.Path() == mainModuleVersion.Path() { + return nil, fmt.Errorf("cannot update version of main module") + } + } + return mversions, nil +} diff --git a/vendor/cuelang.org/go/internal/mod/modpkgload/import.go b/vendor/cuelang.org/go/internal/mod/modpkgload/import.go new file mode 100644 index 0000000000..252a8d13ae --- /dev/null +++ b/vendor/cuelang.org/go/internal/mod/modpkgload/import.go @@ -0,0 +1,316 @@ +package modpkgload + +import ( + "context" + "errors" + "fmt" + "io/fs" + "path" + "path/filepath" + "slices" + "strings" + + "cuelang.org/go/internal/mod/modrequirements" + "cuelang.org/go/mod/module" +) + +// importFromModules finds the module and source location in the dependency graph of +// pkgs containing the package with the given import path. +// +// The answer must be unique: importFromModules returns an error if multiple +// modules are observed to provide the same package. +// +// importFromModules can return a zero module version for packages in +// the standard library. +// +// If the package is not present in any module selected from the requirement +// graph, importFromModules returns an *ImportMissingError. 
+// +// If the package is present in exactly one module, importFromModules will +// return the module, its root directory, and a list of other modules that +// lexically could have provided the package but did not. +func (pkgs *Packages) importFromModules(ctx context.Context, pkgPath string) (m module.Version, pkgLocs []module.SourceLoc, altMods []module.Version, err error) { + fail := func(err error) (module.Version, []module.SourceLoc, []module.Version, error) { + return module.Version{}, []module.SourceLoc(nil), nil, err + } + failf := func(format string, args ...interface{}) (module.Version, []module.SourceLoc, []module.Version, error) { + return fail(fmt.Errorf(format, args...)) + } + // Note: we don't care about the package qualifier at this point + // because any directory with CUE files in counts as a possible + // candidate, regardless of what packages are in it. + pathParts := module.ParseImportPath(pkgPath) + pkgPathOnly := pathParts.Path + + if filepath.IsAbs(pkgPathOnly) || path.IsAbs(pkgPathOnly) { + return failf("%q is not a package path", pkgPath) + } + // TODO check that the path isn't relative. + // TODO check it's not a meta package name, such as "all". + + // Before any further lookup, check that the path is valid. + if err := module.CheckImportPath(pkgPath); err != nil { + return fail(err) + } + + // Check each module on the build list. + var locs [][]module.SourceLoc + var mods []module.Version + var mg *modrequirements.ModuleGraph + localPkgLocs, err := pkgs.findLocalPackage(pkgPathOnly) + if err != nil { + return fail(err) + } + if len(localPkgLocs) > 0 { + mods = append(mods, module.MustNewVersion("local", "")) + locs = append(locs, localPkgLocs) + } + + // Iterate over possible modules for the path, not all selected modules. + // Iterating over selected modules would make the overall loading time + // O(M × P) for M modules providing P imported packages, whereas iterating + // over path prefixes is only O(P × k) with maximum path depth k. For + // large projects both M and P may be very large (note that M ≤ P), but k + // will tend to remain smallish (if for no other reason than filesystem + // path limitations). + // + // We perform this iteration either one or two times. + // Firstly we attempt to load the package using only the main module and + // its root requirements. If that does not identify the package, then we attempt + // to load the package using the full + // requirements in mg. + for { + var altMods []module.Version + // TODO we could probably do this loop concurrently. + + for prefix := pkgPathOnly; prefix != "."; prefix = path.Dir(prefix) { + var ( + v string + ok bool + ) + pkgVersion := pathParts.Version + if pkgVersion == "" { + if pkgVersion, _ = pkgs.requirements.DefaultMajorVersion(prefix); pkgVersion == "" { + continue + } + } + prefixPath := prefix + "@" + pkgVersion + if mg == nil { + v, ok = pkgs.requirements.RootSelected(prefixPath) + } else { + v, ok = mg.Selected(prefixPath), true + } + if !ok || v == "none" { + continue + } + m, err := module.NewVersion(prefixPath, v) + if err != nil { + // Not all package paths are valid module versions, + // but a parent might be. + continue + } + mloc, isLocal, err := pkgs.fetch(ctx, m) + if err != nil { + // Report fetch error. 
+ // Note that we don't know for sure this module is necessary,
+ // but it certainly _could_ provide the package, and even if we
+ // continue the loop and find the package in some other module,
+ // we need to look at this module to make sure the import is
+ // not ambiguous.
+ return fail(fmt.Errorf("cannot fetch %v: %v", m, err))
+ }
+ if loc, ok, err := locInModule(pkgPathOnly, prefix, mloc, isLocal); err != nil {
+ return fail(fmt.Errorf("cannot find package: %v", err))
+ } else if ok {
+ mods = append(mods, m)
+ locs = append(locs, []module.SourceLoc{loc})
+ } else {
+ altMods = append(altMods, m)
+ }
+ }
+
+ if len(mods) > 1 {
+ // We produce the list of directories from longest to shortest candidate
+ // module path, but the AmbiguousImportError should report them from
+ // shortest to longest. Reverse them now.
+ slices.Reverse(mods)
+ slices.Reverse(locs)
+ return fail(&AmbiguousImportError{ImportPath: pkgPath, Locations: locs, Modules: mods})
+ }
+
+ if len(mods) == 1 {
+ // We've found the unique module containing the package.
+ return mods[0], locs[0], altMods, nil
+ }
+
+ if mg != nil {
+ // We checked the full module graph and still didn't find the
+ // requested package.
+ return fail(&ImportMissingError{Path: pkgPath})
+ }
+
+ // So far we've checked the root dependencies.
+ // Load the full module graph and try again.
+ mg, err = pkgs.requirements.Graph(ctx)
+ if err != nil {
+ // We might be missing one or more transitive (implicit) dependencies from
+ // the module graph, so we can't return an ImportMissingError here — one
+ // of the missing modules might actually contain the package in question,
+ // in which case we shouldn't go looking for it in some new dependency.
+ return fail(fmt.Errorf("cannot expand module graph: %v", err))
+ }
+ }
+}
+
+// locInModule returns the location that would hold the package named by the given path,
+// if it were in the module with module path mpath and root location mloc.
+// If pkgPath is syntactically not within mpath,
+// or if mloc is a local file tree (isLocal == true) and the directory
+// that would hold path is in a sub-module (covered by a cue.mod/module.cue file below mloc),
+// locInModule returns "", false, nil.
+//
+// Otherwise, locInModule returns the location of the directory where
+// CUE source files would be expected, along with a boolean indicating
+// whether there are in fact CUE source files in that directory.
+// A non-nil error indicates that the existence of the directory and/or
+// source files could not be determined, for example due to a permission error.
+func locInModule(pkgPath, mpath string, mloc module.SourceLoc, isLocal bool) (loc module.SourceLoc, haveCUEFiles bool, err error) {
+ loc.FS = mloc.FS
+
+ // Determine where to expect the package.
+ if pkgPath == mpath {
+ loc = mloc
+ } else if len(pkgPath) > len(mpath) && pkgPath[len(mpath)] == '/' && pkgPath[:len(mpath)] == mpath {
+ loc.Dir = path.Join(mloc.Dir, pkgPath[len(mpath)+1:])
+ } else {
+ return module.SourceLoc{}, false, nil
+ }
+
+ // Check that there aren't other modules in the way.
+ // This check is unnecessary inside the module cache.
+ // So we only check local module trees
+ // (the main module and, in the future, any directory trees pointed at by replace directives).
+ if isLocal {
+ for d := loc.Dir; d != mloc.Dir && len(d) > len(mloc.Dir); {
+ _, err := fs.Stat(mloc.FS, path.Join(d, "cue.mod/module.cue"))
+ // TODO should we count it as a module file if it's a directory?
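+ // A cue.mod/module.cue file in a directory between loc.Dir and the
+ // module root marks a nested module: a package below it belongs to
+ // that nested module, so this module does not provide it.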
+ haveCUEMod := err == nil
+ if haveCUEMod {
+ return module.SourceLoc{}, false, nil
+ }
+ parent := path.Dir(d)
+ if parent == d {
+ // Break the loop, as otherwise we'd loop
+ // forever if d=="." and mloc.Dir=="".
+ break
+ }
+ d = parent
+ }
+ }
+
+ // Are there CUE source files in the directory?
+ // We don't care about build tags, not even "ignore".
+ // We're just looking for a plausible directory.
+ haveCUEFiles, err = isDirWithCUEFiles(loc)
+ if err != nil {
+ return module.SourceLoc{}, false, err
+ }
+ return loc, haveCUEFiles, err
+}
+
+var localPkgDirs = []string{"cue.mod/gen", "cue.mod/usr", "cue.mod/pkg"}
+
+func (pkgs *Packages) findLocalPackage(pkgPath string) ([]module.SourceLoc, error) {
+ var locs []module.SourceLoc
+ for _, d := range localPkgDirs {
+ loc := pkgs.mainModuleLoc
+ loc.Dir = path.Join(loc.Dir, d, pkgPath)
+ ok, err := isDirWithCUEFiles(loc)
+ if err != nil {
+ return nil, err
+ }
+ if ok {
+ locs = append(locs, loc)
+ }
+ }
+ return locs, nil
+}
+
+func isDirWithCUEFiles(loc module.SourceLoc) (bool, error) {
+ // It would be nice if we could inspect the error returned from ReadDir to see
+ // if it's failing because it's not a directory, but unfortunately that doesn't
+ // seem to be something defined by the Go fs interface.
+ // For now, catching fs.ErrNotExist seems to be enough.
+ entries, err := fs.ReadDir(loc.FS, loc.Dir)
+ if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ return false, nil
+ }
+ return false, err
+ }
+ for _, e := range entries {
+ if strings.HasSuffix(e.Name(), ".cue") && e.Type().IsRegular() {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// fetch downloads the given module (or its replacement)
+// and returns its location.
+//
+// The isLocal return value reports whether the replacement,
+// if any, is within the local main module.
+func (pkgs *Packages) fetch(ctx context.Context, mod module.Version) (loc module.SourceLoc, isLocal bool, err error) {
+ if mod == pkgs.mainModuleVersion {
+ return pkgs.mainModuleLoc, true, nil
+ }
+
+ loc, err = pkgs.registry.Fetch(ctx, mod)
+ return loc, false, err
+}
+
+// An AmbiguousImportError indicates an import of a package found in multiple
+// modules in the build list, or found in both the main module and its vendor
+// directory.
+type AmbiguousImportError struct {
+ ImportPath string
+ Locations [][]module.SourceLoc
+ Modules []module.Version // Either empty or 1:1 with Locations.
+}
+
+func (e *AmbiguousImportError) Error() string {
+ locType := "modules"
+ if len(e.Modules) == 0 {
+ locType = "locations"
+ }
+
+ var buf strings.Builder
+ fmt.Fprintf(&buf, "ambiguous import: found package %s in multiple %s:", e.ImportPath, locType)
+
+ for i, loc := range e.Locations {
+ buf.WriteString("\n\t")
+ if i < len(e.Modules) {
+ m := e.Modules[i]
+ buf.WriteString(m.Path())
+ if m.Version() != "" {
+ fmt.Fprintf(&buf, " %s", m.Version())
+ }
+ // TODO work out how to present source locations in error messages.
+ fmt.Fprintf(&buf, " (%s)", loc[0].Dir)
+ } else {
+ buf.WriteString(loc[0].Dir)
+ }
+ }
+
+ return buf.String()
+}
+
+// ImportMissingError is used for errors where an imported package cannot be found.
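+// Callers detect it with errors.As; modload's resolveMissingImports, for
+// example, uses it to decide which missing packages might still be satisfied
+// by adding a module to the requirements.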
+type ImportMissingError struct { + Path string +} + +func (e *ImportMissingError) Error() string { + return "cannot find module providing package " + e.Path +} diff --git a/vendor/cuelang.org/go/internal/mod/modpkgload/pkgload.go b/vendor/cuelang.org/go/internal/mod/modpkgload/pkgload.go new file mode 100644 index 0000000000..05d5235b4c --- /dev/null +++ b/vendor/cuelang.org/go/internal/mod/modpkgload/pkgload.go @@ -0,0 +1,370 @@ +package modpkgload + +import ( + "context" + "fmt" + "runtime" + "slices" + "sort" + "strings" + "sync/atomic" + + "cuelang.org/go/internal/mod/modimports" + "cuelang.org/go/internal/mod/modrequirements" + "cuelang.org/go/internal/par" + "cuelang.org/go/mod/module" +) + +// Registry represents a module registry, or at least this package's view of it. +type Registry interface { + // Fetch returns the location of the contents for the given module + // version, downloading it if necessary. + Fetch(ctx context.Context, m module.Version) (module.SourceLoc, error) +} + +// Flags is a set of flags tracking metadata about a package. +type Flags int8 + +const ( + // PkgInAll indicates that the package is in the "all" package pattern, + // regardless of whether we are loading the "all" package pattern. + // + // When the PkgInAll flag and PkgImportsLoaded flags are both set, the caller + // who set the last of those flags must propagate the PkgInAll marking to all + // of the imports of the marked package. + PkgInAll Flags = 1 << iota + + // PkgIsRoot indicates that the package matches one of the root package + // patterns requested by the caller. + PkgIsRoot + + // PkgFromRoot indicates that the package is in the transitive closure of + // imports starting at the roots. (Note that every package marked as PkgIsRoot + // is also trivially marked PkgFromRoot.) + PkgFromRoot + + // PkgImportsLoaded indicates that the Imports field of a + // Pkg have been populated. + PkgImportsLoaded +) + +func (f Flags) String() string { + var buf strings.Builder + set := func(f1 Flags, s string) { + if (f & f1) == 0 { + return + } + if buf.Len() > 0 { + buf.WriteString(",") + } + buf.WriteString(s) + f &^= f1 + } + set(PkgInAll, "inAll") + set(PkgIsRoot, "isRoot") + set(PkgFromRoot, "fromRoot") + set(PkgImportsLoaded, "importsLoaded") + if f != 0 { + set(f, fmt.Sprintf("extra%x", int(f))) + } + return buf.String() +} + +// has reports whether all of the flags in cond are set in f. +func (f Flags) has(cond Flags) bool { + return f&cond == cond +} + +type Packages struct { + mainModuleVersion module.Version + mainModuleLoc module.SourceLoc + pkgCache par.Cache[string, *Package] + pkgs []*Package + rootPkgs []*Package + work *par.Queue + requirements *modrequirements.Requirements + registry Registry +} + +type Package struct { + // Populated at construction time: + path string // import path + + // Populated at construction time and updated by [loader.applyPkgFlags]: + flags atomicLoadPkgFlags + + // Populated by [loader.load]. 
+ mod module.Version // module providing package + locs []module.SourceLoc // location of source code directories + err error // error loading package + imports []*Package // packages imported by this one + inStd bool + fromExternal bool + altMods []module.Version // modules that could have contained the package but did not + + // Populated by postprocessing in [Packages.buildStacks]: + stack *Package // package importing this one in minimal import stack for this pkg +} + +func (pkg *Package) ImportPath() string { + return pkg.path +} + +func (pkg *Package) FromExternalModule() bool { + return pkg.fromExternal +} + +func (pkg *Package) Locations() []module.SourceLoc { + return pkg.locs +} + +func (pkg *Package) Error() error { + return pkg.err +} + +func (pkg *Package) SetError(err error) { + pkg.err = err +} + +func (pkg *Package) HasFlags(flags Flags) bool { + return pkg.flags.has(flags) +} + +func (pkg *Package) Imports() []*Package { + return pkg.imports +} + +func (pkg *Package) Flags() Flags { + return pkg.flags.get() +} + +func (pkg *Package) Mod() module.Version { + return pkg.mod +} + +// LoadPackages loads information about all the given packages and the +// packages they import, recursively, using modules from the given +// requirements to determine which modules they might be obtained from, +// and reg to download module contents. +// +// rootPkgPaths should only contain canonical import paths. +func LoadPackages( + ctx context.Context, + mainModulePath string, + mainModuleLoc module.SourceLoc, + rs *modrequirements.Requirements, + reg Registry, + rootPkgPaths []string, +) *Packages { + pkgs := &Packages{ + mainModuleVersion: module.MustNewVersion(mainModulePath, ""), + mainModuleLoc: mainModuleLoc, + requirements: rs, + registry: reg, + work: par.NewQueue(runtime.GOMAXPROCS(0)), + } + inRoots := map[*Package]bool{} + pkgs.rootPkgs = make([]*Package, 0, len(rootPkgPaths)) + for _, p := range rootPkgPaths { + // TODO the original logic didn't add PkgInAll here. Not sure why, + // and that might be a lurking problem. + if root := pkgs.addPkg(ctx, p, PkgIsRoot|PkgInAll); !inRoots[root] { + pkgs.rootPkgs = append(pkgs.rootPkgs, root) + inRoots[root] = true + } + } + <-pkgs.work.Idle() + pkgs.buildStacks() + return pkgs +} + +// buildStacks computes minimal import stacks for each package, +// for use in error messages. When it completes, packages that +// are part of the original root set have pkg.stack == nil, +// and other packages have pkg.stack pointing at the next +// package up the import stack in their minimal chain. +// As a side effect, buildStacks also constructs ld.pkgs, +// the list of all packages loaded. +func (pkgs *Packages) buildStacks() { + for _, pkg := range pkgs.rootPkgs { + pkg.stack = pkg // sentinel to avoid processing in next loop + pkgs.pkgs = append(pkgs.pkgs, pkg) + } + for i := 0; i < len(pkgs.pkgs); i++ { // not range: appending to pkgs.pkgs in loop + pkg := pkgs.pkgs[i] + for _, next := range pkg.imports { + if next.stack == nil { + next.stack = pkg + pkgs.pkgs = append(pkgs.pkgs, next) + } + } + } + for _, pkg := range pkgs.rootPkgs { + pkg.stack = nil + } +} + +func (pkgs *Packages) Roots() []*Package { + return slices.Clip(pkgs.rootPkgs) +} + +func (pkgs *Packages) All() []*Package { + return slices.Clip(pkgs.pkgs) +} + +// Pkg obtains a given package given its canonical import path. 
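+// It returns nil if no package with that path has been loaded.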
+func (pkgs *Packages) Pkg(canonicalPkgPath string) *Package {
+ pkg, _ := pkgs.pkgCache.Get(canonicalPkgPath)
+ return pkg
+}
+
+func (pkgs *Packages) addPkg(ctx context.Context, pkgPath string, flags Flags) *Package {
+ pkg := pkgs.pkgCache.Do(pkgPath, func() *Package {
+ pkg := &Package{
+ path: pkgPath,
+ }
+ pkgs.applyPkgFlags(pkg, flags)
+
+ pkgs.work.Add(func() { pkgs.load(ctx, pkg) })
+ return pkg
+ })
+
+ // Ensure the flags apply even if the package already existed.
+ pkgs.applyPkgFlags(pkg, flags)
+ return pkg
+}
+
+func (pkgs *Packages) load(ctx context.Context, pkg *Package) {
+ if IsStdlibPackage(pkg.path) {
+ pkg.inStd = true
+ return
+ }
+ pkg.mod, pkg.locs, pkg.altMods, pkg.err = pkgs.importFromModules(ctx, pkg.path)
+ if pkg.err != nil {
+ return
+ }
+ // Determine fromExternal only after pkg.mod has been populated by
+ // importFromModules above; comparing the zero module.Version against
+ // the main module version would mark every package as external.
+ pkg.fromExternal = pkg.mod != pkgs.mainModuleVersion
+ if pkgs.mainModuleVersion.Path() == pkg.mod.Path() {
+ pkgs.applyPkgFlags(pkg, PkgInAll)
+ }
+ pkgQual := module.ParseImportPath(pkg.path).Qualifier
+ if pkgQual == "" {
+ pkg.err = fmt.Errorf("cannot determine package name from import path %q", pkg.path)
+ return
+ }
+ importsMap := make(map[string]bool)
+ foundPackageFile := false
+ for _, loc := range pkg.locs {
+ // Layer an iterator whose yield function keeps track of whether we have seen
+ // a single valid CUE file in the package directory.
+ // Otherwise we would have to iterate twice, causing twice as many io/fs operations.
+ pkgFileIter := func(yield func(modimports.ModuleFile, error) bool) {
+ yield2 := func(mf modimports.ModuleFile, err error) bool {
+ foundPackageFile = err == nil
+ return yield(mf, err)
+ }
+ modimports.PackageFiles(loc.FS, loc.Dir, pkgQual)(yield2)
+ }
+ imports, err := modimports.AllImports(pkgFileIter)
+ if err != nil {
+ pkg.err = fmt.Errorf("cannot get imports: %v", err)
+ return
+ }
+ for _, imp := range imports {
+ importsMap[imp] = true
+ }
+ }
+ if !foundPackageFile {
+ pkg.err = fmt.Errorf("no files in package directory with package name %q", pkgQual)
+ return
+ }
+ imports := make([]string, 0, len(importsMap))
+ for imp := range importsMap {
+ imports = append(imports, imp)
+ }
+ sort.Strings(imports) // Make the algorithm deterministic for tests.
+
+ pkg.imports = make([]*Package, 0, len(imports))
+ var importFlags Flags
+ if pkg.flags.has(PkgInAll) {
+ importFlags = PkgInAll
+ }
+ for _, path := range imports {
+ pkg.imports = append(pkg.imports, pkgs.addPkg(ctx, path, importFlags))
+ }
+ pkgs.applyPkgFlags(pkg, PkgImportsLoaded)
+}
+
+// applyPkgFlags updates pkg.flags to set the given flags and propagate the
+// (transitive) effects of those flags, possibly loading or enqueueing further
+// packages as a result.
+func (pkgs *Packages) applyPkgFlags(pkg *Package, flags Flags) {
+ if flags == 0 {
+ return
+ }
+
+ if flags.has(PkgInAll) {
+ // This package matches a root pattern by virtue of being in "all".
+ flags |= PkgIsRoot
+ }
+ if flags.has(PkgIsRoot) {
+ flags |= PkgFromRoot
+ }
+
+ old := pkg.flags.update(flags)
+ new := old | flags
+ if new == old || !new.has(PkgImportsLoaded) {
+ // We either didn't change the state of pkg, or we don't know anything about
+ // its dependencies yet. Either way, we can't usefully load its test or
+ // update its dependencies.
+ return
+ }
+
+ if new.has(PkgInAll) && !old.has(PkgInAll|PkgImportsLoaded) {
+ // We have just marked pkg with pkgInAll, or we have just loaded its
+ // imports, or both. Now is the time to propagate pkgInAll to the imports.
+ for _, dep := range pkg.imports { + pkgs.applyPkgFlags(dep, PkgInAll) + } + } + + if new.has(PkgFromRoot) && !old.has(PkgFromRoot|PkgImportsLoaded) { + for _, dep := range pkg.imports { + pkgs.applyPkgFlags(dep, PkgFromRoot) + } + } +} + +// An atomicLoadPkgFlags stores a loadPkgFlags for which individual flags can be +// added atomically. +type atomicLoadPkgFlags struct { + bits atomic.Int32 +} + +// update sets the given flags in af (in addition to any flags already set). +// +// update returns the previous flag state so that the caller may determine which +// flags were newly-set. +func (af *atomicLoadPkgFlags) update(flags Flags) (old Flags) { + for { + old := af.bits.Load() + new := old | int32(flags) + if new == old || af.bits.CompareAndSwap(old, new) { + return Flags(old) + } + } +} + +func (af *atomicLoadPkgFlags) get() Flags { + return Flags(af.bits.Load()) +} + +// has reports whether all of the flags in cond are set in af. +func (af *atomicLoadPkgFlags) has(cond Flags) bool { + return Flags(af.bits.Load())&cond == cond +} + +func IsStdlibPackage(pkgPath string) bool { + firstElem, _, _ := strings.Cut(pkgPath, "/") + return !strings.Contains(firstElem, ".") +} diff --git a/vendor/cuelang.org/go/internal/mod/modregistry/client.go b/vendor/cuelang.org/go/internal/mod/modregistry/client.go deleted file mode 100644 index f378323b2f..0000000000 --- a/vendor/cuelang.org/go/internal/mod/modregistry/client.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2023 CUE Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package modregistry - -import ( - "archive/zip" - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "strings" - - "cuelabs.dev/go/oci/ociregistry" - "cuelang.org/go/internal/mod/semver" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - - "cuelang.org/go/internal/mod/modfile" - "cuelang.org/go/internal/mod/module" - "cuelang.org/go/internal/mod/modzip" -) - -var ErrNotFound = fmt.Errorf("module not found") - -// Client represents a OCI-registry-backed client that -// provides a store for CUE modules. -type Client struct { - registry ociregistry.Interface -} - -const ( - moduleArtifactType = "application/vnd.cue.module.v1+json" - moduleFileMediaType = "application/vnd.cue.modulefile.v1" - moduleAnnotation = "works.cue.module" -) - -// NewClient returns a new client that talks to the registry at the given -// hostname. -func NewClient(registry ociregistry.Interface) *Client { - return &Client{ - registry: registry, - } -} - -// GetModule returns the module instance for the given version. -// It returns an error that satisfies errors.Is(ErrNotFound) if the -// module is not present in the store at this version. 
-func (c *Client) GetModule(ctx context.Context, m module.Version) (*Module, error) { - repoName := c.repoName(m.Path()) - modDesc, err := c.registry.ResolveTag(ctx, repoName, m.Version()) - if err != nil { - if errors.Is(err, ociregistry.ErrManifestUnknown) { - return nil, fmt.Errorf("module %v: %w", m, ErrNotFound) - } - return nil, fmt.Errorf("module %v: %v", m, err) - } - manifest, err := fetchManifest(ctx, c.registry, repoName, modDesc) - if err != nil { - return nil, fmt.Errorf("cannot unmarshal manifest data: %v", err) - } - if !isModule(manifest) { - return nil, fmt.Errorf("%v does not resolve to a manifest (media type is %q)", m, modDesc.MediaType) - } - // TODO check type of manifest too. - if n := len(manifest.Layers); n != 2 { - return nil, fmt.Errorf("module manifest should refer to exactly two blobs, but got %d", n) - } - if !isModuleFile(manifest.Layers[1]) { - return nil, fmt.Errorf("unexpected media type %q for module file blob", manifest.Layers[1].MediaType) - } - // TODO check that the other blobs are of the expected type (application/zip). - return &Module{ - client: c, - repo: repoName, - manifest: *manifest, - }, nil -} - -func (c *Client) repoName(modPath string) string { - path, _, _ := module.SplitPathVersion(modPath) - return path -} - -// ModuleVersions returns all the versions for the module with the given path. -func (c *Client) ModuleVersions(ctx context.Context, m string) ([]string, error) { - _, major, ok := module.SplitPathVersion(m) - if !ok { - return nil, fmt.Errorf("non-canonical module path %q", m) - } - var tags []string - iter := c.registry.Tags(ctx, c.repoName(m)) - for { - tag, ok := iter.Next() - if !ok { - break - } - if semver.IsValid(tag) && semver.Major(tag) == major { - tags = append(tags, tag) - } - } - if err := iter.Error(); err != nil { - return nil, err - } - return tags, nil -} - -// CheckedModule represents module content that has passed the same -// checks made by [Client.PutModule]. The caller should not mutate -// any of the values returned by its methods. -type CheckedModule struct { - mv module.Version - blobr io.ReaderAt - size int64 - zipr *zip.Reader - modFile *modfile.File - modFileContent []byte -} - -// Version returns the version that the module will be tagged as. -func (m *CheckedModule) Version() module.Version { - return m.mv -} - -// Version returns the parsed contents of the modules cue.mod/module.cue file. -func (m *CheckedModule) ModFile() *modfile.File { - return m.modFile -} - -// ModFileContent returns the raw contents of the modules cue.mod/module.cue file. -func (m *CheckedModule) ModFileContent() []byte { - return m.modFileContent -} - -// Zip returns the reader for the module's zip archive. -func (m *CheckedModule) Zip() *zip.Reader { - return m.zipr -} - -// PutCheckedModule is like [Client.PutModule] except that it allows the -// caller to do some additional checks (see [CheckModule] for more info). -func (c *Client) PutCheckedModule(ctx context.Context, m *CheckedModule) error { - repoName := c.repoName(m.mv.Path()) - selfDigest, err := digest.FromReader(io.NewSectionReader(m.blobr, 0, m.size)) - if err != nil { - return fmt.Errorf("cannot read module zip file: %v", err) - } - // Upload the actual module's content - // TODO should we use a custom media type for this? 
- configDesc, err := c.scratchConfig(ctx, repoName, moduleArtifactType) - if err != nil { - return fmt.Errorf("cannot make scratch config: %v", err) - } - manifest := &ocispec.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, // historical value. does not pertain to OCI or docker version - }, - MediaType: ocispec.MediaTypeImageManifest, - Config: configDesc, - // One for self, one for module file. - Layers: []ocispec.Descriptor{{ - Digest: selfDigest, - MediaType: "application/zip", - Size: m.size, - }, { - Digest: digest.FromBytes(m.modFileContent), - MediaType: moduleFileMediaType, - Size: int64(len(m.modFileContent)), - }}, - } - - if _, err := c.registry.PushBlob(ctx, repoName, manifest.Layers[0], io.NewSectionReader(m.blobr, 0, m.size)); err != nil { - return fmt.Errorf("cannot push module contents: %v", err) - } - if _, err := c.registry.PushBlob(ctx, repoName, manifest.Layers[1], bytes.NewReader(m.modFileContent)); err != nil { - return fmt.Errorf("cannot push cue.mod/module.cue contents: %v", err) - } - manifestData, err := json.Marshal(manifest) - if err != nil { - return fmt.Errorf("cannot marshal manifest: %v", err) - } - if _, err := c.registry.PushManifest(ctx, repoName, m.mv.Version(), manifestData, ocispec.MediaTypeImageManifest); err != nil { - return fmt.Errorf("cannot tag %v: %v", m.mv, err) - } - return nil -} - -// PutModule puts a module whose contents are held as a zip archive inside f. -// It assumes all the module dependencies are correctly resolved and present -// inside the cue.mod/module.cue file. -// -// TODO check deps are resolved correctly? Or is that too domain-specific for this package? -// Is it a problem to call zip.CheckZip twice? -func (c *Client) PutModule(ctx context.Context, m module.Version, r io.ReaderAt, size int64) error { - cm, err := CheckModule(m, r, size) - if err != nil { - return err - } - return c.PutCheckedModule(ctx, cm) -} - -// CheckModule checks a module's zip file before uploading it. -// This does the same checks that [Client.PutModule] does, so -// can be used to avoid doing duplicate work when an uploader -// wishes to do more checks that are implemented by that method. -// -// Note that the returned [CheckedModule] value contains r, so will -// be invalidated if r is closed. -func CheckModule(m module.Version, blobr io.ReaderAt, size int64) (*CheckedModule, error) { - zipr, modf, _, err := modzip.CheckZip(m, blobr, size) - if err != nil { - return nil, fmt.Errorf("module zip file check failed: %v", err) - } - modFileContent, mf, err := checkModFile(m, modf) - if err != nil { - return nil, fmt.Errorf("module.cue file check failed: %v", err) - } - return &CheckedModule{ - mv: m, - blobr: blobr, - size: size, - zipr: zipr, - modFile: mf, - modFileContent: modFileContent, - }, nil -} - -func checkModFile(m module.Version, f *zip.File) ([]byte, *modfile.File, error) { - r, err := f.Open() - if err != nil { - return nil, nil, err - } - defer r.Close() - // TODO check max size? - data, err := io.ReadAll(r) - if err != nil { - return nil, nil, err - } - mf, err := modfile.Parse(data, f.Name) - if err != nil { - return nil, nil, err - } - if mf.Module != m.Path() { - return nil, nil, fmt.Errorf("module path %q found in %s does not match module path being published %q", mf.Module, f.Name, m.Path()) - } - _, major, ok := module.SplitPathVersion(mf.Module) - if !ok { - // Note: can't happen because we already know that mf.Module is the same - // as m.Path which is a valid module path. 
- return nil, nil, fmt.Errorf("invalid module path %q", mf.Module) - } - wantMajor := semver.Major(m.Version()) - if major != wantMajor { - // This can't actually happen because the zip checker checks the major version - // that's being published to, so the above path check also implicitly checks that. - return nil, nil, fmt.Errorf("major version %q found in %s does not match version being published %q", major, f.Name, m.Version()) - } - // Check that all dependency versions look valid. - for modPath, dep := range mf.Deps { - _, err := module.NewVersion(modPath, dep.Version) - if err != nil { - return nil, nil, fmt.Errorf("invalid dependency: %v @ %v", modPath, dep.Version) - } - } - return data, mf, nil -} - -// Module represents a CUE module instance. -type Module struct { - client *Client - repo string - manifest ocispec.Manifest -} - -// ModuleFile returns the contents of the cue.mod/module.cue file. -func (m *Module) ModuleFile(ctx context.Context) ([]byte, error) { - r, err := m.client.registry.GetBlob(ctx, m.repo, m.manifest.Layers[1].Digest) - if err != nil { - return nil, err - } - defer r.Close() - return io.ReadAll(r) -} - -// GetZip returns a reader that can be used to read the contents of the zip -// archive containing the module files. The reader should be closed after use, -// and the contents should not be assumed to be correct until the close -// error has been checked. -func (m *Module) GetZip(ctx context.Context) (io.ReadCloser, error) { - return m.client.registry.GetBlob(ctx, m.repo, m.manifest.Layers[0].Digest) -} - -func fetchManifest(ctx context.Context, r ociregistry.Interface, repoName string, desc ocispec.Descriptor) (*ociregistry.Manifest, error) { - if !isJSON(desc.MediaType) { - return nil, fmt.Errorf("expected JSON media type but %q does not look like JSON", desc.MediaType) - } - rd, err := r.GetManifest(ctx, repoName, desc.Digest) - if err != nil { - return nil, err - } - defer rd.Close() - data, err := io.ReadAll(rd) - if err != nil { - return nil, err - } - var m ociregistry.Manifest - if err := json.Unmarshal(data, &m); err != nil { - return nil, fmt.Errorf("cannot decode %s content as manifest: %v", desc.MediaType, err) - } - return &m, nil -} - -func isModule(m *ocispec.Manifest) bool { - // TODO check m.ArtifactType too when that's defined? - // See https://github.com/opencontainers/image-spec/blob/main/manifest.md#image-manifest-property-descriptions - return m.Config.MediaType == moduleArtifactType -} - -func isModuleFile(desc ocispec.Descriptor) bool { - return desc.ArtifactType == moduleFileMediaType || - desc.MediaType == moduleFileMediaType -} - -// isJSON reports whether the given media type has JSON as an underlying encoding. -// TODO this is a guess. There's probably a more correct way to do it. -func isJSON(mediaType string) bool { - return strings.HasSuffix(mediaType, "+json") || strings.HasSuffix(mediaType, "/json") -} - -// scratchConfig returns a dummy configuration consisting only of the -// two-byte configuration {}. -// https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-of-a-scratch-config-or-layer-descriptor -func (c *Client) scratchConfig(ctx context.Context, repoName string, mediaType string) (ocispec.Descriptor, error) { - // TODO check if it exists already to avoid push? 
-	content := []byte("{}")
-	desc := ocispec.Descriptor{
-		Digest:    digest.FromBytes(content),
-		MediaType: mediaType,
-		Size:      int64(len(content)),
-	}
-	if _, err := c.registry.PushBlob(ctx, repoName, desc, bytes.NewReader(content)); err != nil {
-		return ocispec.Descriptor{}, err
-	}
-	return desc, nil
-}
diff --git a/vendor/cuelang.org/go/internal/mod/modrequirements/requirements.go b/vendor/cuelang.org/go/internal/mod/modrequirements/requirements.go
new file mode 100644
index 0000000000..c62347da88
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/mod/modrequirements/requirements.go
@@ -0,0 +1,420 @@
+package modrequirements
+
+import (
+	"context"
+	"fmt"
+	"runtime"
+	"slices"
+	"sync"
+	"sync/atomic"
+
+	"cuelang.org/go/internal/mod/mvs"
+	"cuelang.org/go/internal/mod/semver"
+	"cuelang.org/go/internal/par"
+	"cuelang.org/go/mod/module"
+)
+
+type majorVersionDefault struct {
+	version          string
+	explicitDefault  bool
+	ambiguousDefault bool
+}
+
+// Requirements holds a set of module requirements. It does not
+// initially load the full module graph, as that can be expensive.
+// Instead the [Requirements.Graph] method can be used to lazily construct
+// that.
+type Requirements struct {
+	registry          Registry
+	mainModuleVersion module.Version
+
+	// rootModules is the set of root modules of the graph, sorted and capped to
+	// length. It may contain duplicates, and may contain multiple versions for a
+	// given module path. The root modules are the main module's direct requirements.
+	rootModules    []module.Version
+	maxRootVersion map[string]string
+
+	// origDefaultMajorVersions holds the original map passed to NewRequirements.
+	origDefaultMajorVersions map[string]string
+
+	// defaultMajorVersions is derived from the above information,
+	// also holding modules that have a default due to being unique
+	// in the roots.
+	defaultMajorVersions map[string]majorVersionDefault
+
+	graphOnce sync.Once // guards writes to (but not reads from) graph
+	graph     atomic.Pointer[cachedGraph]
+}
+
+// Registry holds the contents of a registry. It's expected that this will
+// cache any results that it returns.
+type Registry interface {
+	Requirements(ctx context.Context, m module.Version) ([]module.Version, error)
+}
+
+// A cachedGraph is a non-nil *ModuleGraph, together with any error discovered
+// while loading that graph.
+type cachedGraph struct {
+	mg  *ModuleGraph
+	err error // If err is non-nil, mg may be incomplete (but must still be non-nil).
+}
+
+// NewRequirements returns a new requirement set with the given root modules.
+// The dependencies of the roots will be loaded lazily from the given
+// Registry value at the first call to the Graph method.
+//
+// The rootModules slice must be sorted according to [module.Sort].
+//
+// The defaultMajorVersions map holds the default major version for (major-version-less)
+// module paths, if any have been specified. For example {"foo.com/bar": "v0"} specifies
+// that the default major version for the module `foo.com/bar` is `v0`.
+//
+// The caller must not modify rootModules or defaultMajorVersions after passing
+// them to NewRequirements.
+func NewRequirements(mainModulePath string, reg Registry, rootModules []module.Version, defaultMajorVersions map[string]string) *Requirements {
+	mainModuleVersion := module.MustNewVersion(mainModulePath, "")
+	// TODO add direct, so we can tell which modules are directly used by the
+	// main module.
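+	// For illustration, a minimal sketch of a typical call (the module
+	// paths here are hypothetical, and reg is assumed to be some value
+	// implementing the Registry interface above):
+	//
+	//	roots := []module.Version{
+	//		module.MustNewVersion("bar.org@v1", "v1.2.0"),
+	//		module.MustNewVersion("foo.com/bar@v0", "v0.3.1"),
+	//	}
+	//	module.Sort(roots) // rootModules must be sorted
+	//	rs := NewRequirements("main.example@v0", reg, roots, map[string]string{
+	//		"foo.com/bar": "v0", // explicit default major version
+	//	})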
+	for i, v := range rootModules {
+		if v.Path() == mainModulePath {
+			panic(fmt.Sprintf("NewRequirements called with untrimmed build list: rootModules[%v] is a main module", i))
+		}
+		if !v.IsValid() {
+			panic("NewRequirements with invalid zero version")
+		}
+	}
+	rs := &Requirements{
+		registry:          reg,
+		mainModuleVersion: mainModuleVersion,
+		rootModules:       rootModules,
+		maxRootVersion:    make(map[string]string, len(rootModules)),
+	}
+	for i, m := range rootModules {
+		if i > 0 {
+			prev := rootModules[i-1]
+			if prev.Path() > m.Path() || (prev.Path() == m.Path() && semver.Compare(prev.Version(), m.Version()) > 0) {
+				panic(fmt.Sprintf("NewRequirements called with unsorted roots: %v", rootModules))
+			}
+		}
+		if v, ok := rs.maxRootVersion[m.Path()]; !ok || semver.Compare(v, m.Version()) < 0 {
+			rs.maxRootVersion[m.Path()] = m.Version()
+		}
+	}
+	rs.initDefaultMajorVersions(defaultMajorVersions)
+	return rs
+}
+
+// WithDefaultMajorVersions returns rs but with the given default major versions.
+// The caller should not modify the map after calling this method.
+func (rs *Requirements) WithDefaultMajorVersions(defaults map[string]string) *Requirements {
+	rs1 := &Requirements{
+		registry:          rs.registry,
+		mainModuleVersion: rs.mainModuleVersion,
+		rootModules:       rs.rootModules,
+		maxRootVersion:    rs.maxRootVersion,
+	}
+	// Initialize graph and graphOnce in rs1 to mimic their state in rs.
+	// We can't copy the sync.Once, so if it's already triggered, we'll
+	// run the Once with a no-op function to get the same effect.
+	rs1.graph.Store(rs.graph.Load())
+	if rs1.GraphIsLoaded() {
+		rs1.graphOnce.Do(func() {})
+	}
+	rs1.initDefaultMajorVersions(defaults)
+	return rs1
+}
+
+func (rs *Requirements) initDefaultMajorVersions(defaultMajorVersions map[string]string) {
+	rs.origDefaultMajorVersions = defaultMajorVersions
+	rs.defaultMajorVersions = make(map[string]majorVersionDefault)
+	for mpath, v := range defaultMajorVersions {
+		if _, _, ok := module.SplitPathVersion(mpath); ok {
+			panic(fmt.Sprintf("NewRequirements called with major version in defaultMajorVersions %q", mpath))
+		}
+		if semver.Major(v) != v {
+			panic(fmt.Sprintf("NewRequirements called with invalid major version %q for module %q", v, mpath))
+		}
+		rs.defaultMajorVersions[mpath] = majorVersionDefault{
+			version:         v,
+			explicitDefault: true,
+		}
+	}
+	// Add defaults for all modules that have exactly one major version
+	// and no existing default.
+	for _, m := range rs.rootModules {
+		if m.IsLocal() {
+			continue
+		}
+		mpath := m.BasePath()
+		d, ok := rs.defaultMajorVersions[mpath]
+		if !ok {
+			rs.defaultMajorVersions[mpath] = majorVersionDefault{
+				version: semver.Major(m.Version()),
+			}
+			continue
+		}
+		if d.explicitDefault {
+			continue
+		}
+		d.ambiguousDefault = true
+		rs.defaultMajorVersions[mpath] = d
+	}
+}
+
+// RootSelected returns the version of the root dependency with the given module
+// path, or the empty string and ok=false if the module is not a root
+// dependency.
+func (rs *Requirements) RootSelected(mpath string) (version string, ok bool) {
+	if mpath == rs.mainModuleVersion.Path() {
+		return "", true
+	}
+	if v, ok := rs.maxRootVersion[mpath]; ok {
+		return v, true
+	}
+	return "", false
+}
+
+// DefaultMajorVersions returns the defaults that the Requirements value was
+// created with. The returned map should not be modified.
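+//
+// For illustration (hypothetical values): a returned map such as
+// {"foo.com/bar": "v0"} means that the major-version-less path
+// "foo.com/bar" defaults to "foo.com/bar@v0".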
+func (rs *Requirements) DefaultMajorVersions() map[string]string {
+	return rs.origDefaultMajorVersions
+}
+
+type MajorVersionDefaultStatus byte
+
+const (
+	ExplicitDefault MajorVersionDefaultStatus = iota
+	NonExplicitDefault
+	NoDefault
+	AmbiguousDefault
+)
+
+// DefaultMajorVersion returns the default major version for the given
+// module path (which should not itself contain a major version).
+//
+// It also returns information about the default.
+func (rs *Requirements) DefaultMajorVersion(mpath string) (string, MajorVersionDefaultStatus) {
+	d, ok := rs.defaultMajorVersions[mpath]
+	switch {
+	case !ok:
+		return "", NoDefault
+	case d.ambiguousDefault:
+		return "", AmbiguousDefault
+	case d.explicitDefault:
+		return d.version, ExplicitDefault
+	default:
+		return d.version, NonExplicitDefault
+	}
+}
+
+// RootModules returns the set of root modules of the graph, sorted and capped to
+// length. It may contain duplicates, and may contain multiple versions for a
+// given module path.
+func (rs *Requirements) RootModules() []module.Version {
+	return slices.Clip(rs.rootModules)
+}
+
+// Graph returns the graph of module requirements loaded from the current
+// root modules (as reported by RootModules).
+//
+// Graph always makes a best effort to load the requirement graph despite any
+// errors, and always returns a non-nil *ModuleGraph.
+//
+// If the requirements of any relevant module fail to load, Graph also
+// returns a non-nil error of type *mvs.BuildListError.
+func (rs *Requirements) Graph(ctx context.Context) (*ModuleGraph, error) {
+	rs.graphOnce.Do(func() {
+		mg, mgErr := rs.readModGraph(ctx)
+		rs.graph.Store(&cachedGraph{mg, mgErr})
+	})
+	cached := rs.graph.Load()
+	return cached.mg, cached.err
+}
+
+// GraphIsLoaded reports whether Graph has been called previously.
+func (rs *Requirements) GraphIsLoaded() bool {
+	return rs.graph.Load() != nil
+}
+
+// A ModuleGraph represents the complete graph of module dependencies
+// of a main module.
+//
+// If the main module supports module graph pruning, the graph does not include
+// transitive dependencies of non-root (implicit) dependencies.
+type ModuleGraph struct {
+	g *mvs.Graph[module.Version]
+
+	buildListOnce sync.Once
+	buildList     []module.Version
+}
+
+// cueModSummary returns a summary of the cue.mod/module.cue file for module m,
+// taking into account any replacements for m, exclusions of its dependencies,
+// and/or vendoring.
+//
+// m must be a version in the module graph, reachable from the Target module.
+// cueModSummary must not be called for the Target module
+// itself, as its requirements may change.
+//
+// The caller must not modify the returned summary.
+func (rs *Requirements) cueModSummary(ctx context.Context, m module.Version) (*modFileSummary, error) {
+	require, err := rs.registry.Requirements(ctx, m)
+	if err != nil {
+		return nil, err
+	}
+	// TODO account for replacements, exclusions, etc.
+	return &modFileSummary{
+		module:  m,
+		require: require,
+	}, nil
+}
+
+type modFileSummary struct {
+	module  module.Version
+	require []module.Version
+}
+
+// readModGraph reads and returns the module dependency graph starting at the
+// given roots.
+//
+// readModGraph does not attempt to diagnose or update inconsistent roots.
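+//
+// For illustration, a rough caller-side sketch of the lazy loading that
+// ends up here (ctx and an initialized *Requirements value rs are
+// assumed):
+//
+//	mg, err := rs.Graph(ctx) // the first call triggers readModGraph
+//	if err != nil {
+//		// err is a *mvs.BuildListError; mg is still non-nil and usable.
+//	}
+//	for _, v := range mg.BuildList() {
+//		fmt.Println(v, mg.Selected(v.Path()))
+//	}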
+func (rs *Requirements) readModGraph(ctx context.Context) (*ModuleGraph, error) {
+	var (
+		mu       sync.Mutex // guards mg.g and hasError during loading
+		hasError bool
+		mg       = &ModuleGraph{
+			g: mvs.NewGraph[module.Version](module.Versions{}, cmpVersion, []module.Version{rs.mainModuleVersion}),
+		}
+	)
+
+	mg.g.Require(rs.mainModuleVersion, rs.rootModules)
+
+	var (
+		loadQueue = par.NewQueue(runtime.GOMAXPROCS(0))
+		loading   sync.Map // module.Version → nil; the set of modules that have been or are being loaded
+		loadCache par.ErrCache[module.Version, *modFileSummary]
+	)
+
+	// loadOne synchronously loads the explicit requirements for module m.
+	// It does not load the transitive requirements of m.
+	loadOne := func(m module.Version) (*modFileSummary, error) {
+		return loadCache.Do(m, func() (*modFileSummary, error) {
+			summary, err := rs.cueModSummary(ctx, m)
+
+			mu.Lock()
+			if err == nil {
+				mg.g.Require(m, summary.require)
+			} else {
+				hasError = true
+			}
+			mu.Unlock()
+
+			return summary, err
+		})
+	}
+
+	for _, m := range rs.rootModules {
+		m := m
+		if !m.IsValid() {
+			panic("root module version is invalid")
+		}
+		if m.IsLocal() || m.Version() == "none" {
+			continue
+		}
+
+		if _, dup := loading.LoadOrStore(m, nil); dup {
+			// m has already been enqueued for loading. Since unpruned loading may
+			// follow cycles in the requirement graph, we need to return early
+			// to avoid making the load queue infinitely long.
+			continue
+		}
+
+		loadQueue.Add(func() {
+			loadOne(m)
+			// If there's an error, findError will report it later.
+		})
+	}
+	<-loadQueue.Idle()
+
+	if hasError {
+		return mg, mg.findError(&loadCache)
+	}
+	return mg, nil
+}
+
+// RequiredBy returns the dependencies required by module m in the graph,
+// or ok=false if module m's dependencies are pruned out.
+//
+// The caller must not modify the returned slice, but may safely append to it
+// and may rely on it not to be modified.
+func (mg *ModuleGraph) RequiredBy(m module.Version) (reqs []module.Version, ok bool) {
+	return mg.g.RequiredBy(m)
+}
+
+// Selected returns the selected version of the module with the given path.
+//
+// If no version is selected, Selected returns version "none".
+func (mg *ModuleGraph) Selected(path string) (version string) {
+	return mg.g.Selected(path)
+}
+
+// WalkBreadthFirst invokes f once, in breadth-first order, for each module
+// version other than "none" that appears in the graph, regardless of whether
+// that version is selected.
+func (mg *ModuleGraph) WalkBreadthFirst(f func(m module.Version)) {
+	mg.g.WalkBreadthFirst(f)
+}
+
+// BuildList returns the selected versions of all modules present in the graph,
+// beginning with the main modules.
+//
+// The order of the remaining elements in the list is deterministic
+// but arbitrary.
+//
+// The caller must not modify the returned list, but may safely append to it
+// and may rely on it not to be modified.
+func (mg *ModuleGraph) BuildList() []module.Version {
+	mg.buildListOnce.Do(func() {
+		mg.buildList = slices.Clip(mg.g.BuildList())
+	})
+	return mg.buildList
+}
+
+func (mg *ModuleGraph) findError(loadCache *par.ErrCache[module.Version, *modFileSummary]) error {
+	errStack := mg.g.FindPath(func(m module.Version) bool {
+		_, err := loadCache.Get(m)
+		return err != nil && err != par.ErrCacheEntryNotFound
+	})
+	if len(errStack) > 0 {
+		// TODO it seems that this stack can never be more than one
+		// element long, because readModGraph never goes more
+		// than one depth level below the root requirements.
+		// Given that the top of the stack will always be the main
+		// module and that BuildListError elides the first element
+		// in this case, is it really worth using FindPath?
+		_, err := loadCache.Get(errStack[len(errStack)-1])
+		var noUpgrade func(from, to module.Version) bool
+		return mvs.NewBuildListError[module.Version](err, errStack, module.Versions{}, noUpgrade)
+	}
+
+	return nil
+}
+
+// cmpVersion implements the comparison for versions in the module loader.
+//
+// It is consistent with semver.Compare except that as a special case,
+// the version "" is considered higher than all other versions.
+// The main module (also known as the target) has no version and must be chosen
+// over other versions of the same module in the module dependency graph.
+func cmpVersion(v1, v2 string) int {
+	if v2 == "" {
+		if v1 == "" {
+			return 0
+		}
+		return -1
+	}
+	if v1 == "" {
+		return 1
+	}
+	return semver.Compare(v1, v2)
+}
diff --git a/vendor/cuelang.org/go/internal/mod/modresolve/resolve.go b/vendor/cuelang.org/go/internal/mod/modresolve/resolve.go
new file mode 100644
index 0000000000..878777029b
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/mod/modresolve/resolve.go
@@ -0,0 +1,532 @@
+// Copyright 2024 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package modresolve
+
+import (
+	"crypto/sha256"
+	_ "embed"
+	"fmt"
+	"net"
+	"net/netip"
+	"path"
+	"sort"
+	"strings"
+	"sync"
+
+	"cuelabs.dev/go/oci/ociregistry/ociref"
+
+	"cuelang.org/go/cue"
+	"cuelang.org/go/cue/cuecontext"
+	"cuelang.org/go/cue/errors"
+	"cuelang.org/go/cue/token"
+	"cuelang.org/go/mod/module"
+)
+
+// pathEncoding represents one of the possible types of
+// encoding for module paths within a registry.
+// It reflects the #registry.pathEncoding disjunction
+// in schema.cue.
+// TODO it would be nice if this could be auto-generated
+// from the schema.
+type pathEncoding string
+
+const (
+	encPath       pathEncoding = "path"
+	encHashAsRepo pathEncoding = "hashAsRepo"
+	encHashAsTag  pathEncoding = "hashAsTag"
+)
+
+// LocationResolver resolves module paths to a location
+// consisting of a host name of a registry and where
+// in that registry the module is to be found.
+//
+// Note: The implementation in this package operates entirely lexically,
+// which is why [Location] contains only a host name and not an actual
+// [ociregistry.Interface] implementation.
+type LocationResolver interface {
+	// ResolveToLocation resolves a base module path (without a version
+	// suffix, a.k.a. OCI repository name) and optional version to
+	// the location for that path. It reports whether it can find
+	// an appropriate location for the module.
+	//
+	// If the version is empty, the Tag in the returned Location
+	// will hold the prefix that all versions of the module in its
+	// repository have. That prefix will be followed by the version
+	// itself.
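+	//
+	// For illustration (hypothetical configuration and values): with a
+	// registry "myregistry.example/mods" using "path" encoding, resolving
+	// path "foo.com/bar" and version "v1.2.3" might yield
+	//
+	//	Location{
+	//		Host:       "myregistry.example",
+	//		Repository: "mods/foo.com/bar",
+	//		Tag:        "v1.2.3",
+	//	}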
+	ResolveToLocation(path string, vers string) (Location, bool)
+
+	// AllHosts returns all the registry hosts that the resolver
+	// might resolve to, ordered lexically by hostname.
+	AllHosts() []Host
+}
+
+// Host represents a registry host name.
+type Host struct {
+	// Name holds the host name (or IP address) of the registry.
+	// If it's an IPv6 address, it will be surrounded with
+	// square brackets ([, ]).
+	Name string
+	// Insecure holds whether this host should be connected
+	// to insecurely (with an HTTP rather than an HTTPS connection).
+	Insecure bool
+}
+
+// Location represents the location for a given module version or versions.
+type Location struct {
+	// Host holds the host or host:port of the registry.
+	Host string
+
+	// Insecure holds whether an insecure connection
+	// should be used when connecting to the registry.
+	Insecure bool
+
+	// Repository holds the repository to store the module in.
+	Repository string
+
+	// Tag holds the tag for the module version.
+	// If an empty version was passed to
+	// ResolveToLocation, it holds the prefix shared by all version
+	// tags for the module.
+	Tag string
+}
+
+// config mirrors the #file definition in schema.cue.
+// TODO it would be nice to be able to generate this
+// type directly from the schema.
+type config struct {
+	ModuleRegistries map[string]*registryConfig `json:"moduleRegistries,omitempty"`
+	DefaultRegistry  *registryConfig            `json:"defaultRegistry,omitempty"`
+}
+
+func (cfg *config) init() error {
+	for prefix, reg := range cfg.ModuleRegistries {
+		if err := module.CheckPathWithoutVersion(prefix); err != nil {
+			return fmt.Errorf("invalid module path %q: %v", prefix, err)
+		}
+		if err := reg.init(); err != nil {
+			return fmt.Errorf("invalid registry configuration in %q: %v", prefix, err)
+		}
+	}
+	if cfg.DefaultRegistry != nil {
+		if err := cfg.DefaultRegistry.init(); err != nil {
+			return fmt.Errorf("invalid default registry configuration: %v", err)
+		}
+	}
+	return nil
+}
+
+type registryConfig struct {
+	Registry      string       `json:"registry,omitempty"`
+	PathEncoding  pathEncoding `json:"pathEncoding,omitempty"`
+	PrefixForTags string       `json:"prefixForTags,omitempty"`
+	StripPrefix   bool         `json:"stripPrefix,omitempty"`
+
+	// The following fields are filled in from Registry after parsing.
+	none       bool
+	host       string
+	repository string
+	insecure   bool
+}
+
+func (r *registryConfig) init() error {
+	r1, err := parseRegistry(r.Registry)
+	if err != nil {
+		return err
+	}
+	r.none, r.host, r.repository, r.insecure = r1.none, r1.host, r1.repository, r1.insecure
+
+	if r.PrefixForTags != "" {
+		if !ociref.IsValidTag(r.PrefixForTags) {
+			return fmt.Errorf("invalid tag prefix %q", r.PrefixForTags)
+		}
+	}
+	if r.PathEncoding == "" {
+		// Shouldn't happen because default should apply.
+		return fmt.Errorf("empty pathEncoding")
+	}
+	if r.StripPrefix {
+		if r.PathEncoding != encPath {
+			// TODO we could relax this to allow storing of naked tags
+			// when the module path matches exactly and hash tags
+			// otherwise.
+			return fmt.Errorf("cannot strip prefix unless using path encoding")
+		}
+		if r.repository == "" {
+			return fmt.Errorf("use of stripPrefix requires a non-empty repository within the registry")
+		}
+	}
+	return nil
+}
+
+var (
+	configSchemaOnce sync.Once // guards the creation of _configSchema
+	// TODO remove this mutex when https://cuelang.org/issue/2733 is fixed.
+	configSchemaMutex sync.Mutex // guards any use of _configSchema
+	_configSchema     cue.Value
+)
+
+//go:embed schema.cue
+var configSchemaData []byte
+
+// RegistryConfigSchema returns the CUE schema
+// for the configuration parsed by [ParseConfig].
+func RegistryConfigSchema() string {
+	// Cut out the copyright header and the header that's
+	// not pure schema.
+	schema := string(configSchemaData)
+	i := strings.Index(schema, "\n// #file ")
+	if i == -1 {
+		panic("no file definition found in schema")
+	}
+	i++
+	return schema[i:]
+}
+
+// ParseConfig parses the registry configuration with the given contents and file name.
+// If there is no default registry, then the single registry specified in catchAllDefault
+// will be used as a default.
+func ParseConfig(configFile []byte, filename string, catchAllDefault string) (LocationResolver, error) {
+	configSchemaOnce.Do(func() {
+		ctx := cuecontext.New()
+		schemav := ctx.CompileBytes(configSchemaData, cue.Filename("cuelang.org/go/internal/mod/modresolve/schema.cue"))
+		schemav = schemav.LookupPath(cue.MakePath(cue.Def("#file")))
+		if err := schemav.Validate(); err != nil {
+			panic(fmt.Errorf("internal error: invalid CUE registry config schema: %v", errors.Details(err, nil)))
+		}
+		_configSchema = schemav
+	})
+	configSchemaMutex.Lock()
+	defer configSchemaMutex.Unlock()
+
+	v := _configSchema.Context().CompileBytes(configFile, cue.Filename(filename))
+	if err := v.Err(); err != nil {
+		return nil, errors.Wrapf(err, token.NoPos, "invalid registry configuration file")
+	}
+	v = v.Unify(_configSchema)
+	if err := v.Err(); err != nil {
+		return nil, errors.Wrapf(err, token.NoPos, "invalid configuration file")
+	}
+	var cfg config
+	if err := v.Decode(&cfg); err != nil {
+		return nil, errors.Wrapf(err, token.NoPos, "internal error: cannot decode into registry config struct")
+	}
+	if err := cfg.init(); err != nil {
+		return nil, err
+	}
+	if cfg.DefaultRegistry == nil {
+		if catchAllDefault == "" {
+			return nil, fmt.Errorf("no default catch-all registry provided")
+		}
+		// TODO is it too limiting to have the catch-all registry specified as a simple string?
+		reg, err := parseRegistry(catchAllDefault)
+		if err != nil {
+			return nil, fmt.Errorf("invalid catch-all registry %q: %v", catchAllDefault, err)
+		}
+		cfg.DefaultRegistry = reg
+	}
+	r := &resolver{
+		cfg: cfg,
+	}
+	if err := r.initHosts(); err != nil {
+		return nil, err
+	}
+	return r, nil
+}
+
+// ParseCUERegistry parses a registry routing specification that
+// maps module prefixes to the registry that should be used to
+// fetch that module.
+//
+// The specification consists of an order-independent, comma-separated list.
+//
+// Each element either maps a module prefix to the registry that will be used
+// for all modules that have that prefix (prefix=registry), or a catch-all registry to be used
+// for modules that do not match any prefix (registry).
+//
+// For example:
+//
+//	myorg.com=myregistry.com/m,catchallregistry.example.org
+//
+// Any module with a matching prefix will be routed to the given registry.
+// A prefix only matches whole path elements.
+// In the above example, module myorg.com/foo/bar@v0 will be looked up
+// in myregistry.com in the repository m/myorg.com/foo/bar,
+// whereas github.com/x/y will be looked up in catchallregistry.example.org.
+//
+// The registry part is syntactically similar to a [docker reference]
+// except that the repository is optional and no tag or digest is allowed.
+// Additionally, a +secure or +insecure suffix may be used to indicate
+// whether to use a secure or insecure connection. Without that,
+// localhost, 127.0.0.1 and [::1] will default to insecure, and anything
+// else to secure.
+//
+// If s does not declare a catch-all registry location, catchAllDefault is
+// used. It is an error if s fails to declare a catch-all registry location
+// and no catchAllDefault is provided.
+//
+// [docker reference]: https://pkg.go.dev/github.com/distribution/reference
+func ParseCUERegistry(s string, catchAllDefault string) (LocationResolver, error) {
+	if s == "" && catchAllDefault == "" {
+		return nil, fmt.Errorf("no catch-all registry or default")
+	}
+	if s == "" {
+		s = catchAllDefault
+	}
+	cfg := config{
+		ModuleRegistries: make(map[string]*registryConfig),
+	}
+	parts := strings.Split(s, ",")
+	for _, part := range parts {
+		key, val, ok := strings.Cut(part, "=")
+		if !ok {
+			if part == "" {
+				// TODO or just ignore it?
+				return nil, fmt.Errorf("empty registry part")
+			}
+			if _, ok := cfg.ModuleRegistries[""]; ok {
+				return nil, fmt.Errorf("duplicate catch-all registry")
+			}
+			key, val = "", part
+		} else {
+			if key == "" {
+				return nil, fmt.Errorf("empty module prefix")
+			}
+			if val == "" {
+				return nil, fmt.Errorf("empty registry reference")
+			}
+			if err := module.CheckPathWithoutVersion(key); err != nil {
+				return nil, fmt.Errorf("invalid module path %q: %v", key, err)
+			}
+			if _, ok := cfg.ModuleRegistries[key]; ok {
+				return nil, fmt.Errorf("duplicate module prefix %q", key)
+			}
+		}
+		reg, err := parseRegistry(val)
+		if err != nil {
+			return nil, fmt.Errorf("invalid registry %q: %v", val, err)
+		}
+		cfg.ModuleRegistries[key] = reg
+	}
+	if _, ok := cfg.ModuleRegistries[""]; !ok {
+		if catchAllDefault == "" {
+			return nil, fmt.Errorf("no default catch-all registry provided")
+		}
+		reg, err := parseRegistry(catchAllDefault)
+		if err != nil {
+			return nil, fmt.Errorf("invalid catch-all registry %q: %v", catchAllDefault, err)
+		}
+		cfg.ModuleRegistries[""] = reg
+	}
+	cfg.DefaultRegistry = cfg.ModuleRegistries[""]
+	delete(cfg.ModuleRegistries, "")
+
+	r := &resolver{
+		cfg: cfg,
+	}
+	if err := r.initHosts(); err != nil {
+		return nil, err
+	}
+	return r, nil
+}
+
+type resolver struct {
+	allHosts []Host
+	cfg      config
+}
+
+func (r *resolver) initHosts() error {
+	hosts := make(map[string]bool)
+	addHost := func(reg *registryConfig) error {
+		if reg.none {
+			return nil
+		}
+		if insecure, ok := hosts[reg.host]; ok {
+			if insecure != reg.insecure {
+				return fmt.Errorf("registry host %q is specified both as secure and insecure", reg.host)
+			}
+		} else {
+			hosts[reg.host] = reg.insecure
+		}
+		return nil
+	}
+	for _, reg := range r.cfg.ModuleRegistries {
+		if err := addHost(reg); err != nil {
+			return err
+		}
+	}
+
+	if reg := r.cfg.DefaultRegistry; reg != nil {
+		if err := addHost(reg); err != nil {
+			return err
+		}
+	}
+	allHosts := make([]Host, 0, len(hosts))
+	for host, insecure := range hosts {
+		allHosts = append(allHosts, Host{
+			Name:     host,
+			Insecure: insecure,
+		})
+	}
+	sort.Slice(allHosts, func(i, j int) bool {
+		return allHosts[i].Name < allHosts[j].Name
+	})
+	r.allHosts = allHosts
+	return nil
+}
+
+// AllHosts implements LocationResolver.AllHosts.
+func (r *resolver) AllHosts() []Host {
+	return r.allHosts
+}
+
+func (r *resolver) ResolveToLocation(mpath, vers string) (Location, bool) {
+	if mpath == "" {
+		return Location{}, false
+	}
+	bestMatch := ""
+	// Note: there's always a wildcard match.
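+	// For illustration (hypothetical prefixes): for mpath
+	// "myorg.com/foo/bar", the patterns "myorg.com" and "myorg.com/foo"
+	// both match below and the longer one wins, while "myorg.com/fo"
+	// does not match, because a prefix only matches whole path elements.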
+	bestMatchReg := r.cfg.DefaultRegistry
+	for pat, reg := range r.cfg.ModuleRegistries {
+		if pat == mpath {
+			bestMatch = pat
+			bestMatchReg = reg
+			break
+		}
+		if !strings.HasPrefix(mpath, pat) {
+			continue
+		}
+		if len(bestMatch) > len(pat) {
+			// We've already found a more specific match.
+			continue
+		}
+		if mpath[len(pat)] != '/' {
+			// The path doesn't have a separator at the end of
+			// the prefix, which means that it doesn't match.
+			// For example, foo.com/bar does not match foo.com/ba.
+			continue
+		}
+		// It's a possible match but not necessarily the longest one.
+		bestMatch, bestMatchReg = pat, reg
+	}
+	reg := bestMatchReg
+	if reg == nil || reg.none {
+		return Location{}, false
+	}
+	loc := Location{
+		Host:     reg.host,
+		Insecure: reg.insecure,
+		Tag:      vers,
+	}
+	switch reg.PathEncoding {
+	case encPath:
+		if reg.StripPrefix {
+			mpath = strings.TrimPrefix(mpath, bestMatch)
+			mpath = strings.TrimPrefix(mpath, "/")
+		}
+		loc.Repository = path.Join(reg.repository, mpath)
+	case encHashAsRepo:
+		loc.Repository = fmt.Sprintf("%s/%x", reg.repository, sha256.Sum256([]byte(mpath)))
+	case encHashAsTag:
+		loc.Repository = reg.repository
+	default:
+		panic("unreachable")
+	}
+	if reg.PathEncoding == encHashAsTag {
+		loc.Tag = fmt.Sprintf("%s%x-%s", reg.PrefixForTags, sha256.Sum256([]byte(mpath)), vers)
+	} else {
+		loc.Tag = reg.PrefixForTags + vers
+	}
+	return loc, true
+}
+
+func parseRegistry(env0 string) (*registryConfig, error) {
+	if env0 == "none" {
+		return &registryConfig{
+			Registry: env0,
+			none:     true,
+		}, nil
+	}
+	env := env0
+	var suffix string
+	if i := strings.LastIndex(env, "+"); i > 0 {
+		suffix = env[i:]
+		env = env[:i]
+	}
+	var r ociref.Reference
+	if !strings.Contains(env, "/") {
+		// OCI references don't allow a host name on its own without a repo,
+		// but we do allow it here.
+		r.Host = env
+		if !ociref.IsValidHost(r.Host) {
+			return nil, fmt.Errorf("invalid host name %q in registry", r.Host)
+		}
+	} else {
+		var err error
+		r, err = ociref.Parse(env)
+		if err != nil {
+			return nil, err
+		}
+		if r.Tag != "" || r.Digest != "" {
+			return nil, fmt.Errorf("cannot have an associated tag or digest")
+		}
+	}
+	if suffix == "" {
+		if isInsecureHost(r.Host) {
+			suffix = "+insecure"
+		} else {
+			suffix = "+secure"
+		}
+	}
+	insecure := false
+	switch suffix {
+	case "+insecure":
+		insecure = true
+	case "+secure":
+	default:
+		return nil, fmt.Errorf("unknown suffix (%q); need +insecure, +secure or no suffix", suffix)
+	}
+	return &registryConfig{
+		Registry:     env0,
+		PathEncoding: encPath,
+		host:         r.Host,
+		repository:   r.Repository,
+		insecure:     insecure,
+	}, nil
+}
+
+var (
+	ipV4Localhost = netip.MustParseAddr("127.0.0.1")
+	ipV6Localhost = netip.MustParseAddr("::1")
+)
+
+func isInsecureHost(hostPort string) bool {
+	host, _, err := net.SplitHostPort(hostPort)
+	if err != nil {
+		host = hostPort
+		if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
+			host = host[1 : len(host)-1]
+		}
+	}
+	if host == "localhost" {
+		return true
+	}
+	addr, err := netip.ParseAddr(host)
+	if err != nil {
+		return false
+	}
+	// TODO other clients have logic for RFC1918 too, amongst other
+	// things. Maybe we should do that too.
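+	// For illustration, a few example inputs and the results the rules
+	// above produce (assuming no RFC1918 handling):
+	//
+	//	isInsecureHost("localhost:5000")       == true
+	//	isInsecureHost("[::1]:8080")           == true
+	//	isInsecureHost("127.0.0.1")            == true
+	//	isInsecureHost("registry.example.org") == false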
+ return addr == ipV4Localhost || addr == ipV6Localhost +} diff --git a/vendor/cuelang.org/go/internal/mod/modresolve/schema.cue b/vendor/cuelang.org/go/internal/mod/modresolve/schema.cue new file mode 100644 index 0000000000..22931209d4 --- /dev/null +++ b/vendor/cuelang.org/go/internal/mod/modresolve/schema.cue @@ -0,0 +1,110 @@ +// Copyright 2024 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This aspect of #registry encodes the defaults used by the resolver +// parser. It's kept separate because it's technically bad practice to +// define regular fields as part of a schema, and by defining it this +// way, the pure schema can be read independently as such. +// +// TODO work out a nice way of doing this such that we don't have to +// mirror the fields in #file that mention #registry +#registry: { + pathEncoding: *"path" | _ +} + +// Note: public part of schema (included in help output) starts +// at "// #file" below. + +// #file represents the registry configuration schema. +#file: { + // moduleRegistries specifies a mapping from module path prefix + // (excluding any version suffix) to the registry to be used for + // all modules under that path. + // + // A prefix is considered to match if a non-zero number of + // initial path elements (sequences of non-slash characters) in + // a module path match the prefix. + // + // If there are multiple matching prefixes, the longest + // is chosen. + moduleRegistries?: [#modulePath]: #registry + + // defaultRegistry specifies a fallback registry to be used if no + // prefix from moduleRegistry matches. + // If it's not present, a system default will be used. + defaultRegistry?: #registry +} + +#registry: { + // registry specifies the registry host name and optionally, the + // repository prefix to use for all modules in the repository, + // and the security to use when accessing the host. + // + // It is in the form: + // hostname[:port][/repoPrefix][+insecure] + // + // The hostname must be specified in square brackets if it's an + // IPv6 address. + // + // Connections will be secure unless explicitly specified + // otherwise, except for localhost connections which default to + // insecure. + // + // See the doc comment on pathEncoding for details as to how + // repoPrefix is used to determine the repository to use for a + // specific module. + // + // As a special case, the registry may be "none", indicating + // that there is no registry for its associated modules. + // If a module resolves to a "none" registry, the resolver + // will return an error. + // + // Examples: + // "localhost:1234" + // "myregistry.example/my-modules+secure" + // "none" + registry!: string + + // pathEncoding specifies how module versions map to + // repositories within a registry. + // Possible values are: + // - "path": the repository is used as a prefix to the unencoded + // module path. The version of the module is used as a tag. 
+	// - "hashAsRepo": the hex-encoded SHA256 hash of the path is
+	// used as a suffix to the above repository value. The version
+	// of the module is used as a tag.
+	// - "hashAsTag": the repository is used as is: the hex-encoded
+	// SHA256 hash of the path followed by a hyphen and the version
+	// is used as a tag.
+	pathEncoding?: "path" | "hashAsRepo" | "hashAsTag"
+
+	// prefixForTags specifies an arbitrary prefix that's added to
+	// all tags. This can be used to disambiguate tags when there
+	// might be some possibility of confusion with tags in use for
+	// other purposes.
+	prefixForTags?: #tag
+
+	// TODO we could encode the invariant below in CUE but that
+	// would result in poor error messages. With an error builtin,
+	// that could perhaps be improved.
+
+	// stripPrefix specifies that the pattern prefix should be
+	// stripped from the module path before using as a repository
+	// path. This only applies when pathEncoding is "path".
+	stripPrefix?: bool
+}
+
+// TODO more specific schemas below
+#modulePath: string
+#tag:        string
diff --git a/vendor/cuelang.org/go/internal/mod/module/module.go b/vendor/cuelang.org/go/internal/mod/module/module.go
deleted file mode 100644
index 8ffeadaf3d..0000000000
--- a/vendor/cuelang.org/go/internal/mod/module/module.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package module defines the module.Version type along with support code.
-//
-// The module.Version type is a simple Path, Version pair:
-//
-//	type Version struct {
-//		Path string
-//		Version string
-//	}
-//
-// There are no restrictions imposed directly by use of this structure,
-// but additional checking functions, most notably Check, verify that
-// a particular path, version pair is valid.
-package module
-
-// IMPORTANT NOTE
-//
-// This file essentially defines the set of valid import paths for the cue command.
-// There are many subtle considerations, including Unicode ambiguity,
-// security, network, and file system representations.
-
-import (
-	"fmt"
-	"sort"
-	"strings"
-
-	"cuelang.org/go/internal/mod/semver"
-)
-
-// A Version (for clients, a module.Version) is defined by a module path and version pair.
-// These are stored in their plain (unescaped) form.
-// This type is comparable.
-type Version struct {
-	path    string
-	version string
-}
-
-// Path returns the module path part of the Version,
-// which always includes the major version suffix
-// unless a module path, like "github.com/foo/bar@v0".
-// Note that in general the path should include the major version suffix
-// even though it's implied from the version. The Canonical
-// method can be used to add the major version suffix if not present.
-// The BasePath method can be used to obtain the path without
-// the suffix.
-func (m Version) Path() string {
-	return m.path
-}
-
-func (m Version) Equal(m1 Version) bool {
-	return m.path == m1.path && m.version == m1.version
-}
-
-func (m Version) BasePath() string {
-	basePath, _, ok := SplitPathVersion(m.path)
-	if !ok {
-		panic(fmt.Errorf("broken invariant: failed to split version in %q", m.path))
-	}
-	return basePath
-}
-
-func (m Version) Version() string {
-	return m.version
-}
-
-// String returns the string form of the Version:
-// (Path@Version, or just Path if Version is empty).
-func (m Version) String() string { - if m.version == "" { - return m.path - } - return m.BasePath() + "@" + m.version -} - -func MustParseVersion(s string) Version { - v, err := ParseVersion(s) - if err != nil { - panic(err) - } - return v -} - -// ParseVersion parses a $module@$version -// string into a Version. -// The version must be canonical (i.e. it can't be -// just a major version). -func ParseVersion(s string) (Version, error) { - basePath, vers, ok := SplitPathVersion(s) - if !ok { - return Version{}, fmt.Errorf("invalid module path@version %q", s) - } - if semver.Canonical(vers) != vers { - return Version{}, fmt.Errorf("module version in %q is not canonical", s) - } - return Version{basePath + "@" + semver.Major(vers), vers}, nil -} - -func MustNewVersion(path string, vers string) Version { - v, err := NewVersion(path, vers) - if err != nil { - panic(err) - } - return v -} - -// NewVersion forms a Version from the given path and version. -// The version must be canonical, empty or "none". -// If the path doesn't have a major version suffix, one will be added -// if the version isn't empty; if the version is empty, it's an error. -func NewVersion(path string, vers string) (Version, error) { - if vers != "" && vers != "none" { - if !semver.IsValid(vers) { - return Version{}, fmt.Errorf("version %q (of module %q) is not well formed", vers, path) - } - if semver.Canonical(vers) != vers { - return Version{}, fmt.Errorf("version %q (of module %q) is not canonical", vers, path) - } - maj := semver.Major(vers) - _, vmaj, ok := SplitPathVersion(path) - if ok && maj != vmaj { - return Version{}, fmt.Errorf("mismatched major version suffix in %q (version %v)", path, vers) - } - if !ok { - fullPath := path + "@" + maj - if _, _, ok := SplitPathVersion(fullPath); !ok { - return Version{}, fmt.Errorf("cannot form version path from %q, version %v", path, vers) - } - path = fullPath - } - } else { - if _, _, ok := SplitPathVersion(path); !ok { - return Version{}, fmt.Errorf("path %q has no major version", path) - } - } - if vers == "" { - if err := CheckPath(path); err != nil { - return Version{}, err - } - } else { - if err := Check(path, vers); err != nil { - return Version{}, err - } - } - return Version{ - path: path, - version: vers, - }, nil -} - -// Sort sorts the list by Path, breaking ties by comparing Version fields. -// The Version fields are interpreted as semantic versions (using semver.Compare) -// optionally followed by a tie-breaking suffix introduced by a slash character, -// like in "v0.0.1/module.cue". -func Sort(list []Version) { - sort.Slice(list, func(i, j int) bool { - mi := list[i] - mj := list[j] - if mi.path != mj.path { - return mi.path < mj.path - } - // To help go.sum formatting, allow version/file. - // Compare semver prefix by semver rules, - // file by string order. 
- vi := mi.version - vj := mj.version - var fi, fj string - if k := strings.Index(vi, "/"); k >= 0 { - vi, fi = vi[:k], vi[k:] - } - if k := strings.Index(vj, "/"); k >= 0 { - vj, fj = vj[:k], vj[k:] - } - if vi != vj { - return semver.Compare(vi, vj) < 0 - } - return fi < fj - }) -} diff --git a/vendor/cuelang.org/go/internal/mod/mvs/graph.go b/vendor/cuelang.org/go/internal/mod/mvs/graph.go index bff8bba42e..6cab7c3ed2 100644 --- a/vendor/cuelang.org/go/internal/mod/mvs/graph.go +++ b/vendor/cuelang.org/go/internal/mod/mvs/graph.go @@ -6,9 +6,8 @@ package mvs import ( "fmt" + "slices" "sort" - - "cuelang.org/go/internal/slices" ) // Versions is an interface that should be provided by implementations diff --git a/vendor/cuelang.org/go/internal/mod/mvs/mvs.go b/vendor/cuelang.org/go/internal/mod/mvs/mvs.go index 0a9cd134a9..45d444f332 100644 --- a/vendor/cuelang.org/go/internal/mod/mvs/mvs.go +++ b/vendor/cuelang.org/go/internal/mod/mvs/mvs.go @@ -8,11 +8,11 @@ package mvs import ( "fmt" - "reflect" + "slices" "sort" "sync" - "cuelang.org/go/internal/mod/internal/par" + "cuelang.org/go/internal/par" ) // A Reqs is the requirement graph on which Minimal Version Selection (MVS) operates. @@ -72,7 +72,7 @@ type DowngradeReqs[V comparable] interface { // BuildList returns the build list for the target module. // -// target is the root vertex of a module requirement graph. For cmd/go, this is +// target is the root vertex of a module requirement graph. For cmd/cue, this is // typically the main module, but note that this algorithm is not intended to // be Go-specific: module paths and versions are treated as opaque values. // @@ -170,7 +170,7 @@ func buildList[V comparable](targets []V, reqs Reqs[V], upgrade func(V) (V, erro // The final list is the minimum version of each module found in the graph. list := g.BuildList() - if vs := list[:len(targets)]; !reflect.DeepEqual(vs, targets) { + if vs := list[:len(targets)]; !slices.Equal(vs, targets) { // target.Version will be "" for modload, the main client of MVS. // "" denotes the main module, which has no version. However, MVS treats // version strings as opaque, so "" is not a special value here. @@ -317,7 +317,7 @@ func Upgrade[V comparable](target V, reqs UpgradeReqs[V], upgrade ...V) ([]V, er } } - return buildList[V]([]V{target}, &override[V]{target, list, reqs}, func(m V) (V, error) { + return buildList([]V{target}, &override[V]{target, list, reqs}, func(m V) (V, error) { if v, ok := upgradeTo[reqs.Path(m)]; ok { return reqs.New(reqs.Path(m), v) } @@ -340,7 +340,7 @@ func Downgrade[V comparable](target V, reqs DowngradeReqs[V], downgrade ...V) ([ // // In order to generate those new requirements, we need to identify versions // for every module in the build list — not just reqs.Required(target). - list, err := BuildList[V]([]V{target}, reqs) + list, err := BuildList([]V{target}, reqs) if err != nil { return nil, err } @@ -460,7 +460,7 @@ List: // list with the actual versions of the downgraded modules as selected by MVS, // instead of our initial downgrades. // (See the downhiddenartifact and downhiddencross test cases). 
- actual, err := BuildList[V]([]V{target}, &override[V]{ + actual, err := BuildList([]V{target}, &override[V]{ target: target, list: downgraded, Reqs: reqs, @@ -485,7 +485,7 @@ List: } } - return BuildList[V]([]V{target}, &override[V]{ + return BuildList([]V{target}, &override[V]{ target: target, list: downgraded, Reqs: reqs, diff --git a/vendor/cuelang.org/go/internal/mod/semver/semver.go b/vendor/cuelang.org/go/internal/mod/semver/semver.go index a30a22bf20..e463c7de36 100644 --- a/vendor/cuelang.org/go/internal/mod/semver/semver.go +++ b/vendor/cuelang.org/go/internal/mod/semver/semver.go @@ -22,7 +22,10 @@ // as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. package semver -import "sort" +import ( + "cmp" + "slices" +) // parsed returns the parsed form of a semantic version string. type parsed struct { @@ -151,22 +154,14 @@ func Max(v, w string) string { return w } -// ByVersion implements sort.Interface for sorting semantic version strings. -type ByVersion []string - -func (vs ByVersion) Len() int { return len(vs) } -func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } -func (vs ByVersion) Less(i, j int) bool { - cmp := Compare(vs[i], vs[j]) - if cmp != 0 { - return cmp < 0 - } - return vs[i] < vs[j] -} - -// Sort sorts a list of semantic version strings using ByVersion. +// Sort sorts a list of semantic version strings. func Sort(list []string) { - sort.Sort(ByVersion(list)) + slices.SortFunc(list, func(a, b string) int { + if c := Compare(a, b); c != 0 { + return c + } + return cmp.Compare(a, b) + }) } func parse(v string) (p parsed, ok bool) { diff --git a/vendor/cuelang.org/go/internal/mod/internal/par/queue.go b/vendor/cuelang.org/go/internal/par/queue.go similarity index 100% rename from vendor/cuelang.org/go/internal/mod/internal/par/queue.go rename to vendor/cuelang.org/go/internal/par/queue.go diff --git a/vendor/cuelang.org/go/internal/mod/internal/par/work.go b/vendor/cuelang.org/go/internal/par/work.go similarity index 100% rename from vendor/cuelang.org/go/internal/mod/internal/par/work.go rename to vendor/cuelang.org/go/internal/par/work.go diff --git a/vendor/cuelang.org/go/internal/pkg/context.go b/vendor/cuelang.org/go/internal/pkg/context.go index 033420de61..335c5b2ccf 100644 --- a/vendor/cuelang.org/go/internal/pkg/context.go +++ b/vendor/cuelang.org/go/internal/pkg/context.go @@ -18,11 +18,12 @@ import ( "io" "math/big" + "github.com/cockroachdb/apd/v3" + "cuelang.org/go/cue" "cuelang.org/go/cue/token" "cuelang.org/go/internal/core/adt" "cuelang.org/go/internal/value" - "github.com/cockroachdb/apd/v3" ) // CallCtxt is passed to builtin implementations that need to use a cue.Value. This is an internal type. Its interface may change. 
@@ -129,11 +130,12 @@ func (c *CallCtxt) uintValue(i, bits int, typ string) uint64 { func (c *CallCtxt) Decimal(i int) *apd.Decimal { x := value.Make(c.ctx, c.args[i]) - if _, err := x.MantExp(nil); err != nil { + res, err := x.Decimal() + if err != nil { c.invalidArgType(c.args[i], i, "Decimal", err) return nil } - return &c.args[i].(*adt.Num).X + return res } func (c *CallCtxt) Float64(i int) float64 { diff --git a/vendor/cuelang.org/go/internal/pkg/types.go b/vendor/cuelang.org/go/internal/pkg/types.go index 06871a25b3..c2535a7b10 100644 --- a/vendor/cuelang.org/go/internal/pkg/types.go +++ b/vendor/cuelang.org/go/internal/pkg/types.go @@ -50,7 +50,7 @@ func (s *Struct) Arcs() []*adt.Vertex { func (s *Struct) Len() int { count := 0 for _, a := range s.Arcs() { - if a.Label.IsString() { + if a.Label.IsString() && !s.node.IsOptional(a.Label) { count++ } } @@ -62,6 +62,12 @@ func (s *Struct) IsOpen() bool { if !s.node.IsClosedStruct() { return true } + // Technically this is not correct, but it is in the context of where + // it is used. + if s.node.PatternConstraints != nil && len(s.node.PatternConstraints.Pairs) > 0 { + return true + } + // The equivalent code for the old implementation. ot := s.node.OptionalTypes() if ot&^adt.HasDynamic != 0 { return true diff --git a/vendor/cuelang.org/go/internal/slices/slices.go b/vendor/cuelang.org/go/internal/slices/slices.go deleted file mode 100644 index a0adcf4926..0000000000 --- a/vendor/cuelang.org/go/internal/slices/slices.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// TODO: Replace with slices package when it lands in standard library. - -package slices - -// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. -func Clip[S ~[]E, E any](s S) S { - return s[:len(s):len(s)] -} diff --git a/vendor/cuelang.org/go/internal/source/source.go b/vendor/cuelang.org/go/internal/source/source.go index 38ae7b6ec3..8ba8c96084 100644 --- a/vendor/cuelang.org/go/internal/source/source.go +++ b/vendor/cuelang.org/go/internal/source/source.go @@ -21,27 +21,28 @@ import ( "fmt" "io" "os" + "strings" ) -// Read loads the source bytes for the given arguments. If src != nil, -// Read converts src to a []byte if possible; otherwise it returns an -// error. If src == nil, readSource returns the result of reading the file +// ReadAll loads the source bytes for the given arguments. If src != nil, +// ReadAll converts src to a []byte if possible; otherwise it returns an +// error. If src == nil, ReadAll returns the result of reading the file // specified by filename. -func Read(filename string, src interface{}) ([]byte, error) { +func ReadAll(filename string, src any) ([]byte, error) { if src != nil { - switch s := src.(type) { + switch src := src.(type) { case string: - return []byte(s), nil + return []byte(src), nil case []byte: - return s, nil + return src, nil case *bytes.Buffer: // is io.Reader, but src is already available in []byte form - if s != nil { - return s.Bytes(), nil + if src != nil { + return src.Bytes(), nil } case io.Reader: var buf bytes.Buffer - if _, err := io.Copy(&buf, s); err != nil { + if _, err := io.Copy(&buf, src); err != nil { return nil, err } return buf.Bytes(), nil @@ -50,3 +51,22 @@ func Read(filename string, src interface{}) ([]byte, error) { } return os.ReadFile(filename) } + +// Open creates a source reader for the given arguments. 
If src != nil,
+// Open converts src to an io.ReadCloser if possible; otherwise it returns an
+// error. If src == nil, Open returns the result of opening the file
+// specified by filename.
+func Open(filename string, src any) (io.ReadCloser, error) {
+	if src != nil {
+		switch src := src.(type) {
+		case string:
+			return io.NopCloser(strings.NewReader(src)), nil
+		case []byte:
+			return io.NopCloser(bytes.NewReader(src)), nil
+		case io.Reader:
+			return io.NopCloser(src), nil
+		}
+		return nil, fmt.Errorf("invalid source type %T", src)
+	}
+	return os.Open(filename)
+}
diff --git a/vendor/cuelang.org/go/internal/task/task.go b/vendor/cuelang.org/go/internal/task/task.go
index 1c2475fb6a..78885d328c 100644
--- a/vendor/cuelang.org/go/internal/task/task.go
+++ b/vendor/cuelang.org/go/internal/task/task.go
@@ -23,6 +23,7 @@ import (
 	"cuelang.org/go/cue"
 	"cuelang.org/go/cue/errors"
 	"cuelang.org/go/cue/token"
+	"cuelang.org/go/internal/core/adt"
 	"cuelang.org/go/internal/value"
 )
 
@@ -38,7 +39,7 @@ type Context struct {
 }
 
 func (c *Context) Lookup(field string) cue.Value {
-	f := c.Obj.Lookup(field)
+	f := c.Obj.LookupPath(cue.MakePath(cue.Str(field)))
 	if !f.Exists() {
 		c.addErr(f, nil, "could not find field %q", field)
 		return cue.Value{}
@@ -50,7 +51,7 @@
 }
 
 func (c *Context) Int64(field string) int64 {
-	f := c.Obj.Lookup(field)
+	f := c.Obj.LookupPath(cue.MakePath(cue.Str(field)))
 	value, err := f.Int64()
 	if err != nil {
 		c.addErr(f, err, "invalid integer argument")
@@ -60,7 +61,7 @@
 }
 
 func (c *Context) String(field string) string {
-	f := c.Obj.Lookup(field)
+	f := c.Obj.LookupPath(cue.MakePath(cue.Str(field)))
 	value, err := f.String()
 	if err != nil {
 		c.addErr(f, err, "invalid string argument")
@@ -70,7 +71,7 @@
 }
 
 func (c *Context) Bytes(field string) []byte {
-	f := c.Obj.Lookup(field)
+	f := c.Obj.LookupPath(cue.MakePath(cue.Str(field)))
 	value, err := f.Bytes()
 	if err != nil {
 		c.addErr(f, err, "invalid bytes argument")
@@ -113,11 +114,12 @@ func (t *taskError) Position() token.Pos {
 
 func (t *taskError) InputPositions() (a []token.Pos) {
 	_, nx := value.ToInternal(t.v)
-	for _, x := range nx.Conjuncts {
+	nx.VisitLeafConjuncts(func(x adt.Conjunct) bool {
 		if src := x.Source(); src != nil {
 			a = append(a, src.Pos())
 		}
-	}
+		return true
+	})
 	return a
 }
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/decode.go b/vendor/cuelang.org/go/internal/third_party/yaml/decode.go
index c848c41e93..290e77f6cf 100644
--- a/vendor/cuelang.org/go/internal/third_party/yaml/decode.go
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/decode.go
@@ -1,22 +1,17 @@
 package yaml
 
 import (
-	"bytes"
 	"encoding/base64"
-	"errors"
 	"fmt"
-	"io"
 	"math"
-	"os"
-	"reflect"
 	"strconv"
 	"strings"
-	"time"
 
 	"cuelang.org/go/cue/ast"
 	"cuelang.org/go/cue/literal"
 	"cuelang.org/go/cue/token"
 	"cuelang.org/go/internal"
+	"cuelang.org/go/internal/source"
 )
 
 const (
@@ -48,36 +43,11 @@ type parser struct {
 	event    yaml_event_t
 	doc      *node
 	info     *token.File
-	last     *node
 	doneInit bool
 }
 
-func readSource(filename string, src interface{}) ([]byte, error) {
-	if src != nil {
-		switch s := src.(type) {
-		case string:
-			return []byte(s), nil
-		case []byte:
-			return s, nil
-		case *bytes.Buffer:
-			// is io.Reader, but src is already available in []byte form
-			if s != nil {
-				return s.Bytes(), nil
-			}
-		case io.Reader:
-			var buf bytes.Buffer
-			if _, err := io.Copy(&buf, s); err != nil {
-				return nil, err
-			}
-
return buf.Bytes(), nil - } - return nil, errors.New("invalid source") - } - return os.ReadFile(filename) -} - func newParser(filename string, src interface{}) (*parser, error) { - b, err := readSource(filename, src) + b, err := source.ReadAll(filename, src) if err != nil { return nil, err } @@ -266,17 +236,9 @@ type decoder struct { aliases map[*node]bool terrors []string prev token.Pos - lastNode ast.Node forceNewline bool } -var ( - durationType = reflect.TypeOf(time.Duration(0)) - defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) - timeType = reflect.TypeOf(time.Time{}) - ptrTimeType = reflect.TypeOf(&time.Time{}) -) - func newDecoder(p *parser) *decoder { d := &decoder{p: p} d.aliases = make(map[*node]bool) @@ -337,7 +299,7 @@ func (d *decoder) attachDocComments(m yaml_mark_t, pos int8, expr ast.Node) { line = c.mark.line } if len(comments) > 0 { - expr.AddComment(&ast.CommentGroup{ + ast.AddComment(expr, &ast.CommentGroup{ Doc: pos == 0 && line+1 == m.line, Position: pos, List: comments, @@ -355,7 +317,7 @@ func (d *decoder) attachLineComment(m yaml_mark_t, pos int8, expr ast.Node) { Slash: d.pos(c.mark), Text: "//" + c.text[1:], } - expr.AddComment(&ast.CommentGroup{ + ast.AddComment(expr, &ast.CommentGroup{ Line: true, Position: pos, List: []*ast.Comment{comment}, @@ -428,8 +390,6 @@ func (d *decoder) alias(n *node) ast.Expr { return node } -var zeroValue reflect.Value - func (d *decoder) scalar(n *node) ast.Expr { var tag string var resolved interface{} diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/yaml.go b/vendor/cuelang.org/go/internal/third_party/yaml/yaml.go index 771a4af0b9..fa1caccdda 100644 --- a/vendor/cuelang.org/go/internal/third_party/yaml/yaml.go +++ b/vendor/cuelang.org/go/internal/third_party/yaml/yaml.go @@ -3,16 +3,13 @@ // Source code and other details for the project are available at GitHub: // // https://github.com/go-yaml/yaml -package yaml // import "cuelang.org/go/internal/third_party/yaml" +package yaml import ( - "errors" "fmt" "io" - "reflect" "strconv" "strings" - "sync" "cuelang.org/go/cue/ast" ) @@ -113,145 +110,3 @@ type TypeError struct { func (e *TypeError) Error() string { return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) } - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - Id int - - // Inline holds the field index if the field is part of an inlined struct. 
- Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.Id = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} diff --git a/vendor/cuelang.org/go/mod/modcache/cache.go b/vendor/cuelang.org/go/mod/modcache/cache.go new file mode 100644 index 0000000000..c6dd1178e2 --- /dev/null +++ b/vendor/cuelang.org/go/mod/modcache/cache.go @@ -0,0 +1,173 @@ +// Package modcache provides a file-based cache for modules. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL. +// ITS API MAY CHANGE AT ANY TIME. +package modcache + +import ( + "context" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + + "github.com/rogpeppe/go-internal/lockedfile" + "github.com/rogpeppe/go-internal/robustio" + + "cuelang.org/go/mod/module" +) + +var errNotCached = fmt.Errorf("not in cache") + +// readDiskModFile reads a cached go.mod file from disk, +// returning the name of the cache file and the result. 
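+// (The "mod file" cached here is the CUE module file, cue.mod/module.cue,
+// stored in the download cache with a ".mod" suffix; see writeDiskModFile
+// and cachePath below.)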
+// If the read fails, the caller can use
+// writeDiskModFile(file, data) to write a new cache entry.
+func (c *cache) readDiskModFile(mv module.Version) (file string, data []byte, err error) {
+ return c.readDiskCache(mv, "mod")
+}
+
+// writeDiskModFile writes a cue.mod/module.cue cache entry.
+// The file name must have been returned by a previous call to readDiskModFile.
+func (c *cache) writeDiskModFile(ctx context.Context, file string, text []byte) error {
+ return c.writeDiskCache(ctx, file, text)
+}
+
+// readDiskCache is the generic "read from a cache file" implementation.
+// It takes the revision and an identifying suffix for the kind of data being cached.
+// It returns the name of the cache file and the content of the file.
+// If the read fails, the caller can use
+// writeDiskCache(file, data) to write a new cache entry.
+func (c *cache) readDiskCache(mv module.Version, suffix string) (file string, data []byte, err error) {
+ file, err = c.cachePath(mv, suffix)
+ if err != nil {
+ return "", nil, errNotCached
+ }
+ data, err = robustio.ReadFile(file)
+ if err != nil {
+ return file, nil, errNotCached
+ }
+ return file, data, nil
+}
+
+// writeDiskCache is the generic "write to a cache file" implementation.
+// The file must have been returned by a previous call to readDiskCache.
+func (c *cache) writeDiskCache(ctx context.Context, file string, data []byte) error {
+ if file == "" {
+ return nil
+ }
+ // Make sure directory for file exists.
+ if err := os.MkdirAll(filepath.Dir(file), 0777); err != nil {
+ return err
+ }
+
+ // Write the file to a temporary location, and then rename it to its final
+ // path to reduce the likelihood of a corrupt file existing at that final path.
+ f, err := tempFile(ctx, filepath.Dir(file), filepath.Base(file), 0666)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ // Only call os.Remove on f.Name() if we failed to rename it: otherwise,
+ // some other process may have created a new file with the same name after
+ // the rename completed.
+ if err != nil {
+ f.Close()
+ os.Remove(f.Name())
+ }
+ }()
+
+ if _, err := f.Write(data); err != nil {
+ return err
+ }
+ if err := f.Close(); err != nil {
+ return err
+ }
+ if err := robustio.Rename(f.Name(), file); err != nil {
+ return err
+ }
+ return nil
+}
+
+// downloadDir returns the directory for storing the extracted contents
+// of module m.
+// An error will be returned if the module path or version cannot be escaped.
+// An error satisfying errors.Is(err, fs.ErrNotExist) will be returned
+// along with the directory if the directory does not exist or if the directory
+// is not completely populated.
+func (c *cache) downloadDir(m module.Version) (string, error) {
+ if !m.IsCanonical() {
+ return "", fmt.Errorf("non-semver module version %q", m.Version())
+ }
+ enc, err := module.EscapePath(m.BasePath())
+ if err != nil {
+ return "", err
+ }
+ encVer, err := module.EscapeVersion(m.Version())
+ if err != nil {
+ return "", err
+ }
+
+ // Check whether the directory itself exists.
+ dir := filepath.Join(c.dir, "extract", enc+"@"+encVer)
+ if fi, err := os.Stat(dir); os.IsNotExist(err) {
+ return dir, err
+ } else if err != nil {
+ return dir, &downloadDirPartialError{dir, err}
+ } else if !fi.IsDir() {
+ return dir, &downloadDirPartialError{dir, errors.New("not a directory")}
+ }
+
+ // Check if a .partial file exists. This is created at the beginning of
+ // a download and removed after the zip is extracted.
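+ // For illustration (a hypothetical layout derived from cachePath and
+ // downloadDir in this file; module name invented), entries live under
+ // c.dir like:
+ //
+ //    extract/example.com/foo@v1.2.3/            extracted module contents
+ //    download/example.com/foo/@v/v1.2.3.zip     downloaded module zip
+ //    download/example.com/foo/@v/v1.2.3.partial in-progress marker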
+ partialPath, err := c.cachePath(m, "partial") + if err != nil { + return dir, err + } + if _, err := os.Stat(partialPath); err == nil { + return dir, &downloadDirPartialError{dir, errors.New("not completely extracted")} + } else if !os.IsNotExist(err) { + return dir, err + } + return dir, nil +} + +func (c *cache) cachePath(m module.Version, suffix string) (string, error) { + if !m.IsValid() || m.Version() == "" { + return "", fmt.Errorf("non-semver module version %q", m) + } + esc, err := module.EscapePath(m.BasePath()) + if err != nil { + return "", err + } + encVer, err := module.EscapeVersion(m.Version()) + if err != nil { + return "", err + } + return filepath.Join(c.dir, "download", esc, "/@v", encVer+"."+suffix), nil +} + +// downloadDirPartialError is returned by DownloadDir if a module directory +// exists but was not completely populated. +// +// downloadDirPartialError is equivalent to fs.ErrNotExist. +type downloadDirPartialError struct { + Dir string + Err error +} + +func (e *downloadDirPartialError) Error() string { return fmt.Sprintf("%s: %v", e.Dir, e.Err) } +func (e *downloadDirPartialError) Is(err error) bool { return err == fs.ErrNotExist } + +// lockVersion locks a file within the module cache that guards the downloading +// and extraction of module data for the given module version. +func (c *cache) lockVersion(mod module.Version) (unlock func(), err error) { + path, err := c.cachePath(mod, "lock") + if err != nil { + return nil, err + } + if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + return nil, err + } + return lockedfile.MutexAt(path).Lock() +} diff --git a/vendor/cuelang.org/go/mod/modcache/fetch.go b/vendor/cuelang.org/go/mod/modcache/fetch.go new file mode 100644 index 0000000000..fa167943b5 --- /dev/null +++ b/vendor/cuelang.org/go/mod/modcache/fetch.go @@ -0,0 +1,367 @@ +package modcache + +import ( + "context" + "errors" + "fmt" + "io" + "io/fs" + "log" + "math/rand" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/rogpeppe/go-internal/robustio" + + "cuelang.org/go/internal/mod/modload" + "cuelang.org/go/internal/par" + "cuelang.org/go/mod/modfile" + "cuelang.org/go/mod/modregistry" + "cuelang.org/go/mod/module" + "cuelang.org/go/mod/modzip" +) + +const logging = false // TODO hook this up to CUE_DEBUG + +// New returns r wrapped inside a caching layer that +// stores persistent cached content inside the given +// OS directory, typically ${CUE_CACHE_DIR}. +// +// The `module.SourceLoc.FS` fields in the locations +// returned by the registry implement the `OSRootFS` interface, +// allowing a caller to find the native OS filepath where modules +// are stored. 
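+//
+// A minimal usage sketch (hypothetical; client is an existing
+// *modregistry.Client and error handling is elided):
+//
+//    reg, err := modcache.New(client, cacheDir)
+//    loc, err := reg.Fetch(ctx, mv) // downloads and extracts if needed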
+func New(registry *modregistry.Client, dir string) (modload.Registry, error) { + info, err := os.Stat(dir) + if err == nil && !info.IsDir() { + return nil, fmt.Errorf("%q is not a directory", dir) + } + return &cache{ + dir: filepath.Join(dir, "mod"), + reg: registry, + }, nil +} + +type cache struct { + dir string // typically ${CUE_CACHE_DIR}/mod + reg *modregistry.Client + downloadZipCache par.ErrCache[module.Version, string] + modFileCache par.ErrCache[string, []byte] +} + +func (c *cache) Requirements(ctx context.Context, mv module.Version) ([]module.Version, error) { + data, err := c.downloadModFile(ctx, mv) + if err != nil { + return nil, err + } + mf, err := modfile.Parse(data, mv.String()) + if err != nil { + return nil, fmt.Errorf("cannot parse module file from %v: %v", mv, err) + } + return mf.DepVersions(), nil +} + +// Fetch returns the location of the contents for the given module +// version, downloading it if necessary. +func (c *cache) Fetch(ctx context.Context, mv module.Version) (module.SourceLoc, error) { + dir, err := c.downloadDir(mv) + if err == nil { + // The directory has already been completely extracted (no .partial file exists). + return c.dirToLocation(dir), nil + } + if dir == "" || !errors.Is(err, fs.ErrNotExist) { + return module.SourceLoc{}, err + } + + // To avoid cluttering the cache with extraneous files, + // DownloadZip uses the same lockfile as Download. + // Invoke DownloadZip before locking the file. + zipfile, err := c.downloadZip(ctx, mv) + if err != nil { + return module.SourceLoc{}, err + } + + unlock, err := c.lockVersion(mv) + if err != nil { + return module.SourceLoc{}, err + } + defer unlock() + + // Check whether the directory was populated while we were waiting on the lock. + _, dirErr := c.downloadDir(mv) + if dirErr == nil { + return c.dirToLocation(dir), nil + } + _, dirExists := dirErr.(*downloadDirPartialError) + + // Clean up any partially extracted directories (indicated by + // DownloadDirPartialError, usually because of a .partial file). This is only + // safe to do because the lock file ensures that their writers are no longer + // active. + parentDir := filepath.Dir(dir) + tmpPrefix := filepath.Base(dir) + ".tmp-" + + entries, _ := os.ReadDir(parentDir) + for _, entry := range entries { + if strings.HasPrefix(entry.Name(), tmpPrefix) { + RemoveAll(filepath.Join(parentDir, entry.Name())) // best effort + } + } + if dirExists { + if err := RemoveAll(dir); err != nil { + return module.SourceLoc{}, err + } + } + + partialPath, err := c.cachePath(mv, "partial") + if err != nil { + return module.SourceLoc{}, err + } + + // Extract the module zip directory at its final location. + // + // To prevent other processes from reading the directory if we crash, + // create a .partial file before extracting the directory, and delete + // the .partial file afterward (all while holding the lock). + // + // A technique used previously was to extract to a temporary directory with a random name + // then rename it into place with os.Rename. On Windows, this can fail with + // ERROR_ACCESS_DENIED when another process (usually an anti-virus scanner) + // opened files in the temporary directory. 
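+ // In outline, the steps below are:
+ //
+ //    1. create the .partial marker in the download directory
+ //    2. extract the zip into dir
+ //    3. remove the marker
+ //
+ // so a crash part-way through leaves the marker behind and downloadDir
+ // will report the directory as incomplete.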
+ if err := os.MkdirAll(parentDir, 0777); err != nil { + return module.SourceLoc{}, err + } + if err := os.WriteFile(partialPath, nil, 0666); err != nil { + return module.SourceLoc{}, err + } + if err := modzip.Unzip(dir, mv, zipfile); err != nil { + if rmErr := RemoveAll(dir); rmErr == nil { + os.Remove(partialPath) + } + return module.SourceLoc{}, err + } + if err := os.Remove(partialPath); err != nil { + return module.SourceLoc{}, err + } + makeDirsReadOnly(dir) + return c.dirToLocation(dir), nil +} + +// ModuleVersions implements [modload.Registry.ModuleVersions]. +func (c *cache) ModuleVersions(ctx context.Context, mpath string) ([]string, error) { + // TODO should this do any kind of short-term caching? + return c.reg.ModuleVersions(ctx, mpath) +} + +func (c *cache) downloadZip(ctx context.Context, mv module.Version) (zipfile string, err error) { + return c.downloadZipCache.Do(mv, func() (string, error) { + zipfile, err := c.cachePath(mv, "zip") + if err != nil { + return "", err + } + + // Return without locking if the zip file exists. + if _, err := os.Stat(zipfile); err == nil { + return zipfile, nil + } + logf("cue: downloading %s", mv) + unlock, err := c.lockVersion(mv) + if err != nil { + return "", err + } + defer unlock() + + if err := c.downloadZip1(ctx, mv, zipfile); err != nil { + return "", err + } + return zipfile, nil + }) +} + +func (c *cache) downloadZip1(ctx context.Context, mod module.Version, zipfile string) (err error) { + // Double-check that the zipfile was not created while we were waiting for + // the lock in downloadZip. + if _, err := os.Stat(zipfile); err == nil { + return nil + } + + // Create parent directories. + if err := os.MkdirAll(filepath.Dir(zipfile), 0777); err != nil { + return err + } + + // Clean up any remaining tempfiles from previous runs. + // This is only safe to do because the lock file ensures that their + // writers are no longer active. + tmpPattern := filepath.Base(zipfile) + "*.tmp" + if old, err := filepath.Glob(filepath.Join(quoteGlob(filepath.Dir(zipfile)), tmpPattern)); err == nil { + for _, path := range old { + os.Remove(path) // best effort + } + } + + // From here to the os.Rename call below is functionally almost equivalent to + // renameio.WriteToFile. We avoid using that so that we have control over the + // names of the temporary files (see the cleanup above) and to avoid adding + // renameio as an extra dependency. + f, err := tempFile(ctx, filepath.Dir(zipfile), filepath.Base(zipfile), 0666) + if err != nil { + return err + } + defer func() { + if err != nil { + f.Close() + os.Remove(f.Name()) + } + }() + + // TODO cache the result of GetModule so we don't have to do + // an extra round trip when we've already fetched the module file. + m, err := c.reg.GetModule(ctx, mod) + if err != nil { + return err + } + r, err := m.GetZip(ctx) + if err != nil { + return err + } + defer r.Close() + if _, err := io.Copy(f, r); err != nil { + return fmt.Errorf("failed to get module zip contents: %v", err) + } + if err := f.Close(); err != nil { + return err + } + if err := os.Rename(f.Name(), zipfile); err != nil { + return err + } + // TODO should we check the zip file for well-formedness? + // TODO: Should we make the .zip file read-only to discourage tampering? 
+ return nil +} + +func (c *cache) downloadModFile(ctx context.Context, mod module.Version) ([]byte, error) { + return c.modFileCache.Do(mod.String(), func() ([]byte, error) { + modfile, data, err := c.readDiskModFile(mod) + if err == nil { + return data, nil + } + logf("cue: downloading %s", mod) + unlock, err := c.lockVersion(mod) + if err != nil { + return nil, err + } + defer unlock() + // Double-check that the file hasn't been created while we were + // acquiring the lock. + _, data, err = c.readDiskModFile(mod) + if err == nil { + return data, nil + } + return c.downloadModFile1(ctx, mod, modfile) + }) +} + +func (c *cache) downloadModFile1(ctx context.Context, mod module.Version, modfile string) ([]byte, error) { + m, err := c.reg.GetModule(ctx, mod) + if err != nil { + return nil, err + } + data, err := m.ModuleFile(ctx) + if err != nil { + return nil, err + } + if err := c.writeDiskModFile(ctx, modfile, data); err != nil { + return nil, err + } + return data, nil +} + +func (c *cache) dirToLocation(fpath string) module.SourceLoc { + return module.SourceLoc{ + FS: module.OSDirFS(fpath), + Dir: ".", + } +} + +// makeDirsReadOnly makes a best-effort attempt to remove write permissions for dir +// and its transitive contents. +func makeDirsReadOnly(dir string) { + type pathMode struct { + path string + mode fs.FileMode + } + var dirs []pathMode // in lexical order + filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if err == nil && d.IsDir() { + info, err := d.Info() + if err == nil && info.Mode()&0222 != 0 { + dirs = append(dirs, pathMode{path, info.Mode()}) + } + } + return nil + }) + + // Run over list backward to chmod children before parents. + for i := len(dirs) - 1; i >= 0; i-- { + os.Chmod(dirs[i].path, dirs[i].mode&^0222) + } +} + +// RemoveAll removes a directory written by the cache, first applying +// any permission changes needed to do so. +func RemoveAll(dir string) error { + // Module cache has 0555 directories; make them writable in order to remove content. + filepath.WalkDir(dir, func(path string, info fs.DirEntry, err error) error { + if err != nil { + return nil // ignore errors walking in file system + } + if info.IsDir() { + os.Chmod(path, 0777) + } + return nil + }) + return robustio.RemoveAll(dir) +} + +// quoteGlob returns s with all Glob metacharacters quoted. +// We don't try to handle backslash here, as that can appear in a +// file path on Windows. +func quoteGlob(s string) string { + if !strings.ContainsAny(s, `*?[]`) { + return s + } + var sb strings.Builder + for _, c := range s { + switch c { + case '*', '?', '[', ']': + sb.WriteByte('\\') + } + sb.WriteRune(c) + } + return sb.String() +} + +// tempFile creates a new temporary file with given permission bits. +func tempFile(ctx context.Context, dir, prefix string, perm fs.FileMode) (f *os.File, err error) { + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+strconv.Itoa(rand.Intn(1000000000))+".tmp") + f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm) + if os.IsExist(err) { + if ctx.Err() != nil { + return nil, ctx.Err() + } + continue + } + break + } + return +} + +func logf(f string, a ...any) { + if logging { + log.Printf(f, a...) 
+ } +} diff --git a/vendor/cuelang.org/go/mod/modconfig/modconfig.go b/vendor/cuelang.org/go/mod/modconfig/modconfig.go new file mode 100644 index 0000000000..01102451a2 --- /dev/null +++ b/vendor/cuelang.org/go/mod/modconfig/modconfig.go @@ -0,0 +1,405 @@ +// Package modconfig provides access to the standard CUE +// module configuration, including registry access and authorization. +package modconfig + +import ( + "context" + "errors" + "fmt" + "io/fs" + "net/http" + "os" + "strings" + "sync" + + "cuelabs.dev/go/oci/ociregistry" + "cuelabs.dev/go/oci/ociregistry/ociauth" + "cuelabs.dev/go/oci/ociregistry/ociclient" + "golang.org/x/oauth2" + + "cuelang.org/go/internal/cueconfig" + "cuelang.org/go/internal/cueversion" + "cuelang.org/go/internal/mod/modload" + "cuelang.org/go/internal/mod/modresolve" + "cuelang.org/go/mod/modcache" + "cuelang.org/go/mod/modregistry" + "cuelang.org/go/mod/module" +) + +// Registry is used to access CUE modules from external sources. +type Registry interface { + // Requirements returns a list of the modules required by the given module + // version. + Requirements(ctx context.Context, m module.Version) ([]module.Version, error) + + // Fetch returns the location of the contents for the given module + // version, downloading it if necessary. + Fetch(ctx context.Context, m module.Version) (module.SourceLoc, error) + + // ModuleVersions returns all the versions for the module with the + // given path, which should contain a major version. + ModuleVersions(ctx context.Context, mpath string) ([]string, error) +} + +// We don't want to make modload part of the cue/load API, +// so we define the above type independently, but we want +// it to be interchangeable, so check that statically here. +var ( + _ Registry = modload.Registry(nil) + _ modload.Registry = Registry(nil) +) + +// DefaultRegistry is the default registry host. +const DefaultRegistry = "registry.cue.works" + +// Resolver implements [modregistry.Resolver] in terms of the +// CUE registry configuration file and auth configuration. +type Resolver struct { + resolver modresolve.LocationResolver + newRegistry func(host string, insecure bool) (ociregistry.Interface, error) + + mu sync.Mutex + registries map[string]ociregistry.Interface +} + +// Config provides the starting point for the configuration. +type Config struct { + // TODO allow for a custom resolver to be passed in. + + // Transport is used to make the underlying HTTP requests. + // If it's nil, [http.DefaultTransport] will be used. + Transport http.RoundTripper + + // Env provides environment variable values. If this is nil, + // the current process's environment will be used. + Env []string + + // ClientType is used as part of the User-Agent header + // that's added in each outgoing HTTP request. + // If it's empty, it defaults to "cuelang.org/go". + ClientType string +} + +// NewResolver returns an implementation of [modregistry.Resolver] +// that uses cfg to guide registry resolution. If cfg is nil, it's +// equivalent to passing pointer to a zero Config struct. +// +// It consults the same environment variables used by the +// cue command. +// +// The contents of the configuration will not be mutated. 
+func NewResolver(cfg *Config) (*Resolver, error) { + cfg = newRef(cfg) + cfg.Transport = cueversion.NewTransport(cfg.ClientType, cfg.Transport) + getenv := getenvFunc(cfg.Env) + var configData []byte + var configPath string + cueRegistry := getenv("CUE_REGISTRY") + kind, rest, _ := strings.Cut(cueRegistry, ":") + switch kind { + case "file": + data, err := os.ReadFile(rest) + if err != nil { + return nil, err + } + configData, configPath = data, rest + case "inline": + configData, configPath = []byte(rest), "$CUE_REGISTRY" + case "simple": + cueRegistry = rest + } + var resolver modresolve.LocationResolver + var err error + if configPath != "" { + resolver, err = modresolve.ParseConfig(configData, configPath, DefaultRegistry) + } else { + resolver, err = modresolve.ParseCUERegistry(cueRegistry, DefaultRegistry) + } + if err != nil { + return nil, fmt.Errorf("bad value for $CUE_REGISTRY: %v", err) + } + return &Resolver{ + resolver: resolver, + newRegistry: func(host string, insecure bool) (ociregistry.Interface, error) { + return ociclient.New(host, &ociclient.Options{ + Insecure: insecure, + Transport: &cueLoginsTransport{ + getenv: getenv, + cfg: cfg, + }, + }) + }, + registries: make(map[string]ociregistry.Interface), + }, nil +} + +// Host represents a registry host name and whether +// it should be accessed via a secure connection or not. +type Host = modresolve.Host + +// AllHosts returns all the registry hosts that the resolver might resolve to, +// ordered lexically by hostname. +func (r *Resolver) AllHosts() []Host { + return r.resolver.AllHosts() +} + +// HostLocation represents a registry host and a location with it. +type HostLocation = modresolve.Location + +// ResolveToLocation returns the host location for the given module path and version +// without creating a Registry instance for it. +func (r *Resolver) ResolveToLocation(mpath string, version string) (HostLocation, bool) { + return r.resolver.ResolveToLocation(mpath, version) +} + +// ResolveToRegistry implements [modregistry.Resolver.ResolveToRegistry]. +func (r *Resolver) ResolveToRegistry(mpath string, version string) (modregistry.RegistryLocation, error) { + loc, ok := r.resolver.ResolveToLocation(mpath, version) + if !ok { + // This can happen when mpath is invalid, which should not + // happen in practice, as the only caller is modregistry which + // vets module paths before calling Resolve. + // + // It can also happen when the user has explicitly configured a "none" + // registry to avoid falling back to a default registry. + return modregistry.RegistryLocation{}, fmt.Errorf("cannot resolve %s (version %q) to registry: %w", mpath, version, modregistry.ErrRegistryNotFound) + } + r.mu.Lock() + defer r.mu.Unlock() + reg := r.registries[loc.Host] + if reg == nil { + reg1, err := r.newRegistry(loc.Host, loc.Insecure) + if err != nil { + return modregistry.RegistryLocation{}, fmt.Errorf("cannot make client: %v", err) + } + r.registries[loc.Host] = reg1 + reg = reg1 + } + return modregistry.RegistryLocation{ + Registry: reg, + Repository: loc.Repository, + Tag: loc.Tag, + }, nil +} + +// cueLoginsTransport implements [http.RoundTripper] by using +// tokens from the CUE login information when available, falling +// back to using the standard [ociauth] transport implementation. +type cueLoginsTransport struct { + cfg *Config + getenv func(string) string + + // initOnce guards initErr, logins, and transport. + initOnce sync.Once + initErr error + // loginsMu guards the logins pointer below. 
+ // Note that an instance of cueconfig.Logins is read-only and + // does not have to be guarded. + loginsMu sync.Mutex + logins *cueconfig.Logins + // transport holds the underlying transport. This wraps + // t.cfg.Transport. + transport http.RoundTripper + + // mu guards the fields below. + mu sync.Mutex + + // cachedTransports holds a transport per host. + // This is needed because the oauth2 API requires a + // different client for each host. Each of these transports + // wraps the transport above. + cachedTransports map[string]http.RoundTripper +} + +func (t *cueLoginsTransport) RoundTrip(req *http.Request) (*http.Response, error) { + // Return an error lazily on the first request because if the + // user isn't doing anything that requires a registry, we + // shouldn't complain about reading a bad configuration file. + if err := t.init(); err != nil { + return nil, err + } + + t.loginsMu.Lock() + logins := t.logins + t.loginsMu.Unlock() + + if logins == nil { + return t.transport.RoundTrip(req) + } + // TODO: note that a CUE registry may include a path prefix, + // so using solely the host will not work with such a path. + // Can we do better here, perhaps keeping the path prefix up to "/v2/"? + host := req.URL.Host + login, ok := logins.Registries[host] + if !ok { + return t.transport.RoundTrip(req) + } + + t.mu.Lock() + transport := t.cachedTransports[host] + if transport == nil { + tok := cueconfig.TokenFromLogin(login) + oauthCfg := cueconfig.RegistryOAuthConfig(Host{ + Name: host, + Insecure: req.URL.Scheme == "http", + }) + + // Make the oauth client use the transport that was set up + // in init. + ctx := context.WithValue(req.Context(), oauth2.HTTPClient, &http.Client{ + Transport: t.transport, + }) + transport = oauth2.NewClient(ctx, + &cachingTokenSource{ + updateFunc: func(tok *oauth2.Token) error { + return t.updateLogin(host, tok) + }, + base: oauthCfg.TokenSource(ctx, tok), + t: tok, + }, + ).Transport + t.cachedTransports[host] = transport + } + // Unlock immediately so we don't hold the lock for the entire + // request, which would preclude any concurrency when + // making HTTP requests. + t.mu.Unlock() + return transport.RoundTrip(req) +} + +func (t *cueLoginsTransport) updateLogin(host string, new *oauth2.Token) error { + // Reload the logins file in case another process changed it in the meantime. + loginsPath, err := cueconfig.LoginConfigPath(t.getenv) + if err != nil { + // TODO: this should never fail. Log a warning. + return nil + } + + // Lock the logins for the entire duration of the update to avoid races + t.loginsMu.Lock() + defer t.loginsMu.Unlock() + + logins, err := cueconfig.UpdateRegistryLogin(loginsPath, host, new) + if err != nil { + return err + } + + t.logins = logins + + return nil +} + +func (t *cueLoginsTransport) init() error { + t.initOnce.Do(func() { + t.initErr = t._init() + }) + return t.initErr +} + +func (t *cueLoginsTransport) _init() error { + // If a registry was authenticated via `cue login`, use that. + // If not, fall back to authentication via Docker's config.json. + // Note that the order below is backwards, since we layer interfaces. + + config, err := ociauth.LoadWithEnv(nil, t.cfg.Env) + if err != nil { + return fmt.Errorf("cannot load OCI auth configuration: %v", err) + } + t.transport = ociauth.NewStdTransport(ociauth.StdTransportParams{ + Config: config, + Transport: t.cfg.Transport, + }) + + // If we can't locate a logins.json file at all, then we'll continue. 
+ // We only refuse to continue if we find an invalid logins.json file. + loginsPath, err := cueconfig.LoginConfigPath(t.getenv) + if err != nil { + // TODO: this should never fail. Log a warning. + return nil + } + logins, err := cueconfig.ReadLogins(loginsPath) + if errors.Is(err, fs.ErrNotExist) { + return nil + } + if err != nil { + return fmt.Errorf("cannot load CUE registry logins: %v", err) + } + t.logins = logins + t.cachedTransports = make(map[string]http.RoundTripper) + return nil +} + +// NewRegistry returns an implementation of the Registry +// interface suitable for passing to [load.Instances]. +// It uses the standard CUE cache directory. +func NewRegistry(cfg *Config) (Registry, error) { + cfg = newRef(cfg) + resolver, err := NewResolver(cfg) + if err != nil { + return nil, err + } + cacheDir, err := cueconfig.CacheDir(getenvFunc(cfg.Env)) + if err != nil { + return nil, err + } + return modcache.New(modregistry.NewClientWithResolver(resolver), cacheDir) +} + +func getenvFunc(env []string) func(string) string { + if env == nil { + return os.Getenv + } + return func(key string) string { + for i := len(env) - 1; i >= 0; i-- { + if e := env[i]; len(e) >= len(key)+1 && e[len(key)] == '=' && e[:len(key)] == key { + return e[len(key)+1:] + } + } + return "" + } +} + +func newRef[T any](x *T) *T { + var x1 T + if x != nil { + x1 = *x + } + return &x1 +} + +// cachingTokenSource works similar to oauth2.ReuseTokenSource, except that it +// also exposes a hook to get a hold of the refreshed token, so that it can be +// stored in persistent storage. +type cachingTokenSource struct { + updateFunc func(tok *oauth2.Token) error + base oauth2.TokenSource // called when t is expired + + mu sync.Mutex // guards t + t *oauth2.Token +} + +func (s *cachingTokenSource) Token() (*oauth2.Token, error) { + s.mu.Lock() + t := s.t + + if t.Valid() { + s.mu.Unlock() + return t, nil + } + + t, err := s.base.Token() + if err != nil { + s.mu.Unlock() + return nil, err + } + + s.t = t + s.mu.Unlock() + + err = s.updateFunc(t) + if err != nil { + return nil, err + } + + return t, nil +} diff --git a/vendor/cuelang.org/go/mod/modfile/modfile.go b/vendor/cuelang.org/go/mod/modfile/modfile.go new file mode 100644 index 0000000000..a1e9529682 --- /dev/null +++ b/vendor/cuelang.org/go/mod/modfile/modfile.go @@ -0,0 +1,586 @@ +// Copyright 2023 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package modfile provides functionality for reading and parsing +// the CUE module file, cue.mod/module.cue. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL. +// ITS API MAY CHANGE AT ANY TIME. 
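+//
+// For illustration, a minimal module.cue file in the modern form looks
+// like this (module paths invented):
+//
+//    module: "example.com/foo@v0"
+//    language: version: "v0.9.0"
+//    deps: "example.com/bar@v1": v: "v1.2.3"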
+package modfile

+import (
+ _ "embed"
+ "fmt"
+ "slices"
+ "strings"
+ "sync"
+
+ "cuelang.org/go/internal/mod/semver"
+
+ "cuelang.org/go/cue"
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/build"
+ "cuelang.org/go/cue/cuecontext"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/format"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal/cueversion"
+ "cuelang.org/go/internal/encoding"
+ "cuelang.org/go/internal/filetypes"
+ "cuelang.org/go/mod/module"
+)
+
+//go:embed schema.cue
+var moduleSchemaData string
+
+const schemaFile = "cuelang.org/go/mod/modfile/schema.cue"
+
+// File represents the contents of a cue.mod/module.cue file.
+type File struct {
+ // Module holds the module path, which may
+ // not contain a major version suffix.
+ // Use the [File.QualifiedModule] method to obtain a module
+ // path that's always qualified. See also the
+ // [File.ModulePath] and [File.MajorVersion] methods.
+ Module string `json:"module"`
+ Language *Language `json:"language,omitempty"`
+ Source *Source `json:"source,omitempty"`
+ Deps map[string]*Dep `json:"deps,omitempty"`
+ Custom map[string]map[string]any `json:"custom,omitempty"`
+ versions []module.Version
+ // defaultMajorVersions maps from module base path to the
+ // major version default for that path.
+ defaultMajorVersions map[string]string
+ // actualSchemaVersion holds the actual schema version
+ // that was used to validate the file. This will be one of the
+ // entries in the versions field in schema.cue and
+ // is set by the Parse functions.
+ actualSchemaVersion string
+}
+
+// QualifiedModule returns the fully qualified module path
+// if there is one. It returns the empty string when [ParseLegacy]
+// is used and the module field is empty.
+//
+// Note that when the module field does not contain a major
+// version suffix, "@v0" is assumed.
+func (f *File) QualifiedModule() string {
+ if strings.Contains(f.Module, "@") {
+ return f.Module
+ }
+ if f.Module == "" {
+ return ""
+ }
+ return f.Module + "@v0"
+}
+
+// ModulePath returns the path part of the module without
+// its major version suffix.
+func (f *File) ModulePath() string {
+ path, _, _ := module.SplitPathVersion(f.QualifiedModule())
+ return path
+}
+
+// MajorVersion returns the major version of the module,
+// not including the "@".
+// If there is no module (which can happen when [ParseLegacy]
+// is used or if Module is explicitly set to an empty string),
+// it returns the empty string.
+func (f *File) MajorVersion() string {
+ _, vers, _ := module.SplitPathVersion(f.QualifiedModule())
+ return vers
+}
+
+// baseFileVersion is used to decode the language version
+// to decide how to decode the rest of the file.
+type baseFileVersion struct {
+ Language struct {
+ Version string `json:"version"`
+ } `json:"language"`
+}
+
+// Source represents how to transform from a module's
+// source to its actual contents.
+type Source struct {
+ Kind string `json:"kind"`
+}
+
+// Validate checks that src is well formed.
+func (src *Source) Validate() error {
+ switch src.Kind {
+ case "git", "self":
+ return nil
+ }
+ return fmt.Errorf("unrecognized source kind %q", src.Kind)
+}
+
+// Format returns a formatted representation of f
+// in CUE syntax.
+func (f *File) Format() ([]byte, error) {
+ if len(f.Deps) == 0 && f.Deps != nil {
+ // There's no way to get the CUE encoder to omit an empty
+ // but non-nil map (despite the current doc comment on
+ // [cue.Context.Encode]), so make a copy of f to allow us
+ // to do that.
+ f1 := *f + f1.Deps = nil + f = &f1 + } + // TODO this could be better: + // - it should omit the outer braces + v := cuecontext.New().Encode(f) + if err := v.Validate(cue.Concrete(true)); err != nil { + return nil, err + } + n := v.Syntax(cue.Concrete(true)).(*ast.StructLit) + + data, err := format.Node(&ast.File{ + Decls: n.Elts, + }) + if err != nil { + return nil, fmt.Errorf("cannot format: %v", err) + } + // Sanity check that it can be parsed. + // TODO this could be more efficient by checking all the file fields + // before formatting the output. + f1, err := ParseNonStrict(data, "-") + if err != nil { + return nil, fmt.Errorf("cannot round-trip module file: %v", strings.TrimSuffix(errors.Details(err, nil), "\n")) + } + if f.Language != nil && f1.actualSchemaVersion == "v0.0.0" { + // It's not a legacy module file (because the language field is present) + // but we've used the legacy schema to parse it, which means that + // it's almost certainly a bogus version because all versions + // we care about fail when there are unknown fields, but the + // original schema allowed all fields. + return nil, fmt.Errorf("language version %v is too early for module.cue (need at least %v)", f.Language.Version, EarliestClosedSchemaVersion()) + } + return data, err +} + +type Language struct { + Version string `json:"version,omitempty"` +} + +type Dep struct { + Version string `json:"v"` + Default bool `json:"default,omitempty"` +} + +type noDepsFile struct { + Module string `json:"module"` +} + +var ( + moduleSchemaOnce sync.Once // guards the creation of _moduleSchema + // TODO remove this mutex when https://cuelang.org/issue/2733 is fixed. + moduleSchemaMutex sync.Mutex // guards any use of _moduleSchema + _schemas schemaInfo +) + +type schemaInfo struct { + Versions map[string]cue.Value `json:"versions"` + EarliestClosedSchemaVersion string `json:"earliestClosedSchemaVersion"` +} + +// moduleSchemaDo runs f with information about all the schema versions +// present in schema.cue. It does this within a mutex because it is +// not currently allowed to use cue.Value concurrently. +// TODO remove the mutex when https://cuelang.org/issue/2733 is fixed. +func moduleSchemaDo[T any](f func(*schemaInfo) (T, error)) (T, error) { + moduleSchemaOnce.Do(func() { + // It is important that this cue.Context not be used for building any other cue.Value, + // such as in [Parse] or [ParseLegacy]. + // A value holds memory as long as the context it was built with is kept alive for, + // and this context is alive forever via the _schemas global. + // + // TODO(mvdan): this violates the documented API rules in the cue package: + // + // Only values created from the same Context can be involved in the same operation. + // + // However, this appears to work in practice, and all alternatives right now would be + // either too costly or awkward. We want to lift that API restriction, and this works OK, + // so leave it as-is for the time being. + ctx := cuecontext.New() + schemav := ctx.CompileString(moduleSchemaData, cue.Filename(schemaFile)) + if err := schemav.Decode(&_schemas); err != nil { + panic(fmt.Errorf("internal error: invalid CUE module.cue schema: %v", errors.Details(err, nil))) + } + }) + moduleSchemaMutex.Lock() + defer moduleSchemaMutex.Unlock() + return f(&_schemas) +} + +func lookup(v cue.Value, sels ...cue.Selector) cue.Value { + return v.LookupPath(cue.MakePath(sels...)) +} + +// EarliestClosedSchemaVersion returns the earliest module.cue schema version +// that excludes unknown fields. 
Any version declared in a module.cue file
+// should be at least this, because that's when we added the language.version
+// field itself.
+func EarliestClosedSchemaVersion() string {
+ return schemaVersionLimits()[0]
+}
+
+// LatestKnownSchemaVersion returns the language version
+// associated with the most recent known schema.
+func LatestKnownSchemaVersion() string {
+ return schemaVersionLimits()[1]
+}
+
+var schemaVersionLimits = sync.OnceValue(func() [2]string {
+ limits, _ := moduleSchemaDo(func(info *schemaInfo) ([2]string, error) {
+ earliest := ""
+ latest := ""
+ for v := range info.Versions {
+ if earliest == "" || semver.Compare(v, earliest) < 0 {
+ earliest = v
+ }
+ if latest == "" || semver.Compare(v, latest) > 0 {
+ latest = v
+ }
+ }
+ return [2]string{earliest, latest}, nil
+ })
+ return limits
+})
+
+// Parse verifies that the module file has correct syntax
+// and follows the schema indicated by the required language.version field.
+// The file name is used for error messages.
+// All dependencies must be specified correctly: with major
+// versions in the module paths and canonical dependency versions.
+func Parse(modfile []byte, filename string) (*File, error) {
+ return parse(modfile, filename, true)
+}
+
+// ParseLegacy parses the legacy version of the module file
+// that only supports the single field "module" and ignores all other
+// fields.
+func ParseLegacy(modfile []byte, filename string) (*File, error) {
+ ctx := cuecontext.New()
+ file, err := parseDataOnlyCUE(ctx, modfile, filename)
+ if err != nil {
+ return nil, errors.Wrapf(err, token.NoPos, "invalid module.cue file syntax")
+ }
+ // Unfortunately we need a new context. See the note inside [moduleSchemaDo].
+ v := ctx.BuildFile(file)
+ if err := v.Err(); err != nil {
+ return nil, errors.Wrapf(err, token.NoPos, "invalid module.cue file")
+ }
+ var f noDepsFile
+ if err := v.Decode(&f); err != nil {
+ return nil, newCUEError(err, filename)
+ }
+ return &File{
+ Module: f.Module,
+ actualSchemaVersion: "v0.0.0",
+ }, nil
+}
+
+// ParseNonStrict is like Parse but allows some laxity in the parsing:
+// - if a module path lacks a major version, it's derived from the dependency's version.
+// - if a non-canonical version is used, it will be canonicalized.
+//
+// The file name is used for error messages.
+func ParseNonStrict(modfile []byte, filename string) (*File, error) {
+ return parse(modfile, filename, false)
+}
+
+// FixLegacy converts a legacy module.cue file as parsed by [ParseLegacy]
+// into a format suitable for parsing with [Parse]. It adds a language.version
+// field and moves all unrecognized fields into custom.legacy.
+//
+// If there is no module field or it is empty, it is set to "test.example".
+//
+// If the file already parses OK with [ParseNonStrict], it returns the
+// result of that.
+func FixLegacy(modfile []byte, filename string) (*File, error) {
+ f, err := ParseNonStrict(modfile, filename)
+ if err == nil {
+ // It parses OK so it doesn't need fixing.
+ return f, nil + } + ctx := cuecontext.New() + file, err := parseDataOnlyCUE(ctx, modfile, filename) + if err != nil { + return nil, errors.Wrapf(err, token.NoPos, "invalid module.cue file syntax") + } + v := ctx.BuildFile(file) + if err := v.Validate(cue.Concrete(true)); err != nil { + return nil, errors.Wrapf(err, token.NoPos, "invalid module.cue file value") + } + var allFields map[string]any + if err := v.Decode(&allFields); err != nil { + return nil, err + } + mpath := "test.example" + if m, ok := allFields["module"]; ok { + if mpath1, ok := m.(string); ok && mpath1 != "" { + mpath = mpath1 + } else if !ok { + return nil, fmt.Errorf("module field has unexpected type %T", m) + } + // TODO decide what to do if the module path isn't OK according to the new rules. + } + customLegacy := make(map[string]any) + for k, v := range allFields { + if k != "module" { + customLegacy[k] = v + } + } + var custom map[string]map[string]any + if len(customLegacy) > 0 { + custom = map[string]map[string]any{ + "legacy": customLegacy, + } + } + f = &File{ + Module: mpath, + Language: &Language{ + Version: cueversion.LanguageVersion(), + }, + Custom: custom, + } + // Round-trip through [Parse] so that we get exactly the same + // result as a later parse of the same data will. This also + // adds a major version to the module path if needed. + data, err := f.Format() + if err != nil { + return nil, fmt.Errorf("cannot format fixed file: %v", err) + } + f, err = ParseNonStrict(data, "fixed-"+filename) + if err != nil { + return nil, fmt.Errorf("cannot round-trip fixed module file %q: %v", data, err) + } + return f, nil +} + +func parse(modfile []byte, filename string, strict bool) (*File, error) { + // Unfortunately we need a new context. See the note inside [moduleSchemaDo]. + ctx := cuecontext.New() + file, err := parseDataOnlyCUE(ctx, modfile, filename) + if err != nil { + return nil, errors.Wrapf(err, token.NoPos, "invalid module.cue file syntax") + } + + v := ctx.BuildFile(file) + if err := v.Validate(cue.Concrete(true)); err != nil { + return nil, errors.Wrapf(err, token.NoPos, "invalid module.cue file value") + } + // First determine the declared version of the module file. + var base baseFileVersion + if err := v.Decode(&base); err != nil { + return nil, errors.Wrapf(err, token.NoPos, "cannot determine language version") + } + if base.Language.Version == "" { + return nil, ErrNoLanguageVersion + } + if !semver.IsValid(base.Language.Version) { + return nil, fmt.Errorf("language version %q in module.cue is not valid semantic version", base.Language.Version) + } + if mv, lv := base.Language.Version, cueversion.LanguageVersion(); semver.Compare(mv, lv) > 0 { + return nil, fmt.Errorf("language version %q declared in module.cue is too new for current language version %q", mv, lv) + } + + mf, err := moduleSchemaDo(func(schemas *schemaInfo) (*File, error) { + // Now that we're happy we're within bounds, find the latest + // schema that applies to the declared version. + latest := "" + var latestSchema cue.Value + for vers, schema := range schemas.Versions { + if semver.Compare(vers, base.Language.Version) > 0 { + continue + } + if latest == "" || semver.Compare(vers, latest) > 0 { + latest = vers + latestSchema = schema + } + } + if latest == "" { + // Should never happen, because there should always + // be some applicable schema. 
+ return nil, fmt.Errorf("cannot find schema suitable for reading module file with language version %q", base.Language.Version) + } + schema := latestSchema + v = v.Unify(lookup(schema, cue.Def("#File"))) + if err := v.Validate(); err != nil { + return nil, newCUEError(err, filename) + } + if latest == "v0.0.0" { + // The chosen schema is the earliest schema which allowed + // all fields. We don't actually want a module.cue file with + // an old version to treat those fields as special, so don't try + // to decode into *File because that will do so. + // This mirrors the behavior of [ParseLegacy]. + var f noDepsFile + if err := v.Decode(&f); err != nil { + return nil, newCUEError(err, filename) + } + return &File{ + Module: f.Module, + actualSchemaVersion: "v0.0.0", + }, nil + } + var mf File + if err := v.Decode(&mf); err != nil { + return nil, errors.Wrapf(err, token.NoPos, "internal error: cannot decode into modFile struct") + } + mf.actualSchemaVersion = latest + return &mf, nil + }) + if err != nil { + return nil, err + } + mainPath, mainMajor, ok := module.SplitPathVersion(mf.Module) + if ok { + if semver.Major(mainMajor) != mainMajor { + return nil, fmt.Errorf("module path %s in %q should contain the major version only", mf.Module, filename) + } + } else if mainPath = mf.Module; mainPath != "" { + if err := module.CheckPathWithoutVersion(mainPath); err != nil { + return nil, fmt.Errorf("module path %q in %q is not valid: %v", mainPath, filename, err) + } + // There's no main module major version: default to v0. + mainMajor = "v0" + } + if mf.Language != nil { + vers := mf.Language.Version + if !semver.IsValid(vers) { + return nil, fmt.Errorf("language version %q in %s is not well formed", vers, filename) + } + if semver.Canonical(vers) != vers { + return nil, fmt.Errorf("language version %v in %s is not canonical", vers, filename) + } + } + var versions []module.Version + // The main module is always the default for its own major version. + defaultMajorVersions := map[string]string{ + mainPath: mainMajor, + } + // Check that major versions match dependency versions. + for m, dep := range mf.Deps { + vers, err := module.NewVersion(m, dep.Version) + if err != nil { + return nil, fmt.Errorf("invalid module.cue file %s: cannot make version from module %q, version %q: %v", filename, m, dep.Version, err) + } + versions = append(versions, vers) + if strict && vers.Path() != m { + return nil, fmt.Errorf("invalid module.cue file %s: no major version in %q", filename, m) + } + if dep.Default { + mp := vers.BasePath() + if _, ok := defaultMajorVersions[mp]; ok { + return nil, fmt.Errorf("multiple default major versions found for %v", mp) + } + defaultMajorVersions[mp] = semver.Major(vers.Version()) + } + } + + if len(defaultMajorVersions) > 0 { + mf.defaultMajorVersions = defaultMajorVersions + } + mf.versions = versions[:len(versions):len(versions)] + module.Sort(mf.versions) + return mf, nil +} + +// ErrNoLanguageVersion is returned by [Parse] and [ParseNonStrict] +// when a cue.mod/module.cue file lacks the `language.version` field. 
+var ErrNoLanguageVersion = fmt.Errorf("no language version declared in module.cue") + +func parseDataOnlyCUE(ctx *cue.Context, cueData []byte, filename string) (*ast.File, error) { + dec := encoding.NewDecoder(ctx, &build.File{ + Filename: filename, + Encoding: build.CUE, + Interpretation: build.Auto, + Form: build.Data, + Source: cueData, + }, &encoding.Config{ + Mode: filetypes.Export, + AllErrors: true, + }) + if err := dec.Err(); err != nil { + return nil, err + } + return dec.File(), nil +} + +func newCUEError(err error, filename string) error { + ps := errors.Positions(err) + for _, p := range ps { + if errStr := findErrorComment(p); errStr != "" { + return fmt.Errorf("invalid module.cue file: %s", errStr) + } + } + // TODO we have more potential to improve error messages here. + return err +} + +// findErrorComment finds an error comment in the form +// +// //error: ... +// +// before the given position. +// This works as a kind of poor-man's error primitive +// so we can customize the error strings when verification +// fails. +func findErrorComment(p token.Pos) string { + if p.Filename() != schemaFile { + return "" + } + off := p.Offset() + source := moduleSchemaData + if off > len(source) { + return "" + } + source, _, ok := cutLast(source[:off], "\n") + if !ok { + return "" + } + _, errorLine, ok := cutLast(source, "\n") + if !ok { + return "" + } + errStr, ok := strings.CutPrefix(errorLine, "//error: ") + if !ok { + return "" + } + return errStr +} + +func cutLast(s, sep string) (before, after string, found bool) { + if i := strings.LastIndex(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return "", s, false +} + +// DepVersions returns the versions of all the modules depended on by the +// file. The caller should not modify the returned slice. +// +// This always returns the same value, even if the contents +// of f are changed. If f was not created with [Parse], it returns nil. +func (f *File) DepVersions() []module.Version { + return slices.Clip(f.versions) +} + +// DefaultMajorVersions returns a map from module base path +// to the major version that's specified as the default for that module. +// The caller should not modify the returned map. +func (f *File) DefaultMajorVersions() map[string]string { + return f.defaultMajorVersions +} diff --git a/vendor/cuelang.org/go/mod/modfile/schema.cue b/vendor/cuelang.org/go/mod/modfile/schema.cue new file mode 100644 index 0000000000..c6d5692441 --- /dev/null +++ b/vendor/cuelang.org/go/mod/modfile/schema.cue @@ -0,0 +1,147 @@ +// Copyright 2023 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This schema constrains a module.cue file. This form constrains module.cue files +// outside of the main module. For the module.cue file in a main module, the schema +// is less restrictive, because wherever #Semver is used, a less specific version may be +// used instead, which will be rewritten to the canonical form by the cue command tooling. 
+// To check against that form, unify the file with the #Strict definition
+// provided for each version below.
+
+// versions holds an element for each supported version
+// of the schema. The version key specifies that
+// the schema covers all versions from then until the
+// next version present in versions, or until the current CUE version,
+// whichever is earlier.
+versions: [string]: {
+ // #File represents the overarching module file schema.
+ #File!: {
+ // We always require the language version, as we use
+ // that to determine how to parse the file itself.
+ // Note that this schema is not used by [ParseLegacy]
+ // because legacy module.cue files did not support
+ // the language.version field.
+ language!: version!: string
+ }
+
+ // #Strict can be unified with the top level schema to enforce the strict version
+ // of the schema required when publishing a module.
+ #Strict!: _
+}
+
+versions: "v0.8.0-alpha.0": {
+ // Define this version in terms of the later versions
+ // rather than the other way around, so that
+ // the latest version is clearest.
+ versions["v0.9.0-alpha.0"]
+
+ // The source field was added in v0.9.0, so "remove"
+ // it here by marking it as an error when used.
+ #File: source?: _errorSourceFieldRequiredVersion
+}
+
+versions: "v0.9.0-alpha.0": {
+ #File: {
+ // module indicates the module's path.
+ module?: #Module | ""
+
+ // version indicates the language version used by the code in this module
+ // - the minimum version of CUE required to evaluate the code in this
+ // module. When a later version of CUE is evaluating code in this module,
+ // this will be used to choose version-specific behavior. If an earlier
+ // version of CUE is used, an error will be given.
+ language?: version?: #Semver
+
+ // source holds information about the source of the files within the
+ // module. This field is mandatory at publish time.
+ source?: #Source
+
+ // description describes the purpose of this module.
+ description?: string
+
+ // deps holds dependency information for modules, keyed by module path.
+ deps?: [#Module]: #Dep
+
+ // custom holds arbitrary data intended for use by third-party tools.
+ // Each field at the top level represents a tooling namespace,
+ // conventionally a module or domain name. Data migrated from legacy
+ // module.cue files is placed in the "legacy" namespace.
+ custom?: [#Module | "legacy"]: [_]: _
+
+ #Dep: {
+ // v indicates the minimum required version of the module. This can
+ // be null if the version is unknown and the module entry is only
+ // present to be replaced.
+ v!: #Semver | null
+
+ // default indicates this module is used as a default in case more
+ // than one major version is specified for the same module path.
+ // Imports must specify the exact major version for a module path if
+ // there is more than one major version for that path and default is
+ // not set for exactly one of them.
+ default?: bool
+ }
+
+ // #Module constrains a module path. The major version indicator is
+ // optional, but should always be present in a normalized module.cue
+ // file.
+ #Module: string
+
+ // #Semver constrains a semantic version.
+ #Semver: =~"."
+ }
+
+ // #Strict can be unified with the top level schema to enforce the strict version
+ // of the schema required by the registry.
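+ // For illustration (values invented): the non-strict #Semver above
+ // accepts any non-empty string, such as "v1.2", while the strict
+ // #Semver below requires a full canonical version:
+ //
+ //    deps: "example.com/bar@v1": v: "v1.2.3" // accepted under #Strict
+ //    deps: "example.com/bar@v1": v: "v1.2"   // rejected under #Strict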
+ #Strict: #File & { + // This regular expression is taken from https://semver.org/spec/v2.0.0.html + #Semver: =~#"^v(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:-(?P(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$"# + + #Module: =~#"^[^@]+(@v(0|[1-9]\d*))$"# + + // The module declaration is required. + module!: #Module + + // No null versions, because no replacements yet. + #Dep: v!: #Semver + } + + // #Source describes a source of truth for a module's content. + #Source: { + // kind specifies the kind of source. + // + // The special value "self" signifies a module is stand-alone, associated + // with no particular source. The module's file list is determined from + // the contents of the directory (and its subdirectories) that contains + // the cue.mod directory. + // + // See https://cuelang.org/docs/reference/modules/#determining-zip-file-contents + // for details on all the possible values for kind, and how they relate + // to determining the list of files in a module. + kind!: "self" | "git" + + // TODO support for other VCSs: + // kind!: "self" | "git" | "bzr" | "hg" | "svn" + } +} + +// The //error comments are specially recognized by the parsing +// code so we can avoid opaque conflict errors. +// TODO use error function when that's available. +// +// Note: we're using 1&2 rather than _|_ because +// use of _|_ causes the source location of the errors +// to be lost. See https://github.com/cue-lang/cue/issues/2319. + +//error: source field is not allowed at this language version; need at least v0.9.0-alpha.0 +let _errorSourceFieldRequiredVersion = 1 & 2 diff --git a/vendor/cuelang.org/go/mod/modregistry/client.go b/vendor/cuelang.org/go/mod/modregistry/client.go new file mode 100644 index 0000000000..c3cc9e153f --- /dev/null +++ b/vendor/cuelang.org/go/mod/modregistry/client.go @@ -0,0 +1,504 @@ +// Copyright 2023 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package modregistry provides functionality for reading and writing +// CUE modules from an OCI registry. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL. +// ITS API MAY CHANGE AT ANY TIME. +package modregistry + +import ( + "archive/zip" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + + "cuelabs.dev/go/oci/ociregistry" + "cuelang.org/go/internal/mod/semver" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "cuelang.org/go/mod/modfile" + "cuelang.org/go/mod/module" + "cuelang.org/go/mod/modzip" +) + +var ErrNotFound = fmt.Errorf("module not found") + +// Client represents a OCI-registry-backed client that +// provides a store for CUE modules. +type Client struct { + resolver Resolver +} + +// Resolver resolves module paths to a registry and a location +// within that registry. 
+type Resolver interface { + // ResolveToRegistry resolves a base module path (without a version) + // and optional version to the location for that path. + // + // If the version is empty, the Tag in the returned Location + // will hold the prefix that all versions of the module in its + // repository have. That prefix will be followed by the version + // itself. + // + // If there is no registry configured for the module, it returns + // an [ErrRegistryNotFound] error. + ResolveToRegistry(mpath, vers string) (RegistryLocation, error) +} + +// ErrRegistryNotFound is returned by [Resolver.ResolveToRegistry] +// when there is no registry configured for a module. +var ErrRegistryNotFound = fmt.Errorf("no registry configured for module") + +// RegistryLocation holds a registry and a location within it +// that a specific module (or set of versions for a module) +// will be stored. +type RegistryLocation struct { + // Registry holds the registry to use to access the module. + Registry ociregistry.Interface + // Repository holds the repository where the module is located. + Repository string + // Tag holds the tag for the module version. If an empty version + // was passed to Resolve, it holds the prefix shared by all + // version tags for the module. + Tag string +} + +const ( + moduleArtifactType = "application/vnd.cue.module.v1+json" + moduleFileMediaType = "application/vnd.cue.modulefile.v1" + moduleAnnotation = "works.cue.module" +) + +// NewClient returns a new client that talks to the registry at the given +// hostname. +func NewClient(registry ociregistry.Interface) *Client { + return &Client{ + resolver: singleResolver{registry}, + } +} + +// NewClientWithResolver returns a new client that uses the given +// resolver to decide which registries to fetch from or push to. +func NewClientWithResolver(resolver Resolver) *Client { + return &Client{ + resolver: resolver, + } +} + +// GetModule returns the module instance for the given version. +// It returns an error that satisfies errors.Is(ErrNotFound) if the +// module is not present in the store at this version. +func (c *Client) GetModule(ctx context.Context, m module.Version) (*Module, error) { + loc, err := c.resolve(m) + if err != nil { + if errors.Is(err, ErrRegistryNotFound) { + return nil, ErrNotFound + } + return nil, err + } + rd, err := loc.Registry.GetTag(ctx, loc.Repository, loc.Tag) + if err != nil { + if isNotExist(err) { + return nil, fmt.Errorf("module %v: %w", m, ErrNotFound) + } + return nil, fmt.Errorf("module %v: %w", m, err) + } + defer rd.Close() + data, err := io.ReadAll(rd) + if err != nil { + return nil, err + } + + return c.GetModuleWithManifest(m, data, rd.Descriptor().MediaType) +} + +// GetModuleWithManifest returns a module instance given +// the top level manifest contents, without querying its tag. +// It assumes that the module will be tagged with the given version. +func (c *Client) GetModuleWithManifest(m module.Version, contents []byte, mediaType string) (*Module, error) { + loc, err := c.resolve(m) + if err != nil { + // Note: don't return [ErrNotFound] here because if we've got the + // manifest we should be pretty sure that the module actually + // exists, so it's a harder error than if we're getting the module + // by tag. 
+ return nil, err + } + + manifest, err := unmarshalManifest(contents, mediaType) + if err != nil { + return nil, fmt.Errorf("module %v: %v", m, err) + } + if !isModule(manifest) { + return nil, fmt.Errorf("%v does not resolve to a manifest (media type is %q)", m, mediaType) + } + // TODO check type of manifest too. + if n := len(manifest.Layers); n != 2 { + return nil, fmt.Errorf("module manifest should refer to exactly two blobs, but got %d", n) + } + if !isModuleFile(manifest.Layers[1]) { + return nil, fmt.Errorf("unexpected media type %q for module file blob", manifest.Layers[1].MediaType) + } + // TODO check that the other blobs are of the expected type (application/zip). + return &Module{ + client: c, + loc: loc, + version: m, + manifest: *manifest, + manifestDigest: digest.FromBytes(contents), + }, nil +} + +// ModuleVersions returns all the versions for the module with the given path +// sorted in semver order. +// If m has a major version suffix, only versions with that major version will +// be returned. +func (c *Client) ModuleVersions(ctx context.Context, m string) ([]string, error) { + mpath, major, hasMajor := module.SplitPathVersion(m) + if !hasMajor { + mpath = m + } + loc, err := c.resolver.ResolveToRegistry(mpath, "") + if err != nil { + if errors.Is(err, ErrRegistryNotFound) { + return nil, nil + } + return nil, err + } + versions := []string{} + // Note: do not use c.repoName because that always expects + // a module path with a major version. + iter := loc.Registry.Tags(ctx, loc.Repository, "") + var _err error + iter(func(tag string, err error) bool { + if err != nil { + _err = err + return false + } + vers, ok := strings.CutPrefix(tag, loc.Tag) + if !ok || !semver.IsValid(vers) { + return true + } + if !hasMajor || semver.Major(vers) == major { + versions = append(versions, vers) + } + return true + }) + if _err != nil && !isNotExist(_err) { + return nil, fmt.Errorf("module %v: %w", m, _err) + } + semver.Sort(versions) + return versions, nil +} + +// checkedModule represents module content that has passed the same +// checks made by [Client.PutModule]. The caller should not mutate +// any of the values returned by its methods. +type checkedModule struct { + mv module.Version + blobr io.ReaderAt + size int64 + zipr *zip.Reader + modFile *modfile.File + modFileContent []byte +} + +// putCheckedModule is like [Client.PutModule] except that it allows the +// caller to do some additional checks (see [CheckModule] for more info). +func (c *Client) putCheckedModule(ctx context.Context, m *checkedModule, meta *Metadata) error { + var annotations map[string]string + if meta != nil { + annotations0, err := meta.annotations() + if err != nil { + return fmt.Errorf("invalid metadata: %v", err) + } + annotations = annotations0 + } + loc, err := c.resolve(m.mv) + if err != nil { + return err + } + selfDigest, err := digest.FromReader(io.NewSectionReader(m.blobr, 0, m.size)) + if err != nil { + return fmt.Errorf("cannot read module zip file: %v", err) + } + // Upload the actual module's content + // TODO should we use a custom media type for this? + configDesc, err := c.scratchConfig(ctx, loc, moduleArtifactType) + if err != nil { + return fmt.Errorf("cannot make scratch config: %v", err) + } + manifest := &ocispec.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, // historical value. does not pertain to OCI or docker version + }, + MediaType: ocispec.MediaTypeImageManifest, + Config: configDesc, + // One for self, one for module file. 
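+		// Layer 0 holds the module's zip archive and layer 1 holds the
+		// cue.mod/module.cue file; GetModuleWithManifest and
+		// Module.GetZip rely on exactly this order.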
+ Layers: []ocispec.Descriptor{{ + Digest: selfDigest, + MediaType: "application/zip", + Size: m.size, + }, { + Digest: digest.FromBytes(m.modFileContent), + MediaType: moduleFileMediaType, + Size: int64(len(m.modFileContent)), + }}, + Annotations: annotations, + } + + if _, err := loc.Registry.PushBlob(ctx, loc.Repository, manifest.Layers[0], io.NewSectionReader(m.blobr, 0, m.size)); err != nil { + return fmt.Errorf("cannot push module contents: %v", err) + } + if _, err := loc.Registry.PushBlob(ctx, loc.Repository, manifest.Layers[1], bytes.NewReader(m.modFileContent)); err != nil { + return fmt.Errorf("cannot push cue.mod/module.cue contents: %v", err) + } + manifestData, err := json.Marshal(manifest) + if err != nil { + return fmt.Errorf("cannot marshal manifest: %v", err) + } + if _, err := loc.Registry.PushManifest(ctx, loc.Repository, loc.Tag, manifestData, ocispec.MediaTypeImageManifest); err != nil { + return fmt.Errorf("cannot tag %v: %v", m.mv, err) + } + return nil +} + +// PutModule puts a module whose contents are held as a zip archive inside f. +// It assumes all the module dependencies are correctly resolved and present +// inside the cue.mod/module.cue file. +func (c *Client) PutModule(ctx context.Context, m module.Version, r io.ReaderAt, size int64) error { + return c.PutModuleWithMetadata(ctx, m, r, size, nil) +} + +// PutModuleWithMetadata is like [Client.PutModule] except that it also +// includes the given metadata inside the module's manifest. +// If meta is nil, no metadata will be included, otherwise +// all fields in meta must be valid and non-empty. +func (c *Client) PutModuleWithMetadata(ctx context.Context, m module.Version, r io.ReaderAt, size int64, meta *Metadata) error { + cm, err := checkModule(m, r, size) + if err != nil { + return err + } + return c.putCheckedModule(ctx, cm, meta) +} + +// checkModule checks a module's zip file before uploading it. +// This does the same checks that [Client.PutModule] does, so +// can be used to avoid doing duplicate work when an uploader +// wishes to do more checks that are implemented by that method. +// +// Note that the returned [CheckedModule] value contains r, so will +// be invalidated if r is closed. +func checkModule(m module.Version, blobr io.ReaderAt, size int64) (*checkedModule, error) { + zipr, modf, _, err := modzip.CheckZip(m, blobr, size) + if err != nil { + return nil, fmt.Errorf("module zip file check failed: %v", err) + } + modFileContent, mf, err := checkModFile(m, modf) + if err != nil { + return nil, fmt.Errorf("module.cue file check failed: %v", err) + } + return &checkedModule{ + mv: m, + blobr: blobr, + size: size, + zipr: zipr, + modFile: mf, + modFileContent: modFileContent, + }, nil +} + +func checkModFile(m module.Version, f *zip.File) ([]byte, *modfile.File, error) { + r, err := f.Open() + if err != nil { + return nil, nil, err + } + defer r.Close() + // TODO check max size? + data, err := io.ReadAll(r) + if err != nil { + return nil, nil, err + } + mf, err := modfile.Parse(data, f.Name) + if err != nil { + return nil, nil, err + } + if mf.QualifiedModule() != m.Path() { + return nil, nil, fmt.Errorf("module path %q found in %s does not match module path being published %q", mf.QualifiedModule(), f.Name, m.Path()) + } + wantMajor := semver.Major(m.Version()) + if major := mf.MajorVersion(); major != wantMajor { + // This can't actually happen because the zip checker checks the major version + // that's being published to, so the above path check also implicitly checks that. 
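+		// Keep the check anyway as a defensive measure.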
+ return nil, nil, fmt.Errorf("major version %q found in %s does not match version being published %q", major, f.Name, m.Version()) + } + // Check that all dependency versions look valid. + for modPath, dep := range mf.Deps { + _, err := module.NewVersion(modPath, dep.Version) + if err != nil { + return nil, nil, fmt.Errorf("invalid dependency: %v @ %v", modPath, dep.Version) + } + } + return data, mf, nil +} + +// Module represents a CUE module instance. +type Module struct { + client *Client + loc RegistryLocation + version module.Version + manifest ocispec.Manifest + manifestDigest ociregistry.Digest +} + +func (m *Module) Version() module.Version { + return m.version +} + +// ModuleFile returns the contents of the cue.mod/module.cue file. +func (m *Module) ModuleFile(ctx context.Context) ([]byte, error) { + r, err := m.loc.Registry.GetBlob(ctx, m.loc.Repository, m.manifest.Layers[1].Digest) + if err != nil { + return nil, err + } + defer r.Close() + return io.ReadAll(r) +} + +// Metadata returns the metadata associated with the module. +// If there is none, it returns (nil, nil). +func (m *Module) Metadata() (*Metadata, error) { + return newMetadataFromAnnotations(m.manifest.Annotations) +} + +// GetZip returns a reader that can be used to read the contents of the zip +// archive containing the module files. The reader should be closed after use, +// and the contents should not be assumed to be correct until the close +// error has been checked. +func (m *Module) GetZip(ctx context.Context) (io.ReadCloser, error) { + return m.loc.Registry.GetBlob(ctx, m.loc.Repository, m.manifest.Layers[0].Digest) +} + +// ManifestDigest returns the digest of the manifest representing +// the module. +func (m *Module) ManifestDigest() ociregistry.Digest { + return m.manifestDigest +} + +func (c *Client) resolve(m module.Version) (RegistryLocation, error) { + loc, err := c.resolver.ResolveToRegistry(m.BasePath(), m.Version()) + if err != nil { + return RegistryLocation{}, err + } + if loc.Registry == nil { + return RegistryLocation{}, fmt.Errorf("module %v unexpectedly resolved to nil registry", m) + } + if loc.Repository == "" { + return RegistryLocation{}, fmt.Errorf("module %v unexpectedly resolved to empty location", m) + } + if loc.Tag == "" { + return RegistryLocation{}, fmt.Errorf("module %v unexpectedly resolved to empty tag", m) + } + return loc, nil +} + +func unmarshalManifest(data []byte, mediaType string) (*ociregistry.Manifest, error) { + if !isJSON(mediaType) { + return nil, fmt.Errorf("expected JSON media type but %q does not look like JSON", mediaType) + } + var m ociregistry.Manifest + if err := json.Unmarshal(data, &m); err != nil { + return nil, fmt.Errorf("cannot decode %s content as manifest: %v", mediaType, err) + } + return &m, nil +} + +func isNotExist(err error) bool { + if errors.Is(err, ociregistry.ErrNameUnknown) || + errors.Is(err, ociregistry.ErrNameInvalid) { + return true + } + // A 403 error might have been sent as a response + // without explicitly including a "denied" error code. + // We treat this as a "not found" error because there's + // nothing the user can do about it. + // + // Also, some registries return an invalid error code with a 404 + // response (see https://cuelang.org/issue/2982), so it + // seems reasonable to treat that as a non-found error too. 
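+	// errors.As extracts an underlying ociregistry.HTTPError, if any,
+	// so the raw HTTP status code of the response can be inspected.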
+ if herr := ociregistry.HTTPError(nil); errors.As(err, &herr) { + statusCode := herr.StatusCode() + return statusCode == http.StatusForbidden || + statusCode == http.StatusNotFound + } + return false +} + +func isModule(m *ocispec.Manifest) bool { + // TODO check m.ArtifactType too when that's defined? + // See https://github.com/opencontainers/image-spec/blob/main/manifest.md#image-manifest-property-descriptions + return m.Config.MediaType == moduleArtifactType +} + +func isModuleFile(desc ocispec.Descriptor) bool { + return desc.ArtifactType == moduleFileMediaType || + desc.MediaType == moduleFileMediaType +} + +// isJSON reports whether the given media type has JSON as an underlying encoding. +// TODO this is a guess. There's probably a more correct way to do it. +func isJSON(mediaType string) bool { + return strings.HasSuffix(mediaType, "+json") || strings.HasSuffix(mediaType, "/json") +} + +// scratchConfig returns a dummy configuration consisting only of the +// two-byte configuration {}. +// https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-of-a-scratch-config-or-layer-descriptor +func (c *Client) scratchConfig(ctx context.Context, loc RegistryLocation, mediaType string) (ocispec.Descriptor, error) { + // TODO check if it exists already to avoid push? + content := []byte("{}") + desc := ocispec.Descriptor{ + Digest: digest.FromBytes(content), + MediaType: mediaType, + Size: int64(len(content)), + } + if _, err := loc.Registry.PushBlob(ctx, loc.Repository, desc, bytes.NewReader(content)); err != nil { + return ocispec.Descriptor{}, err + } + return desc, nil +} + +// singleResolver implements Resolver by always returning R, +// and mapping module paths directly to repository paths in +// the registry. +type singleResolver struct { + R ociregistry.Interface +} + +func (r singleResolver) ResolveToRegistry(mpath, vers string) (RegistryLocation, error) { + return RegistryLocation{ + Registry: r.R, + Repository: mpath, + Tag: vers, + }, nil +} diff --git a/vendor/cuelang.org/go/mod/modregistry/metadata.go b/vendor/cuelang.org/go/mod/modregistry/metadata.go new file mode 100644 index 0000000000..38d79b28ab --- /dev/null +++ b/vendor/cuelang.org/go/mod/modregistry/metadata.go @@ -0,0 +1,58 @@ +package modregistry + +import ( + "encoding/json" + "fmt" + "time" +) + +// Metadata holds extra information that can be associated with +// a module. It is stored in the module's manifest inside +// the annotations field. All fields must JSON-encode to +// strings. +type Metadata struct { + VCSType string `json:"org.cuelang.vcs-type"` + VCSCommit string `json:"org.cuelang.vcs-commit"` + VCSCommitTime time.Time `json:"org.cuelang.vcs-commit-time"` +} + +func newMetadataFromAnnotations(annotations map[string]string) (*Metadata, error) { + // TODO if this ever turned out to be a bottleneck we could + // improve performance by avoiding the round-trip through JSON. + raw, err := json.Marshal(annotations) + if err != nil { + // Should never happen. + return nil, err + } + var m Metadata + if err := json.Unmarshal(raw, &m); err != nil { + return nil, err + } + return &m, nil +} + +func (m *Metadata) annotations() (map[string]string, error) { + // The "is-empty" checks don't work for time.Time + // so check explicitly. + if m.VCSCommitTime.IsZero() { + return nil, fmt.Errorf("no commit time in metadata") + } + // TODO if this ever turned out to be a bottleneck we could + // improve performance by avoiding the round-trip through JSON. 
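+	// The Metadata field tags are the OCI annotation keys themselves
+	// (for example "org.cuelang.vcs-type"), so a marshal/unmarshal
+	// round trip yields the annotations map directly.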
+ data, err := json.Marshal(m) + if err != nil { + // Should never happen. + return nil, err + } + var annotations map[string]string + if err := json.Unmarshal(data, &annotations); err != nil { + // Should never happen. + return nil, err + } + for field, val := range annotations { + if val == "" { + return nil, fmt.Errorf("empty metadata value for field %q", field) + } + } + return annotations, nil +} diff --git a/vendor/cuelang.org/go/mod/module/dirfs.go b/vendor/cuelang.org/go/mod/module/dirfs.go new file mode 100644 index 0000000000..42cbbbd139 --- /dev/null +++ b/vendor/cuelang.org/go/mod/module/dirfs.go @@ -0,0 +1,56 @@ +package module + +import ( + "io/fs" + "os" +) + +// SourceLoc represents the location of some CUE source code. +type SourceLoc struct { + // FS is the filesystem containing the source. + FS fs.FS + // Dir is the directory within the above filesystem. + Dir string +} + +// OSRootFS can be implemented by an [fs.FS] +// implementation to return its root directory as +// an OS file path. +type OSRootFS interface { + fs.FS + + // OSRoot returns the root directory of the FS + // as an OS file path. If it wasn't possible to do that, + // it returns the empty string. + OSRoot() string +} + +// OSDirFS is like [os.DirFS] but the returned value implements +// [OSRootFS] by returning p. +func OSDirFS(p string) fs.FS { + return dirFSImpl{ + augmentedFS: os.DirFS(p).(augmentedFS), + osRoot: p, + } +} + +var _ interface { + augmentedFS + OSRootFS +} = dirFSImpl{} + +type augmentedFS interface { + fs.FS + fs.StatFS + fs.ReadDirFS + fs.ReadFileFS +} + +type dirFSImpl struct { + osRoot string + augmentedFS +} + +func (fsys dirFSImpl) OSRoot() string { + return fsys.osRoot +} diff --git a/vendor/cuelang.org/go/internal/mod/module/error.go b/vendor/cuelang.org/go/mod/module/error.go similarity index 79% rename from vendor/cuelang.org/go/internal/mod/module/error.go rename to vendor/cuelang.org/go/mod/module/error.go index db3ef1b21c..3ab91a4460 100644 --- a/vendor/cuelang.org/go/internal/mod/module/error.go +++ b/vendor/cuelang.org/go/mod/module/error.go @@ -1,7 +1,6 @@ package module import ( - "errors" "fmt" ) @@ -12,20 +11,6 @@ type ModuleError struct { Err error } -// VersionError returns a ModuleError derived from a Version and error, -// or err itself if it is already such an error. -func VersionError(v Version, err error) error { - var mErr *ModuleError - if errors.As(err, &mErr) && mErr.Path == v.Path() && mErr.Version == v.Version() { - return err - } - return &ModuleError{ - Path: v.Path(), - Version: v.Version(), - Err: err, - } -} - func (e *ModuleError) Error() string { if v, ok := e.Err.(*InvalidVersionError); ok { return fmt.Sprintf("%s@%s: invalid version: %v", e.Path, v.Version, v.Err) diff --git a/vendor/cuelang.org/go/mod/module/escape.go b/vendor/cuelang.org/go/mod/module/escape.go new file mode 100644 index 0000000000..7c0c44201f --- /dev/null +++ b/vendor/cuelang.org/go/mod/module/escape.go @@ -0,0 +1,68 @@ +package module + +import ( + "fmt" + "strings" + "unicode/utf8" + + "cuelang.org/go/internal/mod/semver" +) + +// EscapePath returns the escaped form of the given module path +// (without the major version suffix). +// It fails if the module path is invalid. +func EscapePath(path string) (escaped string, err error) { + if err := CheckPathWithoutVersion(path); err != nil { + return "", err + } + // Technically there's no need to escape capital letters because CheckPath + // doesn't allow them, but let's be defensive. 
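+	// For example, a path such as "foo.com/BAR" (were it allowed)
+	// would escape to "foo.com/!b!a!r"; paths without upper-case
+	// letters are returned unchanged.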
+	return escapeString(path)
+}
+
+// EscapeVersion returns the escaped form of the given module version.
+// Versions must be in (possibly non-canonical) semver form and must be valid file names
+// and not contain exclamation marks.
+func EscapeVersion(v string) (escaped string, err error) {
+	if !semver.IsValid(v) {
+		return "", &InvalidVersionError{
+			Version: v,
+			Err:     fmt.Errorf("version is not in semver syntax"),
+		}
+	}
+	if err := checkElem(v, filePath); err != nil || strings.Contains(v, "!") {
+		return "", &InvalidVersionError{
+			Version: v,
+			Err:     fmt.Errorf("disallowed version string"),
+		}
+	}
+	return escapeString(v)
+}
+
+func escapeString(s string) (escaped string, err error) {
+	haveUpper := false
+	for _, r := range s {
+		if r == '!' || r >= utf8.RuneSelf {
+			// This should be disallowed by CheckPath, but diagnose anyway.
+			// The correctness of the escaping loop below depends on it.
+			return "", fmt.Errorf("internal error: inconsistency in EscapePath")
+		}
+		if 'A' <= r && r <= 'Z' {
+			haveUpper = true
+		}
+	}
+
+	if !haveUpper {
+		return s, nil
+	}
+
+	var buf []byte
+	for _, r := range s {
+		if 'A' <= r && r <= 'Z' {
+			buf = append(buf, '!', byte(r+'a'-'A'))
+		} else {
+			buf = append(buf, byte(r))
+		}
+	}
+	return string(buf), nil
+}
diff --git a/vendor/cuelang.org/go/mod/module/module.go b/vendor/cuelang.org/go/mod/module/module.go
new file mode 100644
index 0000000000..ed632f343d
--- /dev/null
+++ b/vendor/cuelang.org/go/mod/module/module.go
@@ -0,0 +1,271 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package module defines the [Version] type along with support code.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL.
+// ITS API MAY CHANGE AT ANY TIME.
+//
+// The [Version] type holds a pair of module path and version.
+// The module path conforms to the checks implemented by [Check].
+//
+// # Escaped Paths
+//
+// Module versions appear as substrings of file system paths (as stored by
+// the modcache package).
+// In general we cannot rely on file systems to be case-sensitive. Although
+// module paths cannot currently contain upper case characters because
+// OCI registries forbid that, versions can. That
+// is, we cannot rely on the file system to keep foo.com/v@v1.0.0-PRE and
+// foo.com/v@v1.0.0-pre separate. Windows and macOS don't. Instead, we must
+// never require two different casings of a file path.
+//
+// One possibility would be to make the escaped form be the lowercase
+// hexadecimal encoding of the actual path bytes. This would avoid ever
+// needing different casings of a file path, but it would be fairly illegible
+// to most programmers when those paths appeared in the file system
+// (including in file paths in compiler errors and stack traces),
+// in web server logs, and so on. Instead, we want a safe escaped form that
+// leaves most paths unaltered.
+//
+// The safe escaped form is to replace every uppercase letter
+// with an exclamation mark followed by the letter's lowercase equivalent.
+//
+// For example,
+//
+//	foo.com/v@v1.0.0-PRE ->  foo.com/v@v1.0.0-!p!r!e
+//
+// Versions that avoid upper-case letters are left unchanged.
+// Note that because import paths are ASCII-only and avoid various
+// problematic punctuation (like : < and >), the escaped form is also ASCII-only
+// and avoids the same problematic punctuation.
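+// For example, the version "v1.0.0-RC1" escapes to "v1.0.0-!r!c1".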
+// +// Neither versions nor module paths allow exclamation marks, so there is no +// need to define how to escape a literal !. +// +// # Unicode Restrictions +// +// Today, paths are disallowed from using Unicode. +// +// Although paths are currently disallowed from using Unicode, +// we would like at some point to allow Unicode letters as well, to assume that +// file systems and URLs are Unicode-safe (storing UTF-8), and apply +// the !-for-uppercase convention for escaping them in the file system. +// But there are at least two subtle considerations. +// +// First, note that not all case-fold equivalent distinct runes +// form an upper/lower pair. +// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) +// are three distinct runes that case-fold to each other. +// When we do add Unicode letters, we must not assume that upper/lower +// are the only case-equivalent pairs. +// Perhaps the Kelvin symbol would be disallowed entirely, for example. +// Or perhaps it would escape as "!!k", or perhaps as "(212A)". +// +// Second, it would be nice to allow Unicode marks as well as letters, +// but marks include combining marks, and then we must deal not +// only with case folding but also normalization: both U+00E9 ('é') +// and U+0065 U+0301 ('e' followed by combining acute accent) +// look the same on the page and are treated by some file systems +// as the same path. If we do allow Unicode marks in paths, there +// must be some kind of normalization to allow only one canonical +// encoding of any character used in an import path. +package module + +// IMPORTANT NOTE +// +// This file essentially defines the set of valid import paths for the cue command. +// There are many subtle considerations, including Unicode ambiguity, +// security, network, and file system representations. + +import ( + "cmp" + "fmt" + "slices" + "strings" + + "cuelang.org/go/internal/mod/semver" +) + +// A Version (for clients, a module.Version) is defined by a module path and version pair. +// These are stored in their plain (unescaped) form. +// This type is comparable. +type Version struct { + path string + version string +} + +// Path returns the module path part of the Version, +// which always includes the major version suffix +// unless a module path, like "github.com/foo/bar@v0". +// Note that in general the path should include the major version suffix +// even though it's implied from the version. The Canonical +// method can be used to add the major version suffix if not present. +// The BasePath method can be used to obtain the path without +// the suffix. +func (m Version) Path() string { + return m.path +} + +// Equal reports whether m is equal to m1. +func (m Version) Equal(m1 Version) bool { + return m.path == m1.path && m.version == m1.version +} + +// BasePath returns the path part of m without its major version suffix. +func (m Version) BasePath() string { + if m.IsLocal() { + return m.path + } + basePath, _, ok := SplitPathVersion(m.path) + if !ok { + panic(fmt.Errorf("broken invariant: failed to split version in %q", m.path)) + } + return basePath +} + +// Version returns the version part of m. This is either +// a canonical semver version or "none" or the empty string. +func (m Version) Version() string { + return m.version +} + +// IsValid reports whether m is non-zero. +func (m Version) IsValid() bool { + return m.path != "" +} + +// IsCanonical reports whether m is valid and has a canonical +// semver version. 
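+// That is, its version is neither empty nor the placeholder "none".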
+func (m Version) IsCanonical() bool { + return m.IsValid() && m.version != "" && m.version != "none" +} + +func (m Version) IsLocal() bool { + return m.path == "local" +} + +// String returns the string form of the Version: +// (Path@Version, or just Path if Version is empty). +func (m Version) String() string { + if m.version == "" { + return m.path + } + return m.BasePath() + "@" + m.version +} + +func MustParseVersion(s string) Version { + v, err := ParseVersion(s) + if err != nil { + panic(err) + } + return v +} + +// ParseVersion parses a $module@$version +// string into a Version. +// The version must be canonical (i.e. it can't be +// just a major version). +func ParseVersion(s string) (Version, error) { + basePath, vers, ok := SplitPathVersion(s) + if !ok { + return Version{}, fmt.Errorf("invalid module path@version %q", s) + } + if semver.Canonical(vers) != vers { + return Version{}, fmt.Errorf("module version in %q is not canonical", s) + } + return Version{basePath + "@" + semver.Major(vers), vers}, nil +} + +func MustNewVersion(path string, version string) Version { + v, err := NewVersion(path, version) + if err != nil { + panic(err) + } + return v +} + +// NewVersion forms a Version from the given path and version. +// The version must be canonical, empty or "none". +// If the path doesn't have a major version suffix, one will be added +// if the version isn't empty; if the version is empty, it's an error. +// +// As a special case, the path "local" is used to mean all packages +// held in the gen, pkg and usr directories. +func NewVersion(path string, version string) (Version, error) { + switch { + case path == "local": + if version != "" { + return Version{}, fmt.Errorf("module 'local' cannot have version") + } + case version != "" && version != "none": + if !semver.IsValid(version) { + return Version{}, fmt.Errorf("version %q (of module %q) is not well formed", version, path) + } + if semver.Canonical(version) != version { + return Version{}, fmt.Errorf("version %q (of module %q) is not canonical", version, path) + } + maj := semver.Major(version) + _, vmaj, ok := SplitPathVersion(path) + if ok && maj != vmaj { + return Version{}, fmt.Errorf("mismatched major version suffix in %q (version %v)", path, version) + } + if !ok { + fullPath := path + "@" + maj + if _, _, ok := SplitPathVersion(fullPath); !ok { + return Version{}, fmt.Errorf("cannot form version path from %q, version %v", path, version) + } + path = fullPath + } + default: + base, _, ok := SplitPathVersion(path) + if !ok { + return Version{}, fmt.Errorf("path %q has no major version", path) + } + if base == "local" { + return Version{}, fmt.Errorf("module 'local' cannot have version") + } + } + if version == "" { + if err := CheckPath(path); err != nil { + return Version{}, err + } + } else { + if err := Check(path, version); err != nil { + return Version{}, err + } + } + return Version{ + path: path, + version: version, + }, nil +} + +// Sort sorts the list by Path, breaking ties by comparing Version fields. +// The Version fields are interpreted as semantic versions (using semver.Compare) +// optionally followed by a tie-breaking suffix introduced by a slash character, +// like in "v0.0.1/module.cue". +func Sort(list []Version) { + slices.SortFunc(list, func(a, b Version) int { + if c := cmp.Compare(a.path, b.path); c != 0 { + return c + } + // To help go.sum formatting, allow version/file. + // Compare semver prefix by semver rules, + // file by string order. 
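+		// For example, "v0.0.1/module.cue" compares as the semver
+		// "v0.0.1" first, with "/module.cue" as a secondary string key.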
+ va := a.version + vb := b.version + var fa, fb string + if k := strings.Index(va, "/"); k >= 0 { + va, fa = va[:k], va[k:] + } + if k := strings.Index(vb, "/"); k >= 0 { + vb, fb = vb[:k], vb[k:] + } + if c := semver.Compare(va, vb); c != 0 { + return c + } + return cmp.Compare(fa, fb) + }) +} diff --git a/vendor/cuelang.org/go/internal/mod/module/path.go b/vendor/cuelang.org/go/mod/module/path.go similarity index 78% rename from vendor/cuelang.org/go/internal/mod/module/path.go rename to vendor/cuelang.org/go/mod/module/path.go index c63e60112d..d6e6865231 100644 --- a/vendor/cuelang.org/go/internal/mod/module/path.go +++ b/vendor/cuelang.org/go/mod/module/path.go @@ -8,6 +8,7 @@ import ( "unicode" "unicode/utf8" + "cuelang.org/go/cue/ast" "cuelang.org/go/internal/mod/semver" ) @@ -152,10 +153,10 @@ func CheckPathWithoutVersion(basePath string) (err error) { // (ASCII digits) and must not begin with a leading zero. // // Third, no path element may begin with a dot. -// -// TODO we probably need function to check module paths that -// may not contain a major version. func CheckPath(mpath string) (err error) { + if mpath == "local" { + return nil + } defer func() { if err != nil { err = &InvalidPathError{Kind: "module", Path: mpath, Err: err} @@ -192,21 +193,17 @@ func CheckPath(mpath string) (err error) { // // The element prefix up to the first dot must not be a reserved file name // on Windows, regardless of case (CON, com1, NuL, and so on). -func CheckImportPath(path0 string) error { - path := path0 - basePath, vers, ok := SplitPathVersion(path) - if ok { - if semver.Major(vers) != vers { - return &InvalidPathError{ - Kind: "import", - Path: path, - Err: fmt.Errorf("import paths can only contain a major version specifier"), - } +func CheckImportPath(path string) error { + parts := ParseImportPath(path) + if semver.Major(parts.Version) != parts.Version { + return &InvalidPathError{ + Kind: "import", + Path: path, + Err: fmt.Errorf("import paths can only contain a major version specifier"), } - path = basePath } - if err := checkPath(path, importPath); err != nil { - return &InvalidPathError{Kind: "import", Path: path0, Err: err} + if err := checkPath(parts.Path, importPath); err != nil { + return &InvalidPathError{Kind: "import", Path: path, Err: err} } return nil } @@ -397,12 +394,99 @@ func SplitPathVersion(path string) (prefix, version string, ok bool) { return path[:split], path[split+1:], true } -// MatchPathMajor reports whether the semantic version v -// matches the path major version pathMajor. -// -// MatchPathMajor returns true if and only if CheckPathMajor returns nil. -func MatchPathMajor(v, pathMajor string) bool { - return CheckPathMajor(v, pathMajor) == nil +// ImportPath holds the various components of an import path. +type ImportPath struct { + // Path holds the base package/directory path, similar + // to that returned by [Version.BasePath]. + Path string + + // Version holds the version of the import + // or empty if not present. Note: in general this + // will contain a major version only, but there's no + // guarantee of that. + Version string + + // Qualifier holds the package qualifier within the path. + // This will be derived from the last component of Path + // if it wasn't explicitly present in the import path. + // This is not guaranteed to be a valid CUE identifier. + Qualifier string + + // ExplicitQualifier holds whether the qualifier will + // always be added regardless of whether it matches + // the final path element. 
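+	// For example, parsing "foo.com/bar:bar" sets ExplicitQualifier,
+	// so String will preserve the ":bar" qualifier even though it
+	// matches the final path element.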
+ ExplicitQualifier bool +} + +// Canonical returns the canonical form of the import path. +// Specifically, it will only include the package qualifier +// if it's different from the last component of parts.Path. +func (parts ImportPath) Canonical() ImportPath { + if i := strings.LastIndex(parts.Path, "/"); i >= 0 && parts.Path[i+1:] == parts.Qualifier { + parts.Qualifier = "" + parts.ExplicitQualifier = false + } + return parts +} + +// Unqualified returns the import path without any package qualifier. +func (parts ImportPath) Unqualified() ImportPath { + parts.Qualifier = "" + parts.ExplicitQualifier = false + return parts +} + +func (parts ImportPath) String() string { + needQualifier := parts.ExplicitQualifier + if !needQualifier && parts.Qualifier != "" { + _, last, _ := cutLast(parts.Path, "/") + if last != "" && last != parts.Qualifier { + needQualifier = true + } + } + if parts.Version == "" && !needQualifier { + // Fast path. + return parts.Path + } + var buf strings.Builder + buf.WriteString(parts.Path) + if parts.Version != "" { + buf.WriteByte('@') + buf.WriteString(parts.Version) + } + if needQualifier { + buf.WriteByte(':') + buf.WriteString(parts.Qualifier) + } + return buf.String() +} + +// ParseImportPath returns the various components of an import path. +// It does not check the result for validity. +func ParseImportPath(p string) ImportPath { + var parts ImportPath + pathWithoutQualifier := p + if i := strings.LastIndexAny(p, "/:"); i >= 0 && p[i] == ':' { + pathWithoutQualifier = p[:i] + parts.Qualifier = p[i+1:] + parts.ExplicitQualifier = true + } + parts.Path = pathWithoutQualifier + if path, version, ok := SplitPathVersion(pathWithoutQualifier); ok { + parts.Version = version + parts.Path = path + } + if !parts.ExplicitQualifier { + if i := strings.LastIndex(parts.Path, "/"); i >= 0 { + parts.Qualifier = parts.Path[i+1:] + } else { + parts.Qualifier = parts.Path + } + if !ast.IsValidIdent(parts.Qualifier) || strings.HasPrefix(parts.Qualifier, "#") { + parts.Qualifier = "" + } + } + return parts } // CheckPathMajor returns a non-nil error if the semantic version v @@ -416,3 +500,10 @@ func CheckPathMajor(v, pathMajor string) error { } return nil } + +func cutLast(s, sep string) (before, after string, found bool) { + if i := strings.LastIndex(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return "", s, false +} diff --git a/vendor/cuelang.org/go/internal/mod/module/versions.go b/vendor/cuelang.org/go/mod/module/versions.go similarity index 100% rename from vendor/cuelang.org/go/internal/mod/module/versions.go rename to vendor/cuelang.org/go/mod/module/versions.go diff --git a/vendor/cuelang.org/go/internal/mod/modzip/zip.go b/vendor/cuelang.org/go/mod/modzip/zip.go similarity index 94% rename from vendor/cuelang.org/go/internal/mod/modzip/zip.go rename to vendor/cuelang.org/go/mod/modzip/zip.go index ef381f86a5..4736a6310f 100644 --- a/vendor/cuelang.org/go/internal/mod/modzip/zip.go +++ b/vendor/cuelang.org/go/mod/modzip/zip.go @@ -4,11 +4,14 @@ // Package modzip provides functions for creating and extracting module zip files. // +// WARNING: THIS PACKAGE IS EXPERIMENTAL. +// ITS API MAY CHANGE AT ANY TIME. +// // Module zip files have several restrictions listed below. These are necessary // to ensure that module zip files can be extracted consistently on supported // platforms and file systems. // -// • All file paths within a zip file must be valid (see cuelang.org/go/internal/mod/module.CheckFilePath). 
+// • All file paths within a zip file must be valid (see cuelang.org/go/mod/module.CheckFilePath). // // • No two file paths may be equal under Unicode case-folding (see // strings.EqualFold). @@ -40,22 +43,25 @@ package modzip import ( "archive/zip" "bytes" + "cmp" "errors" "fmt" "io" + "io/fs" "os" "path" "path/filepath" + "slices" "strings" "unicode" "unicode/utf8" - "cuelang.org/go/internal/mod/module" + "cuelang.org/go/mod/module" ) const ( // MaxZipFile is the maximum size in bytes of a module zip file. The - // go command will report an error if either the zip file or its extracted + // cue command will report an error if either the zip file or its extracted // content is larger than this. MaxZipFile = 500 << 20 @@ -71,6 +77,9 @@ const ( // File provides an abstraction for a file in a directory, zip, or anything // else that looks like a file - it knows how to open files represented // as a particular type without being a file itself. +// +// Deprecated: this will be removed in a future API iteration that reduces +// dependence on zip archives. type FileIO[F any] interface { // Path returns a clean slash-separated relative path from the module root // directory to the file. @@ -189,6 +198,9 @@ var ( // Note that CheckFiles will not open any files, so Create may still fail when // CheckFiles is successful due to I/O errors, reported size differences // or an invalid module.cue file. +// +// Deprecated: this will be removed in a future API iteration that reduces +// dependence on zip archives. func CheckFiles[F any](files []F, fio FileIO[F]) (CheckedFiles, error) { cf, _, _ := checkFiles(files, fio) return cf, cf.Err() @@ -354,6 +366,9 @@ func checkFiles[F any](files []F, fio FileIO[F]) (cf CheckedFiles, validFiles [] // // Note that CheckDir will not open any files, so CreateFromDir may still fail // when CheckDir is successful due to I/O errors. +// +// Deprecated: this will be removed in a future API iteration that reduces +// dependence on zip archives. func CheckDir(dir string) (CheckedFiles, error) { // List files (as CreateFromDir would) and check which ones are omitted // or invalid. @@ -361,7 +376,7 @@ func CheckDir(dir string) (CheckedFiles, error) { if err != nil { return CheckedFiles{}, err } - cf, cfErr := CheckFiles[dirFile](files, dirFileIO{}) + cf, cfErr := CheckFiles(files, dirFileIO{}) _ = cfErr // ignore this error; we'll generate our own after rewriting paths. // Replace all paths with file system paths. @@ -408,8 +423,6 @@ func CheckZipFile(m module.Version, zipFile string) (CheckedFiles, error) { // // Note that checkZip does not read individual files, so zip.Unzip may still fail // when checkZip is successful due to I/O errors. -// -// TODO update this for new semantics (no top level module directory). func CheckZip(m module.Version, r io.ReaderAt, zipSize int64) (*zip.Reader, *zip.File, CheckedFiles, error) { if zipSize > MaxZipFile { cf := CheckedFiles{SizeError: fmt.Errorf("module zip file is too large (%d bytes; limit is %d bytes)", zipSize, MaxZipFile)} @@ -497,7 +510,8 @@ func CheckZip(m module.Version, r io.ReaderAt, zipSize int64) (*zip.Reader, *zip } // Create builds a zip archive for module m from an abstract list of files -// and writes it to w. +// and writes it to w, after first sorting the slice of files in a path-aware +// lexical fashion (files first, then directories, both sorted lexically). // // Note that m.Version is checked for validity but only the major version // is used for checking correctness of the cue.mod/module.cue file. 
@@ -508,6 +522,9 @@ func CheckZip(m module.Version, r io.ReaderAt, zipSize int64) (*zip.Reader, *zip
 // In particular, Create will not include files in modules found in
 // subdirectories, most files in vendor directories, or irregular files (such
 // as symbolic links) in the output archive.
+//
+// Deprecated: this will be removed in a future API iteration that reduces
+// dependence on zip archives.
 func Create[F any](w io.Writer, m module.Version, files []F, fio FileIO[F]) (err error) {
 	defer func() {
 		if err != nil {
@@ -515,6 +532,18 @@ func Create[F any](w io.Writer, m module.Version, files []F, fio FileIO[F]) (err
 		}
 	}()
 
+	files = slices.Clone(files)
+	slices.SortFunc(files, func(a, b F) int {
+		ap := fio.Path(a)
+		bp := fio.Path(b)
+		ca := strings.Count(ap, string(filepath.Separator))
+		cb := strings.Count(bp, string(filepath.Separator))
+		if c := cmp.Compare(ca, cb); c != 0 {
+			return c
+		}
+		return cmp.Compare(ap, bp)
+	})
+
 	// Check whether files are valid, not valid, or should be omitted.
 	// Also check that the valid files don't exceed the maximum size.
 	cf, validFiles, validSizes := checkFiles(files, fio)
@@ -581,18 +610,18 @@ func CreateFromDir(w io.Writer, m module.Version, dir string) (err error) {
 		return err
 	}
 
-	return Create[dirFile](w, m, files, dirFileIO{})
+	return Create(w, m, files, dirFileIO{})
 }
 
 type dirFile struct {
 	filePath, slashPath string
-	info                os.FileInfo
+	entry               fs.DirEntry
 }
 
 type dirFileIO struct{}
 
 func (dirFileIO) Path(f dirFile) string { return f.slashPath }
-func (dirFileIO) Lstat(f dirFile) (os.FileInfo, error) { return f.info, nil }
+func (dirFileIO) Lstat(f dirFile) (os.FileInfo, error) { return f.entry.Info() }
 func (dirFileIO) Open(f dirFile) (io.ReadCloser, error) { return os.Open(f.filePath) }
 
 // isVendoredPackage reports whether the given filename is inside
@@ -724,7 +753,7 @@ func (cc collisionChecker) check(p string, isDir bool) error {
 // files, as well as a list of directories and files that were skipped (for
 // example, nested modules and symbolic links).
 func listFilesInDir(dir string) (files []dirFile, omitted []FileError, err error) {
-	err = filepath.Walk(dir, func(filePath string, info os.FileInfo, err error) error {
+	err = filepath.WalkDir(dir, func(filePath string, entry fs.DirEntry, err error) error {
 		if err != nil {
 			return err
 		}
@@ -741,7 +770,7 @@ func listFilesInDir(dir string) (files []dirFile, omitted []FileError, err error
 			return nil
 		}
 
-		if info.IsDir() {
+		if entry.IsDir() {
 			if filePath == dir {
 				// Don't skip the top-level directory.
 				return nil
@@ -766,7 +795,7 @@ func listFilesInDir(dir string) (files []dirFile, omitted []FileError, err error
 
 		// Skip irregular files and files in vendor directories.
 		// Irregular files are ignored. They're typically symbolic links.
-		if !info.Mode().IsRegular() {
+		if !entry.Type().IsRegular() {
 			omitted = append(omitted, FileError{Path: slashPath, Err: errNotRegular})
 			return nil
 		}
@@ -774,7 +803,7 @@ func listFilesInDir(dir string) (files []dirFile, omitted []FileError, err error
 		files = append(files, dirFile{
 			filePath:  filePath,
 			slashPath: slashPath,
-			info:      info,
+			entry:     entry,
 		})
 		return nil
 	})
@@ -862,20 +891,3 @@ func splitCUEMod(p string) (string, string) {
 		s = dir
 	}
 }
-
-// ZipFileIO implements FileIO for *zip.File.
-type ZipFileIO struct {
-	// StripPrefix causes the given prefix to be stripped from
-	// all file names with that prefix.
- StripPrefix string -} - -func (fio ZipFileIO) Path(f *zip.File) string { - return strings.TrimPrefix(f.Name, fio.StripPrefix) -} -func (ZipFileIO) Lstat(f *zip.File) (os.FileInfo, error) { - return f.FileInfo(), nil -} -func (ZipFileIO) Open(f *zip.File) (io.ReadCloser, error) { - return f.Open() -} diff --git a/vendor/cuelang.org/go/pkg/encoding/yaml/manual.go b/vendor/cuelang.org/go/pkg/encoding/yaml/manual.go index 5912c48dc2..c65c3621eb 100644 --- a/vendor/cuelang.org/go/pkg/encoding/yaml/manual.go +++ b/vendor/cuelang.org/go/pkg/encoding/yaml/manual.go @@ -23,7 +23,6 @@ import ( "cuelang.org/go/cue/errors" cueyaml "cuelang.org/go/internal/encoding/yaml" "cuelang.org/go/internal/pkg" - "cuelang.org/go/internal/third_party/yaml" ) // Marshal returns the YAML encoding of v. @@ -64,16 +63,12 @@ func MarshalStream(v cue.Value) (string, error) { // Unmarshal parses the YAML to a CUE expression. func Unmarshal(data []byte) (ast.Expr, error) { - return yaml.Unmarshal("", data) + return cueyaml.Unmarshal("", data) } // UnmarshalStream parses the YAML to a CUE list expression on success. func UnmarshalStream(data []byte) (ast.Expr, error) { - d, err := yaml.NewDecoder("", data) - if err != nil { - return nil, err - } - + d := cueyaml.NewDecoder("", data) a := []ast.Expr{} for { x, err := d.Decode() @@ -92,10 +87,7 @@ func UnmarshalStream(data []byte) (ast.Expr, error) { // Validate validates YAML and confirms it is an instance of the schema // specified by v. If the YAML source is a stream, every object must match v. func Validate(b []byte, v cue.Value) (bool, error) { - d, err := yaml.NewDecoder("yaml.Validate", b) - if err != nil { - return false, err - } + d := cueyaml.NewDecoder("yaml.Validate", b) r := v.Context() for { expr, err := d.Decode() @@ -141,10 +133,7 @@ func Validate(b []byte, v cue.Value) (bool, error) { // but does not have to be an instance of v. If the YAML source is a stream, // every object must match v. func ValidatePartial(b []byte, v cue.Value) (bool, error) { - d, err := yaml.NewDecoder("yaml.ValidatePartial", b) - if err != nil { - return false, err - } + d := cueyaml.NewDecoder("yaml.ValidatePartial", b) r := v.Context() for { expr, err := d.Decode() diff --git a/vendor/cuelang.org/go/pkg/list/math.go b/vendor/cuelang.org/go/pkg/list/math.go index 602ebd6a93..17de18a923 100644 --- a/vendor/cuelang.org/go/pkg/list/math.go +++ b/vendor/cuelang.org/go/pkg/list/math.go @@ -24,7 +24,7 @@ import ( // Avg returns the average value of a non empty list xs. func Avg(xs []*internal.Decimal) (*internal.Decimal, error) { - if 0 == len(xs) { + if len(xs) == 0 { return nil, fmt.Errorf("empty list") } @@ -47,13 +47,13 @@ func Avg(xs []*internal.Decimal) (*internal.Decimal, error) { // Max returns the maximum value of a non empty list xs. func Max(xs []*internal.Decimal) (*internal.Decimal, error) { - if 0 == len(xs) { + if len(xs) == 0 { return nil, fmt.Errorf("empty list") } max := xs[0] for _, x := range xs[1:] { - if -1 == max.Cmp(x) { + if max.Cmp(x) == -1 { max = x } } @@ -62,13 +62,13 @@ func Max(xs []*internal.Decimal) (*internal.Decimal, error) { // Min returns the minimum value of a non empty list xs. 
func Min(xs []*internal.Decimal) (*internal.Decimal, error) { - if 0 == len(xs) { + if len(xs) == 0 { return nil, fmt.Errorf("empty list") } min := xs[0] for _, x := range xs[1:] { - if +1 == min.Cmp(x) { + if min.Cmp(x) == +1 { min = x } } @@ -102,22 +102,22 @@ func Range(start, limit, step *internal.Decimal) ([]*internal.Decimal, error) { return nil, fmt.Errorf("step must be non zero") } - if !step.Negative && +1 == start.Cmp(limit) { + if !step.Negative && start.Cmp(limit) == +1 { return nil, fmt.Errorf("end must be greater than start when step is positive") } - if step.Negative && -1 == start.Cmp(limit) { + if step.Negative && start.Cmp(limit) == -1 { return nil, fmt.Errorf("end must be less than start when step is negative") } var vals []*internal.Decimal num := start for { - if !step.Negative && -1 != num.Cmp(limit) { + if !step.Negative && num.Cmp(limit) != -1 { break } - if step.Negative && +1 != num.Cmp(limit) { + if step.Negative && num.Cmp(limit) != +1 { break } diff --git a/vendor/cuelang.org/go/pkg/list/sort.go b/vendor/cuelang.org/go/pkg/list/sort.go index 0cc40400df..d47c4985ad 100644 --- a/vendor/cuelang.org/go/pkg/list/sort.go +++ b/vendor/cuelang.org/go/pkg/list/sort.go @@ -22,6 +22,7 @@ import ( "sort" "cuelang.org/go/cue" + "cuelang.org/go/internal" "cuelang.org/go/internal/core/adt" "cuelang.org/go/internal/types" ) @@ -52,6 +53,11 @@ func (s *valueSorter) Less(i, j int) bool { if s.err != nil { return false } + + if s.ctx.Version == internal.DevVersion { + return s.lessNew(i, j) + } + var x, y types.Value s.a[i].Core(&x) s.a[j].Core(&y) @@ -96,6 +102,54 @@ func (s *valueSorter) Less(i, j int) bool { return isLess } +func (s *valueSorter) lessNew(i, j int) bool { + ctx := s.ctx + + n := &adt.Vertex{ + Label: s.cmp.Label, + Parent: s.cmp.Parent, + Conjuncts: s.cmp.Conjuncts, + } + + n.Init(ctx) + + less := getArc(ctx, n, "less") + xa := getArc(ctx, n, "x") + ya := getArc(ctx, n, "y") + + var x, y types.Value + s.a[i].Core(&x) + s.a[j].Core(&y) + + for _, c := range x.V.Conjuncts { + xa.AddConjunct(c) + } + for _, c := range y.V.Conjuncts { + ya.AddConjunct(c) + } + + // TODO(perf): if we can determine that the comparator values for + // x and y are idempotent (no arcs and a basevalue being top or + // a struct or list marker), then we do not need to reevaluate the input. + // In that case, we can use the below code instead of the above two loops + // setting the conjuncts. This may improve performance significantly. 
+ // + // s.x.BaseValue = x.V.BaseValue + // s.x.Arcs = x.V.Arcs + // s.y.BaseValue = y.V.BaseValue + // s.y.Arcs = y.V.Arcs + + less.Finalize(s.ctx) + + isLess := s.ctx.BoolValue(less) + if b := less.Err(s.ctx); b != nil && s.err == nil { + s.err = b.Err + return true + } + + return isLess +} + var less = cue.ParsePath("less") func makeValueSorter(list []cue.Value, cmp cue.Value) (s valueSorter) { @@ -157,7 +211,7 @@ func getArc(ctx *adt.OpContext, v *adt.Vertex, s string) *adt.Vertex { return arc } -// Deprecated: use Sort, which is always stable +// Deprecated: use [Sort], which is always stable func SortStable(list []cue.Value, cmp cue.Value) (sorted []cue.Value, err error) { s := makeValueSorter(list, cmp) sort.Stable(&s) diff --git a/vendor/cuelang.org/go/pkg/math/manual.go b/vendor/cuelang.org/go/pkg/math/manual.go index 474bc7d82b..5d163d2d05 100644 --- a/vendor/cuelang.org/go/pkg/math/manual.go +++ b/vendor/cuelang.org/go/pkg/math/manual.go @@ -114,11 +114,18 @@ func RoundToEven(x *internal.Decimal) (*big.Int, error) { return toInt(&d), err } -var mulContext = internal.BaseContext.WithPrecision(1) - // MultipleOf reports whether x is a multiple of y. func MultipleOf(x, y *internal.Decimal) (bool, error) { var d apd.Decimal - cond, err := mulContext.Quo(&d, x, y) - return !cond.Inexact(), err + + // TODO: It would be preferable to use internal.BaseContext.Rem here, and directly + // check the result for 0. However, this currently fails with "division impossible". + // Fix this when https://github.com/cockroachdb/apd/issues/134 is resolved. + _, err := internal.BaseContext.Quo(&d, x, y) + if err != nil { + return false, err + } + var frac apd.Decimal + d.Modf(nil, &frac) + return frac.IsZero(), nil } diff --git a/vendor/cuelang.org/go/pkg/net/host.go b/vendor/cuelang.org/go/pkg/net/host.go index 70be5f2b6a..aef0ad9918 100644 --- a/vendor/cuelang.org/go/pkg/net/host.go +++ b/vendor/cuelang.org/go/pkg/net/host.go @@ -56,7 +56,7 @@ func JoinHostPort(host, port cue.Value) (string, error) { switch host.Kind() { case cue.ListKind: ipdata := netGetIP(host) - if len(ipdata) != 4 && len(ipdata) != 16 { + if !ipdata.IsValid() { err = fmt.Errorf("invalid host %s", host) } hostStr = ipdata.String() diff --git a/vendor/cuelang.org/go/pkg/net/ip.go b/vendor/cuelang.org/go/pkg/net/ip.go index 4fbb6179d2..89c587be16 100644 --- a/vendor/cuelang.org/go/pkg/net/ip.go +++ b/vendor/cuelang.org/go/pkg/net/ip.go @@ -17,7 +17,7 @@ package net import ( "fmt" - "net" + "net/netip" "cuelang.org/go/cue" ) @@ -28,76 +28,81 @@ const ( IPv6len = 16 ) -func netGetIP(ip cue.Value) (goip net.IP) { +func netGetIP(ip cue.Value) (goip netip.Addr) { switch ip.Kind() { case cue.StringKind: s, err := ip.String() if err != nil { - return nil + return netip.Addr{} } - goip := net.ParseIP(s) - if goip == nil { - return nil + goip, err := netip.ParseAddr(s) + if err != nil { + return netip.Addr{} } return goip case cue.BytesKind: b, err := ip.Bytes() if err != nil { - return nil + return netip.Addr{} } - goip := net.ParseIP(string(b)) - if goip == nil { - return nil + goip, err := netip.ParseAddr(string(b)) + if err != nil { + return netip.Addr{} } return goip case cue.ListKind: iter, err := ip.List() if err != nil { - return nil + return netip.Addr{} } + var bytes []byte for iter.Next() { v, err := iter.Value().Int64() if err != nil { - return nil + return netip.Addr{} } if v < 0 || 255 < v { - return nil + return netip.Addr{} } - goip = append(goip, byte(v)) + bytes = append(bytes, byte(v)) + } + goip, ok := 
netip.AddrFromSlice(bytes) + if !ok { + return netip.Addr{} } return goip default: // TODO: return canonical invalid type. - return nil + return netip.Addr{} } } -func netGetIPCIDR(ip cue.Value) (gonet *net.IPNet, err error) { +func netGetIPCIDR(ip cue.Value) (gonet *netip.Prefix, err error) { switch ip.Kind() { case cue.StringKind: s, err := ip.String() if err != nil { return nil, err } - _, gonet, err := net.ParseCIDR(s) + cidr, err := netip.ParsePrefix(s) if err != nil { return nil, err } - return gonet, nil + return &cidr, nil case cue.BytesKind: b, err := ip.Bytes() if err != nil { return nil, err } - _, gonet, err := net.ParseCIDR(string(b)) + cidr, err := netip.ParsePrefix(string(b)) if err != nil { return nil, err } - return gonet, nil + return &cidr, nil default: // TODO: return canonical invalid type. @@ -111,14 +116,14 @@ func netGetIPCIDR(ip cue.Value) (gonet *net.IPNet, err error) { // If s is not a valid textual representation of an IP address, // ParseIP returns nil. func ParseIP(s string) ([]uint, error) { - goip := net.ParseIP(s) - if goip == nil { + goip, err := netip.ParseAddr(s) + if err != nil { return nil, fmt.Errorf("invalid IP address %q", s) } - return netToList(goip), nil + return netToList(goip.AsSlice()), nil } -func netToList(ip net.IP) []uint { +func netToList(ip []byte) []uint { a := make([]uint, len(ip)) for i, p := range ip { a[i] = uint(p) @@ -126,20 +131,27 @@ func netToList(ip net.IP) []uint { return a } -// IPv4 reports whether s is a valid IPv4 address. +// IPv4 reports whether ip is a valid IPv4 address. // // The address may be a string or list of bytes. func IPv4(ip cue.Value) bool { // TODO: convert to native CUE. - return netGetIP(ip).To4() != nil + return netGetIP(ip).Is4() +} + +// IPv6 reports whether ip is a valid IPv6 address. +// +// The address may be a string or list of bytes. +func IPv6(ip cue.Value) bool { + return netGetIP(ip).Is6() } -// IP reports whether s is a valid IPv4 or IPv6 address. +// IP reports whether ip is a valid IPv4 or IPv6 address. // // The address may be a string or list of bytes. func IP(ip cue.Value) bool { // TODO: convert to native CUE. - return netGetIP(ip) != nil + return netGetIP(ip).IsValid() } // IPCIDR reports whether ip is a valid IPv4 or IPv6 address with CIDR subnet notation. @@ -196,24 +208,25 @@ func UnspecifiedIP(ip cue.Value) bool { // 4-byte representation. func ToIP4(ip cue.Value) ([]uint, error) { ipdata := netGetIP(ip) - if ipdata == nil { + if !ipdata.IsValid() { return nil, fmt.Errorf("invalid IP %q", ip) } - ipv4 := ipdata.To4() - if ipv4 == nil { + if !ipdata.Is4() { return nil, fmt.Errorf("cannot convert %q to IPv4", ipdata) } - return netToList(ipv4), nil + as4 := ipdata.As4() + return netToList(as4[:]), nil } // ToIP16 converts a given IP address, which may be a string or a list, to its // 16-byte representation. func ToIP16(ip cue.Value) ([]uint, error) { ipdata := netGetIP(ip) - if ipdata == nil { + if !ipdata.IsValid() { return nil, fmt.Errorf("invalid IP %q", ip) } - return netToList(ipdata), nil + as16 := ipdata.As16() + return netToList(as16[:]), nil } // IPString returns the string form of the IP address ip. 
// IPString returns the string form of the IP address ip. It returns one of 4 forms: @@ -224,7 +237,7 @@ func ToIP16(ip cue.Value) ([]uint, error) { // - the hexadecimal form of ip, without punctuation, if no other cases apply func IPString(ip cue.Value) (string, error) { ipdata := netGetIP(ip) - if ipdata == nil { + if !ipdata.IsValid() { return "", fmt.Errorf("invalid IP %q", ip) } return ipdata.String(), nil diff --git a/vendor/cuelang.org/go/pkg/net/pkg.go b/vendor/cuelang.org/go/pkg/net/pkg.go index 4cd172cb8d..a1371998b9 100644 --- a/vendor/cuelang.org/go/pkg/net/pkg.go +++ b/vendor/cuelang.org/go/pkg/net/pkg.go @@ -81,6 +81,18 @@ var p = &pkg.Package{ c.Ret = IPv4(ip) } }, + }, { + Name: "IPv6", + Params: []pkg.Param{ + {Kind: adt.TopKind}, + }, + Result: adt.BoolKind, + Func: func(c *pkg.CallCtxt) { + ip := c.Value(0) + if c.Do() { + c.Ret = IPv6(ip) + } + }, }, { Name: "IP", Params: []pkg.Param{ diff --git a/vendor/cuelang.org/go/pkg/path/path.go b/vendor/cuelang.org/go/pkg/path/path.go index 4d1647c198..247dd7b3ff 100644 --- a/vendor/cuelang.org/go/pkg/path/path.go +++ b/vendor/cuelang.org/go/pkg/path/path.go @@ -16,13 +16,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package filepath implements utility routines for manipulating filename paths -// in a way compatible with the target operating system-defined file paths. -// -// The filepath package uses either forward slashes or backslashes, -// depending on the operating system. To process paths such as URLs -// that always use forward slashes regardless of the operating -// system, see the path package. +// Package path implements utility routines for manipulating filename paths as +// defined by targeted operating systems, and also paths that always use +// forward slashes regardless of the operating system, such as URLs. package path import ( diff --git a/vendor/cuelang.org/go/pkg/path/pkg.go b/vendor/cuelang.org/go/pkg/path/pkg.go index 9cdd35016b..813242e370 100644 --- a/vendor/cuelang.org/go/pkg/path/pkg.go +++ b/vendor/cuelang.org/go/pkg/path/pkg.go @@ -122,6 +122,7 @@ var p = &pkg.Package{ }, { Name: "Match", Params: []pkg.Param{ + {Kind: adt.StringKind}, {Kind: adt.StringKind}, {Kind: adt.StringKind, Value: unixDefault}, }, diff --git a/vendor/cuelang.org/go/pkg/tool/exec/exec.cue b/vendor/cuelang.org/go/pkg/tool/exec/exec.cue index 62e3af531c..c98008371b 100644 --- a/vendor/cuelang.org/go/pkg/tool/exec/exec.cue +++ b/vendor/cuelang.org/go/pkg/tool/exec/exec.cue @@ -29,7 +29,7 @@ Run: { // If the value is a list, the entries must be of the form key=value, // where the last value takes precedence in the case of multiple // occurrences of the same key. - env: [string]: string | [...=~"="] + env: {[string]: string} | [...=~"="] // stdout captures the output from stdout if it is of type bytes or string. // The default value of null indicates it is redirected to the stdout of the
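The exec.cue hunk above lifts the disjunction from the per-field level to env itself: the whole value is now either a struct of string values or a list of key=value entries. A sketch of how the two forms check out against the new schema, assuming cuelang.org/go's cuecontext API (editorial illustration, not part of the patch):

package main

import (
	"fmt"

	"cuelang.org/go/cue/cuecontext"
)

func main() {
	ctx := cuecontext.New()
	schema := ctx.CompileString(`env: {[string]: string} | [...=~"="]`)

	// Struct form: a mapping of variable names to string values.
	structForm := ctx.CompileString(`env: {PATH: "/usr/bin", HOME: "/root"}`)
	// List form: every entry must match the =~"=" constraint.
	listForm := ctx.CompileString(`env: ["PATH=/usr/bin", "HOME=/root"]`)
	// An entry without "=" satisfies neither branch of the disjunction.
	badForm := ctx.CompileString(`env: ["PATH"]`)

	fmt.Println(schema.Unify(structForm).Validate()) // <nil>
	fmt.Println(schema.Unify(listForm).Validate())   // <nil>
	fmt.Println(schema.Unify(badForm).Validate())    // non-nil error
}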
diff --git a/vendor/cuelang.org/go/pkg/tool/exec/exec.go b/vendor/cuelang.org/go/pkg/tool/exec/exec.go index 5c5f5ee363..a596a8caa6 100644 --- a/vendor/cuelang.org/go/pkg/tool/exec/exec.go +++ b/vendor/cuelang.org/go/pkg/tool/exec/exec.go @@ -45,13 +45,8 @@ func (c *execCmd) Run(ctx *task.Context) (res interface{}, err error) { // TODO: set environment variables, if defined. stream := func(name string) (stream cue.Value, ok bool) { - c := ctx.Obj.Lookup(name) - // Although the schema defines a default versions, older implementations - may not use it yet. - if !c.Exists() { - return - } - if err := c.Null(); ctx.Err != nil || err == nil { + c := ctx.Obj.LookupPath(cue.ParsePath(name)) + if err := c.Null(); c.Err() != nil || err == nil { return } return c, true @@ -71,14 +66,10 @@ func (c *execCmd) Run(ctx *task.Context) (res interface{}, err error) { cmd.Stderr = ctx.Stderr } - // TODO(mvdan): exec.Run declares mustSucceed as a regular field with a default of true. - // We should be able to rely on that here, removing the need for Exists and repeating the default. - mustSucceed := true - if v := ctx.Obj.LookupPath(cue.ParsePath("mustSucceed")); v.Exists() { - mustSucceed, err = v.Bool() - if err != nil { - return nil, errors.Wrapf(err, v.Pos(), "invalid bool value") - } + v := ctx.Obj.LookupPath(cue.ParsePath("mustSucceed")) + mustSucceed, err := v.Bool() + if err != nil { + return nil, errors.Wrapf(err, v.Pos(), "invalid bool value") } update := map[string]interface{}{} @@ -102,6 +93,7 @@ func (c *execCmd) Run(ctx *task.Context) (res interface{}, err error) { update["stderr"] = err.Error() } } + if !mustSucceed { return update, nil } @@ -152,13 +144,14 @@ func mkCommand(ctx *task.Context) (c *exec.Cmd, doc string, err error) { cmd := exec.CommandContext(ctx.Context, bin, args...) - cmd.Dir, _ = ctx.Obj.Lookup("dir").String() + cmd.Dir, _ = ctx.Obj.LookupPath(cue.ParsePath("dir")).String() - env := ctx.Obj.Lookup("env") + env := ctx.Obj.LookupPath(cue.ParsePath("env")) // List case. for iter, _ := env.List(); iter.Next(); { - str, err := iter.Value().String() + v, _ := iter.Value().Default() + str, err := v.String() if err != nil { return nil, "", errors.Wrapf(err, v.Pos(), "invalid environment variable value %q", v) @@ -167,9 +160,9 @@ func mkCommand(ctx *task.Context) (c *exec.Cmd, doc string, err error) { } // Struct case. - for iter, _ := ctx.Obj.Lookup("env").Fields(); iter.Next(); { + for iter, _ := env.Fields(); iter.Next(); { label := iter.Label() - v := iter.Value() + v, _ := iter.Value().Default() var str string switch v.Kind() { case cue.StringKind: diff --git a/vendor/cuelang.org/go/pkg/tool/exec/pkg.go b/vendor/cuelang.org/go/pkg/tool/exec/pkg.go index 690b8de78c..c20f7299ab 100644 --- a/vendor/cuelang.org/go/pkg/tool/exec/pkg.go +++ b/vendor/cuelang.org/go/pkg/tool/exec/pkg.go @@ -19,7 +19,7 @@ // // If the value is a list, the entries must be of the form key=value, // // where the last value takes precedence in the case of multiple // // occurrences of the same key. -// env: [string]: string | [...=~"="] +// env: {[string]: string} | [...=~"="] // // // stdout captures the output from stdout if it is of type bytes or string.
// // The default value of null indicates it is redirected to the stdout of the @@ -64,8 +64,8 @@ var p = &pkg.Package{ cmd: string | [string, ...string] dir?: string env: { - [string]: string | [...=~"="] - } + [string]: string + } | [...=~"="] stdout: *null | string | bytes stderr: *null | string | bytes stdin: *null | string | bytes diff --git a/vendor/cuelang.org/go/pkg/tool/http/http.cue b/vendor/cuelang.org/go/pkg/tool/http/http.cue index 19bcd03362..04816de7d3 100644 --- a/vendor/cuelang.org/go/pkg/tool/http/http.cue +++ b/vendor/cuelang.org/go/pkg/tool/http/http.cue @@ -14,9 +14,9 @@ package http -Get: Do & {method: "GET"} -Post: Do & {method: "POST"} -Put: Do & {method: "PUT"} +Get: Do & {method: "GET"} +Post: Do & {method: "POST"} +Put: Do & {method: "PUT"} Delete: Do & {method: "DELETE"} Do: { @@ -35,7 +35,7 @@ Do: { request: { body?: bytes | string - header: [string]: string | [...string] + header: [string]: string | [...string] trailer: [string]: string | [...string] } response: { @@ -43,7 +43,7 @@ Do: { statusCode: int body: *bytes | string - header: [string]: string | [...string] + header: [string]: string | [...string] trailer: [string]: string | [...string] } } diff --git a/vendor/cuelang.org/go/pkg/tool/http/http.go b/vendor/cuelang.org/go/pkg/tool/http/http.go index 2ef3b70d30..83e381e16c 100644 --- a/vendor/cuelang.org/go/pkg/tool/http/http.go +++ b/vendor/cuelang.org/go/pkg/tool/http/http.go @@ -47,8 +47,8 @@ func (c *httpCmd) Run(ctx *task.Context) (res interface{}, err error) { u = ctx.String("url") ) var r io.Reader - if obj := ctx.Obj.Lookup("request"); obj.Exists() { - if v := obj.Lookup("body"); v.Exists() { + if obj := ctx.Obj.LookupPath(cue.MakePath(cue.Str("request"))); obj.Exists() { + if v := obj.LookupPath(cue.MakePath(cue.Str("body"))); v.Exists() { r, err = v.Reader() if err != nil { return nil, err @@ -65,7 +65,7 @@ func (c *httpCmd) Run(ctx *task.Context) (res interface{}, err error) { } var caCert []byte - caCertValue := ctx.Obj.LookupPath(cue.ParsePath("tls.caCert")) + caCertValue := ctx.Obj.LookupPath(cue.MakePath(cue.Str("tls"), cue.Str("caCert"))) if caCertValue.Exists() { caCert, err = caCertValue.Bytes() if err != nil { @@ -74,7 +74,7 @@ func (c *httpCmd) Run(ctx *task.Context) (res interface{}, err error) { } tlsVerify := true - tlsVerifyValue := ctx.Obj.LookupPath(cue.ParsePath("tls.verify")) + tlsVerifyValue := ctx.Obj.LookupPath(cue.MakePath(cue.Str("tls"), cue.Str("verify"))) if tlsVerifyValue.Exists() { tlsVerify, err = tlsVerifyValue.Bool() if err != nil { @@ -143,7 +143,7 @@ func (c *httpCmd) Run(ctx *task.Context) (res interface{}, err error) { } func parseHeaders(obj cue.Value, label string) (http.Header, error) { - m := obj.Lookup(label) + m := obj.LookupPath(cue.MakePath(cue.Str(label))) if !m.Exists() { return nil, nil } diff --git a/vendor/cuelang.org/go/pkg/tool/http/pkg.go b/vendor/cuelang.org/go/pkg/tool/http/pkg.go index 49aba71bf2..dd231746b4 100644 --- a/vendor/cuelang.org/go/pkg/tool/http/pkg.go +++ b/vendor/cuelang.org/go/pkg/tool/http/pkg.go @@ -4,9 +4,9 @@ // // These are the supported tasks: // -// Get: Do & {method: "GET"} -// Post: Do & {method: "POST"} -// Put: Do & {method: "PUT"} +// Get: Do & {method: "GET"} +// Post: Do & {method: "POST"} +// Put: Do & {method: "PUT"} // Delete: Do & {method: "DELETE"} // // Do: { @@ -25,7 +25,7 @@ // // request: { // body?: bytes | string -// header: [string]: string | [...string] +// header: [string]: string | [...string] // trailer: [string]: string | [...string] // } // 
response: { @@ -33,7 +33,7 @@ // statusCode: int // // body: *bytes | string -// header: [string]: string | [...string] +// header: [string]: string | [...string] // trailer: [string]: string | [...string] // } // } diff --git a/vendor/cuelang.org/go/pkg/tool/os/env.go b/vendor/cuelang.org/go/pkg/tool/os/env.go index 40cdc8e6cb..270ae3dc2a 100644 --- a/vendor/cuelang.org/go/pkg/tool/os/env.go +++ b/vendor/cuelang.org/go/pkg/tool/os/env.go @@ -99,7 +99,7 @@ func (c *environCmd) Run(ctx *task.Context) (res interface{}, err error) { for _, kv := range os.Environ() { name, str, _ := strings.Cut(kv, "=") - if v := ctx.Obj.Lookup(name); v.Exists() { + if v := ctx.Obj.LookupPath(cue.MakePath(cue.Str(name))); v.Exists() { update[name], err = fromString(name, str, v) if err != nil { return nil, err diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/helper.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/helper.go deleted file mode 100644 index 5706fe2c7b..0000000000 --- a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/helper.go +++ /dev/null @@ -1,51 +0,0 @@ -package helper - -import ( - "fmt" - "github.com/aliyun/credentials-go/credentials" - "os" -) - -const ( - EnvRoleArn = "ALIBABA_CLOUD_ROLE_ARN" - EnvOidcProviderArn = "ALIBABA_CLOUD_OIDC_PROVIDER_ARN" - EnvOidcTokenFile = "ALIBABA_CLOUD_OIDC_TOKEN_FILE" -) - -func HaveOidcCredentialRequiredEnv() bool { - return os.Getenv(EnvRoleArn) != "" && - os.Getenv(EnvOidcProviderArn) != "" && - os.Getenv(EnvOidcTokenFile) != "" -} - -func NewOidcCredential(sessionName string) (credential credentials.Credential, err error) { - return GetOidcCredential(sessionName) -} - -// Deprecated: Use NewOidcCredential instead -func GetOidcCredential(sessionName string) (credential credentials.Credential, err error) { - roleArn := os.Getenv(EnvRoleArn) - oidcArn := os.Getenv(EnvOidcProviderArn) - tokenFile := os.Getenv(EnvOidcTokenFile) - if roleArn == "" { - return nil, fmt.Errorf("environment variable %q is missing", EnvRoleArn) - } - if oidcArn == "" { - return nil, fmt.Errorf("environment variable %q is missing", EnvOidcProviderArn) - } - if tokenFile == "" { - return nil, fmt.Errorf("environment variable %q is missing", EnvOidcTokenFile) - } - if _, err := os.Stat(tokenFile); err != nil { - return nil, fmt.Errorf("unable to read file at %q: %s", tokenFile, err) - } - - config := new(credentials.Config). - SetType("oidc_role_arn"). - SetOIDCProviderArn(oidcArn). - SetOIDCTokenFilePath(tokenFile). - SetRoleArn(roleArn). 
- SetRoleSessionName(sessionName) - - return credentials.NewCredential(config) -} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/LICENSE b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/LICENSE similarity index 100% rename from vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/LICENSE rename to vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/LICENSE diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/accesskey_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/accesskey_provider.go new file mode 100644 index 0000000000..71719adc9f --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/accesskey_provider.go @@ -0,0 +1,27 @@ +package provider + +import ( + "context" + "errors" +) + +type AccessKeyProvider struct { + cred *Credentials +} + +func NewAccessKeyProvider(accessKeyId, accessKeySecret string) *AccessKeyProvider { + return &AccessKeyProvider{ + cred: &Credentials{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + }, + } +} + +func (a *AccessKeyProvider) Credentials(ctx context.Context) (*Credentials, error) { + if a.cred.AccessKeyId == "" || a.cred.AccessKeySecret == "" { + return nil, NewNotEnableError(errors.New("AccessKeyId or AccessKeySecret is empty")) + } + + return a.cred.DeepCopy(), nil +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/chain_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/chain_provider.go new file mode 100644 index 0000000000..92cfcbe4cc --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/chain_provider.go @@ -0,0 +1,220 @@ +package provider + +import ( + "context" + "fmt" + "strings" + "sync" + "time" +) + +var defaultRuntimeSwitchCacheDuration = time.Minute * 15 + +type ChainProvider struct { + providers []CredentialsProvider + + currentProvider CredentialsProvider + Logger Logger + logPrefix string + + enableRuntimeSwitch bool + runtimeSwitchCacheDuration time.Duration + lastSelectProviderTime time.Time + + lock sync.RWMutex +} + +type ChainProviderOptions struct { + EnableRuntimeSwitch bool + RuntimeSwitchCacheDuration time.Duration + + logPrefix string +} + +func NewChainProvider(providers ...CredentialsProvider) *ChainProvider { + return NewChainProviderWithOptions(providers, ChainProviderOptions{}) +} + +func NewChainProviderWithOptions(providers []CredentialsProvider, opts ChainProviderOptions) *ChainProvider { + opts.applyDefaults() + + if len(providers) == 0 { + return NewDefaultChainProvider(DefaultChainProviderOptions{ + EnableRuntimeSwitch: opts.EnableRuntimeSwitch, + RuntimeSwitchCacheDuration: opts.RuntimeSwitchCacheDuration, + }) + } + return &ChainProvider{ + enableRuntimeSwitch: opts.EnableRuntimeSwitch, + runtimeSwitchCacheDuration: opts.RuntimeSwitchCacheDuration, + providers: providers, + logPrefix: opts.logPrefix, + } +} + +func (c *ChainProvider) Credentials(ctx context.Context) (*Credentials, error) { + return c.getCredentials(ctx) +} + +func (c *ChainProvider) SelectProvider(ctx context.Context) (CredentialsProvider, error) { + return c.selectProvider(ctx) +} + +func (c *ChainProvider) Stop(ctx context.Context) { + c.logger().Debug(fmt.Sprintf("%s start to stop...", c.logPrefix)) + + for _, p 
:= range c.providers { + if s, ok := p.(Stopper); ok { + s.Stop(ctx) + } + } +} + +func (c *ChainProvider) getCredentials(ctx context.Context) (*Credentials, error) { + p := c.getCurrentProvider() + if p != nil { + return p.Credentials(ctx) + } + + p, err := c.selectProvider(ctx) + if err != nil { + return nil, err + } + c.setCurrentProvider(p) + + return p.Credentials(ctx) +} + +func (c *ChainProvider) selectProvider(ctx context.Context) (CredentialsProvider, error) { + var notEnableErrors []string + for _, p := range c.providers { + if _, err := p.Credentials(ctx); err != nil { + if IsNotEnableError(err) { + c.logger().Debug(fmt.Sprintf("%s provider %T is not enabled will try to next: %s", + c.logPrefix, p, err.Error())) + notEnableErrors = append(notEnableErrors, fmt.Sprintf("provider %T is not enabled: %s", p, err.Error())) + continue + } + } + return p, nil + } + + err := fmt.Errorf("no available credentials provider: [%s]", strings.Join(notEnableErrors, ", ")) + return nil, NewNoAvailableProviderError(err) +} + +func (c *ChainProvider) getCurrentProvider() CredentialsProvider { + c.lock.RLock() + defer c.lock.RUnlock() + + if !c.enableRuntimeSwitch { + return c.currentProvider + } + + p := c.currentProvider + if p == nil { + return nil + } + if c.lastSelectProviderTime.IsZero() { + return nil + } + if time.Since(c.lastSelectProviderTime) >= c.runtimeSwitchCacheDuration { + c.logger().Debug(fmt.Sprintf("%s trigger select provider again", c.logPrefix)) + return nil + } + + return p +} + +func (c *ChainProvider) setCurrentProvider(p CredentialsProvider) { + c.lock.Lock() + defer c.lock.Unlock() + + prePT := fmt.Sprintf("%T", c.currentProvider) + pT := fmt.Sprintf("%T", p) + if prePT != pT { + c.logger().Info(fmt.Sprintf("%s switch to using new provider: %s -> %s", c.logPrefix, prePT, pT)) + } + + c.lastSelectProviderTime = time.Now() + c.currentProvider = p +} + +func (c *ChainProvider) logger() Logger { + if c.Logger != nil { + return c.Logger + } + return defaultLog +} + +type DefaultChainProviderOptions struct { + EnableRuntimeSwitch bool + RuntimeSwitchCacheDuration time.Duration + + STSEndpoint string + ExpiryWindow time.Duration + RefreshPeriod time.Duration + Logger Logger + + logPrefix string +} + +func NewDefaultChainProvider(opts DefaultChainProviderOptions) *ChainProvider { + opts.applyDefaults() + + p := NewChainProviderWithOptions( + []CredentialsProvider{ + NewEnvProvider(EnvProviderOptions{}), + NewOIDCProvider(OIDCProviderOptions{ + STSEndpoint: opts.STSEndpoint, + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + }), + NewEncryptedFileProvider(EncryptedFileProviderOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + }), + NewECSMetadataProvider(ECSMetadataProviderOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + }), + }, + ChainProviderOptions{ + EnableRuntimeSwitch: opts.EnableRuntimeSwitch, + RuntimeSwitchCacheDuration: opts.RuntimeSwitchCacheDuration, + logPrefix: opts.logPrefix, + }, + ) + p.Logger = opts.Logger + return p +} + +// Deprecated: use NewDefaultChainProvider instead +func DefaultChainProvider() *ChainProvider { + return NewDefaultChainProvider(DefaultChainProviderOptions{}) +} + +// Deprecated: use NewDefaultChainProvider instead +func DefaultChainProviderWithLogger(l Logger) *ChainProvider { + return NewDefaultChainProvider(DefaultChainProviderOptions{ + Logger: l, + }) +} + +func (o 
*ChainProviderOptions) applyDefaults() { + if o.RuntimeSwitchCacheDuration <= 0 { + o.RuntimeSwitchCacheDuration = defaultRuntimeSwitchCacheDuration + } + if o.logPrefix == "" { + o.logPrefix = "[ChainProvider]" + } +} + +func (o *DefaultChainProviderOptions) applyDefaults() { + if o.logPrefix == "" { + o.logPrefix = "[DefaultChainProvider]" + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/credentials.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/credentials.go new file mode 100644 index 0000000000..778458d7b8 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/credentials.go @@ -0,0 +1,22 @@ +package provider + +import "time" + +type Credentials struct { + AccessKeyId string + AccessKeySecret string + SecurityToken string + Expiration time.Time +} + +func (c *Credentials) DeepCopy() *Credentials { + if c == nil { + return nil + } + return &Credentials{ + AccessKeyId: c.AccessKeyId, + AccessKeySecret: c.AccessKeySecret, + SecurityToken: c.SecurityToken, + Expiration: c.Expiration, + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ecsmetadata_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ecsmetadata_provider.go new file mode 100644 index 0000000000..4c7712b818 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ecsmetadata_provider.go @@ -0,0 +1,198 @@ +package provider + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strings" + "time" +) + +const ( + defaultExpiryWindow = time.Minute * 30 + defaultECSMetadataServerEndpoint = "http://100.100.100.200" + defaultECSMetadataTokenTTLSeconds = 3600 + defaultClientTimeout = time.Second * 30 +) + +type ECSMetadataProvider struct { + u *Updater + + endpoint string + roleName string + metadataToken string + metadataTokenTTLSeconds int + metadataTokenExp time.Time + + client *commonHttpClient + Logger Logger +} + +type ECSMetadataProviderOptions struct { + Endpoint string + Timeout time.Duration + Transport http.RoundTripper + + RoleName string + MetadataTokenTTLSeconds int + + ExpiryWindow time.Duration + RefreshPeriod time.Duration + Logger Logger +} + +func NewECSMetadataProvider(opts ECSMetadataProviderOptions) *ECSMetadataProvider { + opts.applyDefaults() + + client := newCommonHttpClient(opts.Transport, opts.Timeout) + client.logger = opts.Logger + e := &ECSMetadataProvider{ + endpoint: opts.Endpoint, + roleName: opts.RoleName, + metadataTokenTTLSeconds: opts.MetadataTokenTTLSeconds, + client: client, + Logger: opts.Logger, + } + e.u = NewUpdater(e.getCredentials, UpdaterOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + LogPrefix: "[ECSMetadataProvider]", + }) + e.u.Start(context.TODO()) + + return e +} + +func (e *ECSMetadataProvider) Credentials(ctx context.Context) (*Credentials, error) { + return e.u.Credentials(ctx) +} + +func (e *ECSMetadataProvider) Stop(ctx context.Context) { + e.u.Stop(ctx) +} + +type ecsMetadataStsResponse struct { + AccessKeyId string `json:"AccessKeyId"` + AccessKeySecret string `json:"AccessKeySecret"` + SecurityToken string `json:"SecurityToken"` + Expiration string `json:"Expiration"` + LastUpdated string `json:"LastUpdated"` + Code string `json:"Code"` +} + +func (e *ECSMetadataProvider) getCredentials(ctx context.Context) (*Credentials, error) { + roleName, err := 
e.getRoleName(ctx) + if err != nil { + if e, ok := err.(*httpError); ok && e.code == 404 { + return nil, NewNotEnableError(fmt.Errorf("get role name from ecs metadata failed: %w", err)) + } + } + path := fmt.Sprintf("/latest/meta-data/ram/security-credentials/%s", roleName) + data, err := e.getMedataDataWithToken(ctx, http.MethodGet, path) + if err != nil { + return nil, err + } + + var obj ecsMetadataStsResponse + if err := json.Unmarshal([]byte(data), &obj); err != nil { + return nil, fmt.Errorf("parse credentials failed: %w", err) + } + if obj.AccessKeyId == "" || obj.AccessKeySecret == "" || + obj.SecurityToken == "" || obj.Expiration == "" { + return nil, fmt.Errorf("parse credentials got unexpected data: %s", + strings.ReplaceAll(data, "\n", " ")) + } + + exp, err := time.Parse("2006-01-02T15:04:05Z", obj.Expiration) + if err != nil { + return nil, fmt.Errorf("parse Expiration (%s) failed: %w", obj.Expiration, err) + } + return &Credentials{ + AccessKeyId: obj.AccessKeyId, + AccessKeySecret: obj.AccessKeySecret, + SecurityToken: obj.SecurityToken, + Expiration: exp, + }, nil +} + +func (e *ECSMetadataProvider) getRoleName(ctx context.Context) (string, error) { + if e.roleName != "" { + return e.roleName, nil + } + name, err := e.getMedataDataWithToken(ctx, http.MethodGet, "/latest/meta-data/ram/security-credentials/") + if err != nil { + return "", err + } + return strings.TrimSpace(name), nil +} + +func (e *ECSMetadataProvider) getMedataToken(ctx context.Context) (string, error) { + if !e.metadataTokenExp.Before(time.Now()) { + return e.metadataToken, nil + } + + e.logger().Debug("start to get metadata token") + h := http.Header{} + h.Set("X-aliyun-ecs-metadata-token-ttl-seconds", fmt.Sprintf("%d", e.metadataTokenTTLSeconds)) + body, err := e.getMedataData(ctx, http.MethodPut, "/latest/api/token", h) + if err != nil { + return "", fmt.Errorf("get metadata token failed: %w", err) + } + + e.metadataToken = strings.TrimSpace(body) + e.metadataTokenExp = time.Now().Add(time.Duration(float64(e.metadataTokenTTLSeconds)*0.8) * time.Second) + + return body, nil +} + +func (e *ECSMetadataProvider) getMedataDataWithToken(ctx context.Context, method, path string) (string, error) { + token, err := e.getMedataToken(ctx) + if err != nil { + if e, ok := err.(*httpError); !(ok && e.code == 404) { + return "", err + } + } + h := http.Header{} + if token != "" { + h.Set("X-aliyun-ecs-metadata-token", token) + } + return e.getMedataData(ctx, method, path, h) +} + +func (e *ECSMetadataProvider) getMedataData(ctx context.Context, method, path string, header http.Header) (string, error) { + url := fmt.Sprintf("%s%s", e.endpoint, path) + return e.client.send(ctx, method, url, header, nil) +} + +func (e *ECSMetadataProvider) logger() Logger { + if e.Logger != nil { + return e.Logger + } + return defaultLog +} + +func (o *ECSMetadataProviderOptions) applyDefaults() { + if o.Timeout <= 0 { + o.Timeout = defaultClientTimeout + } + if o.Transport == nil { + ts := http.DefaultTransport.(*http.Transport).Clone() + o.Transport = ts + } + if o.Endpoint == "" { + o.Endpoint = defaultECSMetadataServerEndpoint + } else { + o.Endpoint = strings.TrimRight(o.Endpoint, "/") + } + if o.MetadataTokenTTLSeconds == 0 { + o.MetadataTokenTTLSeconds = defaultECSMetadataTokenTTLSeconds + } + if o.ExpiryWindow == 0 { + o.ExpiryWindow = defaultExpiryWindow + } + if o.Logger == nil { + o.Logger = defaultLog + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/encryptedfile_provider.go 
b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/encryptedfile_provider.go new file mode 100644 index 0000000000..7626599ea9 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/encryptedfile_provider.go @@ -0,0 +1,135 @@ +package provider + +import ( + "context" + "crypto/aes" + "crypto/cipher" + "encoding/base64" + "encoding/json" + "fmt" + "time" +) + +const defaultEncryptedFilePath = "/var/addon/token-config" + +type EncryptedFileProvider struct { + f *FileProvider +} + +type EncryptedFileProviderOptions struct { + FilePath string + RefreshPeriod time.Duration + ExpiryWindow time.Duration + Logger Logger +} + +func NewEncryptedFileProvider(opts EncryptedFileProviderOptions) *EncryptedFileProvider { + opts.applyDefaults() + + e := &EncryptedFileProvider{} + e.f = NewFileProvider(opts.FilePath, parseEncryptedToken, FileProviderOptions{ + RefreshPeriod: opts.RefreshPeriod, + ExpiryWindow: opts.ExpiryWindow, + Logger: opts.Logger, + LogPrefix: "[EncryptedFileProvider]", + }) + + return e +} + +func (e *EncryptedFileProvider) Credentials(ctx context.Context) (*Credentials, error) { + return e.f.Credentials(ctx) +} + +func (o *EncryptedFileProviderOptions) applyDefaults() { + if o.ExpiryWindow == 0 { + o.ExpiryWindow = defaultExpiryWindow + } + if o.FilePath == "" { + o.FilePath = defaultEncryptedFilePath + } + if o.Logger == nil { + o.Logger = defaultLog + } +} + +func parseEncryptedToken(data []byte) (*Credentials, error) { + var t encryptedToken + if err := json.Unmarshal(data, &t); err != nil { + return nil, fmt.Errorf("parse data failed: %w", err) + } + if t.Error != nil { + return nil, t.Error + } + + id, err := decrypt(t.AccessKeyId, []byte(t.Keyring)) + if err != nil { + return nil, fmt.Errorf("parse data failed: %w", err) + } + se, err := decrypt(t.AccessKeySecret, []byte(t.Keyring)) + if err != nil { + return nil, fmt.Errorf("parse data failed: %w", err) + } + st, err := decrypt(t.SecurityToken, []byte(t.Keyring)) + if err != nil { + return nil, fmt.Errorf("parse data failed: %w", err) + } + exp, err := time.Parse("2006-01-02T15:04:05Z", t.Expiration) + if err != nil { + return nil, fmt.Errorf("parse expiration %s failed: %w", t.Expiration, err) + } + + return &Credentials{ + AccessKeyId: string(id), + AccessKeySecret: string(se), + SecurityToken: string(st), + Expiration: exp, + }, nil +} + +type encryptedToken struct { + AccessKeyId string `json:"access.key.id"` + AccessKeySecret string `json:"access.key.secret"` + SecurityToken string `json:"security.token"` + Expiration string `json:"expiration"` + Keyring string `json:"keyring"` + + Error *encryptedTokenError `json:"error,omitempty"` +} + +type encryptedTokenError struct { + RoleName string `json:"roleName,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e encryptedTokenError) Error() string { + return fmt.Sprintf("assume role %s failed: %s %s", e.RoleName, e.Code, e.Message) +} + +func decrypt(s string, keyring []byte) ([]byte, error) { + cdata, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return nil, err + } + block, err := aes.NewCipher(keyring) + if err != nil { + return nil, err + } + + blockSize := block.BlockSize() + iv := cdata[:blockSize] + blockMode := cipher.NewCBCDecrypter(block, iv) + origData := make([]byte, len(cdata)-blockSize) + + blockMode.CryptBlocks(origData, cdata[blockSize:]) + + origData = pkcs5UnPadding(origData) + return origData, nil +} + +func 
pkcs5UnPadding(origData []byte) []byte { + length := len(origData) + unpadding := int(origData[length-1]) + return origData[:(length - unpadding)] +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/endpoint.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/endpoint.go new file mode 100644 index 0000000000..7069b887f6 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/endpoint.go @@ -0,0 +1,43 @@ +package provider + +import ( + "os" + "strings" +) + +const ( + regionPlaceholder = "{region}" + defaultSTSEndpointTPL = "sts.{region}.aliyuncs.com" + defaultSTSVPCEndpointTPL = "sts-vpc.{region}.aliyuncs.com" +) + +// https://help.aliyun.com/zh/ram/developer-reference/api-sts-2015-04-01-endpoint +var stsEndpointsByRegion = map[string][2]string{ + "": { + defaultSTSEndpoint, + defaultSTSEndpoint, + }, + "__default__": { + defaultSTSEndpointTPL, + defaultSTSVPCEndpointTPL, + }, + "cn-hangzhou-finance": { + "sts.cn-hangzhou.aliyuncs.com", + "sts-vpc.cn-hangzhou.aliyuncs.com", + }, +} + +func GetSTSEndpoint(region string, vpcNetwork bool) string { + if v := os.Getenv(envStsEndpoint); v != "" { + return v + } + endpoints, exist := stsEndpointsByRegion[region] + if !exist { + endpoints = stsEndpointsByRegion["__default__"] + } + endpoint := endpoints[0] + if vpcNetwork { + endpoint = endpoints[1] + } + return strings.ReplaceAll(endpoint, regionPlaceholder, region) +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env.go new file mode 100644 index 0000000000..7210140dda --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env.go @@ -0,0 +1,155 @@ +package provider + +import "os" + +const ( + envRoleArn = "ALIBABA_CLOUD_ROLE_ARN" + envOidcProviderArn = "ALIBABA_CLOUD_OIDC_PROVIDER_ARN" + envOidcTokenFile = "ALIBABA_CLOUD_OIDC_TOKEN_FILE" + + envStsEndpoint = "ALIBABA_CLOUD_STS_ENDPOINT" + envStsHttpScheme = "ALIBABA_CLOUD_STS_HTTP_SCHEME" +) + +// https://github.com/aliyun/credentials-go +const ( + envNewSdkAccessKeyId = "ALIBABA_CLOUD_ACCESS_KEY_ID" + envNewSdkAccessKeySecret = "ALIBABA_CLOUD_ACCESS_KEY_SECRET" // #nosec G101 + envNewSdkSecurityToken = "ALIBABA_CLOUD_SECURITY_TOKEN" // #nosec G101 + envNewSdkRoleSessionName = "ALIBABA_CLOUD_ROLE_SESSION_NAME" + + envNewSdkCredentialsURI = "ALIBABA_CLOUD_CREDENTIALS_URI" // #nosec G101 + + envNewSdkCredentialFile = "ALIBABA_CLOUD_CREDENTIALS_FILE" // #nosec G101 + + envRoleSessionName = envNewSdkRoleSessionName + envCredentialsURI = envNewSdkCredentialsURI // #nosec G101 +) + +// https://github.com/aliyun/alibaba-cloud-sdk-go/tree/master/sdk/auth +const ( + envOldSdkAccessKeyID = "ALICLOUD_ACCESS_KEY" + envOldSdkAccessKeySecret = "ALICLOUD_SECRET_KEY" + envOldSdkAccessKeyStsToken = "ALICLOUD_ACCESS_KEY_STS_TOKEN" // #nosec G101 + //envOldSdkRoleArn = "ALICLOUD_ROLE_ARN" + envOldSdkRoleSessionName = "ALICLOUD_ROLE_SESSION_NAME" + //envOldSdkRoleSessionExpiration = "ALICLOUD_ROLE_SESSION_EXPIRATION" + //envOldSdkPrivateKey = "ALICLOUD_PRIVATE_KEY" + //envOldSdkPublicKeyID = "ALICLOUD_PUBLIC_KEY_ID" + //envOldSdkSessionExpiration = "ALICLOUD_SESSION_EXPIRATION" + //envOldSdkRoleName = "ALICLOUD_ROLE_NAME" +) + +const ( + envAliyuncliAccessKeyId1 = "ALIBABACLOUD_ACCESS_KEY_ID" + envAliyuncliAccessKeyId2 = "ALICLOUD_ACCESS_KEY_ID" + envAliyuncliAccessKeyId3 = 
"ACCESS_KEY_ID" + + envAliyuncliAccessKeySecret1 = "ALIBABACLOUD_ACCESS_KEY_SECRET" // #nosec G101 + envAliyuncliAccessKeySecret2 = "ALICLOUD_ACCESS_KEY_SECRET" // #nosec G101 + envAliyuncliAccessKeySecret3 = "ACCESS_KEY_SECRET" // #nosec G101 + + envAliyuncliStsToken1 = "ALIBABACLOUD_SECURITY_TOKEN" // #nosec G101 + envAliyuncliStsToken2 = "ALICLOUD_SECURITY_TOKEN" // #nosec G101 + envAliyuncliStsToken3 = "SECURITY_TOKEN" // #nosec G101 + + envAliyuncliProfileName1 = "ALIBABACLOUD_PROFILE" + envAliyuncliProfileName2 = "ALIBABA_CLOUD_PROFILE" + envAliyuncliProfileName3 = "ALICLOUD_PROFILE" + + envAliyuncliIgnoreProfile = "ALIBABACLOUD_IGNORE_PROFILE" + + envAliyuncliProfilePath = "ALIBABACLOUD_PROFILE_PATH" +) + +// https://github.com/aliyun/alibabacloud-credentials-cli +const ( + envAccAlibabaCloudAccessKeyId = "ALIBABACLOUD_ACCESS_KEY_ID" + envAccAlibabaCloudAccessKeySecret = "ALIBABACLOUD_ACCESS_KEY_SECRET" // #nosec G101 + envAccAlibabaCloudSecurityToken = "ALIBABACLOUD_SECURITY_TOKEN" // #nosec G101 +) + +var ( + accessKeyIdEnvs = []string{ + envNewSdkAccessKeyId, + envOldSdkAccessKeyID, + envAliyuncliAccessKeyId1, + envAliyuncliAccessKeyId2, + envAccAlibabaCloudAccessKeyId, + //envAliyuncliAccessKeyId3, + } + + accessKeySecretEnvs = []string{ + envNewSdkAccessKeySecret, + envOldSdkAccessKeySecret, + envAliyuncliAccessKeySecret1, + envAliyuncliAccessKeySecret2, + envAccAlibabaCloudAccessKeySecret, + //envAliyuncliAccessKeySecret3, + } + + securityTokenEnvs = []string{ + envNewSdkSecurityToken, + envOldSdkAccessKeyStsToken, + envAliyuncliStsToken1, + envAliyuncliStsToken2, + envAccAlibabaCloudSecurityToken, + //envAliyuncliStsToken3, + } + + roleArnEnvs = []string{ + envRoleArn, + } + oidcProviderArnEnvs = []string{ + envOidcProviderArn, + } + oidcTokenFileEnvs = []string{ + envOidcTokenFile, + } + roleSessionNameEnvs = []string{ + envNewSdkRoleSessionName, + envOldSdkRoleSessionName, + } + + credentialsURIEnvs = []string{ + envNewSdkCredentialsURI, + } + + credentialFileEnvs = []string{ + envNewSdkCredentialFile, + } + + aliyuncliProfileNameEnvs = []string{ + envAliyuncliProfileName1, + envAliyuncliProfileName2, + envAliyuncliProfileName3, + } + aliyuncliIgnoreProfileEnvs = []string{ + envAliyuncliIgnoreProfile, + } + aliyuncliProfilePathEnvs = []string{ + envAliyuncliProfilePath, + } +) + +func getEnvsValue(keys []string) string { + for _, key := range keys { + v := os.Getenv(key) + if v != "" { + return v + } + } + return "" +} + +func getRoleSessionNameFromEnv() string { + return getEnvsValue(roleSessionNameEnvs) +} + +func getStsEndpointFromEnv() string { + return getEnvsValue([]string{envStsEndpoint}) +} + +func getStsHttpSchemeFromEnv() string { + return getEnvsValue([]string{envStsHttpScheme}) +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env_provider.go new file mode 100644 index 0000000000..33783eda26 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env_provider.go @@ -0,0 +1,128 @@ +package provider + +import ( + "context" + "errors" + "fmt" + "os" +) + +const ( + envAccessKeyId = "ALIBABA_CLOUD_ACCESS_KEY_ID" + envAccessKeySecret = "ALIBABA_CLOUD_ACCESS_KEY_SECRET" + envSecurityToken = "ALIBABA_CLOUD_SECURITY_TOKEN" +) + +type EnvProvider struct { + cp CredentialsProvider +} + +type EnvProviderOptions struct { + EnvAccessKeyId string + EnvAccessKeySecret string + EnvSecurityToken 
string + + EnvRoleArn string + EnvOIDCProviderArn string + EnvOIDCTokenFile string + + EnvCredentialsURI string + + stsEndpoint string +} + +func NewEnvProvider(opts EnvProviderOptions) *EnvProvider { + opts.applyDefaults() + + e := &EnvProvider{} + e.cp = e.getProvider(opts) + + return e +} + +func (e *EnvProvider) Credentials(ctx context.Context) (*Credentials, error) { + cred, err := e.cp.Credentials(ctx) + + if err != nil { + if IsNoAvailableProviderError(err) { + return nil, NewNotEnableError(fmt.Errorf("not found credentials from env: %w", err)) + } + return nil, err + } + + return cred.DeepCopy(), nil +} + +func (e *EnvProvider) Stop(ctx context.Context) { + if s, ok := e.cp.(Stopper); ok { + s.Stop(ctx) + } +} + +func (e *EnvProvider) getProvider(opts EnvProviderOptions) CredentialsProvider { + accessKeyId := os.Getenv(opts.EnvAccessKeyId) + accessKeySecret := os.Getenv(opts.EnvAccessKeySecret) + securityToken := os.Getenv(opts.EnvSecurityToken) + roleArn := os.Getenv(opts.EnvRoleArn) + oidcProviderArn := os.Getenv(opts.EnvOIDCProviderArn) + oidcTokenFile := os.Getenv(opts.EnvOIDCTokenFile) + credentialsURI := os.Getenv(opts.EnvCredentialsURI) + + switch { + case accessKeyId != "" && accessKeySecret != "" && securityToken != "": + return NewSTSTokenProvider( + os.Getenv(opts.EnvAccessKeyId), + os.Getenv(opts.EnvAccessKeySecret), + os.Getenv(opts.EnvSecurityToken), + ) + + case roleArn != "" && oidcProviderArn != "" && oidcTokenFile != "": + return NewOIDCProvider(OIDCProviderOptions{ + RoleArn: os.Getenv(opts.EnvRoleArn), + OIDCProviderArn: os.Getenv(opts.EnvOIDCProviderArn), + OIDCTokenFile: os.Getenv(opts.EnvOIDCTokenFile), + STSEndpoint: opts.stsEndpoint, + }) + + case credentialsURI != "": + return NewURIProvider(credentialsURI, URIProviderOptions{}) + + case accessKeyId != "" && accessKeySecret != "": + return NewAccessKeyProvider( + os.Getenv(opts.EnvAccessKeyId), + os.Getenv(opts.EnvAccessKeySecret), + ) + + default: + return &errorProvider{ + err: NewNoAvailableProviderError( + errors.New("no validated credentials were found in environment variables")), + } + } +} + +func (o *EnvProviderOptions) applyDefaults() { + if o.EnvAccessKeyId == "" { + o.EnvAccessKeyId = envAccessKeyId + } + if o.EnvAccessKeySecret == "" { + o.EnvAccessKeySecret = envAccessKeySecret + } + if o.EnvSecurityToken == "" { + o.EnvSecurityToken = envSecurityToken + } + + if o.EnvRoleArn == "" { + o.EnvRoleArn = defaultEnvRoleArn + } + if o.EnvOIDCProviderArn == "" { + o.EnvOIDCProviderArn = defaultEnvOIDCProviderArn + } + if o.EnvOIDCTokenFile == "" { + o.EnvOIDCTokenFile = defaultEnvOIDCTokenFile + } + + if o.EnvCredentialsURI == "" { + o.EnvCredentialsURI = envCredentialsURI + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/error.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/error.go new file mode 100644 index 0000000000..a536228929 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/error.go @@ -0,0 +1,55 @@ +package provider + +import "context" + +type NotEnableError struct { + err error +} + +type NoAvailableProviderError struct { + err error +} + +func NewNotEnableError(err error) *NotEnableError { + return &NotEnableError{err: err} +} + +func NewNoAvailableProviderError(err error) *NoAvailableProviderError { + return &NoAvailableProviderError{err: err} +} + +func (e NotEnableError) Error() string { + return e.err.Error() +} + +func (e NoAvailableProviderError) 
Error() string { + return e.err.Error() +} + +func IsNotEnableError(err error) bool { + if _, ok := err.(*NotEnableError); ok { + return true + } + if _, ok := err.(NotEnableError); ok { + return true + } + return false +} + +func IsNoAvailableProviderError(err error) bool { + if _, ok := err.(*NoAvailableProviderError); ok { + return true + } + if _, ok := err.(NoAvailableProviderError); ok { + return true + } + return false +} + +type errorProvider struct { + err error +} + +func (e errorProvider) Credentials(ctx context.Context) (*Credentials, error) { + return nil, e.err +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/file_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/file_provider.go new file mode 100644 index 0000000000..5e8eebdece --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/file_provider.go @@ -0,0 +1,76 @@ +package provider + +import ( + "context" + "fmt" + "os" + "time" +) + +type FileProvider struct { + u *Updater + + filepath string + decoder func(data []byte) (*Credentials, error) +} + +type FileProviderOptions struct { + RefreshPeriod time.Duration + ExpiryWindow time.Duration + Logger Logger + LogPrefix string +} + +func NewFileProvider(filepath string, decoder func(data []byte) (*Credentials, error), opts FileProviderOptions) *FileProvider { + opts.applyDefaults() + + e := &FileProvider{ + filepath: filepath, + decoder: decoder, + } + e.u = NewUpdater(e.getCredentials, UpdaterOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + LogPrefix: opts.LogPrefix, + }) + e.u.Start(context.TODO()) + + return e +} + +func (f *FileProvider) Credentials(ctx context.Context) (*Credentials, error) { + return f.u.Credentials(ctx) +} + +func (f *FileProvider) Stop(ctx context.Context) { + f.u.Stop(ctx) +} + +func (f *FileProvider) getCredentials(ctx context.Context) (*Credentials, error) { + data, err := os.ReadFile(f.filepath) + if err != nil { + if os.IsNotExist(err) { + return nil, NewNotEnableError(fmt.Errorf("read file %s failed: %w", f.filepath, err)) + } + return nil, fmt.Errorf("read file %s failed: %w", f.filepath, err) + } + + cred, err := f.decoder(data) + if err != nil { + return nil, fmt.Errorf("decode data from %s failed: %w", f.filepath, err) + } + return cred, nil +} + +func (f *FileProviderOptions) applyDefaults() { + if f.ExpiryWindow == 0 { + f.ExpiryWindow = defaultExpiryWindow + } + if f.Logger == nil { + f.Logger = defaultLog + } + if f.LogPrefix == "" { + f.LogPrefix = "[FileProvider]" + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/function_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/function_provider.go new file mode 100644 index 0000000000..08d145e449 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/function_provider.go @@ -0,0 +1,28 @@ +package provider + +import ( + "context" + "errors" +) + +type FunctionProvider struct { + getCredentials func(ctx context.Context) (*Credentials, error) +} + +func NewFunctionProvider(getCredentials func(ctx context.Context) (*Credentials, error)) *FunctionProvider { + return &FunctionProvider{ + getCredentials: getCredentials, + } +} + +func (f *FunctionProvider) Credentials(ctx context.Context) (*Credentials, error) { + if f.getCredentials == nil { + return nil, 
NewNotEnableError(errors.New("getCredentials function is nil")) + } + + cred, err := f.getCredentials(ctx) + if err != nil { + return nil, err + } + return cred.DeepCopy(), nil +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/http.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/http.go new file mode 100644 index 0000000000..8bf61a4edb --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/http.go @@ -0,0 +1,91 @@ +package provider + +import ( + "context" + "fmt" + "io" + "net/http" + "strings" + "time" +) + +type httpClient interface { + Do(req *http.Request) (*http.Response, error) +} + +type httpError struct { + code int + message string + data []byte +} + +type commonHttpClient struct { + client httpClient + logger Logger +} + +func newCommonHttpClient(transport http.RoundTripper, timeout time.Duration) *commonHttpClient { + client := &http.Client{ + Transport: transport, + Timeout: timeout, + } + return &commonHttpClient{client: client} +} + +func (c *commonHttpClient) send(ctx context.Context, method, url string, header http.Header, body io.Reader) (string, error) { + req, err := http.NewRequest(method, url, body) + if err != nil { + return "", fmt.Errorf("can not init request with url %s: %w", url, err) + } + req = req.WithContext(ctx) + req.Header.Set("User-Agent", UserAgent) + for k, items := range header { + for _, v := range items { + req.Header.Add(k, v) + } + } + + if debugMode { + for _, item := range genDebugReqMessages(req) { + c.getLogger().Debug(item) + } + } + + resp, err := c.client.Do(req) + if err != nil { + return "", fmt.Errorf("request %s failed: %w", url, err) + } + defer resp.Body.Close() + + if debugMode { + for _, item := range genDebugRespMessages(resp) { + c.getLogger().Debug(item) + } + } + + data, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("read body failed when request %s: %w", url, err) + } + if resp.StatusCode != http.StatusOK { + return "", &httpError{ + code: resp.StatusCode, + message: fmt.Sprintf("status code %d is not 200 when request %s: %s", + resp.StatusCode, url, strings.ReplaceAll(string(data), "\n", " ")), + data: data, + } + } + + return string(data), nil +} + +func (c *commonHttpClient) getLogger() Logger { + if c.logger != nil { + return c.logger + } + return defaultLog +} + +func (e httpError) Error() string { + return e.message +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/log.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/log.go new file mode 100644 index 0000000000..0b8039806d --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/log.go @@ -0,0 +1,44 @@ +package provider + +import ( + "log" + "os" + "strings" +) + +var debugMode bool + +type Logger interface { + Info(msg string) + Debug(msg string) + Error(err error, msg string) +} + +var defaultLog Logger = defaultLogger{} + +func init() { + debugEnv := strings.Split(strings.ToLower(os.Getenv("DEBUG")), ",") + for _, item := range debugEnv { + if item == "sdk" || item == "tea" || item == "credentials-provider" { + debugMode = true + break + } + } +} + +type defaultLogger struct { +} + +func (d defaultLogger) Info(msg string) { + log.Print(msg) +} + +func (d defaultLogger) Debug(msg string) { + if debugMode { + log.Print(msg) + } +} + +func (d defaultLogger) Error(err error, msg string) { + log.Print(msg) +} 
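The files above make up a self-contained credentials provider chain for Alibaba Cloud: each provider reports NewNotEnableError when its configuration is absent, and ChainProvider walks the list until one succeeds. A usage sketch, using only names defined in this vendored package (editorial illustration, not part of the patch):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider"
)

func main() {
	// The default chain tries, in order: environment variables, OIDC role
	// assumption, the encrypted token file, and ECS instance metadata.
	p := provider.NewDefaultChainProvider(provider.DefaultChainProviderOptions{
		EnableRuntimeSwitch: true, // periodically re-select a provider
	})
	defer p.Stop(context.Background())

	cred, err := p.Credentials(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cred.AccessKeyId, "expires at", cred.Expiration)
}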
diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/oidc_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/oidc_provider.go new file mode 100644 index 0000000000..62ffc7c70a --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/oidc_provider.go @@ -0,0 +1,306 @@ +package provider + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" +) + +const ( + defaultEnvRoleArn = "ALIBABA_CLOUD_ROLE_ARN" + defaultEnvOIDCProviderArn = "ALIBABA_CLOUD_OIDC_PROVIDER_ARN" + defaultEnvOIDCTokenFile = "ALIBABA_CLOUD_OIDC_TOKEN_FILE" + + defaultExpiryWindowForAssumeRole = time.Minute * 10 +) + +var ( + defaultSessionName = "default-session-name" + defaultSTSEndpoint = "sts.aliyuncs.com" + defaultSTSScheme = "HTTPS" +) + +type OIDCProvider struct { + u *Updater + + client *http.Client + + stsEndpoint string + stsScheme string + sessionName string + + policy string + durationSeconds string + + roleArn string + oidcProviderArn string + oidcTokenFile string + + Logger Logger +} + +type OIDCProviderOptions struct { + STSEndpoint string + stsScheme string + SessionName string + + TokenDuration time.Duration + Policy string + + RoleArn string + EnvRoleArn string + OIDCProviderArn string + EnvOIDCProviderArn string + OIDCTokenFile string + EnvOIDCTokenFile string + + Timeout time.Duration + Transport http.RoundTripper + + ExpiryWindow time.Duration + RefreshPeriod time.Duration + Logger Logger +} + +func init() { + sessionName := getRoleSessionNameFromEnv() + if sessionName != "" { + defaultSessionName = sessionName + } + if v := getStsEndpointFromEnv(); v != "" { + defaultSTSEndpoint = v + } + if v := getStsHttpSchemeFromEnv(); v != "" { + defaultSTSScheme = strings.ToUpper(v) + } +} + +func NewOIDCProvider(opts OIDCProviderOptions) *OIDCProvider { + opts.applyDefaults() + + client := &http.Client{ + Transport: opts.Transport, + Timeout: opts.Timeout, + } + e := &OIDCProvider{ + client: client, + stsEndpoint: opts.STSEndpoint, + stsScheme: opts.stsScheme, + sessionName: opts.SessionName, + policy: opts.Policy, + roleArn: opts.getRoleArn(), + oidcProviderArn: opts.getOIDCProviderArn(), + oidcTokenFile: opts.getOIDCTokenFile(), + Logger: opts.Logger, + } + if opts.TokenDuration >= time.Second*900 { + ds := int64(opts.TokenDuration.Seconds()) + e.durationSeconds = fmt.Sprintf("%d", ds) + } + + e.u = NewUpdater(e.getCredentials, UpdaterOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + LogPrefix: "[OIDCProvider]", + }) + e.u.Start(context.TODO()) + + return e +} + +func (o *OIDCProvider) Credentials(ctx context.Context) (*Credentials, error) { + return o.u.Credentials(ctx) +} + +func (o *OIDCProvider) Stop(ctx context.Context) { + o.u.Stop(ctx) +} + +func (o *OIDCProvider) getCredentials(ctx context.Context) (*Credentials, error) { + roleArn := o.roleArn + oidcProviderArn := o.oidcProviderArn + tokenFile := o.oidcTokenFile + if roleArn == "" || oidcProviderArn == "" || tokenFile == "" { + return nil, NewNotEnableError(errors.New("roleArn, oidcProviderArn or oidcTokenFile is empty")) + } + + tokenData, err := os.ReadFile(tokenFile) + if err != nil { + return nil, err + } + token := string(tokenData) + return o.assumeRoleWithOIDC(ctx, roleArn, oidcProviderArn, token) +} + +type oidcResponse struct { + Credentials *credentialsInResponse `json:"Credentials"` +} + +type credentialsInResponse 
struct { + AccessKeyId string `json:"AccessKeyId"` + AccessKeySecret string `json:"AccessKeySecret"` + SecurityToken string `json:"SecurityToken"` + Expiration string `json:"Expiration"` +} + +func (o *OIDCProvider) assumeRoleWithOIDC(ctx context.Context, roleArn, oidcProviderArn, token string) (*Credentials, error) { + reqOpts := newCommonRequest() + reqOpts.Domain = o.stsEndpoint + reqOpts.Scheme = o.stsScheme + reqOpts.Method = "POST" + reqOpts.QueryParams["Timestamp"] = getTimeInFormatISO8601() + reqOpts.QueryParams["Action"] = "AssumeRoleWithOIDC" + reqOpts.QueryParams["Format"] = "JSON" + reqOpts.QueryParams["RoleArn"] = roleArn + reqOpts.QueryParams["OIDCProviderArn"] = oidcProviderArn + reqOpts.BodyParams["OIDCToken"] = token + if o.durationSeconds != "" { + reqOpts.QueryParams["DurationSeconds"] = o.durationSeconds + } + if o.policy != "" { + reqOpts.BodyParams["Policy"] = o.policy + } + reqOpts.QueryParams["RoleSessionName"] = o.sessionName + reqOpts.QueryParams["Version"] = "2015-04-01" + reqOpts.QueryParams["SignatureNonce"] = getUUID() + reqOpts.Headers["Accept-Encoding"] = "identity" + reqOpts.Headers["content-type"] = "application/x-www-form-urlencoded" + reqOpts.URL = reqOpts.BuildURL() + + req, err := http.NewRequest(reqOpts.Method, reqOpts.URL, strings.NewReader(getURLFormedMap(reqOpts.BodyParams))) + if err != nil { + return nil, err + } + for k, v := range reqOpts.Headers { + req.Header.Set(k, v) + } + req.Header.Set("User-Agent", UserAgent) + req = req.WithContext(ctx) + + if debugMode { + for _, item := range genDebugReqMessages(req) { + o.logger().Debug(item) + } + } + + resp, err := o.client.Do(req) + if err != nil { + return nil, fmt.Errorf("request %s failed: %w", req.URL, err) + } + defer resp.Body.Close() + + if debugMode { + for _, item := range genDebugRespMessages(resp) { + o.logger().Debug(item) + } + } + + data, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + var obj oidcResponse + if err := json.Unmarshal(data, &obj); err != nil { + return nil, err + } + if obj.Credentials == nil || obj.Credentials.AccessKeySecret == "" { + return nil, fmt.Errorf("call AssumeRoleWithOIDC failed, got unexpected body: %s", + strings.ReplaceAll(string(data), "\n", " ")) + } + + exp, err := time.Parse("2006-01-02T15:04:05Z", obj.Credentials.Expiration) + if err != nil { + return nil, err + } + return &Credentials{ + AccessKeyId: obj.Credentials.AccessKeyId, + AccessKeySecret: obj.Credentials.AccessKeySecret, + SecurityToken: obj.Credentials.SecurityToken, + Expiration: exp, + }, nil +} + +func (o *OIDCProvider) logger() Logger { + if o.Logger != nil { + return o.Logger + } + return defaultLog +} + +func (o *OIDCProviderOptions) applyDefaults() { + if o.Timeout <= 0 { + o.Timeout = defaultClientTimeout + } + if o.Transport == nil { + ts := http.DefaultTransport.(*http.Transport).Clone() + o.Transport = ts + } + if o.STSEndpoint == "" { + o.STSEndpoint = defaultSTSEndpoint + } else { + o.STSEndpoint = strings.TrimRight(o.STSEndpoint, "/") + } + + if strings.HasPrefix(o.STSEndpoint, "https://") { + o.stsScheme = "HTTPS" + o.STSEndpoint = strings.TrimPrefix(o.STSEndpoint, "https://") + } else if strings.HasPrefix(o.STSEndpoint, "http://") { + o.stsScheme = "HTTP" + o.STSEndpoint = strings.TrimPrefix(o.STSEndpoint, "http://") + } + if o.stsScheme == "" { + o.stsScheme = defaultSTSScheme + } + o.stsScheme = strings.ToUpper(o.stsScheme) + + if o.SessionName == "" { + o.SessionName = defaultSessionName + } + if o.ExpiryWindow == 0 { + o.ExpiryWindow = 
defaultExpiryWindowForAssumeRole + if o.TokenDuration > 0 && o.TokenDuration <= o.ExpiryWindow { + o.ExpiryWindow = o.TokenDuration / 2 + } + } + if o.EnvRoleArn == "" { + o.EnvRoleArn = defaultEnvRoleArn + } + if o.EnvOIDCProviderArn == "" { + o.EnvOIDCProviderArn = defaultEnvOIDCProviderArn + } + if o.EnvOIDCTokenFile == "" { + o.EnvOIDCTokenFile = defaultEnvOIDCTokenFile + } + if o.Logger == nil { + o.Logger = defaultLog + } +} + +func (o *OIDCProviderOptions) getRoleArn() string { + if o.RoleArn != "" { + return o.RoleArn + } + return os.Getenv(o.EnvRoleArn) +} + +func (o *OIDCProviderOptions) getOIDCProviderArn() string { + if o.OIDCProviderArn != "" { + return o.OIDCProviderArn + } + return os.Getenv(o.EnvOIDCProviderArn) +} + +func (o *OIDCProviderOptions) getOIDCTokenFile() string { + if o.OIDCTokenFile != "" { + return o.OIDCTokenFile + } + return os.Getenv(o.EnvOIDCTokenFile) +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/provider.go new file mode 100644 index 0000000000..90e8b63bd1 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/provider.go @@ -0,0 +1,24 @@ +package provider + +import ( + "context" + "fmt" + "os" + "path" + "runtime" +) + +var UserAgent = "" + +type CredentialsProvider interface { + Credentials(ctx context.Context) (*Credentials, error) +} + +type Stopper interface { + Stop(ctx context.Context) +} + +func init() { + name := path.Base(os.Args[0]) + UserAgent = fmt.Sprintf("%s %s/%s ack-ram-tool/provider/%s", name, runtime.GOOS, runtime.GOARCH, runtime.Version()) +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/req.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/req.go new file mode 100644 index 0000000000..3d2f0b3c52 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/req.go @@ -0,0 +1,174 @@ +package provider + +import ( + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "math/rand" + "net/http" + "net/url" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func shaHmac1(source, secret string) string { + key := []byte(secret) + h := hmac.New(sha1.New, key) + h.Write([]byte(source)) + signedBytes := h.Sum(nil) + signedString := base64.StdEncoding.EncodeToString(signedBytes) + return signedString +} + +type commonRequest struct { + Scheme string + Method string + Domain string + RegionId string + URL string + ReadTimeout time.Duration + ConnectTimeout time.Duration + isInsecure *bool + BodyParams map[string]string + userAgent map[string]string + QueryParams map[string]string + Headers map[string]string + + queries string +} + +func newCommonRequest() *commonRequest { + return &commonRequest{ + BodyParams: make(map[string]string), + QueryParams: make(map[string]string), + Headers: make(map[string]string), + } +} + +// BuildURL returns a url +func (request *commonRequest) BuildURL() string { + url := fmt.Sprintf("%s://%s", strings.ToLower(request.Scheme), request.Domain) + request.queries = "/?" 
+ getURLFormedMap(request.QueryParams) + return url + request.queries +} + +func getURLFormedMap(source map[string]string) (urlEncoded string) { + urlEncoder := url.Values{} + for key, value := range source { + urlEncoder.Add(key, value) + } + urlEncoded = urlEncoder.Encode() + return +} + +// BuildStringToSign returns BuildStringToSign +func (request *commonRequest) BuildStringToSign() (stringToSign string) { + signParams := make(map[string]string) + for key, value := range request.QueryParams { + signParams[key] = value + } + + for key, value := range request.BodyParams { + signParams[key] = value + } + stringToSign = getURLFormedMap(signParams) + stringToSign = strings.Replace(stringToSign, "+", "%20", -1) + stringToSign = strings.Replace(stringToSign, "*", "%2A", -1) + stringToSign = strings.Replace(stringToSign, "%7E", "~", -1) + stringToSign = url.QueryEscape(stringToSign) + stringToSign = request.Method + "&%2F&" + stringToSign + return +} + +func getTimeInFormatISO8601() (timeStr string) { + return time.Now().UTC().Format("2006-01-02T15:04:05Z") +} + +type uuid [16]byte + +const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + +func randStringBytes(n int) string { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[r.Intn(len(letterBytes))] + } + return string(b) +} + +func newUUID() uuid { + ns := uuid{} + safeRandom(ns[:]) + u := newFromHash(md5.New(), ns, randStringBytes(16)) + u[6] = (u[6] & 0x0f) | (byte(2) << 4) + u[8] = u[8]&(0xff>>2) | (0x02 << 6) + + return u +} + +func newFromHash(h hash.Hash, ns uuid, name string) uuid { + u := uuid{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} + +func safeRandom(dest []byte) { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + if _, err := r.Read(dest); err != nil { + panic(err) + } +} + +func (u uuid) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = '-' + hex.Encode(buf[9:13], u[4:6]) + buf[13] = '-' + hex.Encode(buf[14:18], u[6:8]) + buf[18] = '-' + hex.Encode(buf[19:23], u[8:10]) + buf[23] = '-' + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} + +func getUUID() (uuidHex string) { + uuid := newUUID() + uuidHex = hex.EncodeToString(uuid[:]) + return +} + +func genDebugReqMessages(req *http.Request) []string { + var ret []string + ret = append(ret, fmt.Sprintf("%s %s", req.Method, req.URL.String())) + ret = append(ret, "Request Headers:") + for k, vs := range req.Header { + ret = append(ret, fmt.Sprintf(" %s: %s", k, strings.Join(vs, ", "))) + } + return ret +} + +func genDebugRespMessages(resp *http.Response) []string { + var ret []string + ret = append(ret, fmt.Sprintf("Response Status: %s", resp.Status)) + ret = append(ret, "Response Headers:") + for k, vs := range resp.Header { + ret = append(ret, fmt.Sprintf(" %s: %s", k, strings.Join(vs, ", "))) + } + return ret +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/rolearn_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/rolearn_provider.go new file mode 100644 index 0000000000..7b55ebc9b2 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/rolearn_provider.go @@ -0,0 +1,240 @@ +package provider + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "time" +) + +type RoleArnProvider struct { + u *Updater + + client *http.Client + + stsEndpoint 
string + stsScheme string + sessionName string + + policy string + externalId string + durationSeconds string + + roleArn string + cp CredentialsProvider + + Logger Logger +} + +type RoleArnProviderOptions struct { + STSEndpoint string + stsScheme string + SessionName string + + TokenDuration time.Duration + Policy string + ExternalId string + + Timeout time.Duration + Transport http.RoundTripper + + ExpiryWindow time.Duration + RefreshPeriod time.Duration + Logger Logger +} + +func NewRoleArnProvider(cp CredentialsProvider, roleArn string, opts RoleArnProviderOptions) *RoleArnProvider { + opts.applyDefaults() + + client := &http.Client{ + Transport: opts.Transport, + Timeout: opts.Timeout, + } + e := &RoleArnProvider{ + client: client, + stsEndpoint: opts.STSEndpoint, + stsScheme: opts.stsScheme, + sessionName: opts.SessionName, + policy: opts.Policy, + externalId: opts.ExternalId, + roleArn: roleArn, + cp: cp, + Logger: opts.Logger, + } + if opts.TokenDuration >= time.Second*900 { + ds := int64(opts.TokenDuration.Seconds()) + e.durationSeconds = fmt.Sprintf("%d", ds) + } + + e.u = NewUpdater(e.getCredentials, UpdaterOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + LogPrefix: "[RoleArnProvider]", + }) + e.u.Start(context.TODO()) + + return e +} + +func (r *RoleArnProvider) Credentials(ctx context.Context) (*Credentials, error) { + return r.u.Credentials(ctx) +} + +func (r *RoleArnProvider) Stop(ctx context.Context) { + r.u.Stop(ctx) + if s, ok := r.cp.(Stopper); ok { + s.Stop(ctx) + } +} + +func (r *RoleArnProvider) getCredentials(ctx context.Context) (*Credentials, error) { + return r.assumeRole(ctx, r.roleArn) +} + +type roleArnResponse struct { + Credentials *credentialsInResponse `json:"Credentials"` +} + +func (r *RoleArnProvider) assumeRole(ctx context.Context, roleArn string) (*Credentials, error) { + cred, err := r.cp.Credentials(ctx) + if err != nil { + return nil, err + } + + reqOpts := newCommonRequest() + reqOpts.Domain = r.stsEndpoint + reqOpts.Scheme = r.stsScheme + reqOpts.Method = "POST" + reqOpts.QueryParams["Timestamp"] = getTimeInFormatISO8601() + reqOpts.QueryParams["AccessKeyId"] = cred.AccessKeyId + reqOpts.QueryParams["Action"] = "AssumeRole" + reqOpts.QueryParams["Format"] = "JSON" + reqOpts.QueryParams["RoleArn"] = roleArn + if r.durationSeconds != "" { + reqOpts.QueryParams["DurationSeconds"] = r.durationSeconds + } + if r.policy != "" { + reqOpts.BodyParams["Policy"] = r.policy + } + if r.externalId != "" { + reqOpts.QueryParams["ExternalId"] = r.externalId + } + reqOpts.QueryParams["RoleSessionName"] = r.sessionName + reqOpts.QueryParams["SignatureMethod"] = "HMAC-SHA1" + reqOpts.QueryParams["SignatureVersion"] = "1.0" + reqOpts.QueryParams["Version"] = "2015-04-01" + reqOpts.QueryParams["SignatureNonce"] = getUUID() + if cred.SecurityToken != "" { + reqOpts.QueryParams["SecurityToken"] = cred.SecurityToken + } + signature := shaHmac1(reqOpts.BuildStringToSign(), cred.AccessKeySecret+"&") + reqOpts.QueryParams["Signature"] = signature + + reqOpts.Headers["Accept-Encoding"] = "identity" + reqOpts.Headers["content-type"] = "application/x-www-form-urlencoded" + reqOpts.URL = reqOpts.BuildURL() + + req, err := http.NewRequest(reqOpts.Method, reqOpts.URL, nil) + if err != nil { + return nil, err + } + for k, v := range reqOpts.Headers { + req.Header.Set(k, v) + } + req.Header.Set("User-Agent", UserAgent) + req = req.WithContext(ctx) + + if debugMode { + for _, item := range genDebugReqMessages(req) { + 
r.logger().Debug(item) + } + } + + resp, err := r.client.Do(req) + if err != nil { + return nil, fmt.Errorf("request %s failed: %w", req.URL, err) + } + defer resp.Body.Close() + + if debugMode { + for _, item := range genDebugRespMessages(resp) { + r.logger().Debug(item) + } + } + + data, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + var obj roleArnResponse + if err := json.Unmarshal(data, &obj); err != nil { + return nil, err + } + if obj.Credentials == nil || obj.Credentials.AccessKeySecret == "" { + return nil, fmt.Errorf("call AssumeRole failed, got unexpected body: %s", + strings.ReplaceAll(string(data), "\n", " ")) + } + + exp, err := time.Parse("2006-01-02T15:04:05Z", obj.Credentials.Expiration) + if err != nil { + return nil, err + } + return &Credentials{ + AccessKeyId: obj.Credentials.AccessKeyId, + AccessKeySecret: obj.Credentials.AccessKeySecret, + SecurityToken: obj.Credentials.SecurityToken, + Expiration: exp, + }, nil +} + +func (r *RoleArnProvider) logger() Logger { + if r.Logger != nil { + return r.Logger + } + return defaultLog +} + +func (o *RoleArnProviderOptions) applyDefaults() { + if o.Timeout <= 0 { + o.Timeout = defaultClientTimeout + } + if o.Transport == nil { + ts := http.DefaultTransport.(*http.Transport).Clone() + o.Transport = ts + } + if o.STSEndpoint == "" { + o.STSEndpoint = defaultSTSEndpoint + } else { + o.STSEndpoint = strings.TrimRight(o.STSEndpoint, "/") + } + + if strings.HasPrefix(o.STSEndpoint, "https://") { + o.stsScheme = "HTTPS" + o.STSEndpoint = strings.TrimPrefix(o.STSEndpoint, "https://") + } else if strings.HasPrefix(o.STSEndpoint, "http://") { + o.stsScheme = "HTTP" + o.STSEndpoint = strings.TrimPrefix(o.STSEndpoint, "http://") + } + if o.stsScheme == "" { + o.stsScheme = defaultSTSScheme + } + o.stsScheme = strings.ToUpper(o.stsScheme) + + if o.SessionName == "" { + o.SessionName = defaultSessionName + } + if o.ExpiryWindow == 0 { + o.ExpiryWindow = defaultExpiryWindowForAssumeRole + if o.TokenDuration > 0 && o.TokenDuration <= o.ExpiryWindow { + o.ExpiryWindow = o.TokenDuration / 2 + } + } + if o.Logger == nil { + o.Logger = defaultLog + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/semaphore_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/semaphore_provider.go new file mode 100644 index 0000000000..9164dd8b1b --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/semaphore_provider.go @@ -0,0 +1,48 @@ +package provider + +import ( + "context" + "fmt" + "golang.org/x/sync/semaphore" +) + +type SemaphoreProvider struct { + weighted *semaphore.Weighted + + cp CredentialsProvider +} + +type SemaphoreProviderOptions struct { + MaxWeight int64 +} + +func NewSemaphoreProvider(cp CredentialsProvider, opts SemaphoreProviderOptions) *SemaphoreProvider { + opts.applyDefaults() + + w := semaphore.NewWeighted(opts.MaxWeight) + return &SemaphoreProvider{ + weighted: w, + cp: cp, + } +} + +func (p *SemaphoreProvider) Credentials(ctx context.Context) (*Credentials, error) { + if err := p.weighted.Acquire(ctx, 1); err != nil { + return nil, fmt.Errorf("acquire semaphore: %w", err) + } + defer p.weighted.Release(1) + + return p.cp.Credentials(ctx) +} + +func (o *SemaphoreProviderOptions) applyDefaults() { + if o.MaxWeight <= 0 { + o.MaxWeight = 1 + } +} + +func (p *SemaphoreProvider) Stop(ctx context.Context) { + if s, ok := p.cp.(Stopper); ok { + s.Stop(ctx) + } +} diff --git 
a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ststoken_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ststoken_provider.go new file mode 100644 index 0000000000..fcabdac8f8 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ststoken_provider.go @@ -0,0 +1,35 @@ +package provider + +import ( + "context" + "errors" + "time" +) + +type STSTokenProvider struct { + cred *Credentials +} + +func NewSTSTokenProvider(accessKeyId, accessKeySecret, securityToken string) *STSTokenProvider { + return &STSTokenProvider{ + cred: &Credentials{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + SecurityToken: securityToken, + }, + } +} + +func (a *STSTokenProvider) Credentials(ctx context.Context) (*Credentials, error) { + if a.cred.AccessKeyId == "" || a.cred.AccessKeySecret == "" || a.cred.SecurityToken == "" { + return nil, NewNotEnableError( + errors.New("AccessKeyId, AccessKeySecret or SecurityToken is empty")) + } + + return a.cred.DeepCopy(), nil +} + +func (a *STSTokenProvider) SetExpiration(exp time.Time) *STSTokenProvider { + a.cred.Expiration = exp + return a +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/updater.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/updater.go new file mode 100644 index 0000000000..705f436dd6 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/updater.go @@ -0,0 +1,206 @@ +package provider + +import ( + "context" + "fmt" + "sync" + "time" +) + +type getCredentialsFunc func(ctx context.Context) (*Credentials, error) + +type Updater struct { + expiryWindow time.Duration + refreshPeriod time.Duration + + // for fix below case: + // * both auth.Signer and credential.Credential are not concurrent safe + expiryWindowForRefreshLoop time.Duration + + getCredentials func(ctx context.Context) (*Credentials, error) + + cred *Credentials + lockForCred sync.RWMutex + + Logger Logger + nowFunc func() time.Time + logPrefix string + + doneCh chan struct{} + stopped bool +} + +type UpdaterOptions struct { + ExpiryWindow time.Duration + RefreshPeriod time.Duration + Logger Logger + LogPrefix string +} + +func NewUpdater(getter getCredentialsFunc, opts UpdaterOptions) *Updater { + u := &Updater{ + expiryWindow: opts.ExpiryWindow, + refreshPeriod: opts.RefreshPeriod, + expiryWindowForRefreshLoop: opts.RefreshPeriod + opts.RefreshPeriod/2, + getCredentials: getter, + cred: nil, + lockForCred: sync.RWMutex{}, + Logger: opts.Logger, + nowFunc: time.Now, + logPrefix: opts.LogPrefix, + doneCh: make(chan struct{}), + } + return u +} + +func (u *Updater) Start(ctx context.Context) { + if u.refreshPeriod <= 0 { + return + } + + go u.startRefreshLoop(ctx) +} + +func (u *Updater) Stop(shutdownCtx context.Context) { + u.logger().Debug(fmt.Sprintf("%s start to stop...", u.logPrefix)) + + go func() { + u.lockForCred.Lock() + defer u.lockForCred.Unlock() + if u.stopped { + return + } + u.stopped = true + close(u.doneCh) + }() + + select { + case <-shutdownCtx.Done(): + case <-u.doneCh: + } +} + +func (u *Updater) startRefreshLoop(ctx context.Context) { + ticket := time.NewTicker(u.refreshPeriod) + defer ticket.Stop() + +loop: + for { + select { + case <-ctx.Done(): + break loop + case <-u.doneCh: + break loop + case <-ticket.C: + u.refreshCredForLoop(ctx) + } + } +} + +func (u *Updater) Credentials(ctx context.Context) 
(*Credentials, error) { + if u.Expired() { + if err := u.refreshCred(ctx); err != nil { + return nil, err + } + } + + cred := u.getCred().DeepCopy() + return cred, nil +} + +func (u *Updater) refreshCredForLoop(ctx context.Context) { + exp := u.expiration() + + if !u.expired(u.expiryWindowForRefreshLoop) { + return + } + + u.logger().Debug(fmt.Sprintf("%s start refresh credentials, current expiration: %s", + u.logPrefix, exp.Format("2006-01-02T15:04:05Z"))) + + maxRetry := 5 + for i := 0; i < maxRetry; i++ { + err := u.refreshCred(ctx) + if err == nil { + return + } + if IsNotEnableError(err) { + return + } + if i < maxRetry-1 { + time.Sleep(time.Second * time.Duration(i)) + } + } +} + +func (u *Updater) refreshCred(ctx context.Context) error { + cred, err := u.getCredentials(ctx) + if err != nil { + if IsNotEnableError(err) { + return err + } + u.logger().Error(err, fmt.Sprintf("%s refresh credentials failed: %s", u.logPrefix, err)) + return err + } + u.logger().Debug(fmt.Sprintf("%s refreshed credentials, expiration: %s", + u.logPrefix, cred.Expiration.Format("2006-01-02T15:04:05Z"))) + + u.setCred(cred) + return nil +} + +func (u *Updater) setCred(cred *Credentials) { + u.lockForCred.Lock() + defer u.lockForCred.Unlock() + + newCred := cred.DeepCopy() + newCred.Expiration = newCred.Expiration.Round(0) + if u.expiryWindow > 0 { + newCred.Expiration = newCred.Expiration.Add(-u.expiryWindow) + } + u.cred = newCred +} + +func (u *Updater) getCred() *Credentials { + u.lockForCred.RLock() + defer u.lockForCred.RUnlock() + + return u.cred +} + +func (u *Updater) Expired() bool { + return u.expired(0) +} + +func (u *Updater) expired(expiryDelta time.Duration) bool { + exp := u.expiration() + if expiryDelta > 0 { + exp = exp.Add(-expiryDelta) + } + + return exp.Before(u.now()) +} + +func (u *Updater) expiration() time.Time { + cred := u.getCred() + + if cred == nil { + return time.Time{} + } + + return cred.Expiration.Round(0) +} + +func (u *Updater) now() time.Time { + if u.nowFunc == nil { + return time.Now() + } + return u.nowFunc() +} + +func (u *Updater) logger() Logger { + if u.Logger != nil { + return u.Logger + } + return defaultLog +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/uri_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/uri_provider.go new file mode 100644 index 0000000000..962e728e71 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/uri_provider.go @@ -0,0 +1,117 @@ +package provider + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "strings" + "time" +) + +type URIProvider struct { + u *Updater + + url string + + client *commonHttpClient + Logger Logger +} + +type URIProviderOptions struct { + Timeout time.Duration + Transport http.RoundTripper + + ExpiryWindow time.Duration + RefreshPeriod time.Duration + Logger Logger +} + +func NewURIProvider(url string, opts URIProviderOptions) *URIProvider { + opts.applyDefaults() + + client := newCommonHttpClient(opts.Transport, opts.Timeout) + client.logger = opts.Logger + + e := &URIProvider{ + url: url, + client: client, + Logger: opts.Logger, + } + e.u = NewUpdater(e.getCredentials, UpdaterOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + LogPrefix: "[URIProvider]", + }) + e.u.Start(context.TODO()) + + return e +} + +func (e *URIProvider) Credentials(ctx context.Context) (*Credentials, error) { + return 
e.u.Credentials(ctx) +} + +func (e *URIProvider) Stop(ctx context.Context) { + e.u.Stop(ctx) +} + +func (e *URIProvider) getCredentials(ctx context.Context) (*Credentials, error) { + if e.url == "" { + return nil, NewNotEnableError(errors.New("URL is empty")) + } + + data, err := e.client.send(ctx, http.MethodGet, e.url, http.Header{}, nil) + if err != nil { + return nil, err + } + + var obj ecsMetadataStsResponse + if err := json.Unmarshal([]byte(data), &obj); err != nil { + return nil, fmt.Errorf("parse credentials failed: %w", err) + } + if obj.AccessKeyId == "" || obj.AccessKeySecret == "" { + return nil, fmt.Errorf("parse credentials got unexpected data: %s", + strings.ReplaceAll(data, "\n", " ")) + } + + var exp time.Time + if obj.Expiration != "" { + exp, err = time.Parse("2006-01-02T15:04:05Z", obj.Expiration) + if err != nil { + return nil, fmt.Errorf("parse Expiration failed: %w", err) + } + } + + return &Credentials{ + AccessKeyId: obj.AccessKeyId, + AccessKeySecret: obj.AccessKeySecret, + SecurityToken: obj.SecurityToken, + Expiration: exp, + }, nil +} + +func (e *URIProvider) logger() Logger { + if e.Logger != nil { + return e.Logger + } + return defaultLog +} + +func (o *URIProviderOptions) applyDefaults() { + if o.Timeout <= 0 { + o.Timeout = defaultClientTimeout + } + if o.Transport == nil { + ts := http.DefaultTransport.(*http.Transport).Clone() + o.Transport = ts + } + if o.ExpiryWindow == 0 { + o.ExpiryWindow = defaultExpiryWindow + } + if o.Logger == nil { + o.Logger = defaultLog + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v1sdk.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v1sdk.go new file mode 100644 index 0000000000..fd2981cc0e --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v1sdk.go @@ -0,0 +1,92 @@ +package provider + +import ( + "context" + "fmt" + "time" +) + +type SignerForV1SDK struct { + p CredentialsProvider + Logger Logger + credentialRetrievalTimeout time.Duration +} + +type SignerForV1SDKOptions struct { + Logger Logger + CredentialRetrievalTimeout time.Duration +} + +func NewSignerForV1SDK(p CredentialsProvider, opts SignerForV1SDKOptions) *SignerForV1SDK { + opts.applyDefaults() + + return &SignerForV1SDK{ + p: p, + Logger: opts.Logger, + credentialRetrievalTimeout: opts.CredentialRetrievalTimeout, + } +} + +func (s *SignerForV1SDK) GetName() string { + return "HMAC-SHA1" +} + +func (s *SignerForV1SDK) GetType() string { + return "" +} + +func (s *SignerForV1SDK) GetVersion() string { + return "1.0" +} + +func (s *SignerForV1SDK) GetAccessKeyId() (string, error) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), s.credentialRetrievalTimeout) + defer cancel() + cred, err := s.p.Credentials(timeoutCtx) + if err != nil { + return "", err + } + return cred.AccessKeyId, nil +} + +func (s *SignerForV1SDK) GetExtraParam() map[string]string { + timeoutCtx, cancel := context.WithTimeout(context.Background(), s.credentialRetrievalTimeout) + defer cancel() + cred, err := s.p.Credentials(timeoutCtx) + if err != nil { + s.logger().Error(err, fmt.Sprintf("get credentials failed: %s", err)) + return nil + } + if cred.SecurityToken != "" { + return map[string]string{"SecurityToken": cred.SecurityToken} + } + return nil +} + +func (s *SignerForV1SDK) Sign(stringToSign, secretSuffix string) string { + timeoutCtx, cancel := context.WithTimeout(context.Background(), s.credentialRetrievalTimeout) + defer cancel() + 
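+ // The bounded context above caps credential retrieval for this signature; a stalled upstream provider surfaces as an error from Credentials rather than hanging the Sign call.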
cred, err := s.p.Credentials(timeoutCtx) + if err != nil { + s.logger().Error(err, fmt.Sprintf("get credentials failed: %s", err)) + return "" + } + secret := cred.AccessKeySecret + secretSuffix + return shaHmac1(stringToSign, secret) +} + +func (s *SignerForV1SDK) logger() Logger { + if s.Logger != nil { + return s.Logger + } + return defaultLog +} + +func (o *SignerForV1SDKOptions) applyDefaults() { + if o.Logger == nil { + o.Logger = defaultLog + } + if o.CredentialRetrievalTimeout <= 0 { + o.CredentialRetrievalTimeout = defaultTimeout + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v2sdk.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v2sdk.go new file mode 100644 index 0000000000..713695993f --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v2sdk.go @@ -0,0 +1,87 @@ +package provider + +import ( + "context" + "time" +) + +var defaultTimeout = time.Minute * 10 + +type CredentialForV2SDK struct { + p CredentialsProvider + Logger Logger + credentialRetrievalTimeout time.Duration +} + +type CredentialForV2SDKOptions struct { + Logger Logger + CredentialRetrievalTimeout time.Duration +} + +func NewCredentialForV2SDK(p CredentialsProvider, opts CredentialForV2SDKOptions) *CredentialForV2SDK { + opts.applyDefaults() + + return &CredentialForV2SDK{ + p: p, + Logger: opts.Logger, + credentialRetrievalTimeout: opts.CredentialRetrievalTimeout, + } +} + +func (c *CredentialForV2SDK) GetAccessKeyId() (*string, error) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), c.credentialRetrievalTimeout) + defer cancel() + cred, err := c.p.Credentials(timeoutCtx) + if err != nil { + return nil, err + } + return stringPointer(cred.AccessKeyId), nil +} + +func (c *CredentialForV2SDK) GetAccessKeySecret() (*string, error) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), c.credentialRetrievalTimeout) + defer cancel() + cred, err := c.p.Credentials(timeoutCtx) + if err != nil { + return nil, err + } + return stringPointer(cred.AccessKeySecret), nil +} + +func (c *CredentialForV2SDK) GetSecurityToken() (*string, error) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), c.credentialRetrievalTimeout) + defer cancel() + cred, err := c.p.Credentials(timeoutCtx) + if err != nil { + return nil, err + } + return stringPointer(cred.SecurityToken), nil +} + +func (c *CredentialForV2SDK) GetBearerToken() *string { + return stringPointer("") +} + +func (c *CredentialForV2SDK) GetType() *string { + return stringPointer("CredentialForV2SDK") +} + +func (c *CredentialForV2SDK) logger() Logger { + if c.Logger != nil { + return c.Logger + } + return defaultLog +} + +func (o *CredentialForV2SDKOptions) applyDefaults() { + if o.Logger == nil { + o.Logger = defaultLog + } + if o.CredentialRetrievalTimeout <= 0 { + o.CredentialRetrievalTimeout = defaultTimeout + } +} + +func stringPointer(s string) *string { + return &s +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go index 7d960abaf1..de0acc7f87 100644 --- a/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go @@ -5,6 +5,7 @@ import ( "fmt" "io/ioutil" "os" + "strconv" "time" "github.com/alibabacloud-go/tea/tea" @@ -154,6 +155,9 @@ func (r *OIDCCredential) updateCredential() (err error) { if 
r.Policy != "" { request.QueryParams["Policy"] = r.Policy } + if r.RoleSessionExpiration > 0 { + request.QueryParams["DurationSeconds"] = strconv.Itoa(r.RoleSessionExpiration) + } request.QueryParams["RoleSessionName"] = r.RoleSessionName request.QueryParams["Version"] = "2015-04-01" request.QueryParams["SignatureNonce"] = utils.GetUUID() diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go new file mode 100644 index 0000000000..6504a21864 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go @@ -0,0 +1,18 @@ +package aws + +// AccountIDEndpointMode controls how a resolved AWS account ID is handled for endpoint routing. +type AccountIDEndpointMode string + +const ( + // AccountIDEndpointModeUnset indicates the AWS account ID will not be used for endpoint routing + AccountIDEndpointModeUnset AccountIDEndpointMode = "" + + // AccountIDEndpointModePreferred indicates the AWS account ID will be used for endpoint routing if present + AccountIDEndpointModePreferred = "preferred" + + // AccountIDEndpointModeRequired indicates an error will be returned if the AWS account ID is not resolved from identity + AccountIDEndpointModeRequired = "required" + + // AccountIDEndpointModeDisabled indicates the AWS account ID will be ignored during endpoint routing + AccountIDEndpointModeDisabled = "disabled" +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go index 2264200c16..16000d7927 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -162,6 +162,9 @@ type Config struct { // This variable is sourced from environment variable AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or // the shared config profile attribute request_min_compression_size_bytes RequestMinCompressSizeBytes int64 + + // Controls how a resolved AWS account ID is handled for endpoint routing. + AccountIDEndpointMode AccountIDEndpointMode } // NewConfig returns a new Config pointer that can be chained with builder diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go index 714d4ad85c..98ba770564 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go @@ -90,6 +90,9 @@ type Credentials struct { // The time the credentials will expire at. Should be ignored if CanExpire // is false. Expires time.Time + + // The ID of the account for the credentials. + AccountID string } // Expired returns if the credentials have expired. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go index aa10a9b40f..99edbf3ee6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go @@ -70,6 +70,10 @@ func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found // The SDK will automatically resolve these endpoints per API client using an // internal endpoint resolvers. If you'd like to provide custom endpoint // resolving behavior you can implement the EndpointResolver interface. +// +// Deprecated: This structure was used with the global [EndpointResolver] +// interface, which has been deprecated in favor of service-specific endpoint +// resolution. See the deprecation docs on that interface for more information. 
type Endpoint struct { // The base URL endpoint the SDK API clients will use to make API calls to. // The SDK will suffix URI path and query elements to this endpoint. @@ -124,6 +128,8 @@ type Endpoint struct { } // EndpointSource is the endpoint source type. +// +// Deprecated: The global [Endpoint] structure is deprecated. type EndpointSource int const ( @@ -161,19 +167,25 @@ func (e *EndpointNotFoundError) Unwrap() error { // API clients will fallback to attempting to resolve the endpoint using its // internal default endpoint resolver. // -// Deprecated: See EndpointResolverWithOptions +// Deprecated: The global endpoint resolution interface is deprecated. The API +// for endpoint resolution is now unique to each service and is set via the +// EndpointResolverV2 field on service client options. Setting a value for +// EndpointResolver on aws.Config or service client options will prevent you +// from using any endpoint-related service features released after the +// introduction of EndpointResolverV2. You may also encounter broken or +// unexpected behavior when using the old global interface with services that +// use many endpoint-related customizations such as S3. type EndpointResolver interface { ResolveEndpoint(service, region string) (Endpoint, error) } // EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface. // -// Deprecated: See EndpointResolverWithOptionsFunc +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [EndpointResolver]. type EndpointResolverFunc func(service, region string) (Endpoint, error) // ResolveEndpoint calls the wrapped function and returns the results. -// -// Deprecated: See EndpointResolverWithOptions.ResolveEndpoint func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) { return e(service, region) } @@ -184,11 +196,17 @@ func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, // available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error, // API clients will fallback to attempting to resolve the endpoint using its // internal default endpoint resolver. +// +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [EndpointResolver]. type EndpointResolverWithOptions interface { ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) } // EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface. +// +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [EndpointResolver]. type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error) // ResolveEndpoint calls the wrapped function and returns the results. 
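The deprecation notes above all funnel toward the same migration: endpoint overrides move off the global aws.Config and onto the individual service client. As a minimal, hedged sketch (it assumes the s3 service module, which is not part of this patch, and an arbitrary endpoint URL), the replacement pattern looks roughly like this:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Previously: cfg.EndpointResolver = aws.EndpointResolverFunc(...) (deprecated above).
	// Now: override the endpoint on the specific service client instead.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String("https://s3.example.internal")
	})
	_ = client
}
```

Anything still wired through EndpointResolver or EndpointResolverWithOptions keeps working, but per the notes above it opts the client out of endpoint-related features released after EndpointResolverV2.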
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 847cc51a98..fca15f3822 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.26.0" +const goModuleVersion = "1.30.5" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go index b0133f4c88..19d6107c46 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go @@ -112,6 +112,8 @@ type MetricData struct { ResolveEndpointStartTime time.Time ResolveEndpointEndTime time.Time EndpointResolutionDuration time.Duration + GetIdentityStartTime time.Time + GetIdentityEndTime time.Time InThroughput float64 OutThroughput float64 RetryCount int @@ -122,6 +124,7 @@ type MetricData struct { OperationName string PartitionID string Region string + UserAgent string RequestContentLength int64 Stream StreamMetrics Attempts []AttemptMetrics @@ -144,8 +147,6 @@ type AttemptMetrics struct { ConnRequestedTime time.Time ConnObtainedTime time.Time ConcurrencyAcquireDuration time.Duration - CredentialFetchStartTime time.Time - CredentialFetchEndTime time.Time SignStartTime time.Time SignEndTime time.Time SigningDuration time.Duration diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go index db7cda42d9..ff0bc921f1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "runtime" + "sort" "strings" "github.com/aws/aws-sdk-go-v2/aws" @@ -30,6 +31,7 @@ const ( FrameworkMetadata AdditionalMetadata ApplicationIdentifier + FeatureMetadata2 ) func (k SDKAgentKeyType) string() string { @@ -50,6 +52,8 @@ func (k SDKAgentKeyType) string() string { return "lib" case ApplicationIdentifier: return "app" + case FeatureMetadata2: + return "m" case AdditionalMetadata: fallthrough default: @@ -64,9 +68,29 @@ var validChars = map[rune]bool{ '-': true, '.': true, '^': true, '_': true, '`': true, '|': true, '~': true, } +// UserAgentFeature enumerates tracked SDK features. +type UserAgentFeature string + +// Enumerates UserAgentFeature. +const ( + UserAgentFeatureResourceModel UserAgentFeature = "A" // n/a (we don't generate separate resource types) + UserAgentFeatureWaiter = "B" + UserAgentFeaturePaginator = "C" + UserAgentFeatureRetryModeLegacy = "D" // n/a (equivalent to standard) + UserAgentFeatureRetryModeStandard = "E" + UserAgentFeatureRetryModeAdaptive = "F" + UserAgentFeatureS3Transfer = "G" + UserAgentFeatureS3CryptoV1N = "H" // n/a (crypto client is external) + UserAgentFeatureS3CryptoV2 = "I" // n/a + UserAgentFeatureS3ExpressBucket = "J" + UserAgentFeatureS3AccessGrants = "K" // not yet implemented + UserAgentFeatureGZIPRequestCompression = "L" +) + // RequestUserAgent is a build middleware that set the User-Agent for the request. 
type RequestUserAgent struct { sdkAgent, userAgent *smithyhttp.UserAgentBuilder + features map[UserAgentFeature]struct{} } // NewRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the @@ -87,6 +111,7 @@ func NewRequestUserAgent() *RequestUserAgent { r := &RequestUserAgent{ sdkAgent: sdkAgent, userAgent: userAgent, + features: map[UserAgentFeature]struct{}{}, } addSDKMetadata(r) @@ -191,6 +216,12 @@ func (u *RequestUserAgent) AddUserAgentKeyValue(key, value string) { u.userAgent.AddKeyValue(strings.Map(rules, key), strings.Map(rules, value)) } +// AddUserAgentFeature adds the feature ID to the tracking list to be emitted +// in the final User-Agent string. +func (u *RequestUserAgent) AddUserAgentFeature(feature UserAgentFeature) { + u.features[feature] = struct{}{} +} + // AddSDKAgentKey adds the component identified by name to the User-Agent string. func (u *RequestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) { // TODO: should target sdkAgent @@ -227,6 +258,9 @@ func (u *RequestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildI func (u *RequestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) { const userAgent = "User-Agent" updateHTTPHeader(request, userAgent, u.userAgent.Build()) + if len(u.features) > 0 { + updateHTTPHeader(request, userAgent, buildFeatureMetrics(u.features)) + } } func (u *RequestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) { @@ -259,3 +293,13 @@ func rules(r rune) rune { return '-' } } + +func buildFeatureMetrics(features map[UserAgentFeature]struct{}) string { + fs := make([]string, 0, len(features)) + for f := range features { + fs = append(fs, string(f)) + } + + sort.Strings(fs) + return fmt.Sprintf("%s/%s", FeatureMetadata2.string(), strings.Join(fs, ",")) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go index dc703d482d..b645fbdf13 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go @@ -2,12 +2,15 @@ package retry import ( "context" + "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" "strconv" "strings" "time" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/aws-sdk-go-v2/aws" awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/internal/sdk" @@ -39,6 +42,10 @@ type Attempt struct { requestCloner RequestCloner } +// define the threshold at which we will consider certain kind of errors to be probably +// caused by clock skew +const skewThreshold = 4 * time.Minute + // NewAttemptMiddleware returns a new Attempt retry middleware. 
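For a sense of what the feature tracking above actually emits: buildFeatureMetrics sorts the recorded feature IDs and joins them under the `m` SDK-agent key, so a request that used standard retries and gzip request compression would carry an `m/E,L` token in its User-Agent. A small self-contained sketch of that formatting (the feature selection here is illustrative, not taken from this patch):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Stand-ins for UserAgentFeatureRetryModeStandard ("E") and
	// UserAgentFeatureGZIPRequestCompression ("L").
	features := map[string]struct{}{"E": {}, "L": {}}
	fs := make([]string, 0, len(features))
	for f := range features {
		fs = append(fs, f)
	}
	sort.Strings(fs)
	fmt.Println("m/" + strings.Join(fs, ",")) // prints: m/E,L
}
```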
func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt { m := &Attempt{ @@ -86,6 +93,9 @@ func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeIn AttemptClockSkew: attemptClockSkew, }) + // Setting clock skew to be used on other context (like signing) + ctx = internalcontext.SetAttemptSkewContext(ctx, attemptClockSkew) + var attemptResult AttemptResult out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next) attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata) @@ -185,6 +195,8 @@ func (r *Attempt) handleAttempt( return out, attemptResult, nopRelease, err } + err = wrapAsClockSkew(ctx, err) + //------------------------------ // Is Retryable and Should Retry //------------------------------ @@ -247,6 +259,37 @@ func (r *Attempt) handleAttempt( return out, attemptResult, releaseRetryToken, err } +// errors that, if detected when we know there's a clock skew, +// can be retried and have a high chance of success +var possibleSkewCodes = map[string]struct{}{ + "InvalidSignatureException": {}, + "SignatureDoesNotMatch": {}, + "AuthFailure": {}, +} + +var definiteSkewCodes = map[string]struct{}{ + "RequestExpired": {}, + "RequestInTheFuture": {}, + "RequestTimeTooSkewed": {}, +} + +// wrapAsClockSkew checks if this error could be related to a clock skew +// error and if so, wrap the error. +func wrapAsClockSkew(ctx context.Context, err error) error { + var v interface{ ErrorCode() string } + if !errors.As(err, &v) { + return err + } + if _, ok := definiteSkewCodes[v.ErrorCode()]; ok { + return &retryableClockSkewError{Err: err} + } + _, isPossibleSkewCode := possibleSkewCodes[v.ErrorCode()] + if skew := internalcontext.GetAttemptSkewContext(ctx); skew > skewThreshold && isPossibleSkewCode { + return &retryableClockSkewError{Err: err} + } + return err +} + // MetricsHeader attaches SDK request metric header for retries to the transport type MetricsHeader struct{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go index 987affdde6..acd8d1cc3d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go @@ -2,6 +2,7 @@ package retry import ( "errors" + "fmt" "net" "net/url" "strings" @@ -199,3 +200,23 @@ func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary { return aws.TrueTernary } + +// retryableClockSkewError marks errors that can be caused by clock skew +// (difference between server time and client time). +// This is returned when there's certain confidence that adjusting the client time +// could allow a retry to succeed +type retryableClockSkewError struct{ Err error } + +func (e *retryableClockSkewError) Error() string { + return fmt.Sprintf("Probable clock skew error: %v", e.Err) +} + +// Unwrap returns the wrapped error. 
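To make the classification concrete: an error whose ErrorCode() is in definiteSkewCodes (for example RequestTimeTooSkewed) is always wrapped as retryable, while a possibleSkewCodes match such as InvalidSignatureException is wrapped only once the measured attempt skew exceeds the four-minute skewThreshold. The sketch below restates that decision table outside the middleware; mockAPIError and isSkewRetryable are illustrative names, since wrapAsClockSkew and its code tables are unexported:

```go
package retrysketch

import "time"

// mockAPIError is a stand-in for a service error; only ErrorCode matters here.
type mockAPIError struct{ code string }

func (e *mockAPIError) Error() string     { return e.code }
func (e *mockAPIError) ErrorCode() string { return e.code }

// isSkewRetryable restates the wrapAsClockSkew decision: definite skew codes
// always qualify; possible skew codes qualify only when the skew measured for
// the attempt exceeds the 4-minute threshold.
func isSkewRetryable(err *mockAPIError, measuredSkew time.Duration) bool {
	switch err.ErrorCode() {
	case "RequestExpired", "RequestInTheFuture", "RequestTimeTooSkewed":
		return true
	case "InvalidSignatureException", "SignatureDoesNotMatch", "AuthFailure":
		return measuredSkew > 4*time.Minute
	}
	return false
}
```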
+func (e *retryableClockSkewError) Unwrap() error { + return e.Err +} + +// RetryableError allows the retryer to retry this request +func (e *retryableClockSkewError) RetryableError() bool { + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go index ca738f234b..734e548bd6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go @@ -38,7 +38,6 @@ var RequiredSignedHeaders = Rules{ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Expected-Bucket-Owner": struct{}{}, "X-Amz-Grant-Full-control": struct{}{}, "X-Amz-Grant-Read": struct{}{}, "X-Amz-Grant-Read-Acp": struct{}{}, @@ -46,7 +45,6 @@ var RequiredSignedHeaders = Rules{ "X-Amz-Grant-Write-Acp": struct{}{}, "X-Amz-Metadata-Directive": struct{}{}, "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, "X-Amz-Server-Side-Encryption": struct{}{}, "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, "X-Amz-Server-Side-Encryption-Context": struct{}{}, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go index febeb0482d..a9db6433de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go @@ -11,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/internal/sdk" @@ -301,22 +300,7 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} } - mctx := metrics.Context(ctx) - - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.CredentialFetchStartTime = sdk.NowTime() - } - } - credentials, err := s.credentialsProvider.Retrieve(ctx) - - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.CredentialFetchEndTime = sdk.NowTime() - } - } - if err != nil { return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} } @@ -337,20 +321,7 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl }) } - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.SignStartTime = sdk.NowTime() - } - } - err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...) 
- - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.SignEndTime = sdk.NowTime() - } - } - if err != nil { return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go index bb61904e1d..7ed91d5bac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go @@ -1,48 +1,41 @@ -// Package v4 implements signing for AWS V4 signer +// Package v4 implements the AWS signature version 4 algorithm (commonly known +// as SigV4). // -// Provides request signing for request that need to be signed with -// AWS V4 Signatures. +// For more information about SigV4, see [Signing AWS API requests] in the IAM +// user guide. // -// # Standalone Signer +// While this implementation CAN work in an external context, it is developed +// primarily for SDK use and you may encounter fringe behaviors around header +// canonicalization. // -// Generally using the signer outside of the SDK should not require any additional +// # Pre-escaping a request URI // -// The signer does this by taking advantage of the URL.EscapedPath method. If your request URI requires +// AWS v4 signature validation requires that the canonical string's URI path +// component must be the escaped form of the HTTP request's path. +// +// The Go HTTP client will perform escaping automatically on the HTTP request. +// This may cause signature validation errors because the request differs from +// the URI path or query from which the signature was generated. // -// additional escaping you many need to use the URL.Opaque to define what the raw URI should be sent -// to the service as. +// Because of this, we recommend that you explicitly escape the request when +// using this signer outside of the SDK to prevent possible signature mismatch. +// This can be done by setting URL.Opaque on the request. The signer will +// prefer that value, falling back to the return of URL.EscapedPath if unset. // -// The signer will first check the URL.Opaque field, and use its value if set. -// The signer does require the URL.Opaque field to be set in the form of: +// When setting URL.Opaque you must do so in the form of: // // "///" // // // e.g. // "//example.com/some/path" // -// The leading "//" and hostname are required or the URL.Opaque escaping will -// not work correctly. -// -// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() -// method and using the returned value. -// -// AWS v4 signature validation requires that the canonical string's URI path -// element must be the URI escaped form of the HTTP request's path. -// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html -// -// The Go HTTP client will perform escaping automatically on the request. Some -// of these escaping may cause signature validation errors because the HTTP -// request differs from the URI path or query that the signature was generated. -// https://golang.org/pkg/net/url/#URL.EscapedPath +// The leading "//" and hostname are required or the escaping will not work +// correctly. // -// Because of this, it is recommended that when using the signer outside of the -// SDK that explicitly escaping the request prior to being signed is preferable, -// and will help prevent signature validation errors. This can be done by setting -// the URL.Opaque or URL.RawPath. 
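Both the removed and the rewritten comment describe the same standalone-signing mechanism, which is easier to see in code. A hedged sketch of pre-escaping a request before signing it outside the SDK (the host and path are arbitrary examples):

```go
package main

import (
	"log"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Pin the exact escaped path via URL.Opaque, in the required
	// "//<hostname>/<path>" form; the signer checks Opaque first and only
	// falls back to URL.EscapedPath(), so the canonical string matches the
	// bytes actually sent on the wire.
	req.URL.Opaque = "//example.com/some/path%20with%20spaces"
	_ = req // hand off to the v4 signer as usual
}
```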
The SDK will use URL.Opaque first and then -// call URL.EscapedPath() if Opaque is not set. +// The TestStandaloneSign unit test provides a complete example of using the +// signer outside of the SDK and pre-escaping the URI path. // -// Test `TestStandaloneSign` provides a complete example of using the signer -// outside of the SDK and pre-escaping the URI path. +// [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html package v4 import ( @@ -401,7 +394,18 @@ func (s *httpSigner) buildCredentialScope() string { func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) { query := url.Values{} unsignedHeaders := http.Header{} + + // A list of headers to be converted to lower case to mitigate a limitation from S3 + lowerCaseHeaders := map[string]string{ + "X-Amz-Expected-Bucket-Owner": "x-amz-expected-bucket-owner", // see #2508 + "X-Amz-Request-Payer": "x-amz-request-payer", // see #2764 + } + for k, h := range header { + if newKey, ok := lowerCaseHeaders[k]; ok { + k = newKey + } + if r.IsValid(k) { query[k] = h } else { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index d5e6071fa2..8325c8a0be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,101 @@ +# v1.27.33 (2024-09-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.32 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.31 (2024-08-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.30 (2024-08-23) + +* **Bug Fix**: Don't fail credentials unit tests if credentials are found on a file + +# v1.27.29 (2024-08-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.28 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.27 (2024-07-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.26 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.25 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.24 (2024-07-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.23 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.22 (2024-06-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.21 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.20 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.19 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.18 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.17 (2024-06-03) + +* **Documentation**: Add deprecation docs to global endpoint resolution interfaces. These APIs were previously deprecated with the introduction of service-specific endpoint resolution (EndpointResolverV2 and BaseEndpoint on service client options). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.16 (2024-05-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.15 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.14 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.13 (2024-05-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.12 (2024-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.11 (2024-04-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.10 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.27.9 (2024-03-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go index 50582d89d5..d5226cb043 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go @@ -80,6 +80,9 @@ var defaultAWSConfigResolvers = []awsConfigResolver{ // Sets the RequestMinCompressSizeBytes if present in env var or shared config profile resolveRequestMinCompressSizeBytes, + + // Sets the AccountIDEndpointMode if present in env var or shared config profile + resolveAccountIDEndpointMode, } // A Config represents a generic configuration value or set of values. This type diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go index 88550198cc..3a06f1412a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go @@ -80,6 +80,9 @@ const ( awsRequestMinCompressionSizeBytes = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES" awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH" + + awsAccountIDEnv = "AWS_ACCOUNT_ID" + awsAccountIDEndpointModeEnv = "AWS_ACCOUNT_ID_ENDPOINT_MODE" ) var ( @@ -290,6 +293,9 @@ type EnvConfig struct { // will only bypass the modified endpoint routing and signing behaviors // associated with the feature. S3DisableExpressAuth *bool + + // Indicates whether account ID will be required/ignored in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode } // loadEnvConfig reads configuration values from the OS's environment variables. @@ -309,6 +315,7 @@ func NewEnvConfig() (EnvConfig, error) { setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys) setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys) if creds.HasKeys() { + creds.AccountID = os.Getenv(awsAccountIDEnv) creds.SessionToken = os.Getenv(awsSessionTokenEnvVar) cfg.Credentials = creds } @@ -389,6 +396,10 @@ func NewEnvConfig() (EnvConfig, error) { return cfg, err } + if err := setAIDEndPointModeFromEnvVal(&cfg.AccountIDEndpointMode, []string{awsAccountIDEndpointModeEnv}); err != nil { + return cfg, err + } + return cfg, nil } @@ -417,6 +428,10 @@ func (c EnvConfig) getRequestMinCompressSizeBytes(context.Context) (int64, bool, return *c.RequestMinCompressSizeBytes, true, nil } +func (c EnvConfig) getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) { + return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil +} + // GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified, // and not 0. 
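Putting the new account-ID plumbing together: the mode can come from the AWS_ACCOUNT_ID_ENDPOINT_MODE environment variable, the shared config file, or load options, with load options normally consulted first. A minimal sketch of the programmatic path, using the WithAccountIDEndpointMode helper introduced later in this patch:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Equivalent in effect to exporting AWS_ACCOUNT_ID_ENDPOINT_MODE=disabled,
	// but set explicitly so it does not depend on the process environment.
	cfg, err := config.LoadDefaultConfig(context.Background(),
		config.WithAccountIDEndpointMode(aws.AccountIDEndpointModeDisabled),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // cfg.AccountIDEndpointMode now applies to all clients built from it.
}
```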
func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { @@ -491,6 +506,28 @@ func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error { return nil } +func setAIDEndPointModeFromEnvVal(m *aws.AccountIDEndpointMode, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + switch value { + case "preferred": + *m = aws.AccountIDEndpointModePreferred + case "required": + *m = aws.AccountIDEndpointModeRequired + case "disabled": + *m = aws.AccountIDEndpointModeDisabled + default: + return fmt.Errorf("invalid value for environment variable, %s=%s, must be preferred/required/disabled", k, value) + } + break + } + return nil +} + // GetRegion returns the AWS Region if set in the environment. Returns an empty // string if not set. func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index 00ee204918..411bda659d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.27.9" +const goModuleVersion = "1.27.33" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go index 06596c1b7c..5f643977b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go @@ -215,6 +215,8 @@ type LoadOptions struct { // Whether S3 Express auth is disabled. S3DisableExpressAuth *bool + + AccountIDEndpointMode aws.AccountIDEndpointMode } func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { @@ -278,6 +280,10 @@ func (o LoadOptions) getRequestMinCompressSizeBytes(ctx context.Context) (int64, return *o.RequestMinCompressSizeBytes, true, nil } +func (o LoadOptions) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) { + return o.AccountIDEndpointMode, len(o.AccountIDEndpointMode) > 0, nil +} + // WithRegion is a helper function to construct functional options // that sets Region on config's LoadOptions. Setting the region to // an empty string, will result in the region value being ignored. @@ -323,6 +329,17 @@ func WithRequestMinCompressSizeBytes(RequestMinCompressSizeBytes *int64) LoadOpt } } +// WithAccountIDEndpointMode is a helper function to construct functional options +// that sets AccountIDEndpointMode on config's LoadOptions +func WithAccountIDEndpointMode(m aws.AccountIDEndpointMode) LoadOptionsFunc { + return func(o *LoadOptions) error { + if m != "" { + o.AccountIDEndpointMode = m + } + return nil + } +} + // getDefaultRegion returns DefaultRegion from config's LoadOptions func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) { if len(o.DefaultRegion) == 0 { @@ -824,7 +841,14 @@ func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResol // the EndpointResolver value is ignored. If multiple WithEndpointResolver calls // are made, the last call overrides the previous call values. // -// Deprecated: See WithEndpointResolverWithOptions +// Deprecated: The global endpoint resolution interface is deprecated. 
The API +// for endpoint resolution is now unique to each service and is set via the +// EndpointResolverV2 field on service client options. Use of +// WithEndpointResolver or WithEndpointResolverWithOptions will prevent you +// from using any endpoint-related service features released after the +// introduction of EndpointResolverV2. You may also encounter broken or +// unexpected behavior when using the old global interface with services that +// use many endpoint-related customizations such as S3. func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc { return func(o *LoadOptions) error { o.EndpointResolver = v @@ -844,6 +868,9 @@ func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.En // that sets the EndpointResolverWithOptions on LoadOptions. If the EndpointResolverWithOptions is set to nil, // the EndpointResolver value is ignored. If multiple WithEndpointResolver calls // are made, the last call overrides the previous call values. +// +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [WithEndpointResolver]. func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc { return func(o *LoadOptions) error { o.EndpointResolverWithOptions = v diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go index 13745fc98f..043781f1f7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go @@ -225,6 +225,23 @@ func getRequestMinCompressSizeBytes(ctx context.Context, configs configs) (value return } +// accountIDEndpointModeProvider provides access to the AccountIDEndpointMode +type accountIDEndpointModeProvider interface { + getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) +} + +func getAccountIDEndpointMode(ctx context.Context, configs configs) (value aws.AccountIDEndpointMode, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(accountIDEndpointModeProvider); ok { + value, found, err = p.getAccountIDEndpointMode(ctx) + if err != nil || found { + break + } + } + } + return +} + // ec2IMDSRegionProvider provides access to the ec2 imds region // configuration value type ec2IMDSRegionProvider interface { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go index fde2e3980e..41009c7da0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go @@ -166,6 +166,22 @@ func resolveRequestMinCompressSizeBytes(ctx context.Context, cfg *aws.Config, co return nil } +// resolveAccountIDEndpointMode extracts the AccountIDEndpointMode from the configs slice's +// SharedConfig or EnvConfig +func resolveAccountIDEndpointMode(ctx context.Context, cfg *aws.Config, configs configs) error { + m, found, err := getAccountIDEndpointMode(ctx, configs) + if err != nil { + return err + } + + if !found { + m = aws.AccountIDEndpointModePreferred + } + + cfg.AccountIDEndpointMode = m + return nil +} + // resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default // region if region had not been resolved from other sources. 
func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go index c546cb7d0f..d7a2b5307e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go @@ -115,6 +115,9 @@ const ( requestMinCompressionSizeBytes = "request_min_compression_size_bytes" s3DisableExpressSessionAuthKey = "s3_disable_express_session_auth" + + accountIDKey = "aws_account_id" + accountIDEndpointMode = "account_id_endpoint_mode" ) // defaultSharedConfigProfile allows for swapping the default profile for testing @@ -341,6 +344,8 @@ type SharedConfig struct { // will only bypass the modified endpoint routing and signing behaviors // associated with the feature. S3DisableExpressAuth *bool + + AccountIDEndpointMode aws.AccountIDEndpointMode } func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) { @@ -1124,12 +1129,17 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er return fmt.Errorf("failed to load %s from shared config, %w", requestMinCompressionSizeBytes, err) } + if err := updateAIDEndpointMode(&c.AccountIDEndpointMode, section, accountIDEndpointMode); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", accountIDEndpointMode, err) + } + // Shared Credentials creds := aws.Credentials{ AccessKeyID: section.String(accessKeyIDKey), SecretAccessKey: section.String(secretAccessKey), SessionToken: section.String(sessionTokenKey), Source: fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]), + AccountID: section.String(accountIDKey), } if creds.HasKeys() { @@ -1177,6 +1187,26 @@ func updateDisableRequestCompression(disable **bool, sec ini.Section, key string return nil } +func updateAIDEndpointMode(m *aws.AccountIDEndpointMode, sec ini.Section, key string) error { + if !sec.Has(key) { + return nil + } + + v := sec.String(key) + switch v { + case "preferred": + *m = aws.AccountIDEndpointModePreferred + case "required": + *m = aws.AccountIDEndpointModeRequired + case "disabled": + *m = aws.AccountIDEndpointModeDisabled + default: + return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be preferred/required/disabled", key, v) + } + + return nil +} + func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) { if c.RequestMinCompressSizeBytes == nil { return 0, false, nil @@ -1191,6 +1221,10 @@ func (c SharedConfig) getDisableRequestCompression(ctx context.Context) (bool, b return *c.DisableRequestCompression, true, nil } +func (c SharedConfig) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) { + return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil +} + func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error { if !section.Has(key) { return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 399f089697..dfb3dc94b7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,96 @@ +# v1.17.32 (2024-09-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.31 (2024-09-03) + +* **Dependency Update**: Updated 
to the latest SDK module versions + +# v1.17.30 (2024-08-26) + +* **Bug Fix**: Save SSO cached token expiry in UTC to ensure cross-SDK compatibility. + +# v1.17.29 (2024-08-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.28 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.27 (2024-07-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.26 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.25 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.24 (2024-07-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.23 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.22 (2024-06-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.21 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.20 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.19 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.18 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.17 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.16 (2024-05-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.15 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.14 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.13 (2024-05-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.12 (2024-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.11 (2024-04-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.10 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.17.9 (2024-03-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go index 9a869f8954..dc291c97cd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go @@ -128,6 +128,7 @@ type GetCredentialsOutput struct { AccessKeyID string SecretAccessKey string Token string + AccountID string } // EndpointError is an error returned from the endpoint service diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go index 0c3c4d6826..2386153a9e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go @@ -152,6 +152,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { SecretAccessKey: resp.SecretAccessKey, SessionToken: resp.Token, Source: ProviderName, + AccountID: resp.AccountID, } if resp.Expiration != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go 
b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index 2b4ff3895b..e7467a7d98 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.9" +const goModuleVersion = "1.17.32" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go index fe9345e287..911fcc3272 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go @@ -167,6 +167,9 @@ type CredentialProcessResponse struct { // The date on which the current credentials expire. Expiration *time.Time + + // The ID of the account for credentials + AccountID string `json:"AccountId"` } // Retrieve executes the credential process command and returns the @@ -208,6 +211,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { AccessKeyID: resp.AccessKeyID, SecretAccessKey: resp.SecretAccessKey, SessionToken: resp.SessionToken, + AccountID: resp.AccountID, } // Handle expiration diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go index 3b97e6dd40..46ae2f9231 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go @@ -225,7 +225,7 @@ func (r *rfc3339) UnmarshalJSON(bytes []byte) (err error) { } func (r *rfc3339) MarshalJSON() ([]byte, error) { - value := time.Time(*r).Format(time.RFC3339) + value := time.Time(*r).UTC().Format(time.RFC3339) // Use JSON unmarshal to unescape the quoted value making use of JSON's // quoting rules. 
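The v1.17.30 change note above ("Save SSO cached token expiry in UTC to ensure cross-SDK compatibility") corresponds to the one-line MarshalJSON change in sso_cached_token.go just shown. A minimal self-contained sketch of the behavioral difference, not part of this patch (the sample timestamp and zone are illustrative only):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // A cached-token expiry carrying a non-UTC zone, as a round-trip
        // through UnmarshalJSON may produce.
        expiry := time.Date(2024, 8, 26, 10, 0, 0, 0, time.FixedZone("UTC+9", 9*60*60))

        // Old behavior: the local zone offset leaks into the shared SSO cache file.
        fmt.Println(expiry.Format(time.RFC3339)) // 2024-08-26T10:00:00+09:00

        // New behavior: normalize to UTC so other SDKs reading the same cache
        // file see a canonical trailing "Z".
        fmt.Println(expiry.UTC().Format(time.RFC3339)) // 2024-08-26T01:00:00Z
    }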
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go index b3cf7853e7..8c230be8eb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go @@ -129,6 +129,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { CanExpire: true, Expires: time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(), Source: ProviderName, + AccountID: p.options.AccountID, }, nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go index 289707b6de..4c7f7993f5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go @@ -308,6 +308,11 @@ func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, err return aws.Credentials{Source: ProviderName}, err } + var accountID string + if resp.AssumedRoleUser != nil { + accountID = getAccountID(resp.AssumedRoleUser) + } + return aws.Credentials{ AccessKeyID: *resp.Credentials.AccessKeyId, SecretAccessKey: *resp.Credentials.SecretAccessKey, @@ -316,5 +321,6 @@ func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, err CanExpire: true, Expires: *resp.Credentials.Expiration, + AccountID: accountID, }, nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go index ddaf6df6ce..b4b7197086 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go @@ -5,6 +5,7 @@ import ( "fmt" "io/ioutil" "strconv" + "strings" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -135,6 +136,11 @@ func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err) } + var accountID string + if resp.AssumedRoleUser != nil { + accountID = getAccountID(resp.AssumedRoleUser) + } + // InvalidIdentityToken error is a temporary error that can occur // when assuming an Role with a JWT web identity token. 
@@ -145,6 +151,19 @@ func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials Source: WebIdentityProviderName, CanExpire: true, Expires: *resp.Credentials.Expiration, + AccountID: accountID, } return value, nil } + +// extract accountID from arn with format "arn:partition:service:region:account-id:[resource-section]" +func getAccountID(u *types.AssumedRoleUser) string { + if u.Arn == nil { + return "" + } + parts := strings.Split(*u.Arn, ":") + if len(parts) < 5 { + return "" + } + return parts[4] +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index e07fb5ca70..821fd36539 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,56 @@ +# v1.16.13 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.12 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.11 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.10 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.9 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.8 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.7 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.6 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.5 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.4 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.3 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.16.0 (2024-03-21) * **Feature**: Add config switch `DisableDefaultTimeout` that allows you to disable the default operation timeout (5 seconds) for IMDS calls. 
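The v1.16.0 feature note above introduces the `DisableDefaultTimeout` switch on the IMDS client options. A minimal consumer-side sketch, assuming the switch is simply set at construction time; the GetRegion call is just an example operation and is not part of this patch:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
    )

    func main() {
        client := imds.New(imds.Options{
            // Opt out of the default 5-second operation timeout and rely on
            // caller-supplied context deadlines instead.
            DisableDefaultTimeout: true,
        })
        out, err := client.GetRegion(context.TODO(), &imds.GetRegionInput{})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("instance region:", out.Region)
    }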
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index a44cd1b79f..949c9ca46b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.0" +const goModuleVersion = "1.16.13" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go index 0c5a2d40c9..24db8e144c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go @@ -5,6 +5,7 @@ import ( "fmt" v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" "github.com/aws/aws-sdk-go-v2/internal/sdk" "github.com/aws/smithy-go" "github.com/aws/smithy-go/auth" @@ -39,7 +40,10 @@ func (v *V4SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request } hash := v4.GetPayloadHash(ctx) - err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, sdk.NowTime(), func(o *v4.SignerOptions) { + signingTime := sdk.NowTime() + skew := internalcontext.GetAttemptSkewContext(ctx) + signingTime = signingTime.Add(skew) + err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, signingTime, func(o *v4.SignerOptions) { o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) o.Logger = v.Logger diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index 86f5b13725..f6833e54c2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,56 @@ +# v1.3.17 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.16 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.5 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.4 (2024-03-18) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index d25782e9ce..3bcd95b80c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.4" +const goModuleVersion = "1.3.17" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go new file mode 100644 index 0000000000..f0c283d394 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go @@ -0,0 +1,52 @@ +package context + +import ( + "context" + "time" + + "github.com/aws/smithy-go/middleware" +) + +type s3BackendKey struct{} +type checksumInputAlgorithmKey struct{} +type clockSkew struct{} + +const ( + // S3BackendS3Express identifies the S3Express backend + S3BackendS3Express = "S3Express" +) + +// SetS3Backend stores the resolved endpoint backend within the request +// context, which is required for a variety of custom S3 behaviors. +func SetS3Backend(ctx context.Context, typ string) context.Context { + return middleware.WithStackValue(ctx, s3BackendKey{}, typ) +} + +// GetS3Backend retrieves the stored endpoint backend within the context. +func GetS3Backend(ctx context.Context) string { + v, _ := middleware.GetStackValue(ctx, s3BackendKey{}).(string) + return v +} + +// SetChecksumInputAlgorithm sets the request checksum algorithm on the +// context. +func SetChecksumInputAlgorithm(ctx context.Context, value string) context.Context { + return middleware.WithStackValue(ctx, checksumInputAlgorithmKey{}, value) +} + +// GetChecksumInputAlgorithm returns the checksum algorithm from the context. 
+func GetChecksumInputAlgorithm(ctx context.Context) string { + v, _ := middleware.GetStackValue(ctx, checksumInputAlgorithmKey{}).(string) + return v +} + +// SetAttemptSkewContext sets the clock skew value on the context +func SetAttemptSkewContext(ctx context.Context, v time.Duration) context.Context { + return middleware.WithStackValue(ctx, clockSkew{}, v) +} + +// GetAttemptSkewContext gets the clock skew value from the context +func GetAttemptSkewContext(ctx context.Context) time.Duration { + x, _ := middleware.GetStackValue(ctx, clockSkew{}).(time.Duration) + return x +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go index ba6032758a..91414afe81 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go @@ -12,11 +12,12 @@ type Partition struct { // PartitionConfig provides the endpoint metadata for an AWS region or partition. type PartitionConfig struct { - Name string `json:"name"` - DnsSuffix string `json:"dnsSuffix"` - DualStackDnsSuffix string `json:"dualStackDnsSuffix"` - SupportsFIPS bool `json:"supportsFIPS"` - SupportsDualStack bool `json:"supportsDualStack"` + Name string `json:"name"` + DnsSuffix string `json:"dnsSuffix"` + DualStackDnsSuffix string `json:"dualStackDnsSuffix"` + SupportsFIPS bool `json:"supportsFIPS"` + SupportsDualStack bool `json:"supportsDualStack"` + ImplicitGlobalRegion string `json:"implicitGlobalRegion"` } type RegionOverrides struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go index 849beffd7d..5f0779997d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go @@ -13,11 +13,12 @@ var partitions = []Partition{ ID: "aws", RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", - SupportsFIPS: true, - SupportsDualStack: true, + Name: "aws", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + ImplicitGlobalRegion: "us-east-1", }, Regions: map[string]RegionOverrides{ "af-south-1": { @@ -111,6 +112,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ca-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "eu-central-1": { Name: nil, DnsSuffix: nil, @@ -229,11 +237,12 @@ var partitions = []Partition{ ID: "aws-cn", RegionRegex: "^cn\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-cn", - DnsSuffix: "amazonaws.com.cn", - DualStackDnsSuffix: "api.amazonwebservices.com.cn", - SupportsFIPS: true, - SupportsDualStack: true, + Name: "aws-cn", + DnsSuffix: "amazonaws.com.cn", + DualStackDnsSuffix: "api.amazonwebservices.com.cn", + SupportsFIPS: true, + SupportsDualStack: true, + ImplicitGlobalRegion: "cn-northwest-1", }, Regions: map[string]RegionOverrides{ "aws-cn-global": { @@ -263,11 +272,12 @@ var partitions = []Partition{ ID: "aws-us-gov", RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-us-gov", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: 
"api.aws", - SupportsFIPS: true, - SupportsDualStack: true, + Name: "aws-us-gov", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + ImplicitGlobalRegion: "us-gov-west-1", }, Regions: map[string]RegionOverrides{ "aws-us-gov-global": { @@ -297,11 +307,12 @@ var partitions = []Partition{ ID: "aws-iso", RegionRegex: "^us\\-iso\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso", - DnsSuffix: "c2s.ic.gov", - DualStackDnsSuffix: "c2s.ic.gov", - SupportsFIPS: true, - SupportsDualStack: false, + Name: "aws-iso", + DnsSuffix: "c2s.ic.gov", + DualStackDnsSuffix: "c2s.ic.gov", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "us-iso-east-1", }, Regions: map[string]RegionOverrides{ "aws-iso-global": { @@ -331,11 +342,12 @@ var partitions = []Partition{ ID: "aws-iso-b", RegionRegex: "^us\\-isob\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso-b", - DnsSuffix: "sc2s.sgov.gov", - DualStackDnsSuffix: "sc2s.sgov.gov", - SupportsFIPS: true, - SupportsDualStack: false, + Name: "aws-iso-b", + DnsSuffix: "sc2s.sgov.gov", + DualStackDnsSuffix: "sc2s.sgov.gov", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "us-isob-east-1", }, Regions: map[string]RegionOverrides{ "aws-iso-b-global": { @@ -358,23 +370,33 @@ var partitions = []Partition{ ID: "aws-iso-e", RegionRegex: "^eu\\-isoe\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso-e", - DnsSuffix: "cloud.adc-e.uk", - DualStackDnsSuffix: "cloud.adc-e.uk", - SupportsFIPS: true, - SupportsDualStack: false, + Name: "aws-iso-e", + DnsSuffix: "cloud.adc-e.uk", + DualStackDnsSuffix: "cloud.adc-e.uk", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "eu-isoe-west-1", + }, + Regions: map[string]RegionOverrides{ + "eu-isoe-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, }, - Regions: map[string]RegionOverrides{}, }, { ID: "aws-iso-f", RegionRegex: "^us\\-isof\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso-f", - DnsSuffix: "csp.hci.ic.gov", - DualStackDnsSuffix: "csp.hci.ic.gov", - SupportsFIPS: true, - SupportsDualStack: false, + Name: "aws-iso-f", + DnsSuffix: "csp.hci.ic.gov", + DualStackDnsSuffix: "csp.hci.ic.gov", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "us-isof-south-1", }, Regions: map[string]RegionOverrides{}, }, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json index f376f6908a..712d31e3fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -44,6 +44,9 @@ "ap-southeast-4" : { "description" : "Asia Pacific (Melbourne)" }, + "ap-southeast-5" : { + "description" : "Asia Pacific (Malaysia)" + }, "aws-global" : { "description" : "AWS Standard global region" }, @@ -198,7 +201,11 @@ "supportsFIPS" : true }, "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", - "regions" : { } + "regions" : { + "eu-isoe-west-1" : { + "description" : "EU ISOE West" + } + } }, { "id" : "aws-iso-f", "outputs" : { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index 5bb02f574f..a6860c0e23 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,56 @@ +# v2.6.17 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.16 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.5 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.6.4 (2024-03-18) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index bb857bcb97..5403b821c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.6.4" +const goModuleVersion = "2.6.17" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md index c0e54faff2..be61098b46 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.8.1 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. + # v1.8.0 (2024-02-13) * **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
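Taken together, the config hunks earlier in this patch expose the new account-ID endpoint routing mode through three layers: the AWS_ACCOUNT_ID_ENDPOINT_MODE environment variable, the account_id_endpoint_mode shared-config key (parsed through this ini module), and the WithAccountIDEndpointMode functional option, with unset values resolving to "preferred". A minimal sketch of the programmatic path, using only identifiers added by this patch plus the existing LoadDefaultConfig entry point:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
    )

    func main() {
        // Force account-ID-based endpoint routing; equivalent to
        // AWS_ACCOUNT_ID_ENDPOINT_MODE=required in the environment or
        // account_id_endpoint_mode = required in a shared config profile.
        cfg, err := config.LoadDefaultConfig(context.TODO(),
            config.WithAccountIDEndpointMode(aws.AccountIDEndpointModeRequired),
        )
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("resolved mode:", cfg.AccountIDEndpointMode) // "required"
    }

With the mode set to required, the per-service checkAccountID helpers added in this patch fail endpoint resolution when the resolved credentials carry no account ID.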
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go index 6e0b906c34..ef6a38110e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -3,4 +3,4 @@ package ini // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.8.0" +const goModuleVersion = "1.8.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go new file mode 100644 index 0000000000..8e24a3f0a4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go @@ -0,0 +1,42 @@ +package middleware + +import ( + "context" + "sync/atomic" + "time" + + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/smithy-go/middleware" +) + +// AddTimeOffsetMiddleware sets a value representing clock skew on the request context. +// This can be read by other operations (such as signing) to correct the date value they send +// on the request +type AddTimeOffsetMiddleware struct { + Offset *atomic.Int64 +} + +// ID the identifier for AddTimeOffsetMiddleware +func (m *AddTimeOffsetMiddleware) ID() string { return "AddTimeOffsetMiddleware" } + +// HandleBuild sets a value for attemptSkew on the request context if one is set on the client. +func (m AddTimeOffsetMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + if m.Offset != nil { + offset := time.Duration(m.Offset.Load()) + ctx = internalcontext.SetAttemptSkewContext(ctx, offset) + } + return next.HandleBuild(ctx, in) +} + +// HandleDeserialize gets the clock skew context from the context, and if set, sets it on the pointer +// held by AddTimeOffsetMiddleware +func (m *AddTimeOffsetMiddleware) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + if v := internalcontext.GetAttemptSkewContext(ctx); v != 0 { + m.Offset.Store(v.Nanoseconds()) + } + return next.HandleDeserialize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index cac6f926eb..56f89df8d5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.11.4 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. + +# v1.11.3 (2024-06-28) + +* No change notes available for this release. + +# v1.11.2 (2024-03-29) + +* No change notes available for this release. + # v1.11.1 (2024-02-21) * No change notes available for this release. 
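The new AddTimeOffsetMiddleware above shares a single atomic.Int64 between the deserialize phase (where a measured skew is stored) and the build phase (where it is replayed into the context for the signer, per the v4signer_adapter.go hunk earlier in this patch). A self-contained sketch of that arithmetic, assuming the skew has already been measured on a previous attempt; the measurement itself is not shown in this patch:

    package main

    import (
        "fmt"
        "sync/atomic"
        "time"
    )

    func main() {
        var offset atomic.Int64 // lives on the client, shared across attempts

        // After a failed attempt: the server's reported time was three minutes
        // ahead of ours, so record skew = serverTime - clientTime in nanoseconds.
        clientNow := time.Now()
        serverNow := clientNow.Add(3 * time.Minute)
        offset.Store(int64(serverNow.Sub(clientNow)))

        // On the retry's build/sign phase: shift the signing time by the stored
        // skew so the SigV4 date falls inside the server's acceptance window.
        signingTime := time.Now().Add(time.Duration(offset.Load()))
        fmt.Println("corrected signing time:", signingTime.Format(time.RFC3339))
    }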
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index c5ae0f8735..47d97ccfb9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.1" +const goModuleVersion = "1.11.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index ead169d5cf..1203b556e3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,56 @@ +# v1.11.19 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.18 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.17 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.16 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.15 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.14 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.13 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.12 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.11 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.10 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.9 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.8 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.7 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.11.6 (2024-03-18) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index 98bea53bdd..0259bbfb76 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.6" +const goModuleVersion = "1.11.19" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 5a5083094b..a72bd91dfb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,82 @@ +# v1.22.7 (2024-09-04) + +* No change notes available for this release. 
+ +# v1.22.6 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.5 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.4 (2024-07-18) + +* No change notes available for this release. + +# v1.22.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.21.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.12 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.11 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.10 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.9 (2024-05-23) + +* No change notes available for this release. + +# v1.20.8 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.7 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.6 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.20.5 (2024-04-05) + +* No change notes available for this release. + +# v1.20.4 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.20.3 (2024-03-18) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go index fff457735b..a06c6e738f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -14,13 +14,16 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -30,6 +33,9 @@ const ServiceAPIVersion = "2019-06-10" // Client provides the API client to make operations call for AWS Single Sign-On. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -68,6 +74,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -229,15 +237,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -441,6 +450,30 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() @@ -484,6 +517,63 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go index 4b21e8b00a..5ce00b4961 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -30,9 +30,10 @@ func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredenti type GetRoleCredentialsInput struct { - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required. AccessToken *string @@ -113,6 +114,12 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go index e44da697c5..f20e3acbfc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go @@ -29,9 +29,10 @@ func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesI type ListAccountRolesInput struct { - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required. AccessToken *string @@ -118,6 +119,12 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListAccountRolesValidationMiddleware(stack); err != nil { return err } @@ -142,14 +149,6 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack return nil } -// ListAccountRolesAPIClient is a client that implements the ListAccountRoles -// operation. 
-type ListAccountRolesAPIClient interface { - ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) -} - -var _ ListAccountRolesAPIClient = (*Client)(nil) - // ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles type ListAccountRolesPaginatorOptions struct { // The number of items that clients can request per page. @@ -213,6 +212,9 @@ func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListAccountRoles(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -232,6 +234,14 @@ func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func return result, nil } +// ListAccountRolesAPIClient is a client that implements the ListAccountRoles +// operation. +type ListAccountRolesAPIClient interface { + ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) +} + +var _ ListAccountRolesAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go index 2d7add067f..391b567db9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -12,9 +12,10 @@ import ( ) // Lists all AWS accounts assigned to the user. These AWS accounts are assigned by -// the administrator of the account. For more information, see Assign User Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) -// in the IAM Identity Center User Guide. This operation returns a paginated -// response. +// the administrator of the account. For more information, see [Assign User Access]in the IAM Identity +// Center User Guide. This operation returns a paginated response. +// +// [Assign User Access]: https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) { if params == nil { params = &ListAccountsInput{} @@ -32,9 +33,10 @@ func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, op type ListAccountsInput struct { - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required. 
AccessToken *string @@ -116,6 +118,12 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListAccountsValidationMiddleware(stack); err != nil { return err } @@ -140,13 +148,6 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op return nil } -// ListAccountsAPIClient is a client that implements the ListAccounts operation. -type ListAccountsAPIClient interface { - ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) -} - -var _ ListAccountsAPIClient = (*Client)(nil) - // ListAccountsPaginatorOptions is the paginator options for ListAccounts type ListAccountsPaginatorOptions struct { // This is the number of items clients can request per page. @@ -210,6 +211,9 @@ func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Op } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListAccounts(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -229,6 +233,13 @@ func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Op return result, nil } +// ListAccountsAPIClient is a client that implements the ListAccounts operation. +type ListAccountsAPIClient interface { + ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) +} + +var _ ListAccountsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go index 3ee682d19e..456e4a3717 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -12,16 +12,20 @@ import ( // Removes the locally stored SSO tokens from the client-side cache and sends an // API call to the IAM Identity Center service to invalidate the corresponding -// server-side IAM Identity Center sign in session. If a user uses IAM Identity -// Center to access the AWS CLI, the user’s IAM Identity Center sign in session is -// used to obtain an IAM session, as specified in the corresponding IAM Identity -// Center permission set. More specifically, IAM Identity Center assumes an IAM -// role in the target account on behalf of the user, and the corresponding -// temporary AWS credentials are returned to the client. After user logout, any -// existing IAM role sessions that were created by using IAM Identity Center -// permission sets continue based on the duration configured in the permission set. -// For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) -// in the IAM Identity Center User Guide. +// server-side IAM Identity Center sign in session. +// +// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM +// Identity Center sign in session is used to obtain an IAM session, as specified +// in the corresponding IAM Identity Center permission set. 
More specifically, IAM +// Identity Center assumes an IAM role in the target account on behalf of the user, +// and the corresponding temporary AWS credentials are returned to the client. +// +// After user logout, any existing IAM role sessions that were created by using +// IAM Identity Center permission sets continue based on the duration configured in +// the permission set. For more information, see [User authentications]in the IAM Identity Center User +// Guide. +// +// [User authentications]: https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) { if params == nil { params = &LogoutInput{} @@ -39,9 +43,10 @@ func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func type LogoutInput struct { - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required. AccessToken *string @@ -108,6 +113,12 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpLogoutValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go index 3b28e825dd..a93a77cd7f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -169,7 +169,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go index 8bba205f43..d6297fa6a1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go @@ -13,12 +13,22 @@ import ( smithyio "github.com/aws/smithy-go/io" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "io/ioutil" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsRestjson1_deserializeOpGetRoleCredentials struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go index 59456d5dc2..7f6e429fda 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go @@ -6,16 +6,22 @@ // AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web // service that makes it easy for you to assign user access to IAM Identity Center // resources such as the AWS access portal. Users can get AWS account applications -// and roles assigned to them and get federated into the application. Although AWS -// Single Sign-On was renamed, the sso and identitystore API namespaces will -// continue to retain their original name for backward compatibility purposes. For -// more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed) -// . This reference guide describes the IAM Identity Center Portal operations that +// and roles assigned to them and get federated into the application. +// +// Although AWS Single Sign-On was renamed, the sso and identitystore API +// namespaces will continue to retain their original name for backward +// compatibility purposes. For more information, see [IAM Identity Center rename]. +// +// This reference guide describes the IAM Identity Center Portal operations that // you can call programmatically and includes detailed information on data types and -// errors. AWS provides SDKs that consist of libraries and sample code for various +// errors. +// +// AWS provides SDKs that consist of libraries and sample code for various // programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. // The SDKs provide a convenient way to create programmatic access to IAM Identity // Center and other AWS services. For more information about the AWS SDKs, -// including how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/) -// . +// including how to download and install them, see [Tools for Amazon Web Services].
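Returning to the deserializeS3Expires helper added in the deserializers.go hunk above: it deliberately returns (nil, nil) on a parse failure, so an unparseable Expires-style timestamp degrades to a nil field instead of failing the whole response. The same lenient pattern, sketched with only the standard library (smithytime.ParseHTTPDate accepts several date layouts; the stdlib version below checks only the canonical RFC 7231 format):

    package snippets

    import (
        "net/http"
        "time"
    )

    // parseLenientHTTPDate mirrors the deserializeS3Expires pattern: a
    // malformed timestamp yields nil, not an error, so one bad header
    // cannot fail the whole response deserialization.
    func parseLenientHTTPDate(v string) *time.Time {
        t, err := time.Parse(http.TimeFormat, v)
        if err != nil {
            return nil
        }
        return &t
    }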
+// +// [Tools for Amazon Web Services]: http://aws.amazon.com/tools/ +// [IAM Identity Center rename]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed package sso diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go index 76521eec0e..75ae283ef8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go @@ -288,6 +288,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -465,7 +476,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -495,6 +506,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -504,7 +519,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index e98c0f328e..012580e77f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.20.3" +const goModuleVersion = "1.22.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go index 0a00b256e1..081867b3da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go @@ -94,7 +94,7 @@ var partitionRegexp = struct { AwsUsGov *regexp.Regexp }{ - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), @@ -187,6 +187,14 @@ var defaultPartitions = endpoints.Partitions{ 
Region: "ap-south-1", }, }, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-2", + }, + }, endpoints.EndpointKey{ Region: "ap-southeast-1", }: endpoints.Endpoint{ @@ -227,6 +235,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ca-central-1", }, }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + }, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{ @@ -259,6 +275,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-south-1", }, }, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-2", + }, + }, endpoints.EndpointKey{ Region: "eu-west-1", }: endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go index 5dee7e53f4..0ba182e976 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -50,8 +53,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -70,17 +75,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. 
Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -97,8 +105,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -143,6 +152,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go index 8dc02296b1..07ac468e31 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go @@ -25,22 +25,24 @@ type AccountInfo struct { type RoleCredentials struct { // The identifier used for the temporary security credentials. For more - // information, see Using Temporary Security Credentials to Request Access to AWS - // Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. + // information, see [Using Temporary Security Credentials to Request Access to AWS Resources]in the AWS IAM User Guide. + // + // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html AccessKeyId *string // The date on which temporary security credentials expire. Expiration int64 - // The key that is used to sign the request. For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. + // The key that is used to sign the request. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS + // IAM User Guide. + // + // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html SecretAccessKey *string - // The token used for temporary credentials. 
For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. + // The token used for temporary credentials. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS + // IAM User Guide. + // + // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html SessionToken *string noSmithyDocumentSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index c6d5ae92e3..6924f05836 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,82 @@ +# v1.26.7 (2024-09-04) + +* No change notes available for this release. + +# v1.26.6 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.5 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.4 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.3 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.2 (2024-07-03) + +* No change notes available for this release. + +# v1.26.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.25.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.6 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.5 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.4 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.3 (2024-05-23) + +* No change notes available for this release. + +# v1.24.2 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.1 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2024-05-10) + +* **Feature**: Updated request parameters for PKCE support. 
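The PKCE entry above corresponds to the CodeVerifier parameter added to CreateToken and CreateTokenWithIAM later in this patch. For orientation, a verifier/challenge pair per RFC 7636 can be generated as follows (a standalone sketch, not SDK code; newPKCEPair is an illustrative name):

    package snippets

    import (
        "crypto/rand"
        "crypto/sha256"
        "encoding/base64"
    )

    // newPKCEPair returns a code_verifier and its S256 code_challenge as
    // defined by RFC 7636; the verifier is what the new CodeVerifier
    // parameter carries on the token exchange.
    func newPKCEPair() (verifier, challenge string, err error) {
        buf := make([]byte, 32)
        if _, err = rand.Read(buf); err != nil {
            return "", "", err
        }
        verifier = base64.RawURLEncoding.EncodeToString(buf) // 43 chars, inside RFC 7636's 43-128 range
        sum := sha256.Sum256([]byte(verifier))
        challenge = base64.RawURLEncoding.EncodeToString(sum[:])
        return verifier, challenge, nil
    }

The challenge accompanies the initial authorization request; the verifier is what CodeVerifier later carries on the token call.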
+ +# v1.23.5 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.23.4 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.23.3 (2024-03-18) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go index 8dc643bb0c..25cd1c0488 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -14,13 +14,16 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -30,6 +33,9 @@ const ServiceAPIVersion = "2019-06-10" // Client provides the API client to make operations call for AWS SSO OIDC. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -68,6 +74,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -229,15 +237,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -441,6 +450,30 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() @@ -484,6 +517,63 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go index 63f1eeb131..8b829188eb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -32,34 +32,43 @@ func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optF type CreateTokenInput struct { // The unique identifier string for the client or application. This value comes - // from the result of the RegisterClient API. + // from the result of the RegisterClientAPI. // // This member is required. ClientId *string // A secret string generated for the client. This value should come from the - // persisted result of the RegisterClient API. + // persisted result of the RegisterClientAPI. // // This member is required. ClientSecret *string // Supports the following OAuth grant types: Device Code and Refresh Token. // Specify either of the following values, depending on the grant type that you - // want: * Device Code - urn:ietf:params:oauth:grant-type:device_code * Refresh - // Token - refresh_token For information about how to obtain the device code, see - // the StartDeviceAuthorization topic. + // want: + // + // * Device Code - urn:ietf:params:oauth:grant-type:device_code + // + // * Refresh Token - refresh_token + // + // For information about how to obtain the device code, see the StartDeviceAuthorization topic. // // This member is required. GrantType *string // Used only when calling this API for the Authorization Code grant type. The // short-term code is used to identify this authorization request. This grant type - // is currently unsupported for the CreateToken API. + // is currently unsupported for the CreateTokenAPI. Code *string + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + CodeVerifier *string + // Used only when calling this API for the Device Code grant type. This short-term // code is used to identify this authorization request. This comes from the result - // of the StartDeviceAuthorization API. + // of the StartDeviceAuthorizationAPI. DeviceCode *string // Used only when calling this API for the Authorization Code grant type. This @@ -69,16 +78,18 @@ type CreateTokenInput struct { // Used only when calling this API for the Refresh Token grant type. This token is // used to refresh short-term tokens, such as the access token, that might expire. + // // For more information about the features and limitations of the current IAM // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // the [IAM Identity Center OIDC API Reference]. 
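The grant-type values documented above map directly onto CreateToken calls. A sketch of the Device Code exchange, assuming a client ID and secret from RegisterClient and a device code from StartDeviceAuthorization (exchangeDeviceCode is an illustrative name, not SDK code):

    package snippets

    import (
        "context"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/ssooidc"
    )

    // exchangeDeviceCode redeems a device code for an access token using
    // the Device Code grant documented above.
    func exchangeDeviceCode(ctx context.Context, clientID, clientSecret, deviceCode string) (string, error) {
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            return "", err
        }
        out, err := ssooidc.NewFromConfig(cfg).CreateToken(ctx, &ssooidc.CreateTokenInput{
            ClientId:     aws.String(clientID),
            ClientSecret: aws.String(clientSecret),
            GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
            DeviceCode:   aws.String(deviceCode),
        })
        if err != nil {
            return "", err
        }
        return aws.ToString(out.AccessToken), nil
    }

In a real flow the client polls CreateToken until the user approves the request, backing off on AuthorizationPendingException and SlowDownException.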
+ // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // The list of scopes for which authorization is requested. The access token that // is issued is limited to the scopes that are granted. If this value is not // specified, IAM Identity Center authorizes all scopes that are configured for the - // client during the call to RegisterClient . + // client during the call to RegisterClient. Scope []string noSmithyDocumentSerde @@ -86,7 +97,8 @@ type CreateTokenInput struct { type CreateTokenOutput struct { - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. AccessToken *string // Indicates the time in seconds when an access token will expire. @@ -94,18 +106,22 @@ type CreateTokenOutput struct { // The idToken is not implemented or supported. For more information about the // features and limitations of the current IAM Identity Center OIDC implementation, - // see Considerations for Using this Guide in the IAM Identity Center OIDC API - // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . A JSON Web Token (JWT) that identifies who is associated with the issued - // access token. + // see Considerations for Using this Guide in the [IAM Identity Center OIDC API Reference]. + // + // A JSON Web Token (JWT) that identifies who is associated with the issued access + // token. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html IdToken *string // A token that, if present, can be used to refresh a previously issued access - // token that might have expired. For more information about the features and - // limitations of the current IAM Identity Center OIDC implementation, see - // Considerations for Using this Guide in the IAM Identity Center OIDC API - // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide in + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // Used to notify the client that the returned token is an access token. 
The @@ -170,6 +186,12 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpCreateTokenValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go index 6340953894..af04c251a2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go @@ -12,8 +12,8 @@ import ( // Creates and returns access and refresh tokens for clients and applications that // are authenticated using IAM entities. The access token can be used to fetch -// short-term credentials for the assigned AWS accounts or to access application -// APIs using bearer authentication. +// short-term credentials for the assigned Amazon Web Services accounts or to +// access application APIs using bearer authentication. func (c *Client) CreateTokenWithIAM(ctx context.Context, params *CreateTokenWithIAMInput, optFns ...func(*Options)) (*CreateTokenWithIAMOutput, error) { if params == nil { params = &CreateTokenWithIAMInput{} @@ -39,10 +39,15 @@ type CreateTokenWithIAMInput struct { // Supports the following OAuth grant types: Authorization Code, Refresh Token, // JWT Bearer, and Token Exchange. Specify one of the following values, depending - // on the grant type that you want: * Authorization Code - authorization_code * - // Refresh Token - refresh_token * JWT Bearer - - // urn:ietf:params:oauth:grant-type:jwt-bearer * Token Exchange - - // urn:ietf:params:oauth:grant-type:token-exchange + // on the grant type that you want: + // + // * Authorization Code - authorization_code + // + // * Refresh Token - refresh_token + // + // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer + // + // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange // // This member is required. GrantType *string @@ -59,6 +64,11 @@ type CreateTokenWithIAMInput struct { // in the Authorization Code GrantOptions for the application. Code *string + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + CodeVerifier *string + // Used only when calling this API for the Authorization Code grant type. This // value specifies the location of the client or application that has registered to // receive the authorization code. @@ -66,16 +76,21 @@ type CreateTokenWithIAMInput struct { // Used only when calling this API for the Refresh Token grant type. This token is // used to refresh short-term tokens, such as the access token, that might expire. + // // For more information about the features and limitations of the current IAM // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // the [IAM Identity Center OIDC API Reference]. 
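The addTimeOffsetBuild registration above is the per-operation hook for the clock-skew correction noted in the changelog ("Add clock skew correction on all service clients"). Conceptually, the client records the difference between the server-reported time and the local clock in an atomic counter and applies it to later requests. A simplified, standard-library-only sketch of that idea (not the SDK's actual middleware):

    package snippets

    import (
        "sync/atomic"
        "time"
    )

    // recordSkew stores the signed difference between a server-reported
    // Date header and the local clock, in nanoseconds.
    func recordSkew(offset *atomic.Int64, serverDate time.Time) {
        offset.Store(int64(serverDate.Sub(time.Now())))
    }

    // adjustedNow returns the local time corrected by the recorded offset,
    // the kind of value a request signer would use on a skewed host.
    func adjustedNow(offset *atomic.Int64) time.Time {
        return time.Now().Add(time.Duration(offset.Load()))
    }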
+ // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // Used only when calling this API for the Token Exchange grant type. This value // specifies the type of token that the requester can receive. The following values - // are supported: * Access Token - urn:ietf:params:oauth:token-type:access_token * - // Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + // are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token RequestedTokenType *string // The list of scopes for which authorization is requested. The access token that @@ -94,8 +109,9 @@ type CreateTokenWithIAMInput struct { // Used only when calling this API for the Token Exchange grant type. This value // specifies the type of token that is passed as the subject of the exchange. The - // following value is supported: * Access Token - - // urn:ietf:params:oauth:token-type:access_token + // following value is supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token SubjectTokenType *string noSmithyDocumentSerde @@ -103,7 +119,8 @@ type CreateTokenWithIAMInput struct { type CreateTokenWithIAMOutput struct { - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. AccessToken *string // Indicates the time in seconds when an access token will expire. @@ -114,17 +131,21 @@ type CreateTokenWithIAMOutput struct { IdToken *string // Indicates the type of tokens that are issued by IAM Identity Center. The - // following values are supported: * Access Token - - // urn:ietf:params:oauth:token-type:access_token * Refresh Token - - // urn:ietf:params:oauth:token-type:refresh_token + // following values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token IssuedTokenType *string // A token that, if present, can be used to refresh a previously issued access - // token that might have expired. For more information about the features and - // limitations of the current IAM Identity Center OIDC implementation, see - // Considerations for Using this Guide in the IAM Identity Center OIDC API - // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide in + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // The list of scopes for which authorization is granted. 
The access token that is @@ -196,6 +217,12 @@ func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpCreateTokenWithIAMValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go index 09f016ec1e..d8c766c989 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go @@ -41,6 +41,25 @@ type RegisterClientInput struct { // This member is required. ClientType *string + // This IAM Identity Center application ARN is used to define + // administrator-managed configuration for public client access to resources. At + // authorization, the scopes, grants, and redirect URI available to this client + // will be restricted by this application resource. + EntitledApplicationArn *string + + // The list of OAuth 2.0 grant types that are defined by the client. This list is + // used to restrict the token granting flows available to the client. + GrantTypes []string + + // The IAM Identity Center Issuer URL associated with an instance of IAM Identity + // Center. This value is needed for user access to resources through the client. + IssuerUrl *string + + // The list of redirect URI that are defined by the client. At completion of + // authorization, this list is used to restrict what locations the user agent can + // be redirected back to. + RedirectUris []string + // The list of scopes that are defined by the client. Upon authorization, this // list is used to restrict permissions when granting an access token. Scopes []string @@ -128,6 +147,12 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpRegisterClientValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go index c568805b22..7c2b38ba90 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go @@ -30,22 +30,23 @@ func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDevi type StartDeviceAuthorizationInput struct { // The unique identifier string for the client that is registered with IAM - // Identity Center. This value should come from the persisted result of the - // RegisterClient API operation. + // Identity Center. This value should come from the persisted result of the RegisterClientAPI + // operation. // // This member is required. ClientId *string // A secret string that is generated for the client. This value should come from - // the persisted result of the RegisterClient API operation. + // the persisted result of the RegisterClientAPI operation. 
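The new RegisterClientInput fields above (EntitledApplicationArn, GrantTypes, IssuerUrl, RedirectUris) let an administrator constrain what a registered public client may do. A hedged usage sketch, with every string a placeholder:

    package snippets

    import (
        "context"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/ssooidc"
    )

    // registerPublicClient registers a public client restricted to the
    // device code flow. All string values are illustrative placeholders.
    func registerPublicClient(ctx context.Context, client *ssooidc.Client) (*ssooidc.RegisterClientOutput, error) {
        return client.RegisterClient(ctx, &ssooidc.RegisterClientInput{
            ClientName:   aws.String("example-cli"),
            ClientType:   aws.String("public"),
            IssuerUrl:    aws.String("https://identitycenter.amazonaws.com/ssoins-0000000000000000"),
            GrantTypes:   []string{"urn:ietf:params:oauth:grant-type:device_code", "refresh_token"},
            RedirectUris: []string{"http://127.0.0.1:8400/oauth/callback"},
        })
    }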
// // This member is required. ClientSecret *string - // The URL for the Amazon Web Services access portal. For more information, see - // Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // The URL for the Amazon Web Services access portal. For more information, see [Using the Amazon Web Services access portal] // in the IAM Identity Center User Guide. // + // [Using the Amazon Web Services access portal]: https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html + // // This member is required. StartUrl *string @@ -136,6 +137,12 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go index 40b3becb9f..e6058da813 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -163,7 +163,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go index 76a1160ece..05e8c6b7e5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go @@ -13,11 +13,21 @@ import ( smithyio "github.com/aws/smithy-go/io" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type 
awsRestjson1_deserializeOpCreateToken struct { } @@ -581,12 +591,18 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response case strings.EqualFold("InvalidClientMetadataException", errorCode): return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody) + case strings.EqualFold("InvalidRedirectUriException", errorCode): + return awsRestjson1_deserializeErrorInvalidRedirectUriException(response, errorBody) + case strings.EqualFold("InvalidRequestException", errorCode): return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) case strings.EqualFold("InvalidScopeException", errorCode): return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + case strings.EqualFold("UnsupportedGrantTypeException", errorCode): + return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -1158,6 +1174,42 @@ func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Res return output } +func awsRestjson1_deserializeErrorInvalidRedirectUriException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRedirectUriException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRedirectUriException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { output := &types.InvalidRequestException{} var buff [1024]byte @@ -1717,6 +1769,55 @@ func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGran return nil } +func awsRestjson1_deserializeDocumentInvalidRedirectUriException(v **types.InvalidRedirectUriException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRedirectUriException + if *v == nil { + sv = &types.InvalidRedirectUriException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentInvalidRequestException(v 
**types.InvalidRequestException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go index 53cd4f55a0..1d258e5677 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go @@ -6,33 +6,41 @@ // IAM Identity Center OpenID Connect (OIDC) is a web service that enables a // client (such as CLI or a native application) to register with IAM Identity // Center. The service also enables the client to fetch the user’s access token -// upon successful authentication and authorization with IAM Identity Center. IAM -// Identity Center uses the sso and identitystore API namespaces. Considerations -// for Using This Guide Before you begin using this guide, we recommend that you -// first review the following important information about how the IAM Identity -// Center OIDC service works. +// upon successful authentication and authorization with IAM Identity Center. +// +// IAM Identity Center uses the sso and identitystore API namespaces. +// +// # Considerations for Using This Guide +// +// Before you begin using this guide, we recommend that you first review the +// following important information about how the IAM Identity Center OIDC service +// works. +// // - The IAM Identity Center OIDC service currently implements only the portions -// of the OAuth 2.0 Device Authorization Grant standard ( -// https://tools.ietf.org/html/rfc8628 (https://tools.ietf.org/html/rfc8628) ) -// that are necessary to enable single sign-on authentication with the CLI. +// of the OAuth 2.0 Device Authorization Grant standard ([https://tools.ietf.org/html/rfc8628] ) that are necessary to +// enable single sign-on authentication with the CLI. +// // - With older versions of the CLI, the service only emits OIDC access tokens, // so to obtain a new token, users must explicitly re-authenticate. To access the // OIDC flow that supports token refresh and doesn’t require re-authentication, // update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with // support for OIDC token refresh and configurable IAM Identity Center session -// durations. For more information, see Configure Amazon Web Services access -// portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html) -// . +// durations. For more information, see [Configure Amazon Web Services access portal session duration]. +// // - The access tokens provided by this service grant access to all Amazon Web // Services account entitlements assigned to an IAM Identity Center user, not just // a particular application. +// // - The documentation in this guide does not describe the mechanism to convert // the access token into Amazon Web Services Auth (“sigv4”) credentials for use // with IAM-protected Amazon Web Services service endpoints. For more information, -// see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) -// in the IAM Identity Center Portal API Reference Guide. +// see [GetRoleCredentials]in the IAM Identity Center Portal API Reference Guide. +// +// For general information about IAM Identity Center, see [What is IAM Identity Center?] in the IAM Identity +// Center User Guide. // -// For general information about IAM Identity Center, see What is IAM Identity -// Center? 
(https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) -// in the IAM Identity Center User Guide. +// [Configure Amazon Web Services access portal session duration]: https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html +// [GetRoleCredentials]: https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html +// [https://tools.ietf.org/html/rfc8628]: https://tools.ietf.org/html/rfc8628 +// [What is IAM Identity Center?]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go index 94e835e711..d7099721fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go @@ -288,6 +288,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -465,7 +476,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -495,6 +506,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -504,7 +519,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index e81f202fd8..880f8bbeff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.23.3" +const goModuleVersion = "1.26.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go index aa20725343..b4c61ebad9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go @@ 
-94,7 +94,7 @@ var partitionRegexp = struct { AwsUsGov *regexp.Regexp }{ - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), @@ -187,6 +187,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-south-1", }, }, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-2", + }, + }, endpoints.EndpointKey{ Region: "ap-southeast-1", }: endpoints.Endpoint{ @@ -227,6 +235,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ca-central-1", }, }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + }, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{ @@ -259,6 +275,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-south-1", }, }, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-2", + }, + }, endpoints.EndpointKey{ Region: "eu-west-1", }: endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go index b964e7e109..a012e4cb8d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -50,8 +53,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -70,17 +75,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. 
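The AccountIDEndpointMode option introduced at the top of this options.go hunk can also be overridden per client rather than inherited from aws.Config. A small sketch, assuming an existing aws.Config (newClientWithoutAccountIDRouting is an illustrative name):

    package snippets

    import (
        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/ssooidc"
    )

    // newClientWithoutAccountIDRouting opts a single client out of
    // account-ID-based endpoint routing.
    func newClientWithoutAccountIDRouting(cfg aws.Config) *ssooidc.Client {
        return ssooidc.NewFromConfig(cfg, func(o *ssooidc.Options) {
            o.AccountIDEndpointMode = aws.AccountIDEndpointModeDisabled
        })
    }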
+ // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -97,8 +105,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -143,6 +152,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. 
func WithEndpointResolver(v EndpointResolver) func(*Options) {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go
index 754218b78e..04411bd616 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go
@@ -95,6 +95,11 @@ func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value
 ok.String(*v.Code)
 }
+ if v.CodeVerifier != nil {
+ ok := object.Key("codeVerifier")
+ ok.String(*v.CodeVerifier)
+ }
+
 if v.DeviceCode != nil {
 ok := object.Key("deviceCode")
 ok.String(*v.DeviceCode)
@@ -207,6 +212,11 @@ func awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(v *CreateTokenWithI
 ok.String(*v.Code)
 }
+ if v.CodeVerifier != nil {
+ ok := object.Key("codeVerifier")
+ ok.String(*v.CodeVerifier)
+ }
+
 if v.GrantType != nil {
 ok := object.Key("grantType")
 ok.String(*v.GrantType)
@@ -324,6 +334,30 @@ func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput,
 ok.String(*v.ClientType)
 }
+ if v.EntitledApplicationArn != nil {
+ ok := object.Key("entitledApplicationArn")
+ ok.String(*v.EntitledApplicationArn)
+ }
+
+ if v.GrantTypes != nil {
+ ok := object.Key("grantTypes")
+ if err := awsRestjson1_serializeDocumentGrantTypes(v.GrantTypes, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.IssuerUrl != nil {
+ ok := object.Key("issuerUrl")
+ ok.String(*v.IssuerUrl)
+ }
+
+ if v.RedirectUris != nil {
+ ok := object.Key("redirectUris")
+ if err := awsRestjson1_serializeDocumentRedirectUris(v.RedirectUris, ok); err != nil {
+ return err
+ }
+ }
+
 if v.Scopes != nil {
 ok := object.Key("scopes")
 if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil {
@@ -419,6 +453,28 @@ func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDevic
 return nil
 }
+func awsRestjson1_serializeDocumentGrantTypes(v []string, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
+func awsRestjson1_serializeDocumentRedirectUris(v []string, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
 func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error {
 array := value.Array()
 defer array.Close()
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go
index 86b62049fd..2cfe7b48fe 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go
@@ -188,7 +188,7 @@ func (e *InvalidClientMetadataException) ErrorCode() string {
 func (e *InvalidClientMetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
 // Indicates that a request contains an invalid grant. This can occur if a client
-// makes a CreateToken request with an invalid grant type.
+// makes a CreateToken request with an invalid grant type.
 type InvalidGrantException struct {
 Message *string
@@ -217,6 +217,36 @@ func (e *InvalidGrantException) ErrorCode() string {
 }
 func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+// Indicates that one or more redirect URIs in the request are not supported for
+// this operation.
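Editor's aside: to make the serializer additions above concrete, here is a hedged sketch of a RegisterClient call that exercises the new grantTypes and redirectUris fields, and of catching the InvalidRedirectUriException whose declaration follows this aside. All identifiers and values are illustrative and not part of this patch.

package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
)

func main() {
	client := ssooidc.New(ssooidc.Options{Region: "us-east-1"}) // illustrative region

	// Field names mirror the JSON keys serialized above
	// (grantTypes, redirectUris); values are illustrative.
	_, err := client.RegisterClient(context.TODO(), &ssooidc.RegisterClientInput{
		ClientName:   aws.String("example-cli"),
		ClientType:   aws.String("public"),
		GrantTypes:   []string{"authorization_code", "refresh_token"},
		RedirectUris: []string{"http://127.0.0.1:8400/oauth/callback"},
		Scopes:       []string{"sso:account:access"},
	})

	// Unsupported redirect URIs surface as the new error type
	// declared immediately below this sketch.
	var badURI *types.InvalidRedirectUriException
	if errors.As(err, &badURI) {
		log.Fatalf("redirect URI rejected: %s", badURI.ErrorMessage())
	}
}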
+type InvalidRedirectUriException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidRedirectUriException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRedirectUriException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRedirectUriException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRedirectUriException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRedirectUriException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // Indicates that something is wrong with the input to the request. For example, a // required parameter might be missing or out of range. type InvalidRequestException struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 1c50319455..9bcf2568ed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,78 @@ +# v1.30.7 (2024-09-04) + +* No change notes available for this release. + +# v1.30.6 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.5 (2024-08-22) + +* No change notes available for this release. + +# v1.30.4 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.29.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.13 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.12 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.11 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.10 (2024-05-23) + +* No change notes available for this release. 
+ +# v1.28.9 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.8 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.7 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.28.6 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.28.5 (2024-03-18) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go index 4d18dc86bd..acd2b8e7a1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -15,15 +15,18 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -34,6 +37,9 @@ const ServiceAPIVersion = "2011-06-15" // Service. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -72,6 +78,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -233,15 +241,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -445,6 +454,30 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() @@ -488,6 +521,63 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go index e0e2c9c2e8..e74fc8ba9f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -16,69 +16,99 @@ import ( // Amazon Web Services resources. These temporary credentials consist of an access // key ID, a secret access key, and a security token. Typically, you use AssumeRole // within your account or for cross-account access. For a comparison of AssumeRole -// with other API operations that produce temporary credentials, see Requesting -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. Permissions The temporary security credentials created by -// AssumeRole can be used to make API calls to any Amazon Web Services service -// with the following exception: You cannot call the Amazon Web Services STS -// GetFederationToken or GetSessionToken API operations. (Optional) You can pass -// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Passing policies to this operation returns new temporary credentials. The -// resulting session's permissions are the intersection of the role's -// identity-based policy and the session policies. You can use the role's temporary -// credentials in subsequent Amazon Web Services API calls to access resources in -// the account that owns the role. You cannot use session policies to grant more -// permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. When you create a role, you create two policies: a role -// trust policy that specifies who can assume the role, and a permissions policy -// that specifies what can be done with the role. You specify the trusted principal -// that is allowed to assume the role in the role trust policy. To assume a role -// from a different account, your Amazon Web Services account must be trusted by -// the role. The trust relationship is defined in the role's trust policy when the -// role is created. That trust policy states which accounts are allowed to delegate -// that access to users in the account. A user who wants to access a role in a -// different account must also have permissions that are delegated from the account -// administrator. 
The administrator must attach a policy that allows the user to -// call AssumeRole for the ARN of the role in the other account. To allow a user -// to assume a role in the same account, you can do either of the following: +// with other API operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the +// IAM User Guide. +// +// # Permissions +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any Amazon Web Services service with the following exception: You +// cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken +// API operations. +// +// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a +// single JSON policy document to use as an inline session policy. You can also +// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed +// session policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. Passing policies to this operation +// returns new temporary credentials. The resulting session's permissions are the +// intersection of the role's identity-based policy and the session policies. You +// can use the role's temporary credentials in subsequent Amazon Web Services API +// calls to access resources in the account that owns the role. You cannot use +// session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see [Session Policies]in the IAM User Guide. +// +// When you create a role, you create two policies: a role trust policy that +// specifies who can assume the role, and a permissions policy that specifies what +// can be done with the role. You specify the trusted principal that is allowed to +// assume the role in the role trust policy. +// +// To assume a role from a different account, your Amazon Web Services account +// must be trusted by the role. The trust relationship is defined in the role's +// trust policy when the role is created. That trust policy states which accounts +// are allowed to delegate that access to users in the account. +// +// A user who wants to access a role in a different account must also have +// permissions that are delegated from the account administrator. The administrator +// must attach a policy that allows the user to call AssumeRole for the ARN of the +// role in the other account. +// +// To allow a user to assume a role in the same account, you can do either of the +// following: +// // - Attach a policy to the user that allows the user to call AssumeRole (as long // as the role's trust policy trusts the account). +// // - Add the user as a principal directly in the role's trust policy. // // You can do either because the role’s trust policy acts as an IAM resource-based // policy. When a resource-based policy grants access to a principal in the same // account, no additional identity-based policy is required. For more information -// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// in the IAM User Guide. Tags (Optional) You can pass tag key-value pairs to your -// session. These tags are called session tags. 
For more information about session -// tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. An administrator must grant you the permissions necessary -// to pass session tags. The administrator can also create granular permissions to -// allow you to pass only specific session tags. For more information, see -// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles -// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. Using MFA with AssumeRole (Optional) You can include -// multi-factor authentication (MFA) information when you call AssumeRole . This is -// useful for cross-account scenarios to ensure that the user that assumes the role -// has been authenticated with an Amazon Web Services MFA device. In that scenario, -// the trust policy of the role being assumed includes a condition that tests for -// MFA authentication. If the caller does not include valid MFA information, the -// request to assume the role is denied. The condition in a trust policy that tests -// for MFA authentication might look like the following example. "Condition": -// {"Bool": {"aws:MultiFactorAuthPresent": true}} For more information, see -// Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) -// in the IAM User Guide guide. To use MFA with AssumeRole , you pass values for -// the SerialNumber and TokenCode parameters. The SerialNumber value identifies -// the user's hardware or virtual MFA device. The TokenCode is the time-based -// one-time password (TOTP) that the MFA device produces. +// about trust policies and resource-based policies, see [IAM Policies]in the IAM User Guide. +// +// # Tags +// +// (Optional) You can pass tag key-value pairs to your session. These tags are +// called session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM +// User Guide. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during role +// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. +// +// # Using MFA with AssumeRole +// +// (Optional) You can include multi-factor authentication (MFA) information when +// you call AssumeRole . This is useful for cross-account scenarios to ensure that +// the user that assumes the role has been authenticated with an Amazon Web +// Services MFA device. In that scenario, the trust policy of the role being +// assumed includes a condition that tests for MFA authentication. If the caller +// does not include valid MFA information, the request to assume the role is +// denied. The condition in a trust policy that tests for MFA authentication might +// look like the following example. 
+// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see [Configuring MFA-Protected API Access] in the IAM User Guide guide. +// +// To use MFA with AssumeRole , you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that the +// MFA device produces. +// +// [Configuring MFA-Protected API Access]: https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) { if params == nil { params = &AssumeRoleInput{} @@ -101,17 +131,19 @@ type AssumeRoleInput struct { // This member is required. RoleArn *string - // An identifier for the assumed role session. Use the role session name to - // uniquely identify a session when the same role is assumed by different - // principals or for different reasons. In cross-account scenarios, the role - // session name is visible to, and can be logged by the account that owns the role. - // The role session name is also used in the ARN of the assumed role principal. - // This means that subsequent cross-account API requests that use the temporary - // security credentials will expose the role session name to the external account - // in their CloudTrail logs. The regex used to validate this parameter is a string - // of characters consisting of upper- and lower-case alphanumeric characters with - // no spaces. You can also include underscores or any of the following characters: - // =,.@- + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role is + // assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the account + // that owns the role. The role session name is also used in the ARN of the assumed + // role principal. This means that subsequent cross-account API requests that use + // the temporary security credentials will expose the role session name to the + // external account in their CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- // // This member is required. 
RoleSessionName *string @@ -122,23 +154,27 @@ type AssumeRoleInput struct { // hours. If you specify a value higher than this setting or the administrator // setting (whichever is lower), the operation fails. For example, if you specify a // session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. Role chaining limits your Amazon Web - // Services CLI or Amazon Web Services API role session to a maximum of one hour. - // When you use the AssumeRole API operation to assume a role, you can specify the - // duration of your role session with the DurationSeconds parameter. You can - // specify a parameter value of up to 43200 seconds (12 hours), depending on the - // maximum session duration setting for your role. However, if you assume a role - // using role chaining and provide a DurationSeconds parameter value greater than - // one hour, the operation fails. To learn how to view the maximum value for your - // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. By default, the value is set to 3600 seconds. The - // DurationSeconds parameter is separate from the duration of a console session - // that you might request using the returned credentials. The request to the - // federation endpoint for a console sign-in token takes a SessionDuration + // duration to 6 hours, your operation fails. + // + // Role chaining limits your Amazon Web Services CLI or Amazon Web Services API + // role session to a maximum of one hour. When you use the AssumeRole API + // operation to assume a role, you can specify the duration of your role session + // with the DurationSeconds parameter. You can specify a parameter value of up to + // 43200 seconds (12 hours), depending on the maximum session duration setting for + // your role. However, if you assume a role using role chaining and provide a + // DurationSeconds parameter value greater than one hour, the operation fails. To + // learn how to view the maximum value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request to + // the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. + // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. 
+ // + // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html DurationSeconds *int32 // A unique identifier that might be required when you assume a role in another @@ -149,63 +185,79 @@ type AssumeRoleInput struct { // the administrator of the trusting account might send an external ID to the // administrator of the trusted account. That way, only someone with the ID can // assume the role, rather than everyone in the account. For more information about - // the external ID, see How to Use an External ID When Granting Access to Your - // Amazon Web Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@:/- + // the external ID, see [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@:/- + // + // [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html ExternalId *string // An IAM policy in JSON format that you want to use as an inline session policy. + // // This parameter is optional. Passing policies to this operation returns new // temporary credentials. The resulting session's permissions are the intersection // of the role's identity-based policy and the session policies. You can use the // role's temporary credentials in subsequent Amazon Web Services API calls to // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The plaintext that you use for both inline and managed - // session policies can't exceed 2,048 characters. The JSON policy characters can - // be any ASCII character from the space character to the end of the valid - // character list (\u0020 through \u00FF). It can also include the tab (\u0009), - // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web - // Services conversion compresses the passed inline session policy, managed policy - // ARNs, and session tags into a packed binary format that has a separate limit. - // Your request can fail for this limit even if your plaintext meets the other - // requirements. The PackedPolicySize response element indicates by percentage how - // close the policies and tags for your request are to the upper size limit. + // policy of the role that is being assumed. 
For more information, see [Session Policies]in the IAM + // User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as managed session policies. The policies must exist in the same account as - // the role. This parameter is optional. You can provide up to 10 managed policy - // ARNs. However, the plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. An Amazon Web Services conversion - // compresses the passed inline session policy, managed policy ARNs, and session - // tags into a packed binary format that has a separate limit. Your request can - // fail for this limit even if your plaintext meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. Passing policies to this - // operation returns new temporary credentials. The resulting session's permissions - // are the intersection of the role's identity-based policy and the session - // policies. You can use the role's temporary credentials in subsequent Amazon Web - // Services API calls to access resources in the account that owns the role. You - // cannot use session policies to grant more permissions than those allowed by the - // identity-based policy of the role that is being assumed. For more information, - // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. + // the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the + // Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. 
+ // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's + // identity-based policy and the session policies. You can use the role's temporary + // credentials in subsequent Amazon Web Services API calls to access resources in + // the account that owns the role. You cannot use session policies to grant more + // permissions than those allowed by the identity-based policy of the role that is + // being assumed. For more information, see [Session Policies]in the IAM User Guide. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType // A list of previously acquired trusted context assertions in the format of a // JSON array. The trusted context assertion is signed and encrypted by Amazon Web - // Services STS. The following is an example of a ProvidedContext value that - // includes a single trusted context assertion and the ARN of the context provider - // from which the trusted context assertion was generated. - // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] + // Services STS. + // + // The following is an example of a ProvidedContext value that includes a single + // trusted context assertion and the ARN of the context provider from which the + // trusted context assertion was generated. + // + // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] ProvidedContexts []types.ProvidedContext // The identification number of the MFA device that is associated with the user @@ -213,79 +265,97 @@ type AssumeRoleInput struct { // the role being assumed includes a condition that requires MFA authentication. // The value is either the serial number for a hardware device (such as // GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as - // arn:aws:iam::123456789012:mfa/user ). The regex used to validate this parameter - // is a string of characters consisting of upper- and lower-case alphanumeric - // characters with no spaces. You can also include underscores or any of the - // following characters: =,.@- + // arn:aws:iam::123456789012:mfa/user ). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- SerialNumber *string // The source identity specified by the principal that is calling the AssumeRole - // operation. You can require users to specify a source identity when they assume a - // role. You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition key - // to further control access to Amazon Web Services resources based on the value of - // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. 
The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@-. You cannot use a value that begins with the text aws: . This prefix is - // reserved for Amazon Web Services internal use. + // operation. + // + // You can require users to specify a source identity when they assume a role. You + // do this by using the sts:SourceIdentity condition key in a role trust policy. + // You can use source identity information in CloudTrail logs to determine who took + // actions with a role. You can use the aws:SourceIdentity condition key to + // further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the + // IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@-. You cannot use a + // value that begins with the text aws: . This prefix is reserved for Amazon Web + // Services internal use. + // + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html SourceIdentity *string // A list of session tags that you want to pass. Each session tag consists of a - // key name and an associated value. For more information about session tags, see - // Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. This parameter is optional. You can pass up to 50 session - // tags. The plaintext session tag keys can’t exceed 128 characters, and the values - // can’t exceed 256 characters. For these and additional limits, see IAM and STS - // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. An Amazon Web Services conversion compresses the passed - // inline session policy, managed policy ARNs, and session tags into a packed - // binary format that has a separate limit. Your request can fail for this limit - // even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags for - // your request are to the upper size limit. You can pass a session tag with the - // same key as a tag that is already attached to the role. When you do, session - // tags override a role tag with the same key. Tag key–value pairs are not case - // sensitive, but case is preserved. This means that you cannot have separate - // Department and department tag keys. Assume that the role has the Department = - // Marketing tag and you pass the department = engineering session tag. Department - // and department are not saved as separate tags, and the session tag passed in - // the request takes precedence over the role tag. Additionally, if you used - // temporary credentials to perform this operation, the new session inherits any - // transitive session tags from the calling session. If you pass a session tag with - // the same key as an inherited tag, the operation fails. To view the inherited - // tags for a session, see the CloudTrail logs. 
For more information, see Viewing - // Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) + // key name and an associated value. For more information about session tags, see [Tagging Amazon Web Services STS Sessions] // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters, and the values can’t exceed 256 + // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the role. When you do, session tags override a role tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This means + // that you cannot have separate Department and department tag keys. Assume that + // the role has the Department = Marketing tag and you pass the department = + // engineering session tag. Department and department are not saved as separate + // tags, and the session tag passed in the request takes precedence over the role + // tag. + // + // Additionally, if you used temporary credentials to perform this operation, the + // new session inherits any transitive session tags from the calling session. If + // you pass a session tag with the same key as an inherited tag, the operation + // fails. To view the inherited tags for a session, see the CloudTrail logs. For + // more information, see [Viewing Session Tags in CloudTrail]in the IAM User Guide. + // + // [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length + // [Viewing Session Tags in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs Tags []types.Tag // The value provided by the MFA device, if the trust policy of the role being // assumed requires MFA. (In other words, if the policy includes a condition that // tests for MFA). If the role being assumed requires MFA and if the TokenCode // value is missing or expired, the AssumeRole call returns an "access denied" - // error. The format for this parameter, as described by its regex pattern, is a - // sequence of six numeric digits. + // error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. TokenCode *string // A list of keys for session tags that you want to set as transitive. If you set // a tag key as transitive, the corresponding key and value passes to subsequent - // sessions in a role chain. For more information, see Chaining Roles with Session - // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) - // in the IAM User Guide. This parameter is optional. 
When you set session tags as - // transitive, the session policy and session tags packed binary limit is not - // affected. If you choose not to specify a transitive tag key, then no tags are - // passed from this session to any subsequent sessions. + // sessions in a role chain. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. + // + // This parameter is optional. When you set session tags as transitive, the + // session policy and session tags packed binary limit is not affected. + // + // If you choose not to specify a transitive tag key, then no tags are passed from + // this session to any subsequent sessions. + // + // [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining TransitiveTagKeys []string noSmithyDocumentSerde } -// Contains the response to a successful AssumeRole request, including temporary -// Amazon Web Services credentials that can be used to make Amazon Web Services -// requests. +// Contains the response to a successful AssumeRole request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type AssumeRoleOutput struct { // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers @@ -296,9 +366,10 @@ type AssumeRoleOutput struct { AssumedRoleUser *types.AssumedRoleUser // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. Credentials *types.Credentials // A percentage value that indicates the packed size of the session policies and @@ -308,17 +379,21 @@ type AssumeRoleOutput struct { PackedPolicySize *int32 // The source identity specified by the principal that is calling the AssumeRole - // operation. You can require users to specify a source identity when they assume a - // role. You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition key - // to further control access to Amazon Web Services resources based on the value of - // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- + // operation. + // + // You can require users to specify a source identity when they assume a role. You + // do this by using the sts:SourceIdentity condition key in a role trust policy. + // You can use source identity information in CloudTrail logs to determine who took + // actions with a role. 
You can use the aws:SourceIdentity condition key to + // further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the + // IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html SourceIdentity *string // Metadata pertaining to the operation's result. @@ -382,6 +457,12 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpAssumeRoleValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go index 2a57b72ac9..4c685abd5f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -16,92 +16,132 @@ import ( // mechanism for tying an enterprise identity store or directory to role-based // Amazon Web Services access without user-specific credentials or configuration. // For a comparison of AssumeRoleWithSAML with the other API operations that -// produce temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. The temporary security credentials returned by this -// operation consist of an access key ID, a secret access key, and a security -// token. Applications can use these temporary security credentials to sign calls -// to Amazon Web Services services. Session Duration By default, the temporary -// security credentials created by AssumeRoleWithSAML last for one hour. However, -// you can use the optional DurationSeconds parameter to specify the duration of -// your session. Your role session lasts for the duration that you specify, or -// until the time specified in the SAML authentication response's -// SessionNotOnOrAfter value, whichever is shorter. You can provide a -// DurationSeconds value from 900 seconds (15 minutes) up to the maximum session -// duration setting for the role. This setting can have a value from 1 hour to 12 -// hours. To learn how to view the maximum value for your role, see View the -// Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you use -// the AssumeRole* API operations or the assume-role* CLI commands. However the -// limit does not apply when you use those operations to create a console URL. 
For -// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) -// limits your CLI or Amazon Web Services API role session to a maximum of one +// produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of an +// access key ID, a secret access key, and a security token. Applications can use +// these temporary security credentials to sign calls to Amazon Web Services +// services. +// +// # Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML +// authentication response's SessionNotOnOrAfter value, whichever is shorter. You +// can provide a DurationSeconds value from 900 seconds (15 minutes) up to the +// maximum session duration setting for the role. This setting can have a value +// from 1 hour to 12 hours. To learn how to view the maximum value for your role, +// see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console URL. +// For more information, see [Using IAM Roles]in the IAM User Guide. +// +// [Role chaining]limits your CLI or Amazon Web Services API role session to a maximum of one // hour. When you use the AssumeRole API operation to assume a role, you can // specify the duration of your role session with the DurationSeconds parameter. // You can specify a parameter value of up to 43200 seconds (12 hours), depending // on the maximum session duration setting for your role. However, if you assume a // role using role chaining and provide a DurationSeconds parameter value greater -// than one hour, the operation fails. Permissions The temporary security -// credentials created by AssumeRoleWithSAML can be used to make API calls to any -// Amazon Web Services service with the following exception: you cannot call the -// STS GetFederationToken or GetSessionToken API operations. (Optional) You can -// pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Passing policies to this operation returns new temporary credentials. The -// resulting session's permissions are the intersection of the role's -// identity-based policy and the session policies. You can use the role's temporary -// credentials in subsequent Amazon Web Services API calls to access resources in -// the account that owns the role. 
You cannot use session policies to grant more -// permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. Calling AssumeRoleWithSAML does not require the use of -// Amazon Web Services security credentials. The identity of the caller is -// validated by using keys in the metadata document that is uploaded for the SAML -// provider entity for your identity provider. Calling AssumeRoleWithSAML can -// result in an entry in your CloudTrail logs. The entry includes the value in the -// NameID element of the SAML assertion. We recommend that you use a NameIDType -// that is not associated with any personally identifiable information (PII). For -// example, you could instead use the persistent identifier ( -// urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). Tags (Optional) You can -// configure your IdP to pass attributes into your SAML assertion as session tags. -// Each session tag consists of a key name and an associated value. For more -// information about session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session -// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. -// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. An Amazon Web Services conversion compresses the passed -// inline session policy, managed policy ARNs, and session tags into a packed -// binary format that has a separate limit. Your request can fail for this limit -// even if your plaintext meets the other requirements. The PackedPolicySize -// response element indicates by percentage how close the policies and tags for -// your request are to the upper size limit. You can pass a session tag with the -// same key as a tag that is attached to the role. When you do, session tags -// override the role's tags with the same key. An administrator must grant you the -// permissions necessary to pass session tags. The administrator can also create -// granular permissions to allow you to pass only specific session tags. For more -// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles -// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. SAML Configuration Before your application can call -// AssumeRoleWithSAML , you must configure your SAML identity provider (IdP) to -// issue the claims required by Amazon Web Services. Additionally, you must use -// Identity and Access Management (IAM) to create a SAML provider entity in your -// Amazon Web Services account that represents your identity provider. You must -// also create an IAM role that specifies this SAML provider in its trust policy. +// than one hour, the operation fails. 
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRoleWithSAML can be used to
+// make API calls to any Amazon Web Services service with the following exception:
+// you cannot call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a
+// single JSON policy document to use as an inline session policy. You can also
+// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed
+// session policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters. Passing policies to this operation
+// returns new temporary credentials. The resulting session's permissions are the
+// intersection of the role's identity-based policy and the session policies. You
+// can use the role's temporary credentials in subsequent Amazon Web Services API
+// calls to access resources in the account that owns the role. You cannot use
+// session policies to grant more permissions than those allowed by the
+// identity-based policy of the role that is being assumed. For more information,
+// see [Session Policies] in the IAM User Guide.
+//
+// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services
+// security credentials. The identity of the caller is validated by using keys in
+// the metadata document that is uploaded for the SAML provider entity for your
+// identity provider.
+//
+// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. The
+// entry includes the value in the NameID element of the SAML assertion. We
+// recommend that you use a NameIDType that is not associated with any personally
+// identifiable information (PII). For example, you could instead use the
+// persistent identifier ( urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ).
+//
+// # Tags
+//
+// (Optional) You can configure your IdP to pass attributes into your SAML
+// assertion as session tags. Each session tag consists of a key name and an
+// associated value. For more information about session tags, see [Passing Session Tags in STS] in the IAM User
+// Guide.
+//
+// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed
+// 128 characters and the values can’t exceed 256 characters. For these and
+// additional limits, see [IAM and STS Character Limits] in the IAM User Guide.
+//
+// An Amazon Web Services conversion compresses the passed inline session policy,
+// managed policy ARNs, and session tags into a packed binary format that has a
+// separate limit. Your request can fail for this limit even if your plaintext
+// meets the other requirements. The PackedPolicySize response element indicates
+// by percentage how close the policies and tags for your request are to the upper
+// size limit.
+//
+// You can pass a session tag with the same key as a tag that is attached to the
+// role. When you do, session tags override the role's tags with the same key.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control] in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during role
+// chaining. For more information, see [Chaining Roles with Session Tags] in the IAM User Guide. 
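To make the Permissions discussion above concrete, a hedged sketch of scoping a SAML session down with an inline session policy and checking PackedPolicySize; the package name, helper signature, and policy document are illustrative, not part of the vendored API.

package stsexample

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

// scopedSAMLSession passes an inline session policy, so the resulting session
// holds the intersection of the role's identity-based policy and this document.
func scopedSAMLSession(ctx context.Context, client *sts.Client, roleARN, principalARN, assertion string) (*types.Credentials, error) {
	// Illustrative scope-down policy; the plaintext must stay under 2,048 characters.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`

	out, err := client.AssumeRoleWithSAML(ctx, &sts.AssumeRoleWithSAMLInput{
		RoleArn:       aws.String(roleARN),
		PrincipalArn:  aws.String(principalARN),
		SAMLAssertion: aws.String(assertion),
		Policy:        aws.String(policy),
	})
	if err != nil {
		return nil, err
	}
	// PackedPolicySize reports, as a percentage, how close the packed policies
	// and tags come to the separate binary limit described above.
	if out.PackedPolicySize != nil {
		log.Printf("packed policies and tags at %d%% of the limit", *out.PackedPolicySize)
	}
	return out.Credentials, nil
}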
+//
+// # SAML Configuration
+//
+// Before your application can call AssumeRoleWithSAML , you must configure your
+// SAML identity provider (IdP) to issue the claims required by Amazon Web
+// Services. Additionally, you must use Identity and Access Management (IAM) to
+// create a SAML provider entity in your Amazon Web Services account that
+// represents your identity provider. You must also create an IAM role that
+// specifies this SAML provider in its trust policy.
+//
// For more information, see the following resources:
-// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
-// in the IAM User Guide.
-// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
-// in the IAM User Guide.
-// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
-// in the IAM User Guide.
-// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
-// in the IAM User Guide.
+//
+//   - [About SAML 2.0-based Federation] in the IAM User Guide.
+//
+//   - [Creating SAML Identity Providers] in the IAM User Guide.
+//
+//   - [Configuring a Relying Party and Claims] in the IAM User Guide.
+//
+//   - [Creating a Role for SAML 2.0 Federation] in the IAM User Guide.
+//
+// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+// [Creating a Role for SAML 2.0 Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html
+// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison
+// [Creating SAML Identity Providers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html
+// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+// [Configuring a Relying Party and Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html
+// [Role chaining]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining
+// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [About SAML 2.0-based Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html
+// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) 
(*AssumeRoleWithSAMLOutput, error) { if params == nil { params = &AssumeRoleWithSAMLInput{} @@ -130,9 +170,11 @@ type AssumeRoleWithSAMLInput struct { // This member is required. RoleArn *string - // The base64 encoded SAML authentication response provided by the IdP. For more - // information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) - // in the IAM User Guide. + // The base64 encoded SAML authentication response provided by the IdP. + // + // For more information, see [Configuring a Relying Party and Adding Claims] in the IAM User Guide. + // + // [Configuring a Relying Party and Adding Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html // // This member is required. SAMLAssertion *string @@ -146,92 +188,114 @@ type AssumeRoleWithSAMLInput struct { // than this setting, the operation fails. For example, if you specify a session // duration of 12 hours, but your administrator set the maximum session duration to // 6 hours, your operation fails. To learn how to view the maximum value for your - // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. By default, the value is set to 3600 seconds. The - // DurationSeconds parameter is separate from the duration of a console session - // that you might request using the returned credentials. The request to the - // federation endpoint for a console sign-in token takes a SessionDuration + // role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request to + // the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. + // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. + // + // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html DurationSeconds *int32 // An IAM policy in JSON format that you want to use as an inline session policy. + // // This parameter is optional. Passing policies to this operation returns new // temporary credentials. The resulting session's permissions are the intersection // of the role's identity-based policy and the session policies. You can use the // role's temporary credentials in subsequent Amazon Web Services API calls to // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. 
For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The plaintext that you use for both inline and managed - // session policies can't exceed 2,048 characters. The JSON policy characters can - // be any ASCII character from the space character to the end of the valid - // character list (\u0020 through \u00FF). It can also include the tab (\u0009), - // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web - // Services conversion compresses the passed inline session policy, managed policy - // ARNs, and session tags into a packed binary format that has a separate limit. - // Your request can fail for this limit even if your plaintext meets the other - // requirements. The PackedPolicySize response element indicates by percentage how - // close the policies and tags for your request are to the upper size limit. + // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM + // User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as managed session policies. The policies must exist in the same account as - // the role. This parameter is optional. You can provide up to 10 managed policy - // ARNs. However, the plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. An Amazon Web Services conversion - // compresses the passed inline session policy, managed policy ARNs, and session - // tags into a packed binary format that has a separate limit. Your request can - // fail for this limit even if your plaintext meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. Passing policies to this - // operation returns new temporary credentials. The resulting session's permissions - // are the intersection of the role's identity-based policy and the session - // policies. You can use the role's temporary credentials in subsequent Amazon Web - // Services API calls to access resources in the account that owns the role. 
You - // cannot use session policies to grant more permissions than those allowed by the - // identity-based policy of the role that is being assumed. For more information, - // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. + // the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the + // Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's + // identity-based policy and the session policies. You can use the role's temporary + // credentials in subsequent Amazon Web Services API calls to access resources in + // the account that owns the role. You cannot use session policies to grant more + // permissions than those allowed by the identity-based policy of the role that is + // being assumed. For more information, see [Session Policies]in the IAM User Guide. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType noSmithyDocumentSerde } -// Contains the response to a successful AssumeRoleWithSAML request, including -// temporary Amazon Web Services credentials that can be used to make Amazon Web -// Services requests. +// Contains the response to a successful AssumeRoleWithSAML request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type AssumeRoleWithSAMLOutput struct { // The identifiers for the temporary security credentials that the operation // returns. AssumedRoleUser *types.AssumedRoleUser - // The value of the Recipient attribute of the SubjectConfirmationData element of + // The value of the Recipient attribute of the SubjectConfirmationData element of // the SAML assertion. Audience *string // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. Credentials *types.Credentials // The value of the Issuer element of the SAML assertion. Issuer *string // A hash value based on the concatenation of the following: + // // - The Issuer response value. + // // - The Amazon Web Services account ID. 
+ // // - The friendly name (the last part of the ARN) of the SAML provider in IAM. + // // The combination of NameQualifier and Subject can be used to uniquely identify a - // user. The following pseudocode shows how the hash value is calculated: BASE64 ( - // SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) + // user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) NameQualifier *string // A percentage value that indicates the packed size of the session policies and @@ -240,31 +304,36 @@ type AssumeRoleWithSAMLOutput struct { // allowed space. PackedPolicySize *int32 - // The value in the SourceIdentity attribute in the SAML assertion. You can - // require users to set a source identity value when they assume a role. You do - // this by using the sts:SourceIdentity condition key in a role trust policy. That - // way, actions that are taken with the role are associated with that user. After - // the source identity is set, the value cannot be changed. It is present in the - // request for all actions that are taken by the role and persists across chained - // role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) - // sessions. You can configure your SAML identity provider to use an attribute - // associated with your users, like user name or email, as the source identity when - // calling AssumeRoleWithSAML . You do this by adding an attribute to the SAML - // assertion. For more information about using source identity, see Monitor and - // control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- + // The value in the SourceIdentity attribute in the SAML assertion. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with that + // user. After the source identity is set, the value cannot be changed. It is + // present in the request for all actions that are taken by the role and persists + // across [chained role]sessions. You can configure your SAML identity provider to use an + // attribute associated with your users, like user name or email, as the source + // identity when calling AssumeRoleWithSAML . You do this by adding an attribute to + // the SAML assertion. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in + // the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
You can also + // include underscores or any of the following characters: =,.@- + // + // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html SourceIdentity *string // The value of the NameID element in the Subject element of the SAML assertion. Subject *string - // The format of the name ID, as defined by the Format attribute in the NameID + // The format of the name ID, as defined by the Format attribute in the NameID // element of the SAML assertion. Typical examples of the format are transient or - // persistent . If the format includes the prefix - // urn:oasis:names:tc:SAML:2.0:nameid-format , that prefix is removed. For example, + // persistent . + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format , + // that prefix is removed. For example, // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient . // If the format includes any other prefix, the format is returned with no // modifications. @@ -328,6 +397,12 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go index 98108ce6af..0b5e5a377c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -14,105 +14,143 @@ import ( // Returns a set of temporary security credentials for users who have been // authenticated in a mobile or web application with a web identity provider. // Example providers include the OAuth 2.0 providers Login with Amazon and -// Facebook, or any OpenID Connect-compatible identity provider such as Google or -// Amazon Cognito federated identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) -// . For mobile applications, we recommend that you use Amazon Cognito. You can use -// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and the Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) -// to uniquely identify a user. You can also supply the user with a consistent -// identity throughout the lifetime of an application. To learn more about Amazon -// Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) -// in Amazon Cognito Developer Guide. Calling AssumeRoleWithWebIdentity does not -// require the use of Amazon Web Services security credentials. Therefore, you can -// distribute an application (for example, on mobile devices) that requests -// temporary security credentials without including long-term Amazon Web Services -// credentials in the application. You also don't need to deploy server-based proxy -// services that use long-term Amazon Web Services credentials. 
Instead, the -// identity of the caller is validated by using a token from the web identity -// provider. For a comparison of AssumeRoleWithWebIdentity with the other API -// operations that produce temporary credentials, see Requesting Temporary -// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. The temporary security credentials returned by this API -// consist of an access key ID, a secret access key, and a security token. -// Applications can use these temporary security credentials to sign calls to -// Amazon Web Services service API operations. Session Duration By default, the -// temporary security credentials created by AssumeRoleWithWebIdentity last for -// one hour. However, you can use the optional DurationSeconds parameter to -// specify the duration of your session. You can provide a value from 900 seconds -// (15 minutes) up to the maximum session duration setting for the role. This -// setting can have a value from 1 hour to 12 hours. To learn how to view the -// maximum value for your role, see View the Maximum Session Duration Setting for -// a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you use -// the AssumeRole* API operations or the assume-role* CLI commands. However the -// limit does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. Permissions The temporary security credentials created by -// AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web -// Services service with the following exception: you cannot call the STS -// GetFederationToken or GetSessionToken API operations. (Optional) You can pass -// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Passing policies to this operation returns new temporary credentials. The -// resulting session's permissions are the intersection of the role's -// identity-based policy and the session policies. You can use the role's temporary -// credentials in subsequent Amazon Web Services API calls to access resources in -// the account that owns the role. You cannot use session policies to grant more -// permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. Tags (Optional) You can configure your IdP to pass -// attributes into your web identity token as session tags. Each session tag -// consists of a key name and an associated value. 
For more information about -// session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session -// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. -// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. An Amazon Web Services conversion compresses the passed -// inline session policy, managed policy ARNs, and session tags into a packed -// binary format that has a separate limit. Your request can fail for this limit -// even if your plaintext meets the other requirements. The PackedPolicySize -// response element indicates by percentage how close the policies and tags for -// your request are to the upper size limit. You can pass a session tag with the -// same key as a tag that is attached to the role. When you do, the session tag -// overrides the role tag with the same key. An administrator must grant you the -// permissions necessary to pass session tags. The administrator can also create -// granular permissions to allow you to pass only specific session tags. For more -// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles -// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. Identities Before your application can call -// AssumeRoleWithWebIdentity , you must have an identity token from a supported -// identity provider and create a role that the application can assume. The role -// that your application assumes must trust the identity provider that is -// associated with the identity token. In other words, the identity provider must -// be specified in the role's trust policy. Calling AssumeRoleWithWebIdentity can -// result in an entry in your CloudTrail logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) -// of the provided web identity token. We recommend that you avoid using any -// personally identifiable information (PII) in this field. For example, you could -// instead use a GUID or a pairwise identifier, as suggested in the OIDC -// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes) -// . For more information about how to use web identity federation and the +// Facebook, or any OpenID Connect-compatible identity provider such as Google or [Amazon Cognito federated identities]. +// +// For mobile applications, we recommend that you use Amazon Cognito. You can use +// Amazon Cognito with the [Amazon Web Services SDK for iOS Developer Guide]and the [Amazon Web Services SDK for Android Developer Guide] to uniquely identify a user. You can also +// supply the user with a consistent identity throughout the lifetime of an +// application. +// +// To learn more about Amazon Cognito, see [Amazon Cognito identity pools] in Amazon Cognito Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web +// Services security credentials. 
Therefore, you can distribute an application (for
+// example, on mobile devices) that requests temporary security credentials without
+// including long-term Amazon Web Services credentials in the application. You also
+// don't need to deploy server-based proxy services that use long-term Amazon Web
+// Services credentials. Instead, the identity of the caller is validated by using
+// a token from the web identity provider. For a comparison of
+// AssumeRoleWithWebIdentity with the other API operations that produce temporary
+// credentials, see [Requesting Temporary Security Credentials] and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to Amazon Web Services service API
+// operations.
+//
+// # Session Duration
+//
+// By default, the temporary security credentials created by
+// AssumeRoleWithWebIdentity last for one hour. However, you can use the optional
+// DurationSeconds parameter to specify the duration of your session. You can
+// provide a value from 900 seconds (15 minutes) up to the maximum session duration
+// setting for the role. This setting can have a value from 1 hour to 12 hours. To
+// learn how to view the maximum value for your role, see [View the Maximum Session Duration Setting for a Role] in the IAM User Guide.
+// The maximum session duration limit applies when you use the AssumeRole* API
+// operations or the assume-role* CLI commands. However, the limit does not apply
+// when you use those operations to create a console URL. For more information, see
+// [Using IAM Roles] in the IAM User Guide.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity can be
+// used to make API calls to any Amazon Web Services service with the following
+// exception: you cannot call the STS GetFederationToken or GetSessionToken API
+// operations.
+//
+// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a
+// single JSON policy document to use as an inline session policy. You can also
+// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed
+// session policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters. Passing policies to this operation
+// returns new temporary credentials. The resulting session's permissions are the
+// intersection of the role's identity-based policy and the session policies. You
+// can use the role's temporary credentials in subsequent Amazon Web Services API
+// calls to access resources in the account that owns the role. You cannot use
+// session policies to grant more permissions than those allowed by the
+// identity-based policy of the role that is being assumed. For more information,
+// see [Session Policies] in the IAM User Guide.
+//
+// # Tags
+//
+// (Optional) You can configure your IdP to pass attributes into your web identity
+// token as session tags. Each session tag consists of a key name and an associated
+// value. For more information about session tags, see [Passing Session Tags in STS] in the IAM User Guide.
+//
+// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed
+// 128 characters and the values can’t exceed 256 characters. 
For these and
+// additional limits, see [IAM and STS Character Limits] in the IAM User Guide.
+//
+// An Amazon Web Services conversion compresses the passed inline session policy,
+// managed policy ARNs, and session tags into a packed binary format that has a
+// separate limit. Your request can fail for this limit even if your plaintext
+// meets the other requirements. The PackedPolicySize response element indicates
+// by percentage how close the policies and tags for your request are to the upper
+// size limit.
+//
+// You can pass a session tag with the same key as a tag that is attached to the
+// role. When you do, the session tag overrides the role tag with the same key.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control] in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during role
+// chaining. For more information, see [Chaining Roles with Session Tags] in the IAM User Guide.
+//
+// # Identities
+//
+// Before your application can call AssumeRoleWithWebIdentity , you must have an
+// identity token from a supported identity provider and create a role that the
+// application can assume. The role that your application assumes must trust the
+// identity provider that is associated with the identity token. In other words,
+// the identity provider must be specified in the role's trust policy.
+//
+// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail
+// logs. The entry includes the [Subject] of the provided web identity token. We recommend
+// that you avoid using any personally identifiable information (PII) in this
+// field. For example, you could instead use a GUID or a pairwise identifier, as [suggested in the OIDC specification].
+//
+// For more information about how to use web identity federation and the
// AssumeRoleWithWebIdentity API, see the following resources:
-// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
-// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
-// .
-// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/)
-// . Walk through the process of authenticating through Login with Amazon,
+//
+//   - [Using Web Identity Federation API Operations for Mobile Apps] and [Federation Through a Web-based Identity Provider].
+//
+//   - [Web Identity Federation Playground]. Walk through the process of authenticating through Login with Amazon,
// Facebook, or Google, getting temporary security credentials, and then using
// those credentials to make a request to Amazon Web Services.
-// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/)
-// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/)
-// . These toolkits contain sample apps that show how to invoke the identity
-// providers. The toolkits then show how to use the information from these
+//
+//   - [Amazon Web Services SDK for iOS Developer Guide] and [Amazon Web Services SDK for Android Developer Guide]. These toolkits contain sample apps that show how to invoke the
+// identity providers. The toolkits then show how to use the information from these
// providers to get and use temporary security credentials.
-// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications)
-// . This article discusses web identity federation and shows an example of how to
-// use web identity federation to get access to content in Amazon S3.
+//
+//   - [Web Identity Federation with Mobile Applications]. This article discusses web identity federation and shows an example of
+// how to use web identity federation to get access to content in Amazon S3.
+//
+// [Amazon Web Services SDK for iOS Developer Guide]: http://aws.amazon.com/sdkforios/
+// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+// [Web Identity Federation Playground]: https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/
+// [Amazon Web Services SDK for Android Developer Guide]: http://aws.amazon.com/sdkforandroid/
+// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison
+// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Subject]: http://openid.net/specs/openid-connect-core-1_0.html#Claims
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+// [Amazon Cognito identity pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html
+// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity
+// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Amazon Cognito federated identities]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+// [Web Identity Federation with Mobile Applications]: http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications
+// [Using Web Identity Federation API Operations for Mobile Apps]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html
+// [suggested in the OIDC specification]: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes
 func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { if params == nil { params = &AssumeRoleWithWebIdentityInput{} @@ -139,10 +177,11 @@ type AssumeRoleWithWebIdentityInput struct { // identifier that is associated 
with the user who is using your application. That // way, the temporary security credentials that your application will use are // associated with that user. This session name is included as part of the ARN and - // assumed role ID in the AssumedRoleUser response element. The regex used to - // validate this parameter is a string of characters consisting of upper- and - // lower-case alphanumeric characters with no spaces. You can also include - // underscores or any of the following characters: =,.@- + // assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- // // This member is required. RoleSessionName *string @@ -162,73 +201,90 @@ type AssumeRoleWithWebIdentityInput struct { // higher than this setting, the operation fails. For example, if you specify a // session duration of 12 hours, but your administrator set the maximum session // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. By default, the value is set to 3600 seconds. The - // DurationSeconds parameter is separate from the duration of a console session - // that you might request using the returned credentials. The request to the - // federation endpoint for a console sign-in token takes a SessionDuration + // value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request to + // the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. + // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. + // + // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html DurationSeconds *int32 // An IAM policy in JSON format that you want to use as an inline session policy. + // // This parameter is optional. Passing policies to this operation returns new // temporary credentials. The resulting session's permissions are the intersection // of the role's identity-based policy and the session policies. You can use the // role's temporary credentials in subsequent Amazon Web Services API calls to // access resources in the account that owns the role. 
You cannot use session // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The plaintext that you use for both inline and managed - // session policies can't exceed 2,048 characters. The JSON policy characters can - // be any ASCII character from the space character to the end of the valid - // character list (\u0020 through \u00FF). It can also include the tab (\u0009), - // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web - // Services conversion compresses the passed inline session policy, managed policy - // ARNs, and session tags into a packed binary format that has a separate limit. - // Your request can fail for this limit even if your plaintext meets the other - // requirements. The PackedPolicySize response element indicates by percentage how - // close the policies and tags for your request are to the upper size limit. + // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM + // User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as managed session policies. The policies must exist in the same account as - // the role. This parameter is optional. You can provide up to 10 managed policy - // ARNs. However, the plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. An Amazon Web Services conversion - // compresses the passed inline session policy, managed policy ARNs, and session - // tags into a packed binary format that has a separate limit. Your request can - // fail for this limit even if your plaintext meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. Passing policies to this - // operation returns new temporary credentials. The resulting session's permissions - // are the intersection of the role's identity-based policy and the session - // policies. 
You can use the role's temporary credentials in subsequent Amazon Web - // Services API calls to access resources in the account that owns the role. You - // cannot use session policies to grant more permissions than those allowed by the - // identity-based policy of the role that is being assumed. For more information, - // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. + // the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the + // Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's + // identity-based policy and the session policies. You can use the role's temporary + // credentials in subsequent Amazon Web Services API calls to access resources in + // the account that owns the role. You cannot use session policies to grant more + // permissions than those allowed by the identity-based policy of the role that is + // being assumed. For more information, see [Session Policies]in the IAM User Guide. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType // The fully qualified host component of the domain name of the OAuth 2.0 identity // provider. Do not specify this value for an OpenID Connect identity provider. + // // Currently www.amazon.com and graph.facebook.com are the only supported identity // providers for OAuth 2.0 access tokens. Do not include URL schemes and port - // numbers. Do not specify this value for OpenID Connect ID tokens. + // numbers. + // + // Do not specify this value for OpenID Connect ID tokens. ProviderId *string noSmithyDocumentSerde } -// Contains the response to a successful AssumeRoleWithWebIdentity request, -// including temporary Amazon Web Services credentials that can be used to make -// Amazon Web Services requests. +// Contains the response to a successful AssumeRoleWithWebIdentity request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type AssumeRoleWithWebIdentityOutput struct { // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers @@ -244,9 +300,10 @@ type AssumeRoleWithWebIdentityOutput struct { Audience *string // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. The size of the security token that STS API - // operations return is not fixed. 
We strongly recommend that you make no - // assumptions about the maximum size. + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. Credentials *types.Credentials // A percentage value that indicates the packed size of the session policies and @@ -255,30 +312,34 @@ type AssumeRoleWithWebIdentityOutput struct { // allowed space. PackedPolicySize *int32 - // The issuing authority of the web identity token presented. For OpenID Connect + // The issuing authority of the web identity token presented. For OpenID Connect // ID tokens, this contains the value of the iss field. For OAuth 2.0 access // tokens, this contains the value of the ProviderId parameter that was passed in // the AssumeRoleWithWebIdentity request. Provider *string // The value of the source identity that is returned in the JSON web token (JWT) - // from the identity provider. You can require users to set a source identity value - // when they assume a role. You do this by using the sts:SourceIdentity condition - // key in a role trust policy. That way, actions that are taken with the role are - // associated with that user. After the source identity is set, the value cannot be - // changed. It is present in the request for all actions that are taken by the role - // and persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) - // sessions. You can configure your identity provider to use an attribute + // from the identity provider. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with that + // user. After the source identity is set, the value cannot be changed. It is + // present in the request for all actions that are taken by the role and persists + // across [chained role]sessions. You can configure your identity provider to use an attribute // associated with your users, like user name or email, as the source identity when // calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON - // web token. To learn more about OIDC tokens and claims, see Using Tokens with - // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) - // in the Amazon Cognito Developer Guide. For more information about using source - // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- + // web token. To learn more about OIDC tokens and claims, see [Using Tokens with User Pools]in the Amazon + // Cognito Developer Guide. For more information about using source identity, see [Monitor and control actions taken with assumed roles] + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
You can also + // include underscores or any of the following characters: =,.@- + // + // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + // [Using Tokens with User Pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html SourceIdentity *string // The unique user identifier that is returned by the identity provider. This @@ -347,6 +408,12 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go index b4ad54ab2f..b1f14d28ce 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go @@ -11,28 +11,39 @@ import ( ) // Decodes additional information about the authorization status of a request from -// an encoded message returned in response to an Amazon Web Services request. For -// example, if a user is not authorized to perform an operation that he or she has -// requested, the request returns a Client.UnauthorizedOperation response (an HTTP -// 403 response). Some Amazon Web Services operations additionally return an -// encoded message that can provide details about this authorization failure. Only -// certain Amazon Web Services operations return an encoded authorization message. -// The documentation for an individual operation indicates whether that operation -// returns an encoded message in addition to returning an HTTP code. The message is -// encoded because the details of the authorization status can contain privileged -// information that the user who requested the operation should not see. To decode -// an authorization status message, a user must be granted permissions through an -// IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// to request the DecodeAuthorizationMessage ( sts:DecodeAuthorizationMessage ) -// action. The decoded message includes the following type of information: +// an encoded message returned in response to an Amazon Web Services request. +// +// For example, if a user is not authorized to perform an operation that he or she +// has requested, the request returns a Client.UnauthorizedOperation response (an +// HTTP 403 response). Some Amazon Web Services operations additionally return an +// encoded message that can provide details about this authorization failure. +// +// Only certain Amazon Web Services operations return an encoded authorization +// message. The documentation for an individual operation indicates whether that +// operation returns an encoded message in addition to returning an HTTP code. 
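A hedged illustration of the decode flow this comment describes; the encoded message below is a placeholder, and the call fails unless the caller is allowed the sts:DecodeAuthorizationMessage action.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// The encoded message would come from a Client.UnauthorizedOperation
	// (HTTP 403) response to some earlier request; this value is a placeholder.
	out, err := client.DecodeAuthorizationMessage(context.Background(), &sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String("encoded-authorization-message"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.ToString(out.DecodedMessage))
}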
+// +// The message is encoded because the details of the authorization status can +// contain privileged information that the user who requested the operation should +// not see. To decode an authorization status message, a user must be granted +// permissions through an IAM [policy]to request the DecodeAuthorizationMessage ( +// sts:DecodeAuthorizationMessage ) action. +// +// The decoded message includes the following type of information: +// // - Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. For more information, see Determining Whether a -// Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. +// absence of an explicit allow. For more information, see [Determining Whether a Request is Allowed or Denied]in the IAM User +// Guide. +// // - The principal who made the request. +// // - The requested action. +// // - The requested resource. +// // - The values of condition keys in the context of the user's request. +// +// [Determining Whether a Request is Allowed or Denied]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow +// [policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { if params == nil { params = &DecodeAuthorizationMessageInput{} @@ -127,6 +138,12 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go index 1f7cbcc2bb..3ba00873db 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go @@ -10,23 +10,31 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the account identifier for the specified access key ID. Access keys -// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE ) and -// a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). -// For more information about access keys, see Managing Access Keys for IAM Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) -// in the IAM User Guide. When you pass an access key ID to this operation, it -// returns the ID of the Amazon Web Services account to which the keys belong. -// Access key IDs beginning with AKIA are long-term credentials for an IAM user or -// the Amazon Web Services account root user. Access key IDs beginning with ASIA -// are temporary credentials that are created using STS operations. If the account -// in the response belongs to you, you can sign in as the root user and review your -// root user access keys. 
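To make the AKIA/ASIA distinction above concrete, a minimal sketch of the lookup, reusing the documentation's own example key ID; default credential loading is assumed:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// AKIA-prefixed keys are long-term IAM credentials; ASIA-prefixed keys are
	// temporary STS credentials. Either prefix can be looked up here.
	out, err := client.GetAccessKeyInfo(context.TODO(), &sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("key belongs to account:", aws.ToString(out.Account))
}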
Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) -// to learn which IAM user owns the keys. To learn who requested the temporary -// credentials for an ASIA access key, view the STS events in your CloudTrail logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) -// in the IAM User Guide. This operation does not indicate the state of the access -// key. The key might be active, inactive, or deleted. Active keys might not have -// permissions to perform an operation. Providing a deleted access key might return -// an error that the key doesn't exist. +// Returns the account identifier for the specified access key ID. +// +// Access keys consist of two parts: an access key ID (for example, +// AKIAIOSFODNN7EXAMPLE ) and a secret access key (for example, +// wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). For more information about access +// keys, see [Managing Access Keys for IAM Users]in the IAM User Guide. +// +// When you pass an access key ID to this operation, it returns the ID of the +// Amazon Web Services account to which the keys belong. Access key IDs beginning +// with AKIA are long-term credentials for an IAM user or the Amazon Web Services +// account root user. Access key IDs beginning with ASIA are temporary credentials +// that are created using STS operations. If the account in the response belongs to +// you, you can sign in as the root user and review your root user access keys. +// Then, you can pull a [credentials report]to learn which IAM user owns the keys. To learn who +// requested the temporary credentials for an ASIA access key, view the STS events +// in your [CloudTrail logs]in the IAM User Guide. +// +// This operation does not indicate the state of the access key. The key might be +// active, inactive, or deleted. Active keys might not have permissions to perform +// an operation. Providing a deleted access key might return an error that the key +// doesn't exist. +// +// [credentials report]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html +// [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html +// [Managing Access Keys for IAM Users]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) { if params == nil { params = &GetAccessKeyInfoInput{} @@ -44,9 +52,10 @@ func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoI type GetAccessKeyInfoInput struct { - // The identifier of an access key. This parameter allows (through its regex - // pattern) a string of characters that can consist of any upper- or lowercase - // letter or digit. + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters that + // can consist of any upper- or lowercase letter or digit. // // This member is required. 
AccessKeyId *string @@ -120,6 +129,12 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go index acb7ede44f..abac49ad2f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go @@ -12,13 +12,15 @@ import ( ) // Returns details about the IAM user or role whose credentials are used to call -// the operation. No permissions are required to perform this operation. If an -// administrator attaches a policy to your identity that explicitly denies access -// to the sts:GetCallerIdentity action, you can still perform this operation. -// Permissions are not required because the same information is returned when -// access is denied. To view an example response, see I Am Not Authorized to -// Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) -// in the IAM User Guide. +// the operation. +// +// No permissions are required to perform this operation. If an administrator +// attaches a policy to your identity that explicitly denies access to the +// sts:GetCallerIdentity action, you can still perform this operation. Permissions +// are not required because the same information is returned when access is denied. +// To view an example response, see [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]in the IAM User Guide. +// +// [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) { if params == nil { params = &GetCallerIdentityInput{} @@ -38,8 +40,8 @@ type GetCallerIdentityInput struct { noSmithyDocumentSerde } -// Contains the response to a successful GetCallerIdentity request, including -// information about the entity making the request. +// Contains the response to a successful GetCallerIdentity request, including information about the +// entity making the request. type GetCallerIdentityOutput struct { // The Amazon Web Services account ID number of the account that owns or contains @@ -51,8 +53,10 @@ type GetCallerIdentityOutput struct { // The unique identifier of the calling entity. The exact value depends on the // type of entity that is making the call. The values returned are those listed in - // the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) - // found on the Policy Variables reference page in the IAM User Guide. + // the aws:userid column in the [Principal table]found on the Policy Variables reference page in + // the IAM User Guide. 
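A minimal sketch of the GetCallerIdentity operation this hunk documents, assuming default credential loading; it is often the quickest way to check which principal a credential chain resolves to:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// No permissions are required; even an explicit deny on
	// sts:GetCallerIdentity does not block this call.
	out, err := client.GetCallerIdentity(context.TODO(), &sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("account=%s arn=%s userId=%s\n",
		aws.ToString(out.Account), aws.ToString(out.Arn), aws.ToString(out.UserId))
}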
+ // + // [Principal table]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable UserId *string // Metadata pertaining to the operation's result. @@ -116,6 +120,12 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go index 3679618cb5..2bae67429f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -14,74 +14,100 @@ import ( // Returns a set of temporary security credentials (consisting of an access key // ID, a secret access key, and a security token) for a user. A typical use is in a // proxy application that gets temporary security credentials on behalf of -// distributed applications inside a corporate network. You must call the -// GetFederationToken operation using the long-term security credentials of an IAM -// user. As a result, this call is appropriate in contexts where those credentials -// can be safeguarded, usually in a server-based application. For a comparison of -// GetFederationToken with the other API operations that produce temporary -// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. Although it is possible to call GetFederationToken using -// the security credentials of an Amazon Web Services account root user rather than -// an IAM user that you create for the purpose of a proxy application, we do not -// recommend it. For more information, see Safeguard your root user credentials -// and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) -// in the IAM User Guide. You can create a mobile-based or browser-based app that -// can authenticate users using a web identity provider like Login with Amazon, -// Facebook, Google, or an OpenID Connect-compatible identity provider. In this -// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) -// or AssumeRoleWithWebIdentity . For more information, see Federation Through a -// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. Session duration The temporary credentials are valid for -// the specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600 -// seconds (36 hours). The default session duration is 43,200 seconds (12 hours). -// Temporary credentials obtained by using the root user credentials have a maximum -// duration of 3,600 seconds (1 hour). 
Permissions You can use the temporary -// credentials created by GetFederationToken in any Amazon Web Services service -// with the following exceptions: +// distributed applications inside a corporate network. +// +// You must call the GetFederationToken operation using the long-term security +// credentials of an IAM user. As a result, this call is appropriate in contexts +// where those credentials can be safeguarded, usually in a server-based +// application. For a comparison of GetFederationToken with the other API +// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. +// +// Although it is possible to call GetFederationToken using the security +// credentials of an Amazon Web Services account root user rather than an IAM user +// that you create for the purpose of a proxy application, we do not recommend it. +// For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in the IAM User Guide. +// +// You can create a mobile-based or browser-based app that can authenticate users +// using a web identity provider like Login with Amazon, Facebook, Google, or an +// OpenID Connect-compatible identity provider. In this case, we recommend that you +// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User +// Guide. +// +// # Session duration +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// session duration is 43,200 seconds (12 hours). Temporary credentials obtained by +// using the root user credentials have a maximum duration of 3,600 seconds (1 +// hour). +// +// # Permissions +// +// You can use the temporary credentials created by GetFederationToken in any +// Amazon Web Services service with the following exceptions: +// // - You cannot call any IAM operations using the CLI or the Amazon Web Services // API. This limitation does not apply to console sessions. +// // - You cannot call any STS operations except GetCallerIdentity . // -// You can use temporary credentials for single sign-on (SSO) to the console. You -// must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. +// You can use temporary credentials for single sign-on (SSO) to the console. +// +// You must pass an inline or managed [session policy] to this operation. You can pass a single +// JSON policy document to use as an inline session policy. You can also specify up +// to 10 managed policy Amazon Resource Names (ARNs) to use as managed session +// policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. +// // Though the session policy parameters are optional, if you do not pass a policy, // then the resulting federated user session has no permissions. 
When you pass // session policies, the session permissions are the intersection of the IAM user // policies and the session policies that you pass. This gives you a way to further // restrict the permissions for a federated user. You cannot use session policies // to grant more permissions than those that are defined in the permissions policy -// of the IAM user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. For information about using GetFederationToken to create -// temporary security credentials, see GetFederationToken—Federation Through a -// Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken) -// . You can use the credentials to access a resource that has a resource-based +// of the IAM user. For more information, see [Session Policies]in the IAM User Guide. For +// information about using GetFederationToken to create temporary security +// credentials, see [GetFederationToken—Federation Through a Custom Identity Broker]. +// +// You can use the credentials to access a resource that has a resource-based // policy. If that policy specifically references the federated user session in the // Principal element of the policy, the session has the permissions allowed by the // policy. These permissions are granted in addition to the permissions granted by -// the session policies. Tags (Optional) You can pass tag key-value pairs to your -// session. These are called session tags. For more information about session tags, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You can create a mobile-based or browser-based app that -// can authenticate users using a web identity provider like Login with Amazon, -// Facebook, Google, or an OpenID Connect-compatible identity provider. In this -// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) -// or AssumeRoleWithWebIdentity . For more information, see Federation Through a -// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. An administrator must grant you the permissions necessary -// to pass session tags. The administrator can also create granular permissions to -// allow you to pass only specific session tags. For more information, see -// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. Tag key–value pairs are not case sensitive, but case is -// preserved. This means that you cannot have separate Department and department -// tag keys. Assume that the user that you are federating has the Department = -// Marketing tag and you pass the department = engineering session tag. Department -// and department are not saved as separate tags, and the session tag passed in -// the request takes precedence over the user tag. +// the session policies. +// +// # Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM User +// Guide. 
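To ground the session-policy intersection and session-tag behavior described above, a hypothetical sketch of a proxy application vending scoped federated credentials; the bucket name and policy document are illustrative, while the Bob name and Department tag echo the surrounding documentation:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// Illustrative inline session policy: the effective permissions are the
	// intersection of the calling IAM user's policies and this document.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"arn:aws:s3:::example-bucket/*"}]}`

	out, err := client.GetFederationToken(context.TODO(), &sts.GetFederationTokenInput{
		Name:            aws.String("Bob"), // identifier usable in resource policies
		DurationSeconds: aws.Int32(3600),   // 900s-129,600s; default 43,200s
		Policy:          aws.String(policy),
		Tags: []types.Tag{
			{Key: aws.String("Department"), Value: aws.String("Marketing")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("federated access key:", aws.ToString(out.Credentials.AccessKeyId))
}

Without the Policy (or PolicyArns) field, the resulting federated session would have no permissions at all, as the documentation above stresses.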
+// +// You can create a mobile-based or browser-based app that can authenticate users +// using a web identity provider like Login with Amazon, Facebook, Google, or an +// OpenID Connect-compatible identity provider. In this case, we recommend that you +// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User +// Guide. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// Tag key–value pairs are not case sensitive, but case is preserved. This means +// that you cannot have separate Department and department tag keys. Assume that +// the user that you are federating has the Department = Marketing tag and you +// pass the department = engineering session tag. Department and department are +// not saved as separate tags, and the session tag passed in the request takes +// precedence over the user tag. +// +// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity +// [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Amazon Cognito]: http://aws.amazon.com/cognito/ +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [GetFederationToken—Federation Through a Custom Identity Broker]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { if params == nil { params = &GetFederationTokenInput{} @@ -102,10 +128,11 @@ type GetFederationTokenInput struct { // The name of the federated user. The name is used as an identifier for the // temporary security credentials (such as Bob ). For example, you can reference // the federated user name in a resource-based policy, such as in an Amazon S3 - // bucket policy. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- + // bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
You can also + // include underscores or any of the following characters: =,.@- // // This member is required. Name *string @@ -119,99 +146,127 @@ type GetFederationTokenInput struct { DurationSeconds *int32 // An IAM policy in JSON format that you want to use as an inline session policy. - // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policy Amazon - // Resource Names (ARNs) to use as managed session policies. This parameter is - // optional. However, if you do not pass any session policies, then the resulting - // federated user session has no permissions. When you pass session policies, the - // session permissions are the intersection of the IAM user policies and the - // session policies that you pass. This gives you a way to further restrict the - // permissions for a federated user. You cannot use session policies to grant more - // permissions than those that are defined in the permissions policy of the IAM - // user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The resulting credentials can be used to access a - // resource that has a resource-based policy. If that policy specifically - // references the federated user session in the Principal element of the policy, - // the session has the permissions allowed by the policy. These permissions are - // granted in addition to the permissions that are granted by the session policies. + // + // You must pass an inline or managed [session policy] to this operation. You can pass a single + // JSON policy document to use as an inline session policy. You can also specify up + // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session + // policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection of + // the IAM user policies and the session policies that you pass. This gives you a + // way to further restrict the permissions for a federated user. You cannot use + // session policies to grant more permissions than those that are defined in the + // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User + // Guide. + // + // The resulting credentials can be used to access a resource that has a + // resource-based policy. If that policy specifically references the federated user + // session in the Principal element of the policy, the session has the permissions + // allowed by the policy. These permissions are granted in addition to the + // permissions that are granted by the session policies. + // // The plaintext that you use for both inline and managed session policies can't // exceed 2,048 characters. The JSON policy characters can be any ASCII character // from the space character to the end of the valid character list (\u0020 through // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. An Amazon Web Services conversion compresses the - // passed inline session policy, managed policy ARNs, and session tags into a - // packed binary format that has a separate limit. 
Your request can fail for this - // limit even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags for - // your request are to the upper size limit. + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as a managed session policy. The policies must exist in the same account as - // the IAM user that is requesting federated access. You must pass an inline or - // managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policy Amazon - // Resource Names (ARNs) to use as managed session policies. The plaintext that you - // use for both inline and managed session policies can't exceed 2,048 characters. - // You can provide up to 10 managed policy ARNs. For more information about ARNs, - // see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. This parameter is optional. - // However, if you do not pass any session policies, then the resulting federated - // user session has no permissions. When you pass session policies, the session - // permissions are the intersection of the IAM user policies and the session - // policies that you pass. This gives you a way to further restrict the permissions - // for a federated user. You cannot use session policies to grant more permissions - // than those that are defined in the permissions policy of the IAM user. For more - // information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The resulting credentials can be used to access a - // resource that has a resource-based policy. If that policy specifically - // references the federated user session in the Principal element of the policy, - // the session has the permissions allowed by the policy. These permissions are - // granted in addition to the permissions that are granted by the session policies. + // the IAM user that is requesting federated access. + // + // You must pass an inline or managed [session policy] to this operation. You can pass a single + // JSON policy document to use as an inline session policy. You can also specify up + // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session + // policies. The plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. You can provide up to 10 managed policy + // ARNs. 
For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web Services General + // Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection of + // the IAM user policies and the session policies that you pass. This gives you a + // way to further restrict the permissions for a federated user. You cannot use + // session policies to grant more permissions than those that are defined in the + // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User + // Guide. + // + // The resulting credentials can be used to access a resource that has a + // resource-based policy. If that policy specifically references the federated user + // session in the Principal element of the policy, the session has the permissions + // allowed by the policy. These permissions are granted in addition to the + // permissions that are granted by the session policies. + // // An Amazon Web Services conversion compresses the passed inline session policy, // managed policy ARNs, and session tags into a packed binary format that has a // separate limit. Your request can fail for this limit even if your plaintext // meets the other requirements. The PackedPolicySize response element indicates // by percentage how close the policies and tags for your request are to the upper // size limit. + // + // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType // A list of session tags. Each session tag consists of a key name and an - // associated value. For more information about session tags, see Passing Session - // Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. This parameter is optional. You can pass up to 50 session - // tags. The plaintext session tag keys can’t exceed 128 characters and the values - // can’t exceed 256 characters. For these and additional limits, see IAM and STS - // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. An Amazon Web Services conversion compresses the passed - // inline session policy, managed policy ARNs, and session tags into a packed - // binary format that has a separate limit. Your request can fail for this limit - // even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags for - // your request are to the upper size limit. You can pass a session tag with the - // same key as a tag that is already attached to the user you are federating. When - // you do, session tags override a user tag with the same key. Tag key–value pairs - // are not case sensitive, but case is preserved. This means that you cannot have - // separate Department and department tag keys. Assume that the role has the - // Department = Marketing tag and you pass the department = engineering session - // tag. 
Department and department are not saved as separate tags, and the session - // tag passed in the request takes precedence over the role tag. + // associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User + // Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters and the values can’t exceed 256 + // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user tag + // with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This means + // that you cannot have separate Department and department tag keys. Assume that + // the role has the Department = Marketing tag and you pass the department = + // engineering session tag. Department and department are not saved as separate + // tags, and the session tag passed in the request takes precedence over the role + // tag. + // + // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length Tags []types.Tag noSmithyDocumentSerde } -// Contains the response to a successful GetFederationToken request, including -// temporary Amazon Web Services credentials that can be used to make Amazon Web -// Services requests. +// Contains the response to a successful GetFederationToken request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type GetFederationTokenOutput struct { // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. 
Credentials *types.Credentials // Identifiers for the federated user associated with the credentials (such as @@ -287,6 +342,12 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go index 751fb147d4..c73316a3c0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -15,43 +15,58 @@ import ( // IAM user. The credentials consist of an access key ID, a secret access key, and // a security token. Typically, you use GetSessionToken if you want to use MFA to // protect programmatic calls to specific Amazon Web Services API operations like -// Amazon EC2 StopInstances . MFA-enabled IAM users must call GetSessionToken and -// submit an MFA code that is associated with their MFA device. Using the temporary -// security credentials that the call returns, IAM users can then make programmatic -// calls to API operations that require MFA authentication. An incorrect MFA code -// causes the API to return an access denied error. For a comparison of -// GetSessionToken with the other API operations that produce temporary -// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. No permissions are required for users to perform this -// operation. The purpose of the sts:GetSessionToken operation is to authenticate -// the user using MFA. You cannot use policies to control authentication -// operations. For more information, see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) -// in the IAM User Guide. Session Duration The GetSessionToken operation must be -// called by using the long-term Amazon Web Services security credentials of an IAM -// user. Credentials that are created by IAM users are valid for the duration that -// you specify. This duration can range from 900 seconds (15 minutes) up to a -// maximum of 129,600 seconds (36 hours), with a default of 43,200 seconds (12 -// hours). Credentials based on account credentials can range from 900 seconds (15 -// minutes) up to 3,600 seconds (1 hour), with a default of 1 hour. Permissions The -// temporary security credentials created by GetSessionToken can be used to make -// API calls to any Amazon Web Services service with the following exceptions: +// Amazon EC2 StopInstances . +// +// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that is +// associated with their MFA device. Using the temporary security credentials that +// the call returns, IAM users can then make programmatic calls to API operations +// that require MFA authentication. An incorrect MFA code causes the API to return +// an access denied error. 
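A minimal sketch of the MFA-protected call just described; the virtual MFA device ARN is the documentation's own example, and the token code is a placeholder read from the device:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	out, err := client.GetSessionToken(context.TODO(), &sts.GetSessionTokenInput{
		DurationSeconds: aws.Int32(43200),                                 // the documented default
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // documentation example ARN
		TokenCode:       aws.String("123456"),                             // six digits from the MFA device
	})
	if err != nil {
		// An incorrect MFA code surfaces here as an access denied error.
		log.Fatal(err)
	}
	fmt.Println("session token expires:", out.Credentials.Expiration)
}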
For a comparison of GetSessionToken with the other API + // operations that produce temporary credentials, see [Requesting Temporary Security Credentials] and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. + // + // No permissions are required for users to perform this operation. The purpose of + // the sts:GetSessionToken operation is to authenticate the user using MFA. You + // cannot use policies to control authentication operations. For more information, + // see [Permissions for GetSessionToken] in the IAM User Guide. + // + // # Session Duration + // + // The GetSessionToken operation must be called by using the long-term Amazon Web + // Services security credentials of an IAM user. Credentials that are created by + // IAM users are valid for the duration that you specify. This duration can range + // from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours), + // with a default of 43,200 seconds (12 hours). Credentials based on account + // credentials can range from 900 seconds (15 minutes) up to 3,600 seconds (1 + // hour), with a default of 1 hour. + // + // # Permissions + // + // The temporary security credentials created by GetSessionToken can be used to + // make API calls to any Amazon Web Services service with the following exceptions: + // // - You cannot call any IAM API operations unless MFA authentication // information is included in the request. + // // - You cannot call any STS API except AssumeRole or GetCallerIdentity . // // The credentials that GetSessionToken returns are based on permissions // associated with the IAM user whose credentials were used to call the operation. - // The temporary credentials have the same permissions as the IAM user. Although it - // is possible to call GetSessionToken using the security credentials of an Amazon - // Web Services account root user rather than an IAM user, we do not recommend it. - // If GetSessionToken is called using root user credentials, the temporary - // credentials have root user permissions. For more information, see Safeguard - // your root user credentials and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) - // in the IAM User Guide For more information about using GetSessionToken to - // create temporary credentials, see Temporary Credentials for Users in Untrusted - // Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) - // in the IAM User Guide. + // The temporary credentials have the same permissions as the IAM user. + // + // Although it is possible to call GetSessionToken using the security credentials + // of an Amazon Web Services account root user rather than an IAM user, we do not + // recommend it. If GetSessionToken is called using root user credentials, the + // temporary credentials have root user permissions. For more information, see [Safeguard your root user credentials and don't use them for everyday tasks] in + // the IAM User Guide. + // + // For more information about using GetSessionToken to create temporary + // credentials, see [Temporary Credentials for Users in Untrusted Environments] in the IAM User Guide.
+// +// [Permissions for GetSessionToken]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [Temporary Credentials for Users in Untrusted Environments]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken +// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { if params == nil { params = &GetSessionTokenInput{} @@ -83,10 +98,11 @@ type GetSessionTokenInput struct { // number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name // (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You // can find the device for an IAM user by going to the Amazon Web Services - // Management Console and viewing the user's security credentials. The regex used - // to validate this parameter is a string of characters consisting of upper- and - // lower-case alphanumeric characters with no spaces. You can also include - // underscores or any of the following characters: =,.@:/- + // Management Console and viewing the user's security credentials. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@:/- SerialNumber *string // The value provided by the MFA device, if MFA is required. If any policy @@ -94,22 +110,24 @@ type GetSessionTokenInput struct { // authentication is required, the user must provide a code when requesting a set // of temporary security credentials. A user who fails to provide the code receives // an "access denied" response when requesting resources that require MFA - // authentication. The format for this parameter, as described by its regex - // pattern, is a sequence of six numeric digits. + // authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. TokenCode *string noSmithyDocumentSerde } -// Contains the response to a successful GetSessionToken request, including -// temporary Amazon Web Services credentials that can be used to make Amazon Web -// Services requests. +// Contains the response to a successful GetSessionToken request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type GetSessionTokenOutput struct { // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. 
Credentials *types.Credentials // Metadata pertaining to the operation's result. @@ -173,6 +191,12 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go index 9db5bfd434..e842a7f7e8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -157,7 +157,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go index 5d634ce35c..7e4346ec9f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go @@ -20,8 +20,17 @@ import ( "io" "strconv" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsAwsquery_deserializeOpAssumeRole struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go index d963fd8d19..cbb19c7f66 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go @@ -3,9 +3,11 @@ // Package sts provides the API client, operations, and parameter types for AWS // Security Token Service. // -// Security Token Service Security Token Service (STS) enables you to request -// temporary, limited-privilege credentials for users. This guide provides -// descriptions of the STS API. 
For more information about using this service, see -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) -// . +// # Security Token Service +// +// Security Token Service (STS) enables you to request temporary, +// limited-privilege credentials for users. This guide provides descriptions of the +// STS API. For more information about using this service, see [Temporary Security Credentials]. +// +// [Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go index 32e2d5435f..35305d8976 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go @@ -306,6 +306,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -1045,7 +1056,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -1075,6 +1086,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -1084,7 +1099,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 8bba9b7dc1..c3a970dff4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.28.5" +const goModuleVersion = "1.30.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go index 3dbd993b54..9fe930b8d7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go @@ -94,7 +94,7 @@ var partitionRegexp = struct { AwsUsGov 
*regexp.Regexp }{ - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), @@ -172,6 +172,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ap-southeast-4", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "aws-global", }: endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go index 5c1be79f8c..a9a35881af 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -50,8 +53,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -70,17 +75,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. 
RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -97,8 +105,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -143,6 +152,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go index 097875b279..9573a4b646 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go @@ -65,9 +65,10 @@ func (e *IDPCommunicationErrorException) ErrorCode() string { func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The identity provider (IdP) reported that authentication failed. This might be -// because the claim is invalid. If this error is returned for the -// AssumeRoleWithWebIdentity operation, it can also mean that the claim has expired -// or has been explicitly revoked. +// because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it can +// also mean that the claim has expired or has been explicitly revoked. type IDPRejectedClaimException struct { Message *string @@ -183,11 +184,13 @@ func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { retu // compresses the session policy document, session policy ARNs, and session tags // into a packed binary format that has a separate limit. The error message // indicates by percentage how close the policies and tags are to the upper size -// limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You could receive this error even though you meet other -// defined session policy and session tag limits. For more information, see IAM -// and STS Entity Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. +// limit. For more information, see [Passing Session Tags in STS]in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see [IAM and STS Entity Character Limits]in the IAM User Guide. 
+// +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [IAM and STS Entity Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length type PackedPolicyTooLargeException struct { Message *string @@ -215,9 +218,10 @@ func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. +// console to activate STS in that region. For more information, see [Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region]in the IAM +// User Guide. +// +// [Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html type RegionDisabledException struct { Message *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go index e3701d11d1..dff7a3c2e7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go @@ -11,10 +11,11 @@ import ( // returns. type AssumedRoleUser struct { - // The ARN of the temporary security credentials that are returned from the - // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. + // The ARN of the temporary security credentials that are returned from the AssumeRole + // action. For more information about ARNs and how to use them in policies, see [IAM Identifiers]in + // the IAM User Guide. + // + // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html // // This member is required. Arn *string @@ -61,8 +62,9 @@ type FederatedUser struct { // The ARN that specifies the federated user that is associated with the // credentials. For more information about ARNs and how to use them in policies, - // see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. + // see [IAM Identifiers]in the IAM User Guide. + // + // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html // // This member is required. Arn *string @@ -81,9 +83,10 @@ type FederatedUser struct { type PolicyDescriptorType struct { // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session - // policy for the role. For more information about ARNs, see Amazon Resource Names - // (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. + // policy for the role. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web + // Services General Reference. 
+ // + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html Arn *string noSmithyDocumentSerde @@ -107,23 +110,30 @@ type ProvidedContext struct { // You can pass custom key-value pair attributes when you assume a role or // federate a user. These are called session tags. You can then use the session -// tags to control access to resources. For more information, see Tagging Amazon -// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. +// tags to control access to resources. For more information, see [Tagging Amazon Web Services STS Sessions]in the IAM User +// Guide. +// +// [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html type Tag struct { - // The key for a session tag. You can pass up to 50 session tags. The plain text - // session tag keys can’t exceed 128 characters. For these and additional limits, - // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User + // Guide. + // + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length // // This member is required. Key *string - // The value for a session tag. You can pass up to 50 session tags. The plain text - // session tag values can’t exceed 256 characters. For these and additional limits, - // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User + // Guide. + // + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length // // This member is required. Value *string diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index b8d6561a4e..96d57df805 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,19 @@ +# Release (2024-08-14) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.20.4 + * **Dependency Update**: Bump minimum Go version to 1.21. + +# Release (2024-06-27) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.20.3 + * **Bug Fix**: Fix encoding/cbor test overflow on x86. + +# Release (2024-03-29) + +* No change notes available for this release. 
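The reflowed doc comments above describe STS's typed exceptions. One way a caller might detect a specific fault such as `RegionDisabledException` is `errors.As`; a hedged sketch, with a placeholder role ARN:

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	_, err = client.AssumeRole(context.Background(), &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName: aws.String("example-session"),
	})

	// The SDK surfaces service faults as typed errors in the types package.
	var rde *types.RegionDisabledException
	if errors.As(err, &rde) {
		log.Printf("STS is not activated in this region: %s", aws.ToString(rde.Message))
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}
```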
+ # Release (2024-02-21) ## Module Highlights diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md index c374f69283..08df74589a 100644 --- a/vendor/github.com/aws/smithy-go/README.md +++ b/vendor/github.com/aws/smithy-go/README.md @@ -1,19 +1,21 @@ -## Smithy Go +# Smithy Go [![Go Build Status](https://github.com/aws/smithy-go/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/go.yml)[![Codegen Build Status](https://github.com/aws/smithy-go/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/codegen.yml) -[Smithy](https://smithy.io/) code generators for Go. +[Smithy](https://smithy.io/) code generators for Go and the accompanying smithy-go runtime. + +The smithy-go runtime requires a minimum version of Go 1.20. **WARNING: All interfaces are subject to change.** -## Can I use this? +## Can I use the code generators? In order to generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java), such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html), in order to generate transport mechanisms and serialization/deserialization code ("serde") accordingly. -The code generator does not currently support any protocols out of the box, +The code generator does not currently support any protocols out of the box other than the new `smithy.protocols#rpcv2Cbor`, therefore the useability of this project on its own is currently limited. Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html) exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). We are @@ -21,6 +23,70 @@ tracking the movement of those out of the SDK into smithy-go in [#458](https://github.com/aws/smithy-go/issues/458), but there's currently no timeline for doing so. +## Plugins + +This repository implements the following Smithy build plugins: + +| ID | GAV prefix | Description | +|----|------------|-------------| +| `go-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go client code generation for Smithy models. | +| `go-server-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go server code generation for Smithy models. | + +**NOTE: Build plugins are not currently published to mavenCentral. You must publish to mavenLocal to make the build plugins visible to the Smithy CLI. The artifact version is currently fixed at 0.1.0.** + +## `go-codegen` + +### Configuration + +[`GoSettings`](codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/GoSettings.java) +contains all of the settings enabled from `smithy-build.json` and helper +methods and types. The up-to-date list of top-level properties enabled for +`go-client-codegen` can be found in `GoSettings::from()`. + +| Setting | Type | Required | Description | +|-----------------|---------|----------|-----------------------------------------------------------------------------------------------------------------------------| +| `service` | string | yes | The Shape ID of the service for which to generate the client. | +| `module` | string | yes | Name of the module in `generated.json` (and `go.mod` if `generateGoMod` is enabled) and `doc.go`. | +| `generateGoMod` | boolean | | Whether to generate a default `go.mod` file. The default value is `false`. 
| +| `goDirective` | string | | [Go directive](https://go.dev/ref/mod#go-mod-file-go) of the module. The default value is the minimum supported Go version. | + +### Supported protocols + +| Protocol | Notes | +|----------|-------| +| [`smithy.protocols#rpcv2Cbor`](https://smithy.io/2.0/additional-specs/protocols/smithy-rpc-v2.html) | Event streaming not yet implemented. | + +### Example + +This example applies the `go-codegen` build plugin to the Smithy quickstart +example created from `smithy init`: + +```json +{ + "version": "1.0", + "sources": [ + "models" + ], + "maven": { + "dependencies": [ + "software.amazon.smithy.go:smithy-go-codegen:0.1.0" + ] + }, + "plugins": { + "go-codegen": { + "service": "example.weather#Weather", + "module": "github.com/example/weather", + "generateGoMod": true, + "goDirective": "1.20" + } + } +} +``` + +## `go-server-codegen` + +This plugin is a work-in-progress and is currently undocumented. + ## License This project is licensed under the Apache-2.0 License. diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index 341392e10f..33355b22c8 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.20.1" +const goModuleVersion = "1.20.4" diff --git a/vendor/github.com/buildkite/agent/v3/LICENSE.txt b/vendor/github.com/buildkite/agent/v3/LICENSE.txt index 327a143d6f..9435376d87 100644 --- a/vendor/github.com/buildkite/agent/v3/LICENSE.txt +++ b/vendor/github.com/buildkite/agent/v3/LICENSE.txt @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2014-2018 Buildkite Pty Ltd +Copyright (c) 2014 Buildkite Pty Ltd Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/buildkite/agent/v3/api/annotations.go b/vendor/github.com/buildkite/agent/v3/api/annotations.go index 8bb0868823..90f5649e57 100644 --- a/vendor/github.com/buildkite/agent/v3/api/annotations.go +++ b/vendor/github.com/buildkite/agent/v3/api/annotations.go @@ -7,10 +7,11 @@ import ( // Annotation represents a Buildkite Agent API Annotation type Annotation struct { - Body string `json:"body,omitempty"` - Context string `json:"context,omitempty"` - Style string `json:"style,omitempty"` - Append bool `json:"append,omitempty"` + Body string `json:"body,omitempty"` + Context string `json:"context,omitempty"` + Style string `json:"style,omitempty"` + Append bool `json:"append,omitempty"` + Priority int `json:"priority,omitempty"` } // Annotate a build in the Buildkite UI diff --git a/vendor/github.com/buildkite/agent/v3/api/artifacts.go b/vendor/github.com/buildkite/agent/v3/api/artifacts.go index ffd9749ede..3554971165 100644 --- a/vendor/github.com/buildkite/agent/v3/api/artifacts.go +++ b/vendor/github.com/buildkite/agent/v3/api/artifacts.go @@ -47,7 +47,7 @@ type Artifact struct { UploadInstructions *ArtifactUploadInstructions `json:"-"` // A specific Content-Type to use on upload - ContentType string `json:"-"` + ContentType string `json:"content_type,omitempty"` } type ArtifactBatch struct { diff --git a/vendor/github.com/buildkite/agent/v3/api/client.go b/vendor/github.com/buildkite/agent/v3/api/client.go index 42aa57bec0..643911cded 100644 --- a/vendor/github.com/buildkite/agent/v3/api/client.go +++ 
b/vendor/github.com/buildkite/agent/v3/api/client.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "net" "net/http" "net/http/httputil" "net/url" @@ -24,7 +23,7 @@ import ( ) const ( - defaultEndpoint = "https://agent.buildkite.com/" + defaultEndpoint = "https://agent.buildkite.com/v3" defaultUserAgent = "buildkite-agent/api" ) @@ -48,6 +47,9 @@ type Config struct { // The http client used, leave nil for the default HTTPClient *http.Client + + // optional TLS configuration primarily used for testing + TLSConfig *tls.Config } // A Client manages communication with the Buildkite Agent API. @@ -74,28 +76,28 @@ func NewClient(l logger.Logger, conf Config) *Client { httpClient := conf.HTTPClient if conf.HTTPClient == nil { - t := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DisableCompression: false, - DisableKeepAlives: false, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 30 * time.Second, - } + + // use the default transport as it is optimized and configured for http2 + // and will avoid accidents in the future + tr := http.DefaultTransport.(*http.Transport).Clone() if conf.DisableHTTP2 { - t.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) + tr.ForceAttemptHTTP2 = false + tr.TLSNextProto = make(map[string]func(authority string, c *tls.Conn) http.RoundTripper) + // The default TLSClientConfig has h2 in NextProtos, so the negotiated TLS connection will assume h2 support. + // see https://github.com/golang/go/issues/50571 + tr.TLSClientConfig.NextProtos = []string{"http/1.1"} + } + + if conf.TLSConfig != nil { + tr.TLSClientConfig = conf.TLSConfig } httpClient = &http.Client{ Timeout: 60 * time.Second, Transport: &authenticatedTransport{ Token: conf.Token, - Delegate: t, + Delegate: tr, }, } } @@ -292,9 +294,11 @@ func (c *Client) doRequest(req *http.Request, v any) (*Response, error) { io.Copy(w, resp.Body) } else { if strings.Contains(req.Header.Get("Content-Type"), "application/msgpack") { - err = errors.New("Msgpack not supported") - } else { - err = json.NewDecoder(resp.Body).Decode(v) + return response, errors.New("Msgpack not supported") + } + + if err = json.NewDecoder(resp.Body).Decode(v); err != nil { + return response, fmt.Errorf("failed to decode JSON response: %w", err) } } } diff --git a/vendor/github.com/buildkite/agent/v3/api/github_code_access_token.go b/vendor/github.com/buildkite/agent/v3/api/github_code_access_token.go new file mode 100644 index 0000000000..2ec224b29d --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/github_code_access_token.go @@ -0,0 +1,63 @@ +package api + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/buildkite/roko" +) + +type GithubCodeAccessTokenRequest struct { + RepoURL string `json:"repo_url,omitempty"` +} + +type GithubCodeAccessTokenResponse struct { + Token string `json:"token,omitempty"` +} + +func (c *Client) GenerateGithubCodeAccessToken(ctx context.Context, repoURL, jobID string) (string, *Response, error) { + u := fmt.Sprintf("jobs/%s/github_code_access_token", railsPathEscape(jobID)) + + req, err := c.newRequest(ctx, http.MethodPost, u, GithubCodeAccessTokenRequest{RepoURL: repoURL}) + if err != nil { + return "", nil, err + } + + r := roko.NewRetrier( + roko.WithMaxAttempts(3), + roko.WithStrategy(roko.Constant(5*time.Second)), + ) + + var g GithubCodeAccessTokenResponse + + resp, err := roko.DoFunc(ctx, r, func(r *roko.Retrier) 
(*Response, error) { + resp, err := c.doRequest(req, &g) + if err == nil { + return resp, nil + } + + if resp != nil { + if !IsRetryableStatus(resp) { + r.Break() + return resp, err + } + + if resp.Header.Get("Retry-After") != "" { + retryAfter, errParseDuration := time.ParseDuration(resp.Header.Get("Retry-After") + "s") + if errParseDuration == nil { + r.SetNextInterval(retryAfter) + } + } + } + + return resp, err + }) + + if err != nil { + return "", resp, err + } + + return g.Token, resp, nil +} diff --git a/vendor/github.com/buildkite/agent/v3/api/secrets.go b/vendor/github.com/buildkite/agent/v3/api/secrets.go new file mode 100644 index 0000000000..3bb48dd23a --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/secrets.go @@ -0,0 +1,40 @@ +package api + +import ( + "context" + "path" +) + +// GetSecretRequest represents a request to read a secret from the Buildkite Agent API. +type GetSecretRequest struct { + Key string + JobID string +} + +// Secret represents a secret read from the Buildkite Agent API. +type Secret struct { + Key string `json:"key"` + Value string `json:"value"` + UUID string `json:"uuid"` +} + +// GetSecret reads a secret from the Buildkite Agent API. +func (c *Client) GetSecret(ctx context.Context, req *GetSecretRequest) (*Secret, *Response, error) { + // the endpoint is /jobs/:job_id/secrets?key=:key + httpReq, err := c.newRequest(ctx, "GET", path.Join("jobs", railsPathEscape(req.JobID), "secrets"), nil) + if err != nil { + return nil, nil, err + } + + q := httpReq.URL.Query() + q.Add("key", req.Key) + httpReq.URL.RawQuery = q.Encode() + + secret := &Secret{} + resp, err := c.doRequest(httpReq, secret) + if err != nil { + return nil, resp, err + } + + return secret, resp, nil +} diff --git a/vendor/github.com/buildkite/agent/v3/logger/log.go b/vendor/github.com/buildkite/agent/v3/logger/log.go index 6807b5b8a9..04ac386a28 100644 --- a/vendor/github.com/buildkite/agent/v3/logger/log.go +++ b/vendor/github.com/buildkite/agent/v3/logger/log.go @@ -15,7 +15,7 @@ import ( "time" "github.com/buildkite/agent/v3/version" - "golang.org/x/crypto/ssh/terminal" + "golang.org/x/term" ) const ( @@ -231,11 +231,7 @@ func ColorsSupported() bool { } // Colors can only be shown if STDOUT is a terminal - if terminal.IsTerminal(int(os.Stdout.Fd())) { - return true - } - - return false + return term.IsTerminal(int(os.Stdout.Fd())) } type JSONPrinter struct { diff --git a/vendor/github.com/buildkite/agent/v3/version/VERSION b/vendor/github.com/buildkite/agent/v3/version/VERSION index 9b57023ab0..0f15d1494c 100644 --- a/vendor/github.com/buildkite/agent/v3/version/VERSION +++ b/vendor/github.com/buildkite/agent/v3/version/VERSION @@ -1 +1 @@ -3.62.0 +3.81.0 diff --git a/vendor/github.com/buildkite/go-pipeline/internal/env/env.go b/vendor/github.com/buildkite/go-pipeline/internal/env/env.go new file mode 100644 index 0000000000..bf25c1caca --- /dev/null +++ b/vendor/github.com/buildkite/go-pipeline/internal/env/env.go @@ -0,0 +1,81 @@ +// Package env contains data structures and methods to assist with managing environment variables. +package env + +import ( + "runtime" + "strings" +) + +// Options are functional options for creating a new Env. +type Options func(*Env) + +// CaseSensitive is an option that overrides previous case-sensitivity whether +// set by default or as an `Option`. 
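The client.go change above replaces a hand-rolled http.Transport with a clone of http.DefaultTransport, and disables HTTP/2 by clearing TLSNextProto and pinning ALPN to http/1.1 (see golang/go#50571, where a default TLS config still advertises h2). A standalone sketch of the same pattern, not the agent's code:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

// newHTTP1Client returns an *http.Client based on the default transport,
// with HTTP/2 negotiation fully disabled.
func newHTTP1Client() *http.Client {
	// Clone the default transport so we inherit sane proxy, dialer, and
	// connection-pool settings instead of rebuilding them by hand.
	tr := http.DefaultTransport.(*http.Transport).Clone()

	// Disabling ForceAttemptHTTP2 alone is not enough: a TLS config that
	// advertises "h2" via ALPN may still negotiate HTTP/2. Clear both.
	tr.ForceAttemptHTTP2 = false
	tr.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
	tr.TLSClientConfig = &tls.Config{NextProtos: []string{"http/1.1"}}

	return &http.Client{Transport: tr}
}

func main() {
	resp, err := newHTTP1Client().Get("https://example.com")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("negotiated protocol:", resp.Proto) // expect HTTP/1.1
}
```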
+func CaseSensitive(actuallyCaseSensitive bool) Options { + return func(e *Env) { + e.caseInsensitive = !actuallyCaseSensitive + } +} + +// FromMap is an option that sets the Env to have the key-values pairs from the `source` map. +// The key-value pair will be inserted with the case sensitivity of the Env, which by default is +// case-insensitive on Windows and case-sensitive on other platforms. +// Note that random map iteration will cause the result to be non-deterministic if there are +// multiple keys in `source`, which are equivalent under case insensitivity, that have different +// corresponding values. +func FromMap(source map[string]string) Options { + return func(e *Env) { + if e.env == nil { + e.env = make(map[string]string, len(source)) + } + for k, v := range source { + e.Set(k, v) + } + } +} + +// Env represents a map of environment variables. By default, the keys are case-insensitive on +// Windows and case-sensitive on other platforms. If they are case-insensitive, the original casing +// is lost. +type Env struct { + env map[string]string + caseInsensitive bool +} + +// New return a new `Env`. By default, it is case-insensitive on Windows and case-sensitive on +// other platforms. See `Options` for available options. +func New(opts ...Options) *Env { + e := &Env{ + caseInsensitive: runtime.GOOS == "windows", + } + for _, o := range opts { + o(e) + } + if e.env == nil { + e.env = make(map[string]string) + } + return e +} + +// Set adds an environment variable to the Env or updates an existing one by overwriting its value. +// If the Env was created as case-insensitive, the keys are case normalised. +func (e *Env) Set(key, value string) { + e.env[e.normaliseCase(key)] = value +} + +// Get returns the value of an environment variable and whether it was found. +// If the Env was created as case-insensitive, the key's case is normalised. +func (e *Env) Get(key string) (string, bool) { + if e == nil { + return "", false + } + v, found := e.env[e.normaliseCase(key)] + return v, found +} + +func (e *Env) normaliseCase(key string) string { + if e.caseInsensitive { + return strings.ToUpper(key) + } + return key +} diff --git a/vendor/github.com/buildkite/go-pipeline/ordered/map.go b/vendor/github.com/buildkite/go-pipeline/ordered/map.go index eb521d7469..3059a8441e 100644 --- a/vendor/github.com/buildkite/go-pipeline/ordered/map.go +++ b/vendor/github.com/buildkite/go-pipeline/ordered/map.go @@ -284,7 +284,6 @@ func (m *Map[K, V]) Range(f func(k K, v V) error) error { func (m *Map[K, V]) MarshalJSON() ([]byte, error) { // NB: writes to b don't error, but JSON encoding could error. 
var b bytes.Buffer - enc := json.NewEncoder(&b) b.WriteRune('{') first := true err := m.Range(func(k K, v V) error { @@ -293,11 +292,18 @@ func (m *Map[K, V]) MarshalJSON() ([]byte, error) { b.WriteRune(',') } first = false - if err := enc.Encode(k); err != nil { + bk, err := json.Marshal(k) + if err != nil { return err } + b.Write(bk) b.WriteRune(':') - return enc.Encode(v) + bv, err := json.Marshal(v) + if err != nil { + return err + } + b.Write(bv) + return nil }) if err != nil { return nil, err diff --git a/vendor/github.com/buildkite/go-pipeline/ordered/unmarshal.go b/vendor/github.com/buildkite/go-pipeline/ordered/unmarshal.go index cb75b677f5..4358e6a021 100644 --- a/vendor/github.com/buildkite/go-pipeline/ordered/unmarshal.go +++ b/vendor/github.com/buildkite/go-pipeline/ordered/unmarshal.go @@ -6,6 +6,8 @@ import ( "reflect" "strings" + "github.com/buildkite/go-pipeline/warning" + "gopkg.in/yaml.v3" ) @@ -25,6 +27,11 @@ type Unmarshaler interface { // UnmarshalOrdered should unmarshal src into the implementing value. src // will generally be one of *Map[string, any], []any, or a "scalar" built-in // type. + // If UnmarshalOrdered returns a non-nil error that is not a warning, the + // whole unmarshaling process may halt at that point and report that error + // (wrapped). + // Unlike other errors, returning a warning lets unmarshalling continue + // so that all warnings can be printed together at the end. UnmarshalOrdered(src any) error } @@ -157,14 +164,19 @@ func Unmarshal(src, dst any) error { return fmt.Errorf("%w: cannot unmarshal []any into %T", ErrIncompatibleTypes, dst) } etype := sdst.Type().Elem() // E = Type of the slice's elements - for _, a := range tsrc { + var warns []error + for i, a := range tsrc { x := reflect.New(etype) // *E - if err := Unmarshal(a, x.Interface()); err != nil { - return err + err := Unmarshal(a, x.Interface()) + if w := warning.As(err); w != nil { + warns = append(warns, w.Wrapf("while unmarshaling item at index %d of %d", i, len(tsrc))) + } else if err != nil { + return fmt.Errorf("unmarshaling item at index %d of %d: %w", i, len(tsrc), err) } sdst = reflect.Append(sdst, x.Elem()) } vdst.Elem().Set(sdst) + return warning.Wrap(warns...) } case string: @@ -263,14 +275,22 @@ func (m *Map[K, V]) decodeInto(target any) error { } valueType := mapType.Elem() - return tm.Range(func(k string, v any) error { + var warns []error + if err := tm.Range(func(k string, v any) error { nv := reflect.New(valueType) - if err := Unmarshal(v, nv.Interface()); err != nil { - return err + err := Unmarshal(v, nv.Interface()) + if w := warning.As(err); w != nil { + warns = append(warns, w.Wrapf("while unmarshaling value for key %q", k)) + } else if err != nil { + return fmt.Errorf("unmarshaling value for key %q: %w", k, err) } + innerValue.SetMapIndex(reflect.ValueOf(k), nv.Elem()) return nil - }) + }); err != nil { + return err + } + return warning.Wrap(warns...) case reflect.Struct: // The rest of the method is concerned with this. @@ -285,6 +305,8 @@ func (m *Map[K, V]) decodeInto(target any) error { var inlineField reflect.StructField outlineKeys := make(map[string]struct{}) + var warns []error + for _, field := range fields { // Skip non-exported fields. This is conventional *and* correct. if !field.IsExported() { @@ -341,13 +363,16 @@ func (m *Map[K, V]) decodeInto(target any) error { // Now load value into the field recursively. // Get a pointer to the field. This works because target is a pointer. 
ptrToField := innerValue.FieldByIndex(field.Index).Addr() - if err := Unmarshal(value, ptrToField.Interface()); err != nil { + err := Unmarshal(value, ptrToField.Interface()) + if w := warning.As(err); w != nil { + warns = append(warns, w.Wrapf("while unmarshaling the value for key %q into struct field %q", key, field.Name)) + } else if err != nil { return err } } if inlineField.Index == nil { - return nil + return warning.Wrap(warns...) } // The rest is handling the ",inline" field. // We support any field that Unmarshal can unmarshal tm into. @@ -367,12 +392,20 @@ func (m *Map[K, V]) decodeInto(target any) error { // If the inline map contains nothing, then don't bother setting it. if temp.Len() == 0 { - return nil + return warning.Wrap(warns...) } - return Unmarshal(temp, inlinePtr.Interface()) + err := Unmarshal(temp, inlinePtr.Interface()) + if w := warning.As(err); w != nil { + warns = append(warns, w.Wrapf("while unmarshaling the remaining input into an inline field of type %T", inlinePtr.Interface())) + return warning.Wrap(warns...) + } + return err } +// Compile-time check that *Map[string,any] is an Unmarshaler +var _ Unmarshaler = (*MapSA)(nil) + // UnmarshalOrdered unmarshals a value into this map. // K must be string, src must be *Map[string, any], and each value in src must // be unmarshallable into *V. @@ -391,12 +424,19 @@ func (m *Map[K, V]) UnmarshalOrdered(src any) error { return fmt.Errorf("%w: src type %T, want *Map[string, any]", ErrIncompatibleTypes, src) } - return tsrc.Range(func(k string, v any) error { + var warns []error + if err := tsrc.Range(func(k string, v any) error { var dv V - if err := Unmarshal(v, &dv); err != nil { - return err + err := Unmarshal(v, &dv) + if w := warning.As(err); w != nil { + warns = append(warns, w.Wrapf("while unmarshaling the value for key %q", k)) + } else if err != nil { + return fmt.Errorf("unmarshaling value for key %q: %w", k, err) } tm.Set(k, dv) return nil - }) + }); err != nil { + return err + } + return warning.Wrap(warns...) } diff --git a/vendor/github.com/buildkite/go-pipeline/parser.go b/vendor/github.com/buildkite/go-pipeline/parser.go index 75dd0907d7..a6fe2927f9 100644 --- a/vendor/github.com/buildkite/go-pipeline/parser.go +++ b/vendor/github.com/buildkite/go-pipeline/parser.go @@ -9,7 +9,21 @@ import ( "gopkg.in/yaml.v3" ) +// Options are functional options for creating a new Env. +type Options func(*Pipeline) + // Parse parses a pipeline. It does not apply interpolation. +// Warnings are passed through the err return: +// +// p, err := Parse(src) +// if w := warning.As(err); w != nil { +// // Here are some warnings that should be shown +// log.Printf("*Warning* - pipeline is not fully parsed:\n%v", w) +// } else if err != nil { +// // Parse could not understand src at all +// return err +// } +// // Use p func Parse(src io.Reader) (*Pipeline, error) { // First get yaml.v3 to give us a raw document (*yaml.Node). n := new(yaml.Node) @@ -24,6 +38,7 @@ func Parse(src io.Reader) (*Pipeline, error) { // with when handling different structural representations of the same // configuration. Then decode _that_ into a pipeline. 
p := new(Pipeline) + return p, ordered.Unmarshal(n, p) } diff --git a/vendor/github.com/buildkite/go-pipeline/pipeline.go b/vendor/github.com/buildkite/go-pipeline/pipeline.go index 860bfbb0e2..a535d49a65 100644 --- a/vendor/github.com/buildkite/go-pipeline/pipeline.go +++ b/vendor/github.com/buildkite/go-pipeline/pipeline.go @@ -3,7 +3,9 @@ package pipeline import ( "fmt" + "github.com/buildkite/go-pipeline/internal/env" "github.com/buildkite/go-pipeline/ordered" + "github.com/buildkite/go-pipeline/warning" "github.com/buildkite/interpolate" ) @@ -28,19 +30,27 @@ func (p *Pipeline) MarshalJSON() ([]byte, error) { // UnmarshalOrdered unmarshals the pipeline from either []any (a legacy // sequence of steps) or *ordered.MapSA (a modern pipeline configuration). func (p *Pipeline) UnmarshalOrdered(o any) error { + var warns []error + switch o := o.(type) { case *ordered.MapSA: // A pipeline can be a mapping. // Wrap in a secret type to avoid infinite recursion between this method // and ordered.Unmarshal. type wrappedPipeline Pipeline - if err := ordered.Unmarshal(o, (*wrappedPipeline)(p)); err != nil { + err := ordered.Unmarshal(o, (*wrappedPipeline)(p)) + if w := warning.As(err); w != nil { + warns = append(warns, w) + } else if err != nil { return fmt.Errorf("unmarshaling Pipeline: %w", err) } case []any: // A pipeline can be a sequence of steps. - if err := ordered.Unmarshal(o, &p.Steps); err != nil { + err := ordered.Unmarshal(o, &p.Steps) + if w := warning.As(err); w != nil { + warns = append(warns, w) + } else if err != nil { return fmt.Errorf("unmarshaling steps: %w", err) } @@ -51,68 +61,77 @@ func (p *Pipeline) UnmarshalOrdered(o any) error { // Ensure Steps is never nil. Server side expects a sequence. if p.Steps == nil { p.Steps = Steps{} + warns = append(warns, warning.New("pipeline contains no steps")) } - return nil + return warning.Wrap(warns...) } -// needed to plug a reggo map into a interpolate.Env -type mapGetter map[string]string - -func (m mapGetter) Get(key string) (string, bool) { - v, ok := m[key] - return v, ok +// InterpolationEnv contains environment variables that may be interpolated into +// a pipeline. Users may define an equivalence between environment variable name, for example +// the environment variable names may case-insensitive. +type InterpolationEnv interface { + Get(name string) (string, bool) + Set(name string, value string) } -// Interpolate interpolates variables defined in both env and p.Env into the -// pipeline. +// Interpolate interpolates variables defined in both interpolationEnv and p.Env into the pipeline. // More specifically, it does these things: -// - Interpolate pipeline.Env and copy the results into env to apply later. +// - Interpolate pipeline.Env and copy the results into interpolationEnv, provided they don't +// conflict, to apply later. // - Interpolate any string value in the rest of the pipeline. -func (p *Pipeline) Interpolate(env map[string]string) error { - if env == nil { - env = map[string]string{} +// +// By default if an environment variable exists in both the runtime and pipeline env +// we will substitute with the pipeline env IF the pipeline env is defined first. +// Setting the preferRuntimeEnv option to true instead prefers the runtime environment to pipeline +// environment variables when both are defined. 
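Pipeline.Interpolate now takes the InterpolationEnv interface rather than a raw map. A minimal, case-sensitive map adapter as an illustration only; real callers would likely use the vendored env package, which also handles Windows case-insensitivity:

```go
package main

import (
	"fmt"
	"strings"

	pipeline "github.com/buildkite/go-pipeline"
)

// mapEnv is a toy, case-sensitive InterpolationEnv backed by a plain map.
type mapEnv map[string]string

func (m mapEnv) Get(name string) (string, bool) { v, ok := m[name]; return v, ok }
func (m mapEnv) Set(name, value string)         { m[name] = value }

func main() {
	src := `
env:
  GREETING: hello ${NAME}
steps:
  - command: echo "${GREETING}"
`
	p, err := pipeline.Parse(strings.NewReader(src))
	if err != nil {
		fmt.Println("parse:", err)
		return
	}

	// preferRuntimeEnv=false: the pipeline env block wins when both
	// environments define the same variable.
	if err := p.Interpolate(mapEnv{"NAME": "world"}, false); err != nil {
		fmt.Println("interpolate:", err)
		return
	}

	cs := p.Steps[0].(*pipeline.CommandStep)
	fmt.Println(cs.Command) // echo "hello world"
}
```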
+func (p *Pipeline) Interpolate(interpolationEnv InterpolationEnv, preferRuntimeEnv bool) error { + if interpolationEnv == nil { + interpolationEnv = env.New() } // Preprocess any env that are defined in the top level block and place them // into env for later interpolation into the rest of the pipeline. - if err := p.interpolateEnvBlock(env); err != nil { + if err := p.interpolateEnvBlock(interpolationEnv, preferRuntimeEnv); err != nil { return err } - tf := envInterpolator{env: mapGetter(env)} + tf := envInterpolator{env: interpolationEnv} // Recursively go through the rest of the pipeline and perform environment // variable interpolation on strings. Interpolation is performed in-place. if err := interpolateSlice(tf, p.Steps); err != nil { return err } + return interpolateMap(tf, p.RemainingFields) } // interpolateEnvBlock runs interpolate.Interpolate on each pair in p.Env, -// interpolating with the variables defined in env, and then adding the -// results back into both p.Env and env. Each environment variable can -// be interpolated into later environment variables, making the input ordering -// of p.Env potentially important. -func (p *Pipeline) interpolateEnvBlock(env map[string]string) error { +// interpolating with the variables defined in interpolationEnv, and then adding the +// results back into p.Env. Since each environment variable in p.Env can +// be interpolated into later environment variables, we also add the results +// to interpolationEnv, making the input ordering of p.Env potentially important. +func (p *Pipeline) interpolateEnvBlock(interpolationEnv InterpolationEnv, preferRuntimeEnv bool) error { return p.Env.Range(func(k, v string) error { // We interpolate both keys and values. - intk, err := interpolate.Interpolate(mapGetter(env), k) + intk, err := interpolate.Interpolate(interpolationEnv, k) if err != nil { return err } // v is always a string in this case. - intv, err := interpolate.Interpolate(mapGetter(env), v) + intv, err := interpolate.Interpolate(interpolationEnv, v) if err != nil { return err } p.Env.Replace(k, intk, intv) - // Bonus part for the env block! - // Add the results back into env. - env[intk] = intv + // If the variable already existed and we prefer the runtime environment then don't overwrite it + if _, exists := interpolationEnv.Get(intk); !(preferRuntimeEnv && exists) { + interpolationEnv.Set(intk, intv) + } + return nil }) } diff --git a/vendor/github.com/buildkite/go-pipeline/plugin.go b/vendor/github.com/buildkite/go-pipeline/plugin.go index 218e7b5b29..01eb964403 100644 --- a/vendor/github.com/buildkite/go-pipeline/plugin.go +++ b/vendor/github.com/buildkite/go-pipeline/plugin.go @@ -35,13 +35,27 @@ func (p *Plugin) MarshalJSON() ([]byte, error) { return json.Marshal(o) } -// MarshalYAML returns the plugin in either "one-item map" form. Plugin sources +// MarshalYAML returns the plugin in "one-item map" form. Plugin sources // are marshalled into "full" form. Plugins originally specified as a single // string (no config, only source) are canonicalised into "one-item map" with -// config nil. +// config nil. Configs that are zero-length maps are canonicalised to nil. func (p *Plugin) MarshalYAML() (any, error) { + cfg := p.Config + switch x := cfg.(type) { + case map[string]any: + if len(x) == 0 { + cfg = nil + } + + case []any: + // Should be invalid, but a different part of the process should be + // responsible for checking and complaining. 
+ if len(x) == 0 { + cfg = nil + } + } return map[string]any{ - p.FullSource(): p.Config, + p.FullSource(): cfg, }, nil } diff --git a/vendor/github.com/buildkite/go-pipeline/step_command.go b/vendor/github.com/buildkite/go-pipeline/step_command.go index b7761538fa..b49ed4c2d0 100644 --- a/vendor/github.com/buildkite/go-pipeline/step_command.go +++ b/vendor/github.com/buildkite/go-pipeline/step_command.go @@ -36,6 +36,7 @@ type CommandStep struct { Env map[string]string `yaml:"env,omitempty"` Signature *Signature `yaml:"signature,omitempty"` Matrix *Matrix `yaml:"matrix,omitempty"` + Cache *Cache `yaml:"cache,omitempty"` // RemainingFields stores any other top-level mapping items so they at least // survive an unmarshal-marshal round-trip. diff --git a/vendor/github.com/buildkite/go-pipeline/step_command_cache.go b/vendor/github.com/buildkite/go-pipeline/step_command_cache.go new file mode 100644 index 0000000000..6427bb3a48 --- /dev/null +++ b/vendor/github.com/buildkite/go-pipeline/step_command_cache.go @@ -0,0 +1,70 @@ +package pipeline + +import ( + "encoding/json" + "fmt" + + "github.com/buildkite/go-pipeline/ordered" +) + +var _ interface { + json.Marshaler + ordered.Unmarshaler +} = (*Cache)(nil) + +var ( + errUnsupportedCacheType = fmt.Errorf("unsupported type for cache") +) + +// Cache models the cache settings for a given step +type Cache struct { + Disabled bool `yaml:",omitempty"` + Name string `yaml:"name,omitempty"` + Paths []string `yaml:"paths,omitempty"` + Size string `yaml:"size,omitempty"` + + RemainingFields map[string]any `yaml:",inline"` +} + +// MarshalJSON marshals the step to JSON. Special handling is needed because +// yaml.v3 has "inline" but encoding/json has no concept of it. +func (c *Cache) MarshalJSON() ([]byte, error) { + if c.Disabled { + return json.Marshal(false) + } + return inlineFriendlyMarshalJSON(c) +} + +// UnmarshalOrdered unmarshals from the following types: +// - string: a single path +// - []string: multiple paths +// - ordered.Map: a map containing paths, among potentially other things +func (c *Cache) UnmarshalOrdered(o any) error { + switch v := o.(type) { + case bool: + if !v { + c.Disabled = true + } + case string: + c.Paths = []string{v} + + case []any: + s := make([]string, 0, len(v)) + if err := ordered.Unmarshal(v, &s); err != nil { + return err + } + + c.Paths = s + + case *ordered.MapSA: + type wrappedCache Cache + if err := ordered.Unmarshal(o, (*wrappedCache)(c)); err != nil { + return err + } + + default: + return fmt.Errorf("%w: %T", errUnsupportedCacheType, v) + } + + return nil +} diff --git a/vendor/github.com/buildkite/go-pipeline/step_command_matrix.go b/vendor/github.com/buildkite/go-pipeline/step_command_matrix.go index a89c2426fb..e5e6b62fc7 100644 --- a/vendor/github.com/buildkite/go-pipeline/step_command_matrix.go +++ b/vendor/github.com/buildkite/go-pipeline/step_command_matrix.go @@ -50,6 +50,12 @@ type Matrix struct { RemainingFields map[string]any `yaml:",inline"` } +// IsEmpty reports whether the matrix is empty (is nil, or has no setup, +// no adjustments, and no other data within it). +func (m *Matrix) IsEmpty() bool { + return m == nil || (len(m.Setup) == 0 && len(m.Adjustments) == 0 && len(m.RemainingFields) == 0) +} + // UnmarshalOrdererd unmarshals from either []any or *ordered.MapSA. 
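The new Cache type above accepts a boolean, a single path, a list of paths, or a mapping. A sketch of parsing the mapping form with go-pipeline; the comment lists the other shapes UnmarshalOrdered handles:

```go
package main

import (
	"fmt"
	"strings"

	pipeline "github.com/buildkite/go-pipeline"
)

func main() {
	// Each of these is a valid `cache` value for a command step:
	//   cache: false                      -> Cache{Disabled: true}
	//   cache: "node_modules"             -> Cache{Paths: ["node_modules"]}
	//   cache: ["a", "b"]                 -> Cache{Paths: ["a", "b"]}
	//   cache: {name: n, paths: [a], ...} -> fields unmarshaled by name
	src := `
steps:
  - command: npm test
    cache:
      name: node-deps
      paths: ["node_modules"]
      size: 20g
`
	p, err := pipeline.Parse(strings.NewReader(src))
	if err != nil {
		fmt.Println("parse:", err)
		return
	}
	cs, ok := p.Steps[0].(*pipeline.CommandStep)
	if !ok {
		fmt.Println("unexpected step type")
		return
	}
	fmt.Printf("cache name=%q paths=%v size=%q\n", cs.Cache.Name, cs.Cache.Paths, cs.Cache.Size)
}
```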
func (m *Matrix) UnmarshalOrdered(o any) error { switch src := o.(type) { diff --git a/vendor/github.com/buildkite/go-pipeline/step_input.go b/vendor/github.com/buildkite/go-pipeline/step_input.go index cfe769be12..6688227d85 100644 --- a/vendor/github.com/buildkite/go-pipeline/step_input.go +++ b/vendor/github.com/buildkite/go-pipeline/step_input.go @@ -15,16 +15,26 @@ type InputStep struct { Contents map[string]any `yaml:",inline"` } +// MarshalJSON marshals s.Scalar if it's not empty, otherwise s.Contents if that +// is not empty. If both s.Scalar and s.Contents are empty, it reports an error. func (s *InputStep) MarshalJSON() ([]byte, error) { - if s.Scalar != "" { - return json.Marshal(s.Scalar) + o, err := s.MarshalYAML() + if err != nil { + return nil, err } + return json.Marshal(o) +} +// MarshalYAML returns s.Scalar if it's not empty, otherwise s.Contents if that +// is not empty. If both s.Scalar and s.Contents are empty, it reports an error. +func (s *InputStep) MarshalYAML() (any, error) { + if s.Scalar != "" { + return s.Scalar, nil + } if len(s.Contents) == 0 { - return []byte{}, errors.New("empty input step") + return nil, errors.New("empty input step") } - - return json.Marshal(s.Contents) + return s.Contents, nil } func (s InputStep) interpolate(tf stringTransformer) error { diff --git a/vendor/github.com/buildkite/go-pipeline/step_scalar.go b/vendor/github.com/buildkite/go-pipeline/step_scalar.go index 721ac0254f..7c9b579f9f 100644 --- a/vendor/github.com/buildkite/go-pipeline/step_scalar.go +++ b/vendor/github.com/buildkite/go-pipeline/step_scalar.go @@ -1,6 +1,8 @@ package pipeline -import "fmt" +import ( + "github.com/buildkite/go-pipeline/warning" +) // In the buildkite pipeline yaml, some step types (broadly, wait steps, input steps and block steps) can be represented // either by a scalar string (ie "wait") or by a mapping with keys and values and such @@ -29,13 +31,19 @@ import "fmt" var validStepScalars = []string{"wait", "waiter", "block", "input", "manual"} +// NewScalarStep returns a Step that can be represented as a single string. +// Currently these are "wait", "block", "input", and some deprecated variations +// ("waiter", "manual"). If it is any other string, NewScalarStep returns an +// UnknownStep containing an error wrapping ErrUnknownStepType. func NewScalarStep(s string) (Step, error) { switch s { case "wait", "waiter": return &WaitStep{Scalar: s}, nil + case "block", "input", "manual": return &InputStep{Scalar: s}, nil + default: - return nil, fmt.Errorf("unmarshaling step: unsupported scalar step type %q, want one of %v", s, validStepScalars) + return &UnknownStep{Contents: s}, warning.Newf("%w %q, want one of %v", ErrUnknownStepType, s, validStepScalars) } } diff --git a/vendor/github.com/buildkite/go-pipeline/step_unknown.go b/vendor/github.com/buildkite/go-pipeline/step_unknown.go index 0b01615154..20bf202c20 100644 --- a/vendor/github.com/buildkite/go-pipeline/step_unknown.go +++ b/vendor/github.com/buildkite/go-pipeline/step_unknown.go @@ -2,8 +2,16 @@ package pipeline import ( "encoding/json" + + "github.com/buildkite/go-pipeline/ordered" ) +// Compile-time check that *UnknownStep satisfies necessary interfaces +var _ interface { + Step + ordered.Unmarshaler +} = (*UnknownStep)(nil) + // UnknownStep models any step we don't know how to represent in this version. // When future step types are added, they should be parsed with more specific // types. 
UnknownStep is present to allow older parsers to preserve newer @@ -13,12 +21,12 @@ type UnknownStep struct { } // MarshalJSON marshals the contents of the step. -func (u UnknownStep) MarshalJSON() ([]byte, error) { +func (u *UnknownStep) MarshalJSON() ([]byte, error) { return json.Marshal(u.Contents) } // MarshalYAML returns the contents of the step. -func (u UnknownStep) MarshalYAML() (any, error) { +func (u *UnknownStep) MarshalYAML() (any, error) { return u.Contents, nil } diff --git a/vendor/github.com/buildkite/go-pipeline/step_wait.go b/vendor/github.com/buildkite/go-pipeline/step_wait.go index 5c7ad23213..e37d5d4c11 100644 --- a/vendor/github.com/buildkite/go-pipeline/step_wait.go +++ b/vendor/github.com/buildkite/go-pipeline/step_wait.go @@ -1,8 +1,6 @@ package pipeline -import ( - "encoding/json" -) +import "encoding/json" // See the comment in step_scalar.go. @@ -14,18 +12,26 @@ type WaitStep struct { Contents map[string]any `yaml:",inline"` } -// MarshalJSON marshals a wait step as "wait" if w is empty, or as the step's scalar if it's set. -// If scalar is empty, it marshals as the remaining fields +// MarshalJSON marshals a wait step as "wait" if the step is empty, or as the +// s.Scalar if it is not empty, or as s.Contents. func (s *WaitStep) MarshalJSON() ([]byte, error) { - if s.Scalar != "" { - return json.Marshal(s.Scalar) + o, err := s.MarshalYAML() + if err != nil { + return nil, err } + return json.Marshal(o) +} +// MarshalYAML returns a wait step as "wait" if the step is empty, or as the +// s.Scalar if it is not empty, or as s.Contents. +func (s *WaitStep) MarshalYAML() (any, error) { + if s.Scalar != "" { + return s.Scalar, nil + } if len(s.Contents) == 0 { - return json.Marshal("wait") + return "wait", nil } - - return json.Marshal(s.Contents) + return s.Contents, nil } func (s *WaitStep) interpolate(tf stringTransformer) error { diff --git a/vendor/github.com/buildkite/go-pipeline/steps.go b/vendor/github.com/buildkite/go-pipeline/steps.go index c9c1c36e1a..7e99530125 100644 --- a/vendor/github.com/buildkite/go-pipeline/steps.go +++ b/vendor/github.com/buildkite/go-pipeline/steps.go @@ -1,11 +1,22 @@ package pipeline import ( + "errors" "fmt" "github.com/buildkite/go-pipeline/ordered" + "github.com/buildkite/go-pipeline/warning" ) +// Sentinel errors that can appear when falling back to UnknownStep. +var ( + ErrStepTypeInference = errors.New("cannot infer step type") + ErrUnknownStepType = errors.New("unknown step type") +) + +// Compile-time check that *Steps is an ordered.Unmarshaler. +var _ ordered.Unmarshaler = (*Steps)(nil) + // Steps contains multiple steps. It is useful for unmarshaling step sequences, // since it has custom logic for determining the correct step type. type Steps []Step @@ -27,14 +38,18 @@ func (s *Steps) UnmarshalOrdered(o any) error { if *s == nil { *s = make(Steps, 0, len(sl)) } - for _, st := range sl { + + var warns []error + for i, st := range sl { step, err := unmarshalStep(st) - if err != nil { + if w := warning.As(err); w != nil { + warns = append(warns, w.Wrapf("while unmarshaling step %d of %d", i+1, len(sl))) + } else if err != nil { return err } *s = append(*s, step) } - return nil + return warning.Wrap(warns...) 
} func (s Steps) interpolate(tf stringTransformer) error { @@ -45,11 +60,7 @@ func (s Steps) interpolate(tf stringTransformer) error { func unmarshalStep(o any) (Step, error) { switch o := o.(type) { case string: - step, err := NewScalarStep(o) - if err != nil { - return &UnknownStep{Contents: o}, nil - } - return step, nil + return NewScalarStep(o) case *ordered.MapSA: return stepFromMap(o) @@ -59,53 +70,79 @@ func unmarshalStep(o any) (Step, error) { } } +// stepFromMap parses a step (that was originally a YAML mapping). func stepFromMap(o *ordered.MapSA) (Step, error) { sType, hasType := o.Get("type") + var warns []error var step Step var err error + if hasType { sTypeStr, ok := sType.(string) if !ok { return nil, fmt.Errorf("unmarshaling step: step's `type` key was %T (value %v), want string", sType, sType) } - step, err = stepByType(sTypeStr) - if err != nil { - return nil, fmt.Errorf("unmarshaling step: %w", err) - } } else { step, err = stepByKeyInference(o) - if err != nil { - return nil, fmt.Errorf("unmarshaling step: %w", err) - } + } + + if err != nil { + step = new(UnknownStep) + warns = append(warns, err) } // Decode the step (into the right step type). - if err := ordered.Unmarshal(o, step); err != nil { + err = ordered.Unmarshal(o, step) + if w := warning.As(err); w != nil { + warns = append(warns, w) + } else if err != nil { // Hmm, maybe we picked the wrong kind of step? - return &UnknownStep{Contents: o}, nil + // Downgrade this error to a warning. + step = &UnknownStep{Contents: o} + warns = append(warns, warning.Wrapf(err, "fell back using unknown type of step due to an unmarshaling error")) } - return step, nil + return step, warning.Wrap(warns...) } +// stepByType returns a new empty step with a type corresponding to the "type" +// field. Unrecognised type values result in an UnknownStep containing an +// error wrapping ErrUnknownStepType. func stepByType(sType string) (Step, error) { switch sType { case "command", "script": return new(CommandStep), nil + case "wait", "waiter": return &WaitStep{Contents: map[string]any{}}, nil + case "block", "input", "manual": return &InputStep{Contents: map[string]any{}}, nil + case "trigger": return new(TriggerStep), nil + case "group": // as far as i know this doesn't happen, but it's here for completeness return new(GroupStep), nil + default: - return nil, fmt.Errorf("unknown step type %q", sType) + return nil, fmt.Errorf("%w %q", ErrUnknownStepType, sType) } } +// stepByKeyInference returns a new empty step with a type based on some heuristic rules + +// (first rule wins): +// +// - command, commands, plugins -> CommandStep +// - wait, waiter -> WaitStep +// - block, input, manual -> InputStep +// - trigger: TriggerStep +// - group: GroupStep. +// +// Failure to infer a step type results in an UnknownStep containing an +// error wrapping ErrStepTypeInference. 
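With the scalar-step change above, an unrecognised step string no longer aborts parsing: it is kept as an UnknownStep and reported through a warning. A sketch of consuming that behaviour, assuming the warning semantics described in the doc comments:

```go
package main

import (
	"fmt"
	"strings"

	pipeline "github.com/buildkite/go-pipeline"
	"github.com/buildkite/go-pipeline/warning"
)

func main() {
	// "waitress" is not a recognised scalar step type, so parsing keeps it
	// as an UnknownStep and reports a warning rather than failing outright.
	src := `
steps:
  - command: make test
  - waitress
`
	p, err := pipeline.Parse(strings.NewReader(src))
	if w := warning.As(err); w != nil {
		fmt.Printf("parsed with warnings:\n%v\n", w)
	} else if err != nil {
		fmt.Println("hard error:", err)
		return
	}
	fmt.Printf("got %d steps\n", len(p.Steps)) // both steps survive
}
```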
func stepByKeyInference(o *ordered.MapSA) (Step, error) { switch { case o.Contains("command") || o.Contains("commands") || o.Contains("plugins"): @@ -126,6 +163,9 @@ func stepByKeyInference(o *ordered.MapSA) (Step, error) { return new(GroupStep), nil default: - return new(UnknownStep), nil + inferrableKeys := []string{ + "command", "commands", "plugins", "wait", "waiter", "block", "input", "manual", "trigger", "group", + } + return nil, fmt.Errorf("%w: need one of %v", ErrStepTypeInference, inferrableKeys) } } diff --git a/vendor/github.com/buildkite/go-pipeline/warning/warning.go b/vendor/github.com/buildkite/go-pipeline/warning/warning.go new file mode 100644 index 0000000000..d36c0507c4 --- /dev/null +++ b/vendor/github.com/buildkite/go-pipeline/warning/warning.go @@ -0,0 +1,143 @@ +// Package warning provides a warning error type (a "soft" multi-error). +// +// Example usage that catches and produces warnings: +// +// func unmarshalThings(src []any) ([]thing, error) { +// var dst []thing +// var warns []error +// for i, a := range src { +// // Unmarshal can produce warnings or errors +// var x thing +// err := Unmarshal(a, &x) +// if w := warning.As(err); w != nil { +// warns = append(warns, w.Wrapf("while unmarshaling item %d of %d", i, len(src))) +// } else if err != nil { +// return err +// } +// dst = append(dst, x) +// } +// // Since it only had warnings, return both dst and a wrapper warning. +// return dst, warning.Wrap(warns...) +// } +package warning + +import ( + "fmt" + "strings" +) + +// Warning is a kind of error that exists so that parsing/processing functions +// can produce warnings that do not abort part-way, but can still be reported +// via the error interface and logged. +// Warning can wrap zero or more errors (that may also be warnings). +// A Warning with zero wrapped errors is considered equivalent to a nil warning. +type Warning struct { + message string + errs []error +} + +// New creates a new warning that wraps one or more errors. Note that msg is *not* a format string. +func New(msg string, errs ...error) *Warning { return &Warning{message: msg, errs: errs} } + +// Newf creates a new warning that wraps a single error created with fmt.Errorf. +// (This enables the use of %w for wrapping other errors.) +func Newf(f string, x ...any) *Warning { return &Warning{errs: []error{fmt.Errorf(f, x...)}} } + +// Wrap returns a new warning with no message that wraps one or more errors. +// If passed no errors, it returns nil. +// If passed a single error that is a warning, it returns that warning. +// This is a convenient way to downgrade an error to a warning, and also handle +// cases where no warnings occurred. +func Wrap(errs ...error) error { + if len(errs) == 0 { + return nil + } + if len(errs) == 1 && Is(errs[0]) { + return errs[0] + } + return &Warning{errs: errs} +} + +// Wrapf wraps a single error in a warning, with a message created using a format +// string. +func Wrapf(err error, f string, x ...any) error { + return &Warning{message: fmt.Sprintf(f, x...), errs: []error{err}} +} + +// Is reports if err is a *Warning. nil is not considered to be a warning. +// This is distinct from errors.Is because an error is a warning only if the top +// level of the tree is a warning (not if any of the recursively-unwrapped +// errors are). +func Is(err error) bool { + if err == nil { + return false + } + _, ok := err.(*Warning) + return ok +} + +// As checks if err is a *Warning, and returns it. If it is nil or not a +// *Warning, it returns nil. 
This is distinct from errors.As because an error +// is a warning only if the top level of the tree is a warning (not if any of +// the recursively-unwrapped errors are). +func As(err error) *Warning { + if err == nil { + return nil + } + w, _ := err.(*Warning) + return w +} + +// Error returns a string that generally looks like: +// +// w.message +// ↳ w.errs[0].Error() +// ↳ w.errs[1].Error() +// ↳ ... +// +// If the warning has no message, it is omitted. If there is no message and also +// only one child error, that child error's Error() is returned directly. +// Otherwise, Error prepends indentation to sub-error messages that span +// multiple lines to make them print nicely. +func (w *Warning) Error() string { + if w.message == "" && len(w.errs) == 1 { + return w.errs[0].Error() + } + b := new(strings.Builder) + if w.message != "" { + fmt.Fprintln(b, w.message) + } + for _, err := range w.errs { + if err == nil { + continue + } + fmt.Fprint(b, " ↳ ") + for i, line := range strings.Split(err.Error(), "\n") { + if i > 0 { + fmt.Fprint(b, " ") + } + fmt.Fprintf(b, "%s\n", line) + } + } + return strings.TrimSuffix(b.String(), "\n") +} + +// Unwrap returns all errors directly wrapped by this warning. +func (w *Warning) Unwrap() []error { return w.errs } + +// Wrapf wraps this warning in a new warning with a message using a format. +// If w doesn't already have a message, it sets the message and returns w. +func (w *Warning) Wrapf(f string, x ...any) *Warning { + msg := fmt.Sprintf(f, x...) + if w.message == "" { + w.message = msg + return w + } + return &Warning{message: msg, errs: []error{w}} +} + +// Append appends errs as child errors of this warning. +func (w *Warning) Append(errs ...error) *Warning { + w.errs = append(w.errs, errs...) + return w +} diff --git a/vendor/github.com/buildkite/interpolate/README.md b/vendor/github.com/buildkite/interpolate/README.md index ec2790efd4..114e21590f 100644 --- a/vendor/github.com/buildkite/interpolate/README.md +++ b/vendor/github.com/buildkite/interpolate/README.md @@ -1,6 +1,7 @@ Interpolate =========== +[![Build status](https://badge.buildkite.com/3d9081230a965a20d7b68d66564644febdada7f968cc6f9c9c.svg)](https://buildkite.com/buildkite/interpolate) [![GoDoc](https://godoc.org/github.com/buildkite/interpolate?status.svg)](https://godoc.org/github.com/buildkite/interpolate) A golang library for parameter expansion (like `${BLAH}` or `$BLAH`) in strings from environment variables. An implementation of [POSIX Parameter Expansion](http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_06_02), plus some other basic operations that you'd expect in a shell scripting environment [like bash](https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html). @@ -53,7 +54,10 @@ func main() {
Use the substring of parameter after offset of given length. A negative offset must be separated from the colon with a space, and will select from the end of the string. If the offset is out of bounds, an empty string will be substituted. If the length is greater than the length of the string, the entire string will be returned.
${parameter:?[word]}
-
Indicate Error if Null or Unset. If parameter is unset or null, the expansion of word (or a message indicating it is unset if word is omitted) shall be returned as an error.
+
Indicate Error if Null or Unset. If parameter is unset or null, the expansion of word (or a message indicating it is unset if word is omitted) shall be returned as an error.
+ +
$$parameter or \$parameter or $${expression} or \${expression}
+
An escaped interpolation. It will not be interpolated, but it will be unescaped by a call to interpolate.Interpolate().
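+
+To make the new escape behaviour concrete, here's a minimal sketch. It assumes the package's top-level `Interpolate` and `NewSliceEnv` helpers used elsewhere in this README; the variable name is illustrative only:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/buildkite/interpolate"
+)
+
+func main() {
+	env := interpolate.NewSliceEnv([]string{"NAME=world"})
+
+	// $NAME is expanded. $$NAME and \$NAME are escaped expansions:
+	// they are not expanded, only unescaped to a literal "$NAME".
+	out, err := interpolate.Interpolate(env, `hello $NAME, raw $$NAME, raw \$NAME`)
+	if err != nil {
+		fmt.Println("error:", err)
+		return
+	}
+	fmt.Println(out) // hello world, raw $NAME, raw $NAME
+}
+```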
## License diff --git a/vendor/github.com/buildkite/interpolate/interpolate.go b/vendor/github.com/buildkite/interpolate/interpolate.go index 90d5ee3cc5..7a24fd812a 100644 --- a/vendor/github.com/buildkite/interpolate/interpolate.go +++ b/vendor/github.com/buildkite/interpolate/interpolate.go @@ -82,6 +82,19 @@ func (e UnsetValueExpansion) Expand(env Env) (string, error) { return val, nil } +// EscapedExpansion is an expansion whose evaluation is deferred until later (usually to a later process) +type EscapedExpansion struct { + Identifier string +} + +func (e EscapedExpansion) Identifiers() []string { + return []string{"$" + e.Identifier} +} + +func (e EscapedExpansion) Expand(Env) (string, error) { + return "$" + e.Identifier, nil +} + // SubstringExpansion returns a substring (or slice) of the env type SubstringExpansion struct { Identifier string diff --git a/vendor/github.com/buildkite/interpolate/parser.go b/vendor/github.com/buildkite/interpolate/parser.go index fa7e84340a..e02cc7d4c4 100644 --- a/vendor/github.com/buildkite/interpolate/parser.go +++ b/vendor/github.com/buildkite/interpolate/parser.go @@ -23,18 +23,20 @@ import ( // and structs named the same reading the string rune by rune (peekRune and nextRune) /* -EscapedBackslash = "\\" -EscapedDollar = ( "\$" | "$$") -Identifier = letter { letters | digit | "_" } -Expansion = "$" ( Identifier | Brace ) -Brace = "{" Identifier [ Identifier BraceOperation ] "}" -Text = { EscapedBackslash | EscapedDollar | all characters except "$" } -Expression = { Text | Expansion } -EmptyValue = ":-" { Expression } -UnsetValue = "-" { Expression } -Substring = ":" number [ ":" number ] -Required = "?" { Expression } -Operation = EmptyValue | UnsetValue | Substring | Required +EscapedBackslash = "\\" +Identifier = letter { letters | digit | "_" } +EscapedDollar = ( "\$" | "$$" ) +EscapedExpansion = EscapedDollar ( Identifier | Brace ) +UnescapedExpansion = "$" ( Identifier | Brace ) +Expansion = UnescapedExpansion | EscapedExpansion +Brace = "{" Identifier [ Identifier BraceOperation ] "}" +Text = { EscapedBackslash | EscapedDollar | all characters except "$" } +Expression = { Text | Expansion } +EmptyValue = ":-" { Expression } +UnsetValue = "-" { Expression } +Substring = ":" number [ ":" number ] +Required = "?"
{ Expression } +Operation = EmptyValue | UnsetValue | Substring | Required */ const ( @@ -75,9 +77,17 @@ func (p *Parser) parseExpression(stop ...rune) (Expression, error) { p.pos += 2 expr = append(expr, ExpressionItem{Text: `\\`}) continue - } else if strings.HasPrefix(p.input[p.pos:], `\$`) || strings.HasPrefix(p.input[p.pos:], `$$`) { + } + + if strings.HasPrefix(p.input[p.pos:], `\$`) || strings.HasPrefix(p.input[p.pos:], `$$`) { p.pos += 2 - expr = append(expr, ExpressionItem{Text: `$`}) + + ee, err := p.parseEscapedExpansion() + if err != nil { + return nil, err + } + + expr = append(expr, ee) continue } @@ -112,6 +122,31 @@ func (p *Parser) parseExpression(stop ...rune) (Expression, error) { return expr, nil } +func (p *Parser) parseEscapedExpansion() (ExpressionItem, error) { + next := p.peekRune() + switch { + case next == '{': + // if it's an escaped brace expansion, (eg $${MY_COOL_VAR:-5}) consume text until the close brace + id := p.scanUntil(func(r rune) bool { return r == '}' }) + id = id + string(p.nextRune()) // we know that the next rune is a close brace, chuck it on the end + return ExpressionItem{Expansion: EscapedExpansion{Identifier: id}}, nil + + case unicode.IsLetter(next): + // it's an escaped identifier (eg $$MY_COOL_VAR) + id, err := p.scanIdentifier() + if err != nil { + return ExpressionItem{}, err + } + + return ExpressionItem{Expansion: EscapedExpansion{Identifier: id}}, nil + + default: + // there's no identifier or brace afterward, so it's probably a literal escaped dollar sign + // just return a text item with the dollar sign + return ExpressionItem{Text: "$"}, nil + } +} + func (p *Parser) parseExpansion() (Expansion, error) { if c := p.nextRune(); c != '$' { return nil, fmt.Errorf("Expected expansion to start with $, got %c", c) diff --git a/vendor/github.com/buildkite/roko/.gitignore b/vendor/github.com/buildkite/roko/.gitignore new file mode 100644 index 0000000000..3b735ec4a8 --- /dev/null +++ b/vendor/github.com/buildkite/roko/.gitignore @@ -0,0 +1,21 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work diff --git a/vendor/github.com/buildkite/roko/LICENSE.md b/vendor/github.com/buildkite/roko/LICENSE.md new file mode 100644 index 0000000000..38365bb21d --- /dev/null +++ b/vendor/github.com/buildkite/roko/LICENSE.md @@ -0,0 +1,7 @@ +Copyright 2022 Buildkite Pty. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/buildkite/roko/README.md b/vendor/github.com/buildkite/roko/README.md new file mode 100644 index 0000000000..84b4c31801 --- /dev/null +++ b/vendor/github.com/buildkite/roko/README.md @@ -0,0 +1,208 @@ +# Roko
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/buildkite/roko.svg)](https://pkg.go.dev/github.com/buildkite/roko)
+[![Build status](https://badge.buildkite.com/f546789abb4ecc61654da783bdcc05965789a4834958b178ec.svg)](https://buildkite.com/buildkite/roko)
+
+A Lightweight, Configurable, Easy-to-use Retry Library for Go
+
+## Installation
+
+To install, run
+
+```
+go get -u github.com/buildkite/roko
+```
+
+This will add Roko to your go.mod file, and make it available for use in your project.
+
+## Usage
+
+Roko allows you to configure how your application should respond to operations that can fail. Its core interface is the **Retrier**, which allows you to tell your application how, and under what circumstances, it should retry an operation.
+
+Let's say we have some operation that we want to perform:
+```Go
+func canFail() error {
+ // ...
+}
+```
+
+and if it fails, we want it to retry every 5 seconds, and give up after 3 tries. To do this, we can configure a retrier, and then perform our operation using the `roko.Retrier.Do()` function:
+```Go
+r := roko.NewRetrier(
+ roko.WithMaxAttempts(3), // Only try 3 times, then give up
+ roko.WithStrategy(roko.Constant(5 * time.Second)), // Wait 5 seconds between attempts
+)
+
+err := r.Do(func(r *roko.Retrier) error {
+ return canFail()
+})
+```
+
+In this situation, we'll try to run the `canFail` function, and if it returns an error, we'll wait 5 seconds, then try again. If `canFail` returns an error after hitting its max attempt count, `r.Do` will return that error. If `canFail` succeeds (i.e. it doesn't return an error), `r.Do` will return nil.
+
+### Giving up early
+
+Sometimes, an error that your operation returns might not be recoverable, so we don't want to retry it. In this case, we can use the `roko.Retrier.Break` function. `Break()` instructs the retrier to halt after this run - note that it **doesn't immediately halt operation**.
+
+```Go
+r := roko.NewRetrier(
+ roko.WithMaxAttempts(3), // Only try 3 times, then give up
+ roko.WithStrategy(roko.Constant(5 * time.Second)), // Wait 5 seconds between attempts
+)
+
+err := r.Do(func(r *roko.Retrier) error {
+ err := canFail()
+ if errors.Is(err, errorUnrecoverable) {
+ r.Break() // Give up, we can't recover from this error
+ // Returning nil here would be appropriate too, if we don't want to handle this error further
+ }
+ return err // We still need to return from this function; Break() doesn't halt this callback
+})
+```
+
+In this example, if `canFail()` returns an unrecoverable error, the result returned by the `r.Do()` call is the unrecoverable error.
+
+### Never give up!
+
+Alternatively (or as well as!), you might want your retrier to never give up, and continue trying until it eventually succeeds. Roko can facilitate this through the `TryForever()` option.
+
+```Go
+r := roko.NewRetrier(
+ roko.TryForever(),
+ roko.WithStrategy(roko.Constant(5 * time.Second)), // Wait 5 seconds between attempts
+)
+
+err := r.Do(func(r *roko.Retrier) error {
+ return canFail()
+})
+```
+
+This will try to perform `canFail()` until it eventually succeeds.
+
+Note that the `Break()` method mentioned above still works when `TryForever()` is enabled - this allows you to still exit when an unrecoverable error comes along.
+
+### Jitter
+
+In order to avoid a thundering herd problem, roko can be configured to add jitter to its retry interval calculations. When jitter is used, the interval calculator will add a random length of time up to one second to each interval calculation.
+
+```Go
+r := roko.NewRetrier(
+ roko.WithMaxAttempts(3), // Only try 3 times, then give up
+ roko.WithJitter(), // Add up to a second of jitter
+ roko.WithStrategy(roko.Constant(5 * time.Second)), // Wait 5ish seconds between attempts
+)
+
+err := r.Do(func(r *roko.Retrier) error {
+ return canFail()
+})
+```
+
+In this example, everything is the same as the first example, but instead of always waiting 5 seconds, the retrier will wait for a random interval between 5 and 6 seconds. This can help reduce resource contention.
+
+### Exponential Backoff
+
+If a constant retry strategy isn't to your liking, roko can be configured to use exponential backoff instead, based on the number of attempts that have occurred so far:
+
+```Go
+r := roko.NewRetrier(
+ roko.WithMaxAttempts(5), // Only try 5 times, then give up
+ roko.WithStrategy(roko.Exponential(2*time.Second, 0)), // Wait (2 ^ attemptCount) + 0 seconds between attempts
+)
+
+err := r.Do(func(r *roko.Retrier) error {
+ return canFail()
+})
+```
+
+In this case, the amount of time the retrier will wait between attempts depends on how many attempts have passed - the first wait will be 2^0 == 1 second, then 2^1 == 2 seconds, then 2^2 == 4 seconds, and so on and so forth.
+
+The second argument to the `roko.Exponential()` function is a constant adjustment - roko will add this number to the calculated exponent.
+
+### Using a custom strategy
+
+If the two retry strategies built into roko (`Constant` and `Exponential`) aren't sufficient, you can define your own - the `roko.WithStrategy` function will accept anything that returns a tuple of `(roko.Strategy, string)`. For example, we could implement a custom `Linear` strategy, that multiplies the attempt count by a fixed number:
+```Go
+func Linear(gradient float64, yIntercept float64) (roko.Strategy, string) {
+ return func(r *roko.Retrier) time.Duration {
+ return time.Duration(((gradient * float64(r.AttemptCount())) + yIntercept)) * time.Second
+ }, "linear" // The second element of the return tuple is the name of the strategy
+}
+
+err := roko.NewRetrier(
+ roko.WithMaxAttempts(3), // Only try 3 times, then give up
+ roko.WithStrategy(Linear(0.5, 5.0)), // Wait 5 seconds + half of the attempt count seconds
+).Do(func(r *roko.Retrier) error {
+ return canFail()
+})
+```
+
+### Manually setting the next interval
+
+Sometimes you only know the desired interval after each try, e.g. a rate-limited API may include a `Retry-After` header. For these cases, the `SetNextInterval(time.Duration)` method can be used. It will apply only to the next interval, and then revert to the configured strategy unless called again on the next attempt.
+
+```Go
+// manually specify interval during each try, defaulting to 10 seconds
+roko.NewRetrier(
+ roko.WithStrategy(roko.Constant(10 * time.Second)),
+ roko.WithMaxAttempts(10),
+).Do(func(r *roko.Retrier) error {
+
+ response := apiCall() // may be rate limited
+
+ if err := response.HTTPError(); err != nil {
+ if response.Status == HttpTooManyRequests {
+ if retryAfter, err := strconv.Atoi(response.Header("Retry-After")); err == nil {
+
+ r.SetNextInterval(time.Duration(retryAfter) * time.Second) // respect the API
+
+ }
+ }
+ return err
+ }
+ return nil
+})
+```
+
+### Retries and Testing
+
+To speed up tests, roko can be configured with a custom sleep function:
+
+```Go
+err := roko.NewRetrier(
+ roko.WithStrategy(roko.Constant(50000 * time.Hour)), // Wait a very long time between attempts...
+ roko.WithSleepFunc(func(time.Duration) {}), // ...but don't actually sleep
+ roko.WithMaxAttempts(3),
+).Do(func(r *roko.Retrier) error {
+ return canFail()
+})
+```
+
+The actual function passed to `WithSleepFunc()` is arbitrary, but using a no-op is probably going to be the most useful.
+
+For deterministically-generated jitter, the Retrier also accepts a `*rand.Rand`:
+```Go
+err := roko.NewRetrier(
+ roko.WithStrategy(roko.Constant(5 * time.Second)),
+ roko.WithRand(rand.New(rand.NewSource(12345))), // Generate the same jitters every time, using a seeded random number generator
+ roko.WithMaxAttempts(3),
+ roko.WithJitter(),
+).Do(func(r *roko.Retrier) error {
+ return canFail()
+})
+```
+
+The random number generator is only used for jitter, so it only makes sense to pass one if you're using jitter.
+
+## What's in a name?
+
+Roko is named after [Josevata Rokocoko](https://en.wikipedia.org/wiki/Joe_Rokocoko), a Fijian-New Zealand rugby player, and one of the best to ever do it. He scored a lot of tries, thus, he's a re-trier.
+
+Depending on who you ask, it's also [the owner of a basilisk](https://rationalwiki.org/wiki/Roko%27s_basilisk).
+
+## Contributing
+
+By all means, please contribute! We'd love to have your input. If you run into a bug, feel free to open an issue, and if you find missing functionality, please don't hesitate to open a PR. If you have a weird and wonderful retry strategy you'd like to add, we'd love to see it.
+
+## Looking for a great CI provider? Look no further.
+
+[Buildkite](https://buildkite.com) is a platform for running fast, secure, and scalable CI pipelines on your own infrastructure.
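+
+## Generic helpers
+
+The vendored `retrier.go` below also ships generic `DoFunc`/`DoFunc2`/`DoFunc3` helpers that this README doesn't cover. A minimal sketch of `DoFunc` with a value-returning operation (`User` and `fetchUser` are hypothetical, used for illustration only):
+
+```Go
+ctx := context.Background()
+r := roko.NewRetrier(
+ roko.WithMaxAttempts(3),
+ roko.WithStrategy(roko.Constant(2 * time.Second)),
+)
+
+// DoFunc retries the callback and returns its last value and error,
+// so the operation can return data without closing over outer variables.
+user, err := roko.DoFunc(ctx, r, func(r *roko.Retrier) (User, error) {
+ return fetchUser(ctx, "user-42") // fetchUser is a hypothetical operation that can fail
+})
+```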
diff --git a/vendor/github.com/buildkite/roko/retrier.go b/vendor/github.com/buildkite/roko/retrier.go new file mode 100644 index 0000000000..2c6f9333f7 --- /dev/null +++ b/vendor/github.com/buildkite/roko/retrier.go @@ -0,0 +1,354 @@ +package roko
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "math/rand"
+ "time"
+)
+
+var defaultRandom = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+const jitterInterval = 1000 * time.Millisecond
+
+type Retrier struct {
+ maxAttempts int
+ attemptCount int
+ jitter bool
+ forever bool
+ rand *rand.Rand
+
+ breakNext bool
+ sleepFunc func(time.Duration)
+
+ intervalCalculator Strategy
+ strategyType string
+ manualInterval *time.Duration
+}
+
+type Strategy func(*Retrier) time.Duration
+
+const (
+ constantStrategy = "constant"
+ exponentialStrategy = "exponential"
+)
+
+// Constant returns a strategy that always returns the same value, the interval passed in as an arg to the function.
+// Semantically, when this is used with a roko.Retrier, it means that the retrier will always wait the given
+// duration before retrying
+func Constant(interval time.Duration) (Strategy, string) {
+ if interval < 0 {
+ panic("constant retry strategies must have a non-negative interval")
+ }
+
+ return func(r *Retrier) time.Duration {
+ return interval + r.Jitter()
+ }, constantStrategy
+}
+
+// Exponential returns a strategy that increases exponentially based on the number of attempts the retrier has made.
+// It uses the calculation: adjustment + (base ** attempts) + jitter
+func Exponential(base, adjustment time.Duration) (Strategy, string) {
+ if base < 1*time.Second {
+ panic("exponential retry strategies must have a base of at least 1 second")
+ }
+
+ return func(r *Retrier) time.Duration {
+ baseSeconds := int(base / time.Second)
+ exponentSeconds := math.Pow(float64(baseSeconds), float64(r.attemptCount))
+ exponent := time.Duration(exponentSeconds) * time.Second
+
+ return adjustment + exponent + r.Jitter()
+ }, exponentialStrategy
+}
+
+// ExponentialSubsecond is an exponential backoff using milliseconds as a base unit,
+// and so handles sub-second intervals.
+//
+// Formula is:
+// delayMilliseconds = initialMilliseconds ** (retries/16 + 1)
+
+// The 16 exponent-divisor is arbitrarily chosen to scale curves nicely for a reasonable range
+// of initial delays and number of attempts (e.g. 1 second, 10 attempts).
+//
+// Examples of small, medium and large initial durations growing over 10 attempts:
+//
+// 100ms → 133ms → 177ms → 237ms → 316ms → 421ms → 562ms → 749ms → 1000ms
+// 1.0s → 1.5s → 2.4s → 3.7s → 5.6s → 8.7s → 13.3s → 20.6s → 31.6s
+// 5s → 9s → 14s → 25s → 42s → 72s → 120s → 208s → 354s
+func ExponentialSubsecond(initial time.Duration) (Strategy, string) {
+ if initial < 1*time.Millisecond {
+ panic("ExponentialSubsecond retry strategies must have an initial delay of at least 1 millisecond")
+ }
+
+ return func(r *Retrier) time.Duration {
+ result := math.Pow(float64(initial/time.Millisecond), float64(r.attemptCount)/16+1.0)
+
+ return time.Duration(result)*time.Millisecond + r.Jitter()
+ }, exponentialStrategy
+}
+
+type retrierOpt func(*Retrier)
+
+// WithMaxAttempts sets the maximum number of attempts that a retrier will make
+func WithMaxAttempts(maxAttempts int) retrierOpt {
+ return func(r *Retrier) {
+ r.maxAttempts = maxAttempts
+ }
+}
+
+func WithRand(rand *rand.Rand) retrierOpt {
+ return func(r *Retrier) {
+ r.rand = rand
+ }
+}
+
+// WithStrategy sets the retry strategy that the retrier will use to determine how long to wait between retries
+func WithStrategy(strategy Strategy, strategyType string) retrierOpt {
+ return func(r *Retrier) {
+ r.strategyType = strategyType
+ r.intervalCalculator = strategy
+ }
+}
+
+// WithJitter enables jitter on the retrier, which will cause all of the retries to wait a random amount of time < 1 second
+// The idea here is to avoid thundering herds - retries that are in parallel will happen at slightly different times when
+// jitter is enabled, whereas if jitter is disabled, all the retries might happen at the same time, causing further load
+// on the system that we're trying to do something with
+func WithJitter() retrierOpt {
+ return func(r *Retrier) {
+ r.jitter = true
+ }
+}
+
+// TryForever causes the retrier to never give up retrying, until either the operation succeeds, or the operation
+// calls retrier.Break()
+func TryForever() retrierOpt {
+ return func(r *Retrier) {
+ r.forever = true
+ }
+}
+
+// WithSleepFunc sets the function that the retrier uses to sleep between successive attempts
+// Only really useful for testing
+func WithSleepFunc(f func(time.Duration)) retrierOpt {
+ return func(r *Retrier) {
+ r.sleepFunc = f
+ }
+}
+
+// NewRetrier creates a new instance of the Retrier struct.
Pass in retrierOpt functions to customise the behaviour of
+// the retrier
+func NewRetrier(opts ...retrierOpt) *Retrier {
+ r := &Retrier{
+ rand: defaultRandom,
+ }
+
+ for _, o := range opts {
+ o(r)
+ }
+
+ // We use panics here rather than returning an error because all of these are logical issues caused by the programmer,
+ // they should never occur in normal running, and can't be logically recovered from
+ if r.maxAttempts == 0 && !r.forever {
+ panic("retriers must either run forever, or have a maximum attempt count")
+ }
+
+ if r.maxAttempts < 0 {
+ panic("retriers must have a non-negative max attempt count")
+ }
+
+ oldJitter := r.jitter
+ r.jitter = false // Temporarily turn off jitter while we check if the interval is 0
+ if r.forever && r.strategyType == constantStrategy && r.NextInterval() == 0 {
+ panic("retriers using the constant strategy that run forever must have an interval")
+ }
+ r.jitter = oldJitter // and now set it back to what it was previously
+
+ return r
+}
+
+// Jitter returns a duration in the interval (0, 1] s if jitter is enabled, or 0 s if it's not
+func (r *Retrier) Jitter() time.Duration {
+ if !r.jitter {
+ return 0
+ }
+ return time.Duration((1.0 - r.rand.Float64()) * float64(jitterInterval))
+}
+
+// MarkAttempt increments the attempt count for the retrier. This affects ShouldGiveUp, and also affects the retry interval
+// for the Exponential retry strategy
+func (r *Retrier) MarkAttempt() {
+ r.attemptCount += 1
+}
+
+// Break causes the Retrier to stop retrying after it completes the next retry cycle
+func (r *Retrier) Break() {
+ r.breakNext = true
+}
+
+// SetNextInterval overrides the strategy for the interval before the next try
+func (r *Retrier) SetNextInterval(d time.Duration) {
+ r.manualInterval = &d
+}
+
+// ShouldGiveUp returns whether the retrier should stop trying to do the thing it's been asked to do
+// It returns true if the retry count is greater than r.maxAttempts, or if r.Break() has been called
+// It returns false if the retrier is supposed to try forever
+func (r *Retrier) ShouldGiveUp() bool {
+ if r.breakNext {
+ return true
+ }
+
+ if r.forever {
+ return false
+ }
+
+ return r.attemptCount >= r.maxAttempts
+}
+
+// NextInterval returns the next interval that the retrier will use. Behind the scenes, it calls the function generated
+// by the retrier's strategy
+func (r *Retrier) NextInterval() time.Duration {
+ if r.manualInterval != nil {
+ return *r.manualInterval
+ }
+
+ return r.intervalCalculator(r)
+}
+
+func (r *Retrier) String() string {
+ str := fmt.Sprintf("Attempt %d/", r.attemptCount+1) // +1 because we increment the attempt count after the callback, which is the only useful place to call Retrier.String()
+
+ if r.forever {
+ str = str + "∞"
+ } else {
+ str = str + fmt.Sprintf("%d", r.maxAttempts)
+ }
+
+ if r.attemptCount+1 == r.maxAttempts {
+ return str
+ }
+
+ nextInterval := r.NextInterval()
+ if nextInterval > 0 {
+ str = str + fmt.Sprintf(" Retrying in %s", nextInterval)
+ } else {
+ str = str + " Retrying immediately"
+ }
+
+ return str
+}
+
+func (r *Retrier) AttemptCount() int {
+ return r.attemptCount
+}
+
+// Do is the core loop of a Retrier. It defines the operation that the Retrier will attempt to perform, retrying it if necessary.
+// Calling retrier.Do(someFunc) will cause the Retrier to attempt to call the function, and if it returns an error,
+// retry it using the settings provided to it.
+func (r *Retrier) Do(callback func(*Retrier) error) error {
+ return r.DoWithContext(context.Background(), callback)
+}
+
+// DoWithContext is a context-aware variant of Do.
+func (r *Retrier) DoWithContext(ctx context.Context, callback func(*Retrier) error) error {
+ for {
+ // Perform the action the user has requested we retry
+ err := callback(r)
+ if err == nil {
+ return nil
+ }
+
+ // Calculate the next interval before we increment the attempt count
+ // In the exponential case, if we didn't do this, we'd skip the first interval
+ // ie, we would wait 2^1, 2^2, 2^3, ..., 2^n+1 seconds (bad)
+ // instead of 2^0, 2^1, 2^2, ..., 2^n seconds (good)
+ nextInterval := r.NextInterval()
+
+ // Reset the manualInterval now that the nextInterval has been acquired.
+ r.manualInterval = nil
+
+ r.MarkAttempt()
+
+ // If the last callback called r.Break(), or if we've hit our call limit, bail out and return the last error we got
+ if r.ShouldGiveUp() {
+ return err
+ }
+
+ if err := r.sleepOrDone(ctx, nextInterval); err != nil {
+ return err
+ }
+ }
+}
+
+// DoFunc is a helper for retrying callback functions that return a value or an
+// error. It returns the last value returned by a call to callback, and reports
+// an error if none of the calls succeeded.
+// (Note this is not a method of Retrier, since methods can't be generic.)
+func DoFunc[T any](ctx context.Context, r *Retrier, callback func(*Retrier) (T, error)) (T, error) {
+ var t T
+ err := r.DoWithContext(ctx, func(rt *Retrier) error {
+ var err error
+ t, err = callback(rt)
+ return err
+ })
+ return t, err
+}
+
+// DoFunc2 is a helper for retrying callback functions that return two values or
+// an error. It returns the last values returned by a call to callback, and
+// reports an error if none of the calls succeeded.
+// (Note this is not a method of Retrier, since methods can't be generic.)
+func DoFunc2[T1, T2 any](ctx context.Context, r *Retrier, callback func(*Retrier) (T1, T2, error)) (T1, T2, error) {
+ var t1 T1
+ var t2 T2
+ err := r.DoWithContext(ctx, func(rt *Retrier) error {
+ var err error
+ t1, t2, err = callback(rt)
+ return err
+ })
+ return t1, t2, err
+}
+
+// DoFunc3 is a helper for retrying callback functions that return 3 values or
+// an error. It returns the last values returned by a call to callback, and
+// reports an error if none of the calls succeeded.
+// (Note this is not a method of Retrier, since methods can't be generic.)
+func DoFunc3[T1, T2, T3 any](ctx context.Context, r *Retrier, callback func(*Retrier) (T1, T2, T3, error)) (T1, T2, T3, error) {
+ var t1 T1
+ var t2 T2
+ var t3 T3
+ err := r.DoWithContext(ctx, func(rt *Retrier) error {
+ var err error
+ t1, t2, t3, err = callback(rt)
+ return err
+ })
+ return t1, t2, t3, err
+}
+
+func (r *Retrier) sleepOrDone(ctx context.Context, nextInterval time.Duration) error {
+ if r.sleepFunc == nil {
+ t := time.NewTimer(nextInterval)
+ defer t.Stop()
+ select {
+ case <-t.C:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+
+ sleepCh := make(chan struct{})
+ go func() {
+ r.sleepFunc(nextInterval)
+ close(sleepCh)
+ }()
+ select {
+ case <-sleepCh:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go index 9a70c14328..6a846ece95 100644 --- a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go @@ -64,16 +64,28 @@ func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) if now == nil { now = time.Now } - return &RemoteKeySet{jwksURL: jwksURL, ctx: ctx, now: now} + return &RemoteKeySet{
+ jwksURL: jwksURL,
+ now: now,
+ // For historical reasons, this package uses contexts for configuration, not just
+ // cancellation. In hindsight, this was a bad idea.
+ //
+ // Attempts to reason about how cancels should work with background requests have
+ // largely led to confusion. Use the context here as a config bag-of-values and
+ // ignore the cancel function.
+ ctx: context.WithoutCancel(ctx),
+ }
}

// RemoteKeySet is a KeySet implementation that validates JSON web tokens against
// a jwks_uri endpoint.
type RemoteKeySet struct {
 jwksURL string
- ctx context.Context
 now func() time.Time

+ // Used for configuration. Cancelation is ignored.
+ ctx context.Context
+
 // guard all other fields
 mu sync.RWMutex
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go index 0ac58d2995..52b27b746a 100644 --- a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go @@ -120,8 +120,8 @@ type Config struct { } // VerifierContext returns an IDTokenVerifier that uses the provider's key set to -// verify JWTs. As opposed to Verifier, the context is used for all requests to -// the upstream JWKs endpoint. +// verify JWTs. As opposed to Verifier, the context is used to configure requests +// to the upstream JWKs endpoint. The provided context's cancellation is ignored. func (p *Provider) VerifierContext(ctx context.Context, config *Config) *IDTokenVerifier { return p.newVerifier(NewRemoteKeySet(ctx, p.jwksURL), config) } diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml new file mode 100644 index 0000000000..ac12e485a1 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/.travis.yml @@ -0,0 +1,21 @@ +sudo: false
+language: go
+go_import_path: github.com/dustin/go-humanize
+go:
+ - 1.13.x
+ - 1.14.x
+ - 1.15.x
+ - 1.16.x
+ - stable
+ - master
+matrix:
+ allow_failures:
+ - go: master
+ fast_finish: true
+install:
+ - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+ - diff -u <(echo -n) <(gofmt -d -s .)
+ - go vet .
+ - go install -v -race ./...
+ - go test -v -race ./...
diff --git a/vendor/github.com/mpvl/unique/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE similarity index 84% rename from vendor/github.com/mpvl/unique/LICENSE rename to vendor/github.com/dustin/go-humanize/LICENSE index 60b39f2707..8d9a94a906 100644 --- a/vendor/github.com/mpvl/unique/LICENSE +++ b/vendor/github.com/dustin/go-humanize/LICENSE @@ -1,6 +1,4 @@ -The MIT License (MIT) - -Copyright (c) 2015 Marcel van Lohuizen +Copyright (c) 2005-2008 Dustin Sallings Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -9,8 +7,8 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, @@ -20,3 +18,4 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown new file mode 100644 index 0000000000..7d0b16b34f --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/README.markdown @@ -0,0 +1,124 @@ +# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)
+
+Just a few functions for helping humanize times and sizes.
+
+`go get` it as `github.com/dustin/go-humanize`, import it as
+`"github.com/dustin/go-humanize"`, use it as `humanize`.
+
+See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for
+complete documentation.
+
+## Sizes
+
+This lets you take numbers like `82854982` and convert them to useful
+strings like `83 MB` or `79 MiB` (whichever you prefer).
+
+Example:
+
+```go
+fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
+```
+
+## Times
+
+This lets you take a `time.Time` and spit it out in relative terms.
+For example, `12 seconds ago` or `3 days from now`.
+
+Example:
+
+```go
+fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
+```
+
+Thanks to Kyle Lemons for the time implementation from an IRC
+conversation one day. It's pretty neat.
+
+## Ordinals
+
+From a [mailing list discussion][odisc] where a user wanted to be able
+to label ordinals.
+
+ 0 -> 0th
+ 1 -> 1st
+ 2 -> 2nd
+ 3 -> 3rd
+ 4 -> 4th
+ [...]
+
+Example:
+
+```go
+fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You're my 193rd best friend.
+```
+
+## Commas
+
+Want to shove commas into numbers? Be my guest.
+
+ 0 -> 0
+ 100 -> 100
+ 1000 -> 1,000
+ 1000000000 -> 1,000,000,000
+ -100000 -> -100,000
+
+Example:
+
+```go
+fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
+```
+
+## Ftoa
+
+Nicer float64 formatter that removes trailing zeros.
+
+```go
+fmt.Printf("%f", 2.24) // 2.240000
+fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
+fmt.Printf("%f", 2.0) // 2.000000
+fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
+```
+
+## SI notation
+
+Format numbers with [SI notation][sinotation].
+
+Example:
+
+```go
+humanize.SI(0.00000000223, "M") // 2.23 nM
+```
+
+## English-specific functions
+
+The following functions are in the `humanize/english` subpackage.
+
+### Plurals
+
+Simple English pluralization
+
+```go
+english.PluralWord(1, "object", "") // object
+english.PluralWord(42, "object", "") // objects
+english.PluralWord(2, "bus", "") // buses
+english.PluralWord(99, "locus", "loci") // loci
+
+english.Plural(1, "object", "") // 1 object
+english.Plural(42, "object", "") // 42 objects
+english.Plural(2, "bus", "") // 2 buses
+english.Plural(99, "locus", "loci") // 99 loci
+```
+
+### Word series
+
+Format comma-separated word lists with conjunctions:
+
+```go
+english.WordSeries([]string{"foo"}, "and") // foo
+english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
+english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
+
+english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
+```
+
+[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
+[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go new file mode 100644 index 0000000000..f49dc337dc --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/big.go @@ -0,0 +1,31 @@ +package humanize
+
+import (
+ "math/big"
+)
+
+// order of magnitude (to a max order)
+func oomm(n, b *big.Int, maxmag int) (float64, int) {
+ mag := 0
+ m := &big.Int{}
+ for n.Cmp(b) >= 0 {
+ n.DivMod(n, b, m)
+ mag++
+ if mag == maxmag && maxmag >= 0 {
+ break
+ }
+ }
+ return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}
+
+// total order of magnitude
+// (same as above, but with no upper limit)
+func oom(n, b *big.Int) (float64, int) {
+ mag := 0
+ m := &big.Int{}
+ for n.Cmp(b) >= 0 {
+ n.DivMod(n, b, m)
+ mag++
+ }
+ return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}
diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go new file mode 100644 index 0000000000..3b015fd59e --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bigbytes.go @@ -0,0 +1,189 @@ +package humanize
+
+import (
+ "fmt"
+ "math/big"
+ "strings"
+ "unicode"
+)
+
+var (
+ bigIECExp = big.NewInt(1024)
+
+ // BigByte is one byte in big.Ints
+ BigByte = big.NewInt(1)
+ // BigKiByte is 1,024 bytes in big.Ints
+ BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
+ // BigMiByte is 1,024 k bytes in big.Ints
+ BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
+ // BigGiByte is 1,024 m bytes in big.Ints
+ BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
+ // BigTiByte is 1,024 g bytes in big.Ints
+ BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
+ // BigPiByte is 1,024 t bytes in big.Ints
+ BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
+ // BigEiByte is 1,024 p bytes in big.Ints
+ BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
+ // BigZiByte is 1,024 e bytes in big.Ints
+ BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
+ // BigYiByte is 1,024 z bytes in big.Ints
+ BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
+ // BigRiByte is 1,024 y bytes in big.Ints
+ BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp)
+ // BigQiByte is 1,024 r
bytes in big.Ints
+ BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp)
+)
+
+var (
+ bigSIExp = big.NewInt(1000)
+
+ // BigSIByte is one SI byte in big.Ints
+ BigSIByte = big.NewInt(1)
+ // BigKByte is 1,000 SI bytes in big.Ints
+ BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
+ // BigMByte is 1,000 SI k bytes in big.Ints
+ BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
+ // BigGByte is 1,000 SI m bytes in big.Ints
+ BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
+ // BigTByte is 1,000 SI g bytes in big.Ints
+ BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
+ // BigPByte is 1,000 SI t bytes in big.Ints
+ BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
+ // BigEByte is 1,000 SI p bytes in big.Ints
+ BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
+ // BigZByte is 1,000 SI e bytes in big.Ints
+ BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
+ // BigYByte is 1,000 SI z bytes in big.Ints
+ BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
+ // BigRByte is 1,000 SI y bytes in big.Ints
+ BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp)
+ // BigQByte is 1,000 SI r bytes in big.Ints
+ BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp)
+)
+
+var bigBytesSizeTable = map[string]*big.Int{
+ "b": BigByte,
+ "kib": BigKiByte,
+ "kb": BigKByte,
+ "mib": BigMiByte,
+ "mb": BigMByte,
+ "gib": BigGiByte,
+ "gb": BigGByte,
+ "tib": BigTiByte,
+ "tb": BigTByte,
+ "pib": BigPiByte,
+ "pb": BigPByte,
+ "eib": BigEiByte,
+ "eb": BigEByte,
+ "zib": BigZiByte,
+ "zb": BigZByte,
+ "yib": BigYiByte,
+ "yb": BigYByte,
+ "rib": BigRiByte,
+ "rb": BigRByte,
+ "qib": BigQiByte,
+ "qb": BigQByte,
+ // Without suffix
+ "": BigByte,
+ "ki": BigKiByte,
+ "k": BigKByte,
+ "mi": BigMiByte,
+ "m": BigMByte,
+ "gi": BigGiByte,
+ "g": BigGByte,
+ "ti": BigTiByte,
+ "t": BigTByte,
+ "pi": BigPiByte,
+ "p": BigPByte,
+ "ei": BigEiByte,
+ "e": BigEByte,
+ "z": BigZByte,
+ "zi": BigZiByte,
+ "y": BigYByte,
+ "yi": BigYiByte,
+ "r": BigRByte,
+ "ri": BigRiByte,
+ "q": BigQByte,
+ "qi": BigQiByte,
+}
+
+var ten = big.NewInt(10)
+
+func humanateBigBytes(s, base *big.Int, sizes []string) string {
+ if s.Cmp(ten) < 0 {
+ return fmt.Sprintf("%d B", s)
+ }
+ c := (&big.Int{}).Set(s)
+ val, mag := oomm(c, base, len(sizes)-1)
+ suffix := sizes[mag]
+ f := "%.0f %s"
+ if val < 10 {
+ f = "%.1f %s"
+ }
+
+ return fmt.Sprintf(f, val, suffix)
+
+}
+
+// BigBytes produces a human readable representation of an SI size.
+//
+// See also: ParseBigBytes.
+//
+// BigBytes(82854982) -> 83 MB
+func BigBytes(s *big.Int) string {
+ sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"}
+ return humanateBigBytes(s, bigSIExp, sizes)
+}
+
+// BigIBytes produces a human readable representation of an IEC size.
+//
+// See also: ParseBigBytes.
+//
+// BigIBytes(82854982) -> 79 MiB
+func BigIBytes(s *big.Int) string {
+ sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"}
+ return humanateBigBytes(s, bigIECExp, sizes)
+}
+
+// ParseBigBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See also: BigBytes, BigIBytes.
+//
+// ParseBigBytes("42 MB") -> 42000000, nil
+// ParseBigBytes("42 mib") -> 44040192, nil
+func ParseBigBytes(s string) (*big.Int, error) {
+ lastDigit := 0
+ hasComma := false
+ for _, r := range s {
+ if !(unicode.IsDigit(r) || r == '.'
|| r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + val := &big.Rat{} + _, err := fmt.Sscanf(num, "%f", val) + if err != nil { + return nil, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bigBytesSizeTable[extra]; ok { + mv := (&big.Rat{}).SetInt(m) + val.Mul(val, mv) + rv := &big.Int{} + rv.Div(val.Num(), val.Denom()) + return rv, nil + } + + return nil, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go new file mode 100644 index 0000000000..0b498f4885 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bytes.go @@ -0,0 +1,143 @@ +package humanize + +import ( + "fmt" + "math" + "strconv" + "strings" + "unicode" +) + +// IEC Sizes. +// kibis of bits +const ( + Byte = 1 << (iota * 10) + KiByte + MiByte + GiByte + TiByte + PiByte + EiByte +) + +// SI Sizes. +const ( + IByte = 1 + KByte = IByte * 1000 + MByte = KByte * 1000 + GByte = MByte * 1000 + TByte = GByte * 1000 + PByte = TByte * 1000 + EByte = PByte * 1000 +) + +var bytesSizeTable = map[string]uint64{ + "b": Byte, + "kib": KiByte, + "kb": KByte, + "mib": MiByte, + "mb": MByte, + "gib": GiByte, + "gb": GByte, + "tib": TiByte, + "tb": TByte, + "pib": PiByte, + "pb": PByte, + "eib": EiByte, + "eb": EByte, + // Without suffix + "": Byte, + "ki": KiByte, + "k": KByte, + "mi": MiByte, + "m": MByte, + "gi": GiByte, + "g": GByte, + "ti": TiByte, + "t": TByte, + "pi": PiByte, + "p": PByte, + "ei": EiByte, + "e": EByte, +} + +func logn(n, b float64) float64 { + return math.Log(n) / math.Log(b) +} + +func humanateBytes(s uint64, base float64, sizes []string) string { + if s < 10 { + return fmt.Sprintf("%d B", s) + } + e := math.Floor(logn(float64(s), base)) + suffix := sizes[int(e)] + val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) +} + +// Bytes produces a human readable representation of an SI size. +// +// See also: ParseBytes. +// +// Bytes(82854982) -> 83 MB +func Bytes(s uint64) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} + return humanateBytes(s, 1000, sizes) +} + +// IBytes produces a human readable representation of an IEC size. +// +// See also: ParseBytes. +// +// IBytes(82854982) -> 79 MiB +func IBytes(s uint64) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} + return humanateBytes(s, 1024, sizes) +} + +// ParseBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See Also: Bytes, IBytes. +// +// ParseBytes("42 MB") -> 42000000, nil +// ParseBytes("42 mib") -> 44040192, nil +func ParseBytes(s string) (uint64, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + f, err := strconv.ParseFloat(num, 64) + if err != nil { + return 0, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bytesSizeTable[extra]; ok { + f *= float64(m) + if f >= math.MaxUint64 { + return 0, fmt.Errorf("too large: %v", s) + } + return uint64(f), nil + } + + return 0, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go new file mode 100644 index 0000000000..520ae3e57d --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/comma.go @@ -0,0 +1,116 @@ +package humanize + +import ( + "bytes" + "math" + "math/big" + "strconv" + "strings" +) + +// Comma produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Comma(834142) -> 834,142 +func Comma(v int64) string { + sign := "" + + // Min int64 can't be negated to a usable value, so it has to be special cased. + if v == math.MinInt64 { + return "-9,223,372,036,854,775,808" + } + + if v < 0 { + sign = "-" + v = 0 - v + } + + parts := []string{"", "", "", "", "", "", ""} + j := len(parts) - 1 + + for v > 999 { + parts[j] = strconv.FormatInt(v%1000, 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + v = v / 1000 + j-- + } + parts[j] = strconv.Itoa(int(v)) + return sign + strings.Join(parts[j:], ",") +} + +// Commaf produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Commaf(834142.32) -> 834,142.32 +func Commaf(v float64) string { + buf := &bytes.Buffer{} + if v < 0 { + buf.Write([]byte{'-'}) + v = 0 - v + } + + comma := []byte{','} + + parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} + +// CommafWithDigits works like the Commaf but limits the resulting +// string to the given number of decimal places. +// +// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3 +func CommafWithDigits(f float64, decimals int) string { + return stripTrailingDigits(Commaf(f), decimals) +} + +// BigComma produces a string form of the given big.Int in base 10 +// with commas after every three orders of magnitude. 
+func BigComma(b *big.Int) string { + sign := "" + if b.Sign() < 0 { + sign = "-" + b.Abs(b) + } + + athousand := big.NewInt(1000) + c := (&big.Int{}).Set(b) + _, m := oom(c, athousand) + parts := make([]string, m+1) + j := len(parts) - 1 + + mod := &big.Int{} + for b.Cmp(athousand) >= 0 { + b.DivMod(b, athousand, mod) + parts[j] = strconv.FormatInt(mod.Int64(), 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + j-- + } + parts[j] = strconv.Itoa(int(b.Int64())) + return sign + strings.Join(parts[j:], ",") +} diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go new file mode 100644 index 0000000000..2bc83a03cf --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/commaf.go @@ -0,0 +1,41 @@ +//go:build go1.6 +// +build go1.6 + +package humanize + +import ( + "bytes" + "math/big" + "strings" +) + +// BigCommaf produces a string form of the given big.Float in base 10 +// with commas after every three orders of magnitude. +func BigCommaf(v *big.Float) string { + buf := &bytes.Buffer{} + if v.Sign() < 0 { + buf.Write([]byte{'-'}) + v.Abs(v) + } + + comma := []byte{','} + + parts := strings.Split(v.Text('f', -1), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go new file mode 100644 index 0000000000..bce923f371 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ftoa.go @@ -0,0 +1,49 @@ +package humanize + +import ( + "strconv" + "strings" +) + +func stripTrailingZeros(s string) string { + if !strings.ContainsRune(s, '.') { + return s + } + offset := len(s) - 1 + for offset > 0 { + if s[offset] == '.' { + offset-- + break + } + if s[offset] != '0' { + break + } + offset-- + } + return s[:offset+1] +} + +func stripTrailingDigits(s string, digits int) string { + if i := strings.Index(s, "."); i >= 0 { + if digits <= 0 { + return s[:i] + } + i++ + if i+digits >= len(s) { + return s + } + return s[:i+digits] + } + return s +} + +// Ftoa converts a float to a string with no trailing zeros. +func Ftoa(num float64) string { + return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) +} + +// FtoaWithDigits converts a float to a string but limits the resulting string +// to the given number of decimal places, and no trailing zeros. +func FtoaWithDigits(num float64, digits int) string { + return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits)) +} diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go new file mode 100644 index 0000000000..a2c2da31ef --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/humanize.go @@ -0,0 +1,8 @@ +/* +Package humanize converts boring ugly numbers to human-friendly strings and back. + +Durations can be turned into strings such as "3 days ago", numbers +representing sizes like 82854982 into useful strings like, "83 MB" or +"79 MiB" (whichever you prefer). 
+*/ +package humanize diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go new file mode 100644 index 0000000000..6470d0d47a --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/number.go @@ -0,0 +1,192 @@ +package humanize
+
+/*
+Slightly adapted from the source to fit go-humanize.
+
+Author: https://github.com/gorhill
+Source: https://gist.github.com/gorhill/5285193
+
+*/
+
+import (
+ "math"
+ "strconv"
+)
+
+var (
+ renderFloatPrecisionMultipliers = [...]float64{
+ 1,
+ 10,
+ 100,
+ 1000,
+ 10000,
+ 100000,
+ 1000000,
+ 10000000,
+ 100000000,
+ 1000000000,
+ }
+
+ renderFloatPrecisionRounders = [...]float64{
+ 0.5,
+ 0.05,
+ 0.005,
+ 0.0005,
+ 0.00005,
+ 0.000005,
+ 0.0000005,
+ 0.00000005,
+ 0.000000005,
+ 0.0000000005,
+ }
+)
+
+// FormatFloat produces a formatted number as a string based on the following user-specified criteria:
+// * thousands separator
+// * decimal separator
+// * decimal precision
+//
+// Usage: s := FormatFloat(format, n)
+// The format parameter tells how to render the number n.
+//
+// See examples: http://play.golang.org/p/LXc1Ddm1lJ
+//
+// Examples of format strings, given n = 12345.6789:
+// "#,###.##" => "12,345.68"
+// "#,###." => "12,346"
+// "#,###" => "12345,679"
+// "#\u202F###,##" => "12 345,68"
+// "#.###,######" => 12.345,678900
+// "" (aka default format) => 12,345.68
+//
+// The highest precision allowed is 9 digits after the decimal symbol.
+// There is also a version for integer numbers, FormatInteger(),
+// which is convenient for calls within templates.
+func FormatFloat(format string, n float64) string {
+ // Special cases:
+ // NaN = "NaN"
+ // +Inf = "+Infinity"
+ // -Inf = "-Infinity"
+ if math.IsNaN(n) {
+ return "NaN"
+ }
+ if n > math.MaxFloat64 {
+ return "Infinity"
+ }
+ if n < (0.0 - math.MaxFloat64) {
+ return "-Infinity"
+ }
+
+ // default format
+ precision := 2
+ decimalStr := "."
+ thousandStr := "," + positiveStr := "" + negativeStr := "-" + + if len(format) > 0 { + format := []rune(format) + + // If there is an explicit format directive, + // then default values are these: + precision = 9 + thousandStr = "" + + // collect indices of meaningful formatting directives + formatIndx := []int{} + for i, char := range format { + if char != '#' && char != '0' { + formatIndx = append(formatIndx, i) + } + } + + if len(formatIndx) > 0 { + // Directive at index 0: + // Must be a '+' + // Raise an error if not the case + // index: 0123456789 + // +0.000,000 + // +000,000.0 + // +0000.00 + // +0000 + if formatIndx[0] == 0 { + if format[formatIndx[0]] != '+' { + panic("RenderFloat(): invalid positive sign directive") + } + positiveStr = "+" + formatIndx = formatIndx[1:] + } + + // Two directives: + // First is thousands separator + // Raise an error if not followed by 3-digit + // 0123456789 + // 0.000,000 + // 000,000.00 + if len(formatIndx) == 2 { + if (formatIndx[1] - formatIndx[0]) != 4 { + panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") + } + thousandStr = string(format[formatIndx[0]]) + formatIndx = formatIndx[1:] + } + + // One directive: + // Directive is decimal separator + // The number of digit-specifier following the separator indicates wanted precision + // 0123456789 + // 0.00 + // 000,0000 + if len(formatIndx) == 1 { + decimalStr = string(format[formatIndx[0]]) + precision = len(format) - formatIndx[0] - 1 + } + } + } + + // generate sign part + var signStr string + if n >= 0.000000001 { + signStr = positiveStr + } else if n <= -0.000000001 { + signStr = negativeStr + n = -n + } else { + signStr = "" + n = 0.0 + } + + // split number into integer and fractional parts + intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) + + // generate integer part string + intStr := strconv.FormatInt(int64(intf), 10) + + // add thousand separator if required + if len(thousandStr) > 0 { + for i := len(intStr); i > 3; { + i -= 3 + intStr = intStr[:i] + thousandStr + intStr[i:] + } + } + + // no fractional part, we can leave now + if precision == 0 { + return signStr + intStr + } + + // generate fractional part + fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) + // may need padding + if len(fracStr) < precision { + fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr + } + + return signStr + intStr + decimalStr + fracStr +} + +// FormatInteger produces a formatted number as string. +// See FormatFloat. +func FormatInteger(format string, n int) string { + return FormatFloat(format, float64(n)) +} diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go new file mode 100644 index 0000000000..43d88a8619 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ordinals.go @@ -0,0 +1,25 @@ +package humanize + +import "strconv" + +// Ordinal gives you the input number in a rank/ordinal format. 
+// +// Ordinal(3) -> 3rd +func Ordinal(x int) string { + suffix := "th" + switch x % 10 { + case 1: + if x%100 != 11 { + suffix = "st" + } + case 2: + if x%100 != 12 { + suffix = "nd" + } + case 3: + if x%100 != 13 { + suffix = "rd" + } + } + return strconv.Itoa(x) + suffix +} diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go new file mode 100644 index 0000000000..8b85019849 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/si.go @@ -0,0 +1,127 @@ +package humanize + +import ( + "errors" + "math" + "regexp" + "strconv" +) + +var siPrefixTable = map[float64]string{ + -30: "q", // quecto + -27: "r", // ronto + -24: "y", // yocto + -21: "z", // zepto + -18: "a", // atto + -15: "f", // femto + -12: "p", // pico + -9: "n", // nano + -6: "µ", // micro + -3: "m", // milli + 0: "", + 3: "k", // kilo + 6: "M", // mega + 9: "G", // giga + 12: "T", // tera + 15: "P", // peta + 18: "E", // exa + 21: "Z", // zetta + 24: "Y", // yotta + 27: "R", // ronna + 30: "Q", // quetta +} + +var revSIPrefixTable = revfmap(siPrefixTable) + +// revfmap reverses the map and precomputes the power multiplier +func revfmap(in map[float64]string) map[string]float64 { + rv := map[string]float64{} + for k, v := range in { + rv[v] = math.Pow(10, k) + } + return rv +} + +var riParseRegex *regexp.Regexp + +func init() { + ri := `^([\-0-9.]+)\s?([` + for _, v := range siPrefixTable { + ri += v + } + ri += `]?)(.*)` + + riParseRegex = regexp.MustCompile(ri) +} + +// ComputeSI finds the most appropriate SI prefix for the given number +// and returns the prefix along with the value adjusted to be within +// that prefix. +// +// See also: SI, ParseSI. +// +// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p") +func ComputeSI(input float64) (float64, string) { + if input == 0 { + return 0, "" + } + mag := math.Abs(input) + exponent := math.Floor(logn(mag, 10)) + exponent = math.Floor(exponent/3) * 3 + + value := mag / math.Pow(10, exponent) + + // Handle special case where value is exactly 1000.0 + // Should return 1 M instead of 1000 k + if value == 1000.0 { + exponent += 3 + value = mag / math.Pow(10, exponent) + } + + value = math.Copysign(value, input) + + prefix := siPrefixTable[exponent] + return value, prefix +} + +// SI returns a string with default formatting. +// +// SI uses Ftoa to format float value, removing trailing zeros. +// +// See also: ComputeSI, ParseSI. +// +// e.g. SI(1000000, "B") -> 1 MB +// e.g. SI(2.2345e-12, "F") -> 2.2345 pF +func SI(input float64, unit string) string { + value, prefix := ComputeSI(input) + return Ftoa(value) + " " + prefix + unit +} + +// SIWithDigits works like SI but limits the resulting string to the +// given number of decimal places. +// +// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB +// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF +func SIWithDigits(input float64, decimals int, unit string) string { + value, prefix := ComputeSI(input) + return FtoaWithDigits(value, decimals) + " " + prefix + unit +} + +var errInvalid = errors.New("invalid input") + +// ParseSI parses an SI string back into the number and unit. +// +// See also: SI, ComputeSI. +// +// e.g. 
ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
+func ParseSI(input string) (float64, string, error) {
+	found := riParseRegex.FindStringSubmatch(input)
+	if len(found) != 4 {
+		return 0, "", errInvalid
+	}
+	mag := revSIPrefixTable[found[2]]
+	unit := found[3]
+
+	base, err := strconv.ParseFloat(found[1], 64)
+	return base * mag, unit, err
+}
diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go
new file mode 100644
index 0000000000..dd3fbf5efc
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/times.go
@@ -0,0 +1,117 @@
+package humanize
+
+import (
+	"fmt"
+	"math"
+	"sort"
+	"time"
+)
+
+// Seconds-based time units
+const (
+	Day      = 24 * time.Hour
+	Week     = 7 * Day
+	Month    = 30 * Day
+	Year     = 12 * Month
+	LongTime = 37 * Year
+)
+
+// Time formats a time into a relative string.
+//
+// Time(someT) -> "3 weeks ago"
+func Time(then time.Time) string {
+	return RelTime(then, time.Now(), "ago", "from now")
+}
+
+// A RelTimeMagnitude struct contains a relative time point at which
+// the relative format of time will switch to a new format string. A
+// slice of these in ascending order by their "D" field is passed to
+// CustomRelTime to format durations.
+//
+// The Format field is a string that may contain a "%s" which will be
+// replaced with the appropriate signed label (e.g. "ago" or "from
+// now") and a "%d" that will be replaced by the quantity.
+//
+// The DivBy field is the amount of time the time difference must be
+// divided by in order to display correctly.
+//
+// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
+// DivBy should be time.Minute so whatever the duration is will be
+// expressed in minutes.
+type RelTimeMagnitude struct {
+	D      time.Duration
+	Format string
+	DivBy  time.Duration
+}
+
+var defaultMagnitudes = []RelTimeMagnitude{
+	{time.Second, "now", time.Second},
+	{2 * time.Second, "1 second %s", 1},
+	{time.Minute, "%d seconds %s", time.Second},
+	{2 * time.Minute, "1 minute %s", 1},
+	{time.Hour, "%d minutes %s", time.Minute},
+	{2 * time.Hour, "1 hour %s", 1},
+	{Day, "%d hours %s", time.Hour},
+	{2 * Day, "1 day %s", 1},
+	{Week, "%d days %s", Day},
+	{2 * Week, "1 week %s", 1},
+	{Month, "%d weeks %s", Week},
+	{2 * Month, "1 month %s", 1},
+	{Year, "%d months %s", Month},
+	{18 * Month, "1 year %s", 1},
+	{2 * Year, "2 years %s", 1},
+	{LongTime, "%d years %s", Year},
+	{math.MaxInt64, "a long while %s", 1},
+}
+
+// RelTime formats a time into a relative string.
+//
+// It takes two times and two labels. In addition to the generic time
+// delta string (e.g. 5 minutes), the labels are applied so that the
+// label corresponding to the smaller time is used.
+//
+// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
+func RelTime(a, b time.Time, albl, blbl string) string {
+	return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
+}
+
+// CustomRelTime formats a time into a relative string.
+//
+// It takes two times, two labels, and a table of relative time formats.
+// In addition to the generic time delta string (e.g. 5 minutes), the
+// labels are applied so that the label corresponding to the smaller
+// time is used.
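+//
+// An illustrative sketch (not part of the upstream documentation; the table
+// below is a made-up example, not a recommended configuration):
+//
+//	magnitudes := []humanize.RelTimeMagnitude{
+//		{D: time.Minute, Format: "just now", DivBy: time.Minute},
+//		{D: math.MaxInt64, Format: "%d minutes %s", DivBy: time.Minute},
+//	}
+//	then := time.Now().Add(-5 * time.Minute)
+//	humanize.CustomRelTime(then, time.Now(), "ago", "from now", magnitudes)
+//	// -> "5 minutes ago"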
+func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
+	lbl := albl
+	diff := b.Sub(a)
+
+	if a.After(b) {
+		lbl = blbl
+		diff = a.Sub(b)
+	}
+
+	// Find the first magnitude whose upper bound exceeds the difference.
+	n := sort.Search(len(magnitudes), func(i int) bool {
+		return magnitudes[i].D > diff
+	})
+
+	if n >= len(magnitudes) {
+		n = len(magnitudes) - 1
+	}
+	mag := magnitudes[n]
+	args := []interface{}{}
+	escaped := false
+	// Scan the format string for %s and %d verbs so the label and the
+	// quantity are supplied in whatever order the format names them.
+	for _, ch := range mag.Format {
+		if escaped {
+			switch ch {
+			case 's':
+				args = append(args, lbl)
+			case 'd':
+				args = append(args, diff/mag.DivBy)
+			}
+			escaped = false
+		} else {
+			escaped = ch == '%'
+		}
+	}
+	return fmt.Sprintf(mag.Format, args...)
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
index 28bdd2fc08..6f717dbd86 100644
--- a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
+++ b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
@@ -1,3 +1,27 @@
+# v4.0.4
+
+## Fixed
+
+ - Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as
+   it was a breaking change. See #136 / #137.
+
+# v4.0.3
+
+## Changed
+
+ - Allow unmarshalling JSONWebKeySets with unsupported key types (#130)
+ - Document that OpaqueKeyEncrypter can't be implemented (for now) (#129)
+ - Dependency updates
+
+# v4.0.2
+
+## Changed
+
+ - Improved documentation of Verify() to note that JSONWebKeySet is a supported
+   argument type (#104)
+ - Defined exported error values for missing x5c header and unsupported elliptic
+   curves error cases (#117)
+
 # v4.0.1
 
 ## Fixed
diff --git a/vendor/github.com/go-jose/go-jose/v4/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go
index aba08424c3..d81b03b447 100644
--- a/vendor/github.com/go-jose/go-jose/v4/crypter.go
+++ b/vendor/github.com/go-jose/go-jose/v4/crypter.go
@@ -459,7 +459,10 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
 		return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
 	}
 
-	key := tryJWKS(decryptionKey, obj.Header)
+	key, err := tryJWKS(decryptionKey, obj.Header)
+	if err != nil {
+		return nil, err
+	}
 	decrypter, err := newDecrypter(key)
 	if err != nil {
 		return nil, err
@@ -529,7 +532,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade
 		return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
 	}
 
-	key := tryJWKS(decryptionKey, obj.Header)
+	key, err := tryJWKS(decryptionKey, obj.Header)
+	if err != nil {
+		return -1, Header{}, nil, err
+	}
 	decrypter, err := newDecrypter(key)
 	if err != nil {
 		return -1, Header{}, nil, err
diff --git a/vendor/github.com/go-jose/go-jose/v4/jwk.go b/vendor/github.com/go-jose/go-jose/v4/jwk.go
index a565aaab27..8a52842106 100644
--- a/vendor/github.com/go-jose/go-jose/v4/jwk.go
+++ b/vendor/github.com/go-jose/go-jose/v4/jwk.go
@@ -779,7 +779,13 @@ func (key rawJSONWebKey) symmetricKey() ([]byte, error) {
 	return key.K.bytes(), nil
 }
 
-func tryJWKS(key interface{}, headers ...Header) interface{} {
+var (
+	// ErrJWKSKidNotFound is returned when a JWKS does not contain a JWK with a
+	// key ID which matches one in the provided token's headers.
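+	//
+	// Callers that need to distinguish this case from other failures can
+	// test for it with errors.Is(err, ErrJWKSKidNotFound).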
+ ErrJWKSKidNotFound = errors.New("go-jose/go-jose: JWK with matching kid not found in JWK Set") +) + +func tryJWKS(key interface{}, headers ...Header) (interface{}, error) { var jwks JSONWebKeySet switch jwksType := key.(type) { @@ -788,9 +794,11 @@ func tryJWKS(key interface{}, headers ...Header) interface{} { case JSONWebKeySet: jwks = jwksType default: - return key + // If the specified key is not a JWKS, return as is. + return key, nil } + // Determine the KID to search for from the headers. var kid string for _, header := range headers { if header.KeyID != "" { @@ -799,14 +807,17 @@ func tryJWKS(key interface{}, headers ...Header) interface{} { } } + // If no KID is specified in the headers, reject. if kid == "" { - return key + return nil, ErrJWKSKidNotFound } + // Find the JWK with the matching KID. If no JWK with the specified KID is + // found, reject. keys := jwks.Key(kid) if len(keys) == 0 { - return key + return nil, ErrJWKSKidNotFound } - return keys[0].Key + return keys[0].Key, nil } diff --git a/vendor/github.com/go-jose/go-jose/v4/jwt/builder.go b/vendor/github.com/go-jose/go-jose/v4/jwt/builder.go new file mode 100644 index 0000000000..d68bb3725c --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/jwt/builder.go @@ -0,0 +1,315 @@ +/*- + * Copyright 2016 Zbigniew Mandziejewicz + * Copyright 2016 Square, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "bytes" + "reflect" + + "github.com/go-jose/go-jose/v4/json" + + "github.com/go-jose/go-jose/v4" +) + +// Builder is a utility for making JSON Web Tokens. Calls can be chained, and +// errors are accumulated until the final call to Serialize. +type Builder interface { + // Claims encodes claims into JWE/JWS form. Multiple calls will merge claims + // into single JSON object. If you are passing private claims, make sure to set + // struct field tags to specify the name for the JSON key to be used when + // serializing. + Claims(i interface{}) Builder + // Token builds a JSONWebToken from provided data. + Token() (*JSONWebToken, error) + // Serialize serializes a token. + Serialize() (string, error) +} + +// NestedBuilder is a utility for making Signed-Then-Encrypted JSON Web Tokens. +// Calls can be chained, and errors are accumulated until final call to +// Serialize. +type NestedBuilder interface { + // Claims encodes claims into JWE/JWS form. Multiple calls will merge claims + // into single JSON object. If you are passing private claims, make sure to set + // struct field tags to specify the name for the JSON key to be used when + // serializing. + Claims(i interface{}) NestedBuilder + // Token builds a NestedJSONWebToken from provided data. + Token() (*NestedJSONWebToken, error) + // Serialize serializes a token. 
+ Serialize() (string, error) +} + +type builder struct { + payload map[string]interface{} + err error +} + +type signedBuilder struct { + builder + sig jose.Signer +} + +type encryptedBuilder struct { + builder + enc jose.Encrypter +} + +type nestedBuilder struct { + builder + sig jose.Signer + enc jose.Encrypter +} + +// Signed creates builder for signed tokens. +func Signed(sig jose.Signer) Builder { + return &signedBuilder{ + sig: sig, + } +} + +// Encrypted creates builder for encrypted tokens. +func Encrypted(enc jose.Encrypter) Builder { + return &encryptedBuilder{ + enc: enc, + } +} + +// SignedAndEncrypted creates builder for signed-then-encrypted tokens. +// ErrInvalidContentType will be returned if encrypter doesn't have JWT content type. +func SignedAndEncrypted(sig jose.Signer, enc jose.Encrypter) NestedBuilder { + if contentType, _ := enc.Options().ExtraHeaders[jose.HeaderContentType].(jose.ContentType); contentType != "JWT" { + return &nestedBuilder{ + builder: builder{ + err: ErrInvalidContentType, + }, + } + } + return &nestedBuilder{ + sig: sig, + enc: enc, + } +} + +func (b builder) claims(i interface{}) builder { + if b.err != nil { + return b + } + + m, ok := i.(map[string]interface{}) + switch { + case ok: + return b.merge(m) + case reflect.Indirect(reflect.ValueOf(i)).Kind() == reflect.Struct: + m, err := normalize(i) + if err != nil { + return builder{ + err: err, + } + } + return b.merge(m) + default: + return builder{ + err: ErrInvalidClaims, + } + } +} + +func normalize(i interface{}) (map[string]interface{}, error) { + m := make(map[string]interface{}) + + raw, err := json.Marshal(i) + if err != nil { + return nil, err + } + + d := json.NewDecoder(bytes.NewReader(raw)) + d.SetNumberType(json.UnmarshalJSONNumber) + + if err := d.Decode(&m); err != nil { + return nil, err + } + + return m, nil +} + +func (b *builder) merge(m map[string]interface{}) builder { + p := make(map[string]interface{}) + for k, v := range b.payload { + p[k] = v + } + for k, v := range m { + p[k] = v + } + + return builder{ + payload: p, + } +} + +func (b *builder) token(p func(interface{}) ([]byte, error), h []jose.Header) (*JSONWebToken, error) { + return &JSONWebToken{ + payload: p, + Headers: h, + }, nil +} + +func (b *signedBuilder) Claims(i interface{}) Builder { + return &signedBuilder{ + builder: b.builder.claims(i), + sig: b.sig, + } +} + +func (b *signedBuilder) Token() (*JSONWebToken, error) { + sig, err := b.sign() + if err != nil { + return nil, err + } + + h := make([]jose.Header, len(sig.Signatures)) + for i, v := range sig.Signatures { + h[i] = v.Header + } + + return b.builder.token(sig.Verify, h) +} + +func (b *signedBuilder) Serialize() (string, error) { + sig, err := b.sign() + if err != nil { + return "", err + } + + return sig.CompactSerialize() +} + +func (b *signedBuilder) sign() (*jose.JSONWebSignature, error) { + if b.err != nil { + return nil, b.err + } + + p, err := json.Marshal(b.payload) + if err != nil { + return nil, err + } + + return b.sig.Sign(p) +} + +func (b *encryptedBuilder) Claims(i interface{}) Builder { + return &encryptedBuilder{ + builder: b.builder.claims(i), + enc: b.enc, + } +} + +func (b *encryptedBuilder) Serialize() (string, error) { + enc, err := b.encrypt() + if err != nil { + return "", err + } + + return enc.CompactSerialize() +} + +func (b *encryptedBuilder) Token() (*JSONWebToken, error) { + enc, err := b.encrypt() + if err != nil { + return nil, err + } + + return b.builder.token(enc.Decrypt, []jose.Header{enc.Header}) +} + +func (b 
*encryptedBuilder) encrypt() (*jose.JSONWebEncryption, error) {
+	if b.err != nil {
+		return nil, b.err
+	}
+
+	p, err := json.Marshal(b.payload)
+	if err != nil {
+		return nil, err
+	}
+
+	return b.enc.Encrypt(p)
+}
+
+func (b *nestedBuilder) Claims(i interface{}) NestedBuilder {
+	return &nestedBuilder{
+		builder: b.builder.claims(i),
+		sig:     b.sig,
+		enc:     b.enc,
+	}
+}
+
+// Token produces a token suitable for serialization. It cannot be decrypted
+// without serializing and then deserializing.
+func (b *nestedBuilder) Token() (*NestedJSONWebToken, error) {
+	enc, err := b.signAndEncrypt()
+	if err != nil {
+		return nil, err
+	}
+
+	return &NestedJSONWebToken{
+		allowedSignatureAlgorithms: nil,
+		enc:                        enc,
+		Headers:                    []jose.Header{enc.Header},
+	}, nil
+}
+
+func (b *nestedBuilder) Serialize() (string, error) {
+	enc, err := b.signAndEncrypt()
+	if err != nil {
+		return "", err
+	}
+
+	return enc.CompactSerialize()
+}
+
+func (b *nestedBuilder) FullSerialize() (string, error) {
+	enc, err := b.signAndEncrypt()
+	if err != nil {
+		return "", err
+	}
+
+	return enc.FullSerialize(), nil
+}
+
+func (b *nestedBuilder) signAndEncrypt() (*jose.JSONWebEncryption, error) {
+	if b.err != nil {
+		return nil, b.err
+	}
+
+	p, err := json.Marshal(b.payload)
+	if err != nil {
+		return nil, err
+	}
+
+	sig, err := b.sig.Sign(p)
+	if err != nil {
+		return nil, err
+	}
+
+	p2, err := sig.CompactSerialize()
+	if err != nil {
+		return nil, err
+	}
+
+	return b.enc.Encrypt([]byte(p2))
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/jwt/claims.go b/vendor/github.com/go-jose/go-jose/v4/jwt/claims.go
new file mode 100644
index 0000000000..e73412a391
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/jwt/claims.go
@@ -0,0 +1,130 @@
+/*-
+ * Copyright 2016 Zbigniew Mandziejewicz
+ * Copyright 2016 Square, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jwt
+
+import (
+	"strconv"
+	"time"
+
+	"github.com/go-jose/go-jose/v4/json"
+)
+
+// Claims represents public claim values (as specified in RFC 7519).
+type Claims struct {
+	Issuer    string       `json:"iss,omitempty"`
+	Subject   string       `json:"sub,omitempty"`
+	Audience  Audience     `json:"aud,omitempty"`
+	Expiry    *NumericDate `json:"exp,omitempty"`
+	NotBefore *NumericDate `json:"nbf,omitempty"`
+	IssuedAt  *NumericDate `json:"iat,omitempty"`
+	ID        string       `json:"jti,omitempty"`
+}
+
+// NumericDate represents date and time as the number of seconds since the
+// epoch, ignoring leap seconds. Non-integer values can be represented
+// in the serialized format, but we round to the nearest second.
+// See RFC7519 Section 2: https://tools.ietf.org/html/rfc7519#section-2
+type NumericDate int64
+
+// NewNumericDate constructs NumericDate from time.Time value.
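+//
+// For example (illustrative): NewNumericDate(time.Unix(1700000000, 0)) yields
+// a *NumericDate equal to 1700000000, while NewNumericDate(time.Time{})
+// returns nil.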
+func NewNumericDate(t time.Time) *NumericDate {
+	if t.IsZero() {
+		return nil
+	}
+
+	// While RFC 7519 technically states that NumericDate values may be
+	// non-integer values, we don't bother serializing timestamps in
+	// claims with sub-second accuracy and just round to the nearest
+	// second instead. Not convinced sub-second accuracy is useful here.
+	out := NumericDate(t.Unix())
+	return &out
+}
+
+// MarshalJSON serializes the given NumericDate into its JSON representation.
+func (n NumericDate) MarshalJSON() ([]byte, error) {
+	return []byte(strconv.FormatInt(int64(n), 10)), nil
+}
+
+// UnmarshalJSON reads a date from its JSON representation.
+func (n *NumericDate) UnmarshalJSON(b []byte) error {
+	s := string(b)
+
+	f, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		return ErrUnmarshalNumericDate
+	}
+
+	*n = NumericDate(f)
+	return nil
+}
+
+// Time returns time.Time representation of NumericDate.
+func (n *NumericDate) Time() time.Time {
+	if n == nil {
+		return time.Time{}
+	}
+	return time.Unix(int64(*n), 0)
+}
+
+// Audience represents the recipients that the token is intended for.
+type Audience []string
+
+// UnmarshalJSON reads an audience from its JSON representation.
+func (s *Audience) UnmarshalJSON(b []byte) error {
+	var v interface{}
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	switch v := v.(type) {
+	case string:
+		*s = []string{v}
+	case []interface{}:
+		a := make([]string, len(v))
+		for i, e := range v {
+			s, ok := e.(string)
+			if !ok {
+				return ErrUnmarshalAudience
+			}
+			a[i] = s
+		}
+		*s = a
+	default:
+		return ErrUnmarshalAudience
+	}
+
+	return nil
+}
+
+// MarshalJSON converts audience to json representation.
+func (s Audience) MarshalJSON() ([]byte, error) {
+	if len(s) == 1 {
+		return json.Marshal(s[0])
+	}
+	return json.Marshal([]string(s))
+}
+
+// Contains checks whether a given string is included in the Audience.
+func (s Audience) Contains(v string) bool {
+	for _, a := range s {
+		if a == v {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/jwt/doc.go b/vendor/github.com/go-jose/go-jose/v4/jwt/doc.go
new file mode 100644
index 0000000000..30b886ef0e
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/jwt/doc.go
@@ -0,0 +1,20 @@
+/*-
+ * Copyright 2017 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+Package jwt provides an implementation of the JSON Web Token standard.
+*/
+package jwt
diff --git a/vendor/github.com/go-jose/go-jose/v4/jwt/errors.go b/vendor/github.com/go-jose/go-jose/v4/jwt/errors.go
new file mode 100644
index 0000000000..27388e5449
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/jwt/errors.go
@@ -0,0 +1,53 @@
+/*-
+ * Copyright 2016 Zbigniew Mandziejewicz
+ * Copyright 2016 Square, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jwt
+
+import "errors"
+
+// ErrUnmarshalAudience indicates that aud claim could not be unmarshalled.
+var ErrUnmarshalAudience = errors.New("go-jose/go-jose/jwt: expected string or array value to unmarshal to Audience")
+
+// ErrUnmarshalNumericDate indicates that JWT NumericDate could not be unmarshalled.
+var ErrUnmarshalNumericDate = errors.New("go-jose/go-jose/jwt: expected number value to unmarshal NumericDate")
+
+// ErrInvalidClaims indicates that given claims have invalid type.
+var ErrInvalidClaims = errors.New("go-jose/go-jose/jwt: expected claims to be value convertible into JSON object")
+
+// ErrInvalidIssuer indicates invalid iss claim.
+var ErrInvalidIssuer = errors.New("go-jose/go-jose/jwt: validation failed, invalid issuer claim (iss)")
+
+// ErrInvalidSubject indicates invalid sub claim.
+var ErrInvalidSubject = errors.New("go-jose/go-jose/jwt: validation failed, invalid subject claim (sub)")
+
+// ErrInvalidAudience indicates invalid aud claim.
+var ErrInvalidAudience = errors.New("go-jose/go-jose/jwt: validation failed, invalid audience claim (aud)")
+
+// ErrInvalidID indicates invalid jti claim.
+var ErrInvalidID = errors.New("go-jose/go-jose/jwt: validation failed, invalid ID claim (jti)")
+
+// ErrNotValidYet indicates that token is used before time indicated in nbf claim.
+var ErrNotValidYet = errors.New("go-jose/go-jose/jwt: validation failed, token not valid yet (nbf)")
+
+// ErrExpired indicates that token is used after expiry time indicated in exp claim.
+var ErrExpired = errors.New("go-jose/go-jose/jwt: validation failed, token is expired (exp)")
+
+// ErrIssuedInTheFuture indicates that the iat field is in the future.
+var ErrIssuedInTheFuture = errors.New("go-jose/go-jose/jwt: validation failed, token issued in the future (iat)")
+
+// ErrInvalidContentType indicates that token requires JWT cty header.
+var ErrInvalidContentType = errors.New("go-jose/go-jose/jwt: expected content type to be JWT (cty header)")
diff --git a/vendor/github.com/go-jose/go-jose/v4/jwt/jwt.go b/vendor/github.com/go-jose/go-jose/v4/jwt/jwt.go
new file mode 100644
index 0000000000..c4998d7875
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/jwt/jwt.go
@@ -0,0 +1,198 @@
+/*-
+ * Copyright 2016 Zbigniew Mandziejewicz
+ * Copyright 2016 Square, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jwt
+
+import (
+	"fmt"
+	"strings"
+
+	jose "github.com/go-jose/go-jose/v4"
+	"github.com/go-jose/go-jose/v4/json"
+)
+
+// JSONWebToken represents a JSON Web Token (as specified in RFC7519).
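+//
+// A minimal usage sketch (illustrative only: rawToken and verificationKey are
+// assumed to be supplied by the caller, and RS256 is just an example choice):
+//
+//	tok, err := jwt.ParseSigned(rawToken, []jose.SignatureAlgorithm{jose.RS256})
+//	if err != nil {
+//		// handle malformed token
+//	}
+//	var cl jwt.Claims
+//	if err := tok.Claims(verificationKey, &cl); err != nil {
+//		// signature verification or decoding failed
+//	}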
+type JSONWebToken struct { + payload func(k interface{}) ([]byte, error) + unverifiedPayload func() []byte + Headers []jose.Header +} + +type NestedJSONWebToken struct { + enc *jose.JSONWebEncryption + Headers []jose.Header + // Used when parsing and decrypting an input + allowedSignatureAlgorithms []jose.SignatureAlgorithm +} + +// Claims deserializes a JSONWebToken into dest using the provided key. +func (t *JSONWebToken) Claims(key interface{}, dest ...interface{}) error { + b, err := t.payload(key) + if err != nil { + return err + } + + for _, d := range dest { + if err := json.Unmarshal(b, d); err != nil { + return err + } + } + + return nil +} + +// UnsafeClaimsWithoutVerification deserializes the claims of a +// JSONWebToken into the dests. For signed JWTs, the claims are not +// verified. This function won't work for encrypted JWTs. +func (t *JSONWebToken) UnsafeClaimsWithoutVerification(dest ...interface{}) error { + if t.unverifiedPayload == nil { + return fmt.Errorf("go-jose/go-jose: Cannot get unverified claims") + } + claims := t.unverifiedPayload() + for _, d := range dest { + if err := json.Unmarshal(claims, d); err != nil { + return err + } + } + return nil +} + +func (t *NestedJSONWebToken) Decrypt(decryptionKey interface{}) (*JSONWebToken, error) { + b, err := t.enc.Decrypt(decryptionKey) + if err != nil { + return nil, err + } + + sig, err := ParseSigned(string(b), t.allowedSignatureAlgorithms) + if err != nil { + return nil, err + } + + return sig, nil +} + +// ParseSigned parses token from JWS form. +func ParseSigned(s string, signatureAlgorithms []jose.SignatureAlgorithm) (*JSONWebToken, error) { + sig, err := jose.ParseSignedCompact(s, signatureAlgorithms) + if err != nil { + return nil, err + } + headers := make([]jose.Header, len(sig.Signatures)) + for i, signature := range sig.Signatures { + headers[i] = signature.Header + } + + return &JSONWebToken{ + payload: sig.Verify, + unverifiedPayload: sig.UnsafePayloadWithoutVerification, + Headers: headers, + }, nil +} + +func validateKeyEncryptionAlgorithm(algs []jose.KeyAlgorithm) error { + for _, alg := range algs { + switch alg { + case jose.ED25519, + jose.RSA1_5, + jose.RSA_OAEP, + jose.RSA_OAEP_256, + jose.ECDH_ES, + jose.ECDH_ES_A128KW, + jose.ECDH_ES_A192KW, + jose.ECDH_ES_A256KW: + return fmt.Errorf("asymmetric encryption algorithms not supported for JWT: "+ + "invalid key encryption algorithm: %s", alg) + case jose.PBES2_HS256_A128KW, + jose.PBES2_HS384_A192KW, + jose.PBES2_HS512_A256KW: + return fmt.Errorf("password-based encryption not supported for JWT: "+ + "invalid key encryption algorithm: %s", alg) + } + } + return nil +} + +func parseEncryptedCompact( + s string, + keyAlgorithms []jose.KeyAlgorithm, + contentEncryption []jose.ContentEncryption, +) (*jose.JSONWebEncryption, error) { + err := validateKeyEncryptionAlgorithm(keyAlgorithms) + if err != nil { + return nil, err + } + enc, err := jose.ParseEncryptedCompact(s, keyAlgorithms, contentEncryption) + if err != nil { + return nil, err + } + return enc, nil +} + +// ParseEncrypted parses token from JWE form. +// +// The keyAlgorithms and contentEncryption parameters are used to validate the "alg" and "enc" +// header parameters respectively. They must be nonempty, and each "alg" or "enc" header in +// parsed data must contain a value that is present in the corresponding parameter. That +// includes the protected and unprotected headers as well as all recipients. To accept +// multiple algorithms, pass a slice of all the algorithms you want to accept. 
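+//
+// For illustration (raw is a compact-serialized JWE supplied by the caller;
+// the algorithm choices below are example assumptions, not a recommendation):
+//
+//	tok, err := jwt.ParseEncrypted(raw,
+//		[]jose.KeyAlgorithm{jose.DIRECT},
+//		[]jose.ContentEncryption{jose.A128GCM})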
+func ParseEncrypted(s string,
+	keyAlgorithms []jose.KeyAlgorithm,
+	contentEncryption []jose.ContentEncryption,
+) (*JSONWebToken, error) {
+	enc, err := parseEncryptedCompact(s, keyAlgorithms, contentEncryption)
+	if err != nil {
+		return nil, err
+	}
+
+	return &JSONWebToken{
+		payload: enc.Decrypt,
+		Headers: []jose.Header{enc.Header},
+	}, nil
+}
+
+// ParseSignedAndEncrypted parses signed-then-encrypted token from JWE form.
+//
+// The encryptionKeyAlgorithms and contentEncryption parameters are used to validate the "alg" and "enc"
+// header parameters, respectively, of the outer JWE. They must be nonempty, and each "alg" or "enc"
+// header in parsed data must contain a value that is present in the corresponding parameter. That
+// includes the protected and unprotected headers as well as all recipients. To accept
+// multiple algorithms, pass a slice of all the algorithms you want to accept.
+//
+// The signatureAlgorithms parameter is used to validate the "alg" header parameter of the
+// inner JWS. It must be nonempty, and the "alg" header in the inner JWS must contain a value
+// that is present in the parameter.
+func ParseSignedAndEncrypted(s string,
+	encryptionKeyAlgorithms []jose.KeyAlgorithm,
+	contentEncryption []jose.ContentEncryption,
+	signatureAlgorithms []jose.SignatureAlgorithm,
+) (*NestedJSONWebToken, error) {
+	enc, err := parseEncryptedCompact(s, encryptionKeyAlgorithms, contentEncryption)
+	if err != nil {
+		return nil, err
+	}
+
+	contentType, _ := enc.Header.ExtraHeaders[jose.HeaderContentType].(string)
+	if strings.ToUpper(contentType) != "JWT" {
+		return nil, ErrInvalidContentType
+	}
+
+	return &NestedJSONWebToken{
+		allowedSignatureAlgorithms: signatureAlgorithms,
+		enc:                        enc,
+		Headers:                    []jose.Header{enc.Header},
+	}, nil
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/jwt/validation.go b/vendor/github.com/go-jose/go-jose/v4/jwt/validation.go
new file mode 100644
index 0000000000..841a93eef4
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/jwt/validation.go
@@ -0,0 +1,127 @@
+/*-
+ * Copyright 2016 Zbigniew Mandziejewicz
+ * Copyright 2016 Square, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jwt
+
+import "time"
+
+const (
+	// DefaultLeeway defines the default leeway for matching NotBefore/Expiry claims.
+	DefaultLeeway = 1.0 * time.Minute
+)
+
+// Expected defines values used for protected claims validation.
+// If a field has a zero value, validation for that field is skipped, with the
+// exception of Time, where the zero value means "now." To skip validating the
+// time claims, set the corresponding fields in the Claims struct to nil.
+type Expected struct {
+	// Issuer matches the "iss" claim exactly.
+	Issuer string
+	// Subject matches the "sub" claim exactly.
+	Subject string
+	// AnyAudience matches if there is a non-empty intersection between
+	// its values and the values in the "aud" claim.
+	AnyAudience Audience
+	// ID matches the "jti" claim exactly.
+	ID string
+	// Time matches the "exp", "nbf" and "iat" claims with leeway.
+	Time time.Time
+}
+
+// WithTime copies expectations with new time.
+func (e Expected) WithTime(t time.Time) Expected {
+	e.Time = t
+	return e
+}
+
+// Validate checks claims in a token against expected values.
+// A default leeway value of one minute is used to compare time values.
+//
+// The default leeway will cause the token to be deemed valid until one
+// minute after the expiration time. If you're a server application that
+// wants to give an extra minute to client tokens, use this
+// function. If you're a client application wondering if the server
+// will accept your token, use ValidateWithLeeway with a leeway <=0,
+// otherwise this function might make you think a token is valid when
+// it is not.
+func (c Claims) Validate(e Expected) error {
+	return c.ValidateWithLeeway(e, DefaultLeeway)
+}
+
+// ValidateWithLeeway checks claims in a token against expected values. A
+// custom leeway may be specified for comparing time values. You may pass a
+// zero value to check time values with no leeway, but you should note that
+// numeric date values are rounded to the nearest second and sub-second
+// precision is not supported.
+//
+// The leeway gives some extra time to the token from the server's
+// point of view. That is, if the token is expired, ValidateWithLeeway
+// will still accept the token for 'leeway' amount of time. This fails
+// if you're using this function to check if a server will accept your
+// token, because it will think the token is valid even after it
+// expires. So if you're a client validating if the token is valid to
+// be submitted to a server, use leeway <=0; if you're a server
+// validating a token, use leeway >=0.
+func (c Claims) ValidateWithLeeway(e Expected, leeway time.Duration) error {
+	if e.Issuer != "" && e.Issuer != c.Issuer {
+		return ErrInvalidIssuer
+	}
+
+	if e.Subject != "" && e.Subject != c.Subject {
+		return ErrInvalidSubject
+	}
+
+	if e.ID != "" && e.ID != c.ID {
+		return ErrInvalidID
+	}
+
+	if len(e.AnyAudience) != 0 {
+		var intersection bool
+		for _, v := range e.AnyAudience {
+			if c.Audience.Contains(v) {
+				intersection = true
+				break
+			}
+		}
+
+		if !intersection {
+			return ErrInvalidAudience
+		}
+	}
+
+	// validate using e.Time, or time.Now if not provided
+	validationTime := e.Time
+	if validationTime.IsZero() {
+		validationTime = time.Now()
+	}
+
+	if c.NotBefore != nil && validationTime.Add(leeway).Before(c.NotBefore.Time()) {
+		return ErrNotValidYet
+	}
+
+	if c.Expiry != nil && validationTime.Add(-leeway).After(c.Expiry.Time()) {
+		return ErrExpired
+	}
+
+	// IssuedAt is optional but cannot be in the future. This is not required by the RFC, but
+	// something is misconfigured if this happens and we should not trust it.
+	if c.IssuedAt != nil && validationTime.Add(leeway).Before(c.IssuedAt.Time()) {
+		return ErrIssuedInTheFuture
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/opaque.go b/vendor/github.com/go-jose/go-jose/v4/opaque.go
index 68db085ef6..429427232e 100644
--- a/vendor/github.com/go-jose/go-jose/v4/opaque.go
+++ b/vendor/github.com/go-jose/go-jose/v4/opaque.go
@@ -83,6 +83,9 @@ func (o *opaqueVerifier) verifyPayload(payload []byte, signature []byte, alg Sig
 }
 
 // OpaqueKeyEncrypter is an interface that supports encrypting keys with an opaque key.
+//
+// Note: this cannot currently be implemented outside this package because of its
+// unexported method.
type OpaqueKeyEncrypter interface { // KeyID returns the kid KeyID() string diff --git a/vendor/github.com/go-jose/go-jose/v4/signing.go b/vendor/github.com/go-jose/go-jose/v4/signing.go index 46c9a4d96f..3dec0112b6 100644 --- a/vendor/github.com/go-jose/go-jose/v4/signing.go +++ b/vendor/github.com/go-jose/go-jose/v4/signing.go @@ -390,7 +390,10 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte { // The verificationKey argument must have one of the types allowed for the // verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error { - key := tryJWKS(verificationKey, obj.headers()...) + key, err := tryJWKS(verificationKey, obj.headers()...) + if err != nil { + return err + } verifier, err := newVerifier(key) if err != nil { return err @@ -455,7 +458,10 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa // The verificationKey argument must have one of the types allowed for the // verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) { - key := tryJWKS(verificationKey, obj.headers()...) + key, err := tryJWKS(verificationKey, obj.headers()...) + if err != nil { + return -1, Signature{}, err + } verifier, err := newVerifier(key) if err != nil { return -1, Signature{}, err diff --git a/vendor/github.com/google/certificate-transparency-go/.golangci.yaml b/vendor/github.com/google/certificate-transparency-go/.golangci.yaml index 6cd9695e17..405740a1f1 100644 --- a/vendor/github.com/google/certificate-transparency-go/.golangci.yaml +++ b/vendor/github.com/google/certificate-transparency-go/.golangci.yaml @@ -18,5 +18,11 @@ linters-settings: issues: exclude-use-default: false - exclude: - - "Error return value of .((os\\.)?std(out|err)\\..*|.*Close|.*Flush|os\\.Remove(All)?|.*printf?|os\\.(Un)?Setenv). is not checked" + exclude-rules: + # The following grpc linters are excluded because grpc.Dial, grpc.DialContext and grpc.WithBlock will be supported throughout 1.x. 
+  - linters: [staticcheck]
+    text: 'SA1019: grpc.Dial is deprecated: use NewClient instead'
+  - linters: [staticcheck]
+    text: 'SA1019: grpc.DialContext is deprecated: use NewClient instead'
+  - linters: [staticcheck]
+    text: 'SA1019: grpc.WithBlock is deprecated: this DialOption is not supported by NewClient'
diff --git a/vendor/github.com/google/certificate-transparency-go/AUTHORS b/vendor/github.com/google/certificate-transparency-go/AUTHORS
index 942cec9b38..3ec5370574 100644
--- a/vendor/github.com/google/certificate-transparency-go/AUTHORS
+++ b/vendor/github.com/google/certificate-transparency-go/AUTHORS
@@ -11,6 +11,7 @@ Alex Cohn
 Comodo CA Limited
 Ed Maste
+Elisha Silas
 Fiaz Hossain
 Google LLC
 Internet Security Research Group
diff --git a/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
index e286ae20d0..ad59927495 100644
--- a/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
+++ b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
@@ -2,6 +2,149 @@
 
 ## HEAD
 
+## v1.2.1
+
+### Fixes
+
+* Fix Go potential bugs and maintainability by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1496
+
+### Dependency update
+
+* Bump google.golang.org/grpc from 1.63.2 to 1.64.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1482
+
+## v1.2.0
+
+### CTFE Storage Saving: Extra Data Issuance Chain Deduplication
+
+This feature reduces CT/Trillian database storage by deduplicating the entire issuance chain (intermediate certificate(s) and root certificate) that is currently stored in the Trillian merkle tree leaf ExtraData field. Storage cost should be reduced by at least 33% for new CT logs with this feature enabled. Currently only MySQL/MariaDB is supported to store the issuance chain in the CTFE database.
+
+Existing logs are not affected by this change.
+
+Log operators can choose to opt in to this change for new CT logs by adding new CTFE configs in the [LogMultiConfig](trillian/ctfe/configpb/config.proto) and importing the [database schema](trillian/ctfe/storage/mysql/schema.sql). See [example](trillian/examples/deployment/docker/ctfe/ct_server.cfg).
+
+- `ctfe_storage_connection_string`
+- `extra_data_issuance_chain_storage_backend`
+
+An optional LRU cache can be enabled by providing the following flags.
+
+- `cache_type`
+- `cache_size`
+- `cache_ttl`
+
+This change is tested in Cloud Build tests using the `mysql:8.4` Docker image as of the time of writing.
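+
+For illustration only, a hypothetical invocation enabling the cache might pass `--cache_type=lru --cache_size=1000 --cache_ttl=1h`; the values shown here are made-up examples, not documented defaults.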
+
+* Add issuance chain storage interface by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1430
+* Add issuance chain cache interface by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1431
+* Add CTFE extra data storage saving configs to config.proto by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1432
+* Add new types `PrecertChainEntryHash` and `CertificateChainHash` for TLS marshal/unmarshal in storage saving by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1435
+* Add IssuanceChainCache LRU implementation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1454
+* Add issuance chain service by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1452
+* Add CTFE extra data storage saving configs validation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1456
+* Add IssuanceChainStorage MySQL implementation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1462
+* Fix errcheck lint in mysql test by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1464
+* CTFE Extra Data Issuance Chain Deduplication by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1477
+* Fix incorrect deployment doc and server config by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1494
+
+### Submission proxy: Root compatibility checking
+
+* Adds the ability for a CT client to disable root compatible checking by @aaomidi in https://github.com/google/certificate-transparency-go/pull/1258
+
+### Fixes
+
+* Return 429 Too Many Requests for gRPC error code `ResourceExhausted` from Trillian by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1401
+* Safeguard against redirects on PUT request by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1418
+* Fix CT client upload to be safe against no-op POSTs by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1424
+
+### Misc
+
+* Prefix errors.New variables with the word "Err" by @aaomidi in https://github.com/google/certificate-transparency-go/pull/1399
+* Remove lint exceptions and fix remaining issues by @silaselisha in https://github.com/google/certificate-transparency-go/pull/1438
+* Fix invalid Go toolchain version by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1471
+* Regenerate proto files by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1489
+
+### Dependency update
+
+* Bump distroless/base-debian12 from `5eae9ef` to `28a7f1f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1388
+* Bump github/codeql-action from 3.24.6 to 3.24.7 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1389
+* Bump actions/checkout from 4.1.1 to 4.1.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1390
+* Bump golang from `6699d28` to `7f9c058` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1391
+* Bump golang from `6699d28` to `7f9c058` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1392
+* Bump golang from `6699d28` to `7a392a2` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1393
+* Bump 
golang from `6699d28` to `7a392a2` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1394 +* Bump golang from `7a392a2` to `d996c64` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1395 +* Bump golang from `7f9c058` to `d996c64` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1396 +* Bump golang from `7a392a2` to `d996c64` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1397 +* Bump golang from `7f9c058` to `d996c64` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1398 +* Bump github/codeql-action from 3.24.7 to 3.24.8 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1400 +* Bump github/codeql-action from 3.24.8 to 3.24.9 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1402 +* Bump go.etcd.io/etcd/v3 from 3.5.12 to 3.5.13 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1405 +* Bump distroless/base-debian12 from `28a7f1f` to `611d30d` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1406 +* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1407 +* Bump golang.org/x/net from 0.22.0 to 0.23.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1408 +* update govulncheck go version from 1.21.8 to 1.21.9 by @phbnf in https://github.com/google/certificate-transparency-go/pull/1412 +* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1409 +* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1410 +* Bump golang.org/x/crypto from 0.21.0 to 0.22.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1414 +* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1411 +* Bump github/codeql-action from 3.24.9 to 3.24.10 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1415 +* Bump golang.org/x/net from 0.23.0 to 0.24.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1416 +* Bump google.golang.org/grpc from 1.62.1 to 1.63.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1417 +* Bump github.com/fullstorydev/grpcurl from 1.8.9 to 1.9.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1419 +* Bump golang from `48b942a` to `3451eec` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1421 +* Bump golang from `48b942a` to `3451eec` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1423 +* Bump golang from `48b942a` to `3451eec` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1420 +* Bump golang from `3451eec` to `b03f3ba` in /integration by @dependabot in 
https://github.com/google/certificate-transparency-go/pull/1426 +* Bump golang from `3451eec` to `b03f3ba` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1425 +* Bump golang from `48b942a` to `3451eec` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1422 +* Bump golang from `3451eec` to `b03f3ba` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1427 +* Bump golang from `3451eec` to `b03f3ba` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1428 +* Bump github/codeql-action from 3.24.10 to 3.25.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1433 +* Bump github/codeql-action from 3.25.0 to 3.25.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1434 +* Bump actions/upload-artifact from 4.3.1 to 4.3.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1436 +* Bump actions/checkout from 4.1.2 to 4.1.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1437 +* Bump actions/upload-artifact from 4.3.2 to 4.3.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1440 +* Bump github/codeql-action from 3.25.1 to 3.25.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1441 +* Bump golang from `b03f3ba` to `d0902ba` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1444 +* Bump golang from `b03f3ba` to `d0902ba` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1443 +* Bump github.com/rs/cors from 1.10.1 to 1.11.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1442 +* Bump golang from `b03f3ba` to `d0902ba` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1447 +* Bump actions/checkout from 4.1.3 to 4.1.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1446 +* Bump github/codeql-action from 3.25.2 to 3.25.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1449 +* Bump golangci/golangci-lint-action from 4.0.0 to 5.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1448 +* Bump golang from `b03f3ba` to `d0902ba` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1445 +* Bump golangci/golangci-lint-action from 5.0.0 to 5.1.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1451 +* Bump distroless/base-debian12 from `611d30d` to `d8d01e2` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1450 +* Bump google.golang.org/protobuf from 1.33.1-0.20240408130810-98873a205002 to 1.34.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1453 +* Bump actions/setup-go from 5.0.0 to 5.0.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1455 +* Bump golang.org/x/net from 0.24.0 to 0.25.0 and golang.org/x/crypto from v0.22.0 to v0.23.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1457 +* Bump google.golang.org/protobuf from 1.34.0 to 1.34.1 by @dependabot in 
https://github.com/google/certificate-transparency-go/pull/1458 +* Bump distroless/base-debian12 from `d8d01e2` to `786007f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1461 +* Bump golangci/golangci-lint-action from 5.1.0 to 5.3.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1460 +* Bump `go-version-input` to 1.21.10 in govulncheck.yml by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1472 +* Bump golangci/golangci-lint-action from 5.3.0 to 6.0.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1473 +* Bump actions/checkout from 4.1.4 to 4.1.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1469 +* Bump github.com/go-sql-driver/mysql from 1.7.1 to 1.8.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1465 +* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1466 +* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1463 +* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1470 +* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1467 +* Bump github/codeql-action from 3.25.3 to 3.25.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1474 +* Bump github.com/prometheus/client_golang from 1.19.0 to 1.19.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1475 +* Bump ossf/scorecard-action from 2.3.1 to 2.3.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1476 +* Bump github/codeql-action from 3.25.4 to 3.25.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1478 +* Bump golang from `6d71b7c` to `ef27a3c` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1480 +* Bump golang from `6d71b7c` to `ef27a3c` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1481 +* Bump golang from `6d71b7c` to `ef27a3c` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1479 +* Bump golang from `6d71b7c` to `ef27a3c` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1483 +* Bump golang from `ef27a3c` to `5c56bd4` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1484 +* Bump golang from `ef27a3c` to `5c56bd4` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1485 +* Bump golang from `ef27a3c` to `5c56bd4` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1486 +* Bump actions/checkout from 4.1.5 to 4.1.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1487 +* Bump golang from `ef27a3c` to `5c56bd4` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1488 +* Bump 
github/codeql-action from 3.25.5 to 3.25.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1490 +* Bump alpine from `c5b1261` to `58d02b4` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1491 +* Bump alpine from `58d02b4` to `77726ef` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1493 + ## v1.1.8 * Recommended Go version for development: 1.21 diff --git a/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS index 4d04dc6a3d..fd7550fcef 100644 --- a/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS +++ b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS @@ -30,6 +30,7 @@ Chris Kennelly David Drysdale Deyan Bektchiev Ed Maste +Elisha Silas Emilia Kasper Eran Messeri Fiaz Hossain diff --git a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go new file mode 100644 index 0000000000..a2fed51d88 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go @@ -0,0 +1,278 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.1 +// protoc v3.20.1 +// source: client/configpb/multilog.proto + +package configpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// TemporalLogConfig is a set of LogShardConfig messages, whose +// time limits should be contiguous. 
+type TemporalLogConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard,proto3" json:"shard,omitempty"` +} + +func (x *TemporalLogConfig) Reset() { + *x = TemporalLogConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_client_configpb_multilog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TemporalLogConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TemporalLogConfig) ProtoMessage() {} + +func (x *TemporalLogConfig) ProtoReflect() protoreflect.Message { + mi := &file_client_configpb_multilog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TemporalLogConfig.ProtoReflect.Descriptor instead. +func (*TemporalLogConfig) Descriptor() ([]byte, []int) { + return file_client_configpb_multilog_proto_rawDescGZIP(), []int{0} +} + +func (x *TemporalLogConfig) GetShard() []*LogShardConfig { + if x != nil { + return x.Shard + } + return nil +} + +// LogShardConfig describes the acceptable date range for a single shard of a temporal +// log. +type LogShardConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"` + // The log's public key in DER-encoded PKIX form. + PublicKeyDer []byte `protobuf:"bytes,2,opt,name=public_key_der,json=publicKeyDer,proto3" json:"public_key_der,omitempty"` + // not_after_start defines the start of the range of acceptable NotAfter + // values, inclusive. + // Leaving this unset implies no lower bound to the range. + NotAfterStart *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart,proto3" json:"not_after_start,omitempty"` + // not_after_limit defines the end of the range of acceptable NotAfter values, + // exclusive. + // Leaving this unset implies no upper bound to the range. + NotAfterLimit *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit,proto3" json:"not_after_limit,omitempty"` +} + +func (x *LogShardConfig) Reset() { + *x = LogShardConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_client_configpb_multilog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogShardConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogShardConfig) ProtoMessage() {} + +func (x *LogShardConfig) ProtoReflect() protoreflect.Message { + mi := &file_client_configpb_multilog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogShardConfig.ProtoReflect.Descriptor instead. 
+func (*LogShardConfig) Descriptor() ([]byte, []int) { + return file_client_configpb_multilog_proto_rawDescGZIP(), []int{1} +} + +func (x *LogShardConfig) GetUri() string { + if x != nil { + return x.Uri + } + return "" +} + +func (x *LogShardConfig) GetPublicKeyDer() []byte { + if x != nil { + return x.PublicKeyDer + } + return nil +} + +func (x *LogShardConfig) GetNotAfterStart() *timestamppb.Timestamp { + if x != nil { + return x.NotAfterStart + } + return nil +} + +func (x *LogShardConfig) GetNotAfterLimit() *timestamppb.Timestamp { + if x != nil { + return x.NotAfterLimit + } + return nil +} + +var File_client_configpb_multilog_proto protoreflect.FileDescriptor + +var file_client_configpb_multilog_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, + 0x62, 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x08, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x43, 0x0a, 0x11, 0x54, + 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x2e, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x67, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x22, 0xd0, 0x01, 0x0a, 0x0e, 0x4c, 0x6f, 0x67, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0f, 0x6e, + 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x42, 0x0a, 0x0f, 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x2d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, + 0x79, 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x6c, 0x6f, 0x67, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_client_configpb_multilog_proto_rawDescOnce sync.Once + file_client_configpb_multilog_proto_rawDescData = 
file_client_configpb_multilog_proto_rawDesc +) + +func file_client_configpb_multilog_proto_rawDescGZIP() []byte { + file_client_configpb_multilog_proto_rawDescOnce.Do(func() { + file_client_configpb_multilog_proto_rawDescData = protoimpl.X.CompressGZIP(file_client_configpb_multilog_proto_rawDescData) + }) + return file_client_configpb_multilog_proto_rawDescData +} + +var file_client_configpb_multilog_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_client_configpb_multilog_proto_goTypes = []interface{}{ + (*TemporalLogConfig)(nil), // 0: configpb.TemporalLogConfig + (*LogShardConfig)(nil), // 1: configpb.LogShardConfig + (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp +} +var file_client_configpb_multilog_proto_depIdxs = []int32{ + 1, // 0: configpb.TemporalLogConfig.shard:type_name -> configpb.LogShardConfig + 2, // 1: configpb.LogShardConfig.not_after_start:type_name -> google.protobuf.Timestamp + 2, // 2: configpb.LogShardConfig.not_after_limit:type_name -> google.protobuf.Timestamp + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_client_configpb_multilog_proto_init() } +func file_client_configpb_multilog_proto_init() { + if File_client_configpb_multilog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_client_configpb_multilog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TemporalLogConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_client_configpb_multilog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogShardConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_client_configpb_multilog_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_client_configpb_multilog_proto_goTypes, + DependencyIndexes: file_client_configpb_multilog_proto_depIdxs, + MessageInfos: file_client_configpb_multilog_proto_msgTypes, + }.Build() + File_client_configpb_multilog_proto = out.File + file_client_configpb_multilog_proto_rawDesc = nil + file_client_configpb_multilog_proto_goTypes = nil + file_client_configpb_multilog_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto new file mode 100644 index 0000000000..0774c35e21 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto @@ -0,0 +1,45 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package configpb; + +option go_package = "github.com/google/certificate-transparency-go/client/multilog/configpb"; + +import "google/protobuf/timestamp.proto"; + +// TemporalLogConfig is a set of LogShardConfig messages, whose +// time limits should be contiguous. +message TemporalLogConfig { + repeated LogShardConfig shard = 1; +} + +// LogShardConfig describes the acceptable date range for a single shard of a temporal +// log. +message LogShardConfig { + string uri = 1; + + // The log's public key in DER-encoded PKIX form. + bytes public_key_der = 2; + + // not_after_start defines the start of the range of acceptable NotAfter + // values, inclusive. + // Leaving this unset implies no lower bound to the range. + google.protobuf.Timestamp not_after_start = 3; + // not_after_limit defines the end of the range of acceptable NotAfter values, + // exclusive. + // Leaving this unset implies no upper bound to the range. + google.protobuf.Timestamp not_after_limit = 4; +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/getentries.go b/vendor/github.com/google/certificate-transparency-go/client/getentries.go new file mode 100644 index 0000000000..103dc81580 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/getentries.go @@ -0,0 +1,68 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "errors" + "strconv" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/x509" +) + +// GetRawEntries exposes the /ct/v1/get-entries result with only the JSON parsing done. +func (c *LogClient) GetRawEntries(ctx context.Context, start, end int64) (*ct.GetEntriesResponse, error) { + if end < 0 { + return nil, errors.New("end should be >= 0") + } + if end < start { + return nil, errors.New("start should be <= end") + } + + params := map[string]string{ + "start": strconv.FormatInt(start, 10), + "end": strconv.FormatInt(end, 10), + } + + var resp ct.GetEntriesResponse + if _, _, err := c.GetAndParse(ctx, ct.GetEntriesPath, params, &resp); err != nil { + return nil, err + } + + return &resp, nil +} + +// GetEntries attempts to retrieve the entries in the sequence [start, end] from the CT log server +// (RFC6962 s4.6) as parsed [pre-]certificates for convenience, held in a slice of ct.LogEntry structures. 
+// However, this does mean that any certificate parsing failures will cause a failure of the whole +// retrieval operation; for more robust retrieval of parsed certificates, use GetRawEntries() and invoke +// ct.LogEntryFromLeaf() on each individual entry. +func (c *LogClient) GetEntries(ctx context.Context, start, end int64) ([]ct.LogEntry, error) { + resp, err := c.GetRawEntries(ctx, start, end) + if err != nil { + return nil, err + } + entries := make([]ct.LogEntry, len(resp.Entries)) + for i, entry := range resp.Entries { + index := start + int64(i) + logEntry, err := ct.LogEntryFromLeaf(index, &entry) + if x509.IsFatal(err) { + return nil, err + } + entries[i] = *logEntry + } + return entries, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/logclient.go b/vendor/github.com/google/certificate-transparency-go/client/logclient.go new file mode 100644 index 0000000000..7842c8e288 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/logclient.go @@ -0,0 +1,226 @@ +// Copyright 2014 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package client is a CT log client implementation and contains types and code +// for interacting with RFC6962-compliant CT Log instances. +// See http://tools.ietf.org/html/rfc6962 for details +package client + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + "strconv" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/jsonclient" + "github.com/google/certificate-transparency-go/tls" +) + +// LogClient represents a client for a given CT Log instance +type LogClient struct { + jsonclient.JSONClient +} + +// CheckLogClient is an interface that allows (just) checking of various log contents. +type CheckLogClient interface { + BaseURI() string + GetSTH(context.Context) (*ct.SignedTreeHead, error) + GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error) + GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error) +} + +// New constructs a new LogClient instance. +// |uri| is the base URI of the CT log instance to interact with, e.g. +// https://ct.googleapis.com/pilot +// |hc| is the underlying client to be used for HTTP requests to the CT log. +// |opts| can be used to provide a custom logger interface and a public key +// for signature verification. +func New(uri string, hc *http.Client, opts jsonclient.Options) (*LogClient, error) { + logClient, err := jsonclient.New(uri, hc, opts) + if err != nil { + return nil, err + } + return &LogClient{*logClient}, err +} + +// RspError represents a server error including HTTP information. +type RspError = jsonclient.RspError + +// Attempts to add |chain| to the log, using the api end-point specified by +// |path|. If provided context expires before submission is complete an +// error will be returned. 
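+//
+// Callers normally reach this through AddChain or AddPreChain; a minimal
+// sketch of that path (the log URL and chain contents are hypothetical):
+//
+//	lc, err := New("https://ct.example.com", http.DefaultClient, jsonclient.Options{})
+//	if err != nil {
+//		// handle error
+//	}
+//	sct, err := lc.AddChain(ctx, chain) // chain is a leaf-first []ct.ASN1Cert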
+func (c *LogClient) addChainWithRetry(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + var resp ct.AddChainResponse + var req ct.AddChainRequest + for _, link := range chain { + req.Chain = append(req.Chain, link.Data) + } + + httpRsp, body, err := c.PostAndParseWithRetry(ctx, path, &req, &resp) + if err != nil { + return nil, err + } + + var ds ct.DigitallySigned + if rest, err := tls.Unmarshal(resp.Signature, &ds); err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } else if len(rest) > 0 { + return nil, RspError{ + Err: fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)), + StatusCode: httpRsp.StatusCode, + Body: body, + } + } + + exts, err := base64.StdEncoding.DecodeString(resp.Extensions) + if err != nil { + return nil, RspError{ + Err: fmt.Errorf("invalid base64 data in Extensions (%q): %v", resp.Extensions, err), + StatusCode: httpRsp.StatusCode, + Body: body, + } + } + + var logID ct.LogID + copy(logID.KeyID[:], resp.ID) + sct := &ct.SignedCertificateTimestamp{ + SCTVersion: resp.SCTVersion, + LogID: logID, + Timestamp: resp.Timestamp, + Extensions: ct.CTExtensions(exts), + Signature: ds, + } + if err := c.VerifySCTSignature(*sct, ctype, chain); err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + return sct, nil +} + +// AddChain adds the (DER represented) X509 |chain| to the log. +func (c *LogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return c.addChainWithRetry(ctx, ct.X509LogEntryType, ct.AddChainPath, chain) +} + +// AddPreChain adds the (DER represented) Precertificate |chain| to the log. +func (c *LogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return c.addChainWithRetry(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain) +} + +// GetSTH retrieves the current STH from the log. +// Returns a populated SignedTreeHead, or a non-nil error (which may be of type +// RspError if a raw http.Response is available). +func (c *LogClient) GetSTH(ctx context.Context) (*ct.SignedTreeHead, error) { + var resp ct.GetSTHResponse + httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHPath, nil, &resp) + if err != nil { + return nil, err + } + + sth, err := resp.ToSignedTreeHead() + if err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + + if err := c.VerifySTHSignature(*sth); err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + return sth, nil +} + +// VerifySTHSignature checks the signature in sth, returning any error encountered or nil if verification is +// successful. +func (c *LogClient) VerifySTHSignature(sth ct.SignedTreeHead) error { + if c.Verifier == nil { + // Can't verify signatures without a verifier + return nil + } + return c.Verifier.VerifySTHSignature(sth) +} + +// VerifySCTSignature checks the signature in sct for the given LogEntryType, with associated certificate chain. 
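+// Verification is skipped (returning nil) when the client was built without
+// a public key, because c.Verifier is nil in that case.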
+func (c *LogClient) VerifySCTSignature(sct ct.SignedCertificateTimestamp, ctype ct.LogEntryType, certData []ct.ASN1Cert) error { + if c.Verifier == nil { + // Can't verify signatures without a verifier + return nil + } + leaf, err := ct.MerkleTreeLeafFromRawChain(certData, ctype, sct.Timestamp) + if err != nil { + return fmt.Errorf("failed to build MerkleTreeLeaf: %v", err) + } + entry := ct.LogEntry{Leaf: *leaf} + return c.Verifier.VerifySCTSignature(sct, entry) +} + +// GetSTHConsistency retrieves the consistency proof between two snapshots. +func (c *LogClient) GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error) { + base10 := 10 + params := map[string]string{ + "first": strconv.FormatUint(first, base10), + "second": strconv.FormatUint(second, base10), + } + var resp ct.GetSTHConsistencyResponse + if _, _, err := c.GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp); err != nil { + return nil, err + } + return resp.Consistency, nil +} + +// GetProofByHash returns an audit path for the hash of an SCT. +func (c *LogClient) GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error) { + b64Hash := base64.StdEncoding.EncodeToString(hash) + base10 := 10 + params := map[string]string{ + "tree_size": strconv.FormatUint(treeSize, base10), + "hash": b64Hash, + } + var resp ct.GetProofByHashResponse + if _, _, err := c.GetAndParse(ctx, ct.GetProofByHashPath, params, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +// GetAcceptedRoots retrieves the set of acceptable root certificates for a log. +func (c *LogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) { + var resp ct.GetRootsResponse + httpRsp, body, err := c.GetAndParse(ctx, ct.GetRootsPath, nil, &resp) + if err != nil { + return nil, err + } + var roots []ct.ASN1Cert + for _, cert64 := range resp.Certificates { + cert, err := base64.StdEncoding.DecodeString(cert64) + if err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + roots = append(roots, ct.ASN1Cert{Data: cert}) + } + return roots, nil +} + +// GetEntryAndProof returns a log entry and audit path for the index of a leaf. +func (c *LogClient) GetEntryAndProof(ctx context.Context, index, treeSize uint64) (*ct.GetEntryAndProofResponse, error) { + base10 := 10 + params := map[string]string{ + "leaf_index": strconv.FormatUint(index, base10), + "tree_size": strconv.FormatUint(treeSize, base10), + } + var resp ct.GetEntryAndProofResponse + if _, _, err := c.GetAndParse(ctx, ct.GetEntryAndProofPath, params, &resp); err != nil { + return nil, err + } + return &resp, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/client/multilog.go b/vendor/github.com/google/certificate-transparency-go/client/multilog.go new file mode 100644 index 0000000000..afd75a6db4 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/client/multilog.go @@ -0,0 +1,223 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "crypto/sha256" + "errors" + "fmt" + "net/http" + "os" + "time" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/client/configpb" + "github.com/google/certificate-transparency-go/jsonclient" + "github.com/google/certificate-transparency-go/x509" + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/proto" +) + +type interval struct { + lower *time.Time // nil => no lower bound + upper *time.Time // nil => no upper bound +} + +// TemporalLogConfigFromFile creates a TemporalLogConfig object from the given +// filename, which should contain text-protobuf encoded configuration data. +func TemporalLogConfigFromFile(filename string) (*configpb.TemporalLogConfig, error) { + if len(filename) == 0 { + return nil, errors.New("log config filename empty") + } + + cfgBytes, err := os.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("failed to read log config: %v", err) + } + + var cfg configpb.TemporalLogConfig + if txtErr := prototext.Unmarshal(cfgBytes, &cfg); txtErr != nil { + if binErr := proto.Unmarshal(cfgBytes, &cfg); binErr != nil { + return nil, fmt.Errorf("failed to parse TemporalLogConfig from %q as text protobuf (%v) or binary protobuf (%v)", filename, txtErr, binErr) + } + } + + if len(cfg.Shard) == 0 { + return nil, errors.New("empty log config found") + } + return &cfg, nil +} + +// AddLogClient is an interface that allows adding certificates and pre-certificates to a log. +// Both LogClient and TemporalLogClient implement this interface, which allows users to +// commonize code for adding certs to normal/temporal logs. +type AddLogClient interface { + AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) + AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) + GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) +} + +// TemporalLogClient allows [pre-]certificates to be uploaded to a temporal log. +type TemporalLogClient struct { + Clients []*LogClient + intervals []interval +} + +// NewTemporalLogClient builds a new client for interacting with a temporal log. +// The provided config should be contiguous and chronological. 
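+//
+// A typical construction, assuming a text-protobuf config file on disk (the
+// filename is hypothetical):
+//
+//	cfg, err := TemporalLogConfigFromFile("temporal-log.cfg")
+//	if err != nil {
+//		// handle error
+//	}
+//	tlc, err := NewTemporalLogClient(cfg, http.DefaultClient)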
+func NewTemporalLogClient(cfg *configpb.TemporalLogConfig, hc *http.Client) (*TemporalLogClient, error) { + if len(cfg.GetShard()) == 0 { + return nil, errors.New("empty config") + } + + overall, err := shardInterval(cfg.Shard[0]) + if err != nil { + return nil, fmt.Errorf("cfg.Shard[0] invalid: %v", err) + } + intervals := make([]interval, 0, len(cfg.Shard)) + intervals = append(intervals, overall) + for i := 1; i < len(cfg.Shard); i++ { + interval, err := shardInterval(cfg.Shard[i]) + if err != nil { + return nil, fmt.Errorf("cfg.Shard[%d] invalid: %v", i, err) + } + if overall.upper == nil { + return nil, fmt.Errorf("cfg.Shard[%d] extends an interval with no upper bound", i) + } + if interval.lower == nil { + return nil, fmt.Errorf("cfg.Shard[%d] has no lower bound but extends an interval", i) + } + if !interval.lower.Equal(*overall.upper) { + return nil, fmt.Errorf("cfg.Shard[%d] starts at %v but previous interval ended at %v", i, interval.lower, overall.upper) + } + overall.upper = interval.upper + intervals = append(intervals, interval) + } + clients := make([]*LogClient, 0, len(cfg.Shard)) + for i, shard := range cfg.Shard { + opts := jsonclient.Options{UserAgent: "ct-go-multilog/1.0"} + opts.PublicKeyDER = shard.GetPublicKeyDer() + c, err := New(shard.Uri, hc, opts) + if err != nil { + return nil, fmt.Errorf("failed to create client for cfg.Shard[%d]: %v", i, err) + } + clients = append(clients, c) + } + tlc := TemporalLogClient{ + Clients: clients, + intervals: intervals, + } + return &tlc, nil +} + +// GetAcceptedRoots retrieves the set of acceptable root certificates for all +// of the shards of a temporal log (i.e. the union). +func (tlc *TemporalLogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) { + type result struct { + roots []ct.ASN1Cert + err error + } + results := make(chan result, len(tlc.Clients)) + for _, c := range tlc.Clients { + go func(c *LogClient) { + var r result + r.roots, r.err = c.GetAcceptedRoots(ctx) + results <- r + }(c) + } + + var allRoots []ct.ASN1Cert + seen := make(map[[sha256.Size]byte]bool) + for range tlc.Clients { + r := <-results + if r.err != nil { + return nil, r.err + } + for _, root := range r.roots { + h := sha256.Sum256(root.Data) + if seen[h] { + continue + } + seen[h] = true + allRoots = append(allRoots, root) + } + } + return allRoots, nil +} + +// AddChain adds the (DER represented) X509 chain to the appropriate log. +func (tlc *TemporalLogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return tlc.addChain(ctx, ct.X509LogEntryType, ct.AddChainPath, chain) +} + +// AddPreChain adds the (DER represented) Precertificate chain to the appropriate log. 
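+// As with AddChain, the target shard is chosen from the leaf certificate's
+// NotAfter date via IndexByDate, so chain[0] must parse as a certificate.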
+func (tlc *TemporalLogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return tlc.addChain(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain) +} + +func (tlc *TemporalLogClient) addChain(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + // Parse the first entry in the chain + if len(chain) == 0 { + return nil, errors.New("missing chain") + } + cert, err := x509.ParseCertificate(chain[0].Data) + if err != nil { + return nil, fmt.Errorf("failed to parse initial chain entry: %v", err) + } + cidx, err := tlc.IndexByDate(cert.NotAfter) + if err != nil { + return nil, fmt.Errorf("failed to find log to process cert: %v", err) + } + return tlc.Clients[cidx].addChainWithRetry(ctx, ctype, path, chain) +} + +// IndexByDate returns the index of the Clients entry that is appropriate for the given +// date. +func (tlc *TemporalLogClient) IndexByDate(when time.Time) (int, error) { + for i, interval := range tlc.intervals { + if (interval.lower != nil) && when.Before(*interval.lower) { + continue + } + if (interval.upper != nil) && !when.Before(*interval.upper) { + continue + } + return i, nil + } + return -1, fmt.Errorf("no log found encompassing date %v", when) +} + +func shardInterval(cfg *configpb.LogShardConfig) (interval, error) { + var interval interval + if cfg.NotAfterStart != nil { + if err := cfg.NotAfterStart.CheckValid(); err != nil { + return interval, fmt.Errorf("failed to parse NotAfterStart: %v", err) + } + t := cfg.NotAfterStart.AsTime() + interval.lower = &t + } + if cfg.NotAfterLimit != nil { + if err := cfg.NotAfterLimit.CheckValid(); err != nil { + return interval, fmt.Errorf("failed to parse NotAfterLimit: %v", err) + } + t := cfg.NotAfterLimit.AsTime() + interval.upper = &t + } + + if interval.lower != nil && interval.upper != nil && !(*interval.lower).Before(*interval.upper) { + return interval, errors.New("inverted interval") + } + return interval, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml index 6c856c11c1..50ac5ef7b1 100644 --- a/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml +++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml @@ -85,6 +85,13 @@ steps: # Wait for trillian logserver to be up until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done + + # Reset the CT test database + export CT_GO_PATH="$$(go list -f '{{.Dir}}' github.com/google/certificate-transparency-go)" + export MYSQL_HOST="mysql" + export MYSQL_ROOT_PASSWORD="zaphod" + export MYSQL_USER_HOST="%" + yes | bash "$${CT_GO_PATH}/scripts/resetctdb.sh" --verbose waitFor: ['prepare'] # Run the presubmit tests diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml index 0e47e326f6..566edfe0f9 100644 --- a/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml +++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml @@ -85,6 +85,13 @@ steps: # Wait for trillian logserver to be up until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done + + # Reset the CT test database + export CT_GO_PATH="$$(go list -f '{{.Dir}}' github.com/google/certificate-transparency-go)" + export MYSQL_HOST="mysql" + export MYSQL_ROOT_PASSWORD="zaphod" + export 
MYSQL_USER_HOST="%" + yes | bash "$${CT_GO_PATH}/scripts/resetctdb.sh" --verbose waitFor: ['prepare'] # Run the presubmit tests diff --git a/vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go b/vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go new file mode 100644 index 0000000000..640fcec9c1 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go @@ -0,0 +1,211 @@ +// Copyright 2018 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ctutil contains utilities for Certificate Transparency. +package ctutil + +import ( + "bytes" + "crypto" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/tls" + "github.com/google/certificate-transparency-go/x509" +) + +var emptyHash = [sha256.Size]byte{} + +// LeafHashB64 does as LeafHash does, but returns the leaf hash base64-encoded. +// The base64-encoded leaf hash returned by B64LeafHash can be used with the +// get-proof-by-hash API endpoint of Certificate Transparency Logs. +func LeafHashB64(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) (string, error) { + hash, err := LeafHash(chain, sct, embedded) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(hash[:]), nil +} + +// LeafHash calculates the leaf hash of the certificate or precertificate at +// chain[0] that sct was issued for. +// +// sct is required because the SCT timestamp is used to calculate the leaf hash. +// Leaf hashes are unique to (pre)certificate-SCT pairs. +// +// This function can be used with three different types of leaf certificate: +// - X.509 Certificate: +// If using this function to calculate the leaf hash for a normal X.509 +// certificate then it is enough to just provide the end entity +// certificate in chain. This case assumes that the SCT being provided is +// not embedded within the leaf certificate provided, i.e. the certificate +// is what was submitted to the Certificate Transparency Log in order to +// obtain the SCT. For this case, set embedded to false. +// - Precertificate: +// If using this function to calculate the leaf hash for a precertificate +// then the issuing certificate must also be provided in chain. The +// precertificate should be at chain[0], and its issuer at chain[1]. For +// this case, set embedded to false. +// - X.509 Certificate containing the SCT embedded within it: +// If using this function to calculate the leaf hash for a certificate +// where the SCT provided is embedded within the certificate you +// are providing at chain[0], set embedded to true. LeafHash will +// calculate the leaf hash by building the corresponding precertificate. +// LeafHash will return an error if the provided SCT cannot be found +// embedded within chain[0]. As with the precertificate case, the issuing +// certificate must also be provided in chain. 
The certificate containing +// the embedded SCT should be at chain[0], and its issuer at chain[1]. +// +// Note: LeafHash doesn't check that the provided SCT verifies for the given +// chain. It simply calculates what the leaf hash would be for the given +// (pre)certificate-SCT pair. +func LeafHash(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) ([sha256.Size]byte, error) { + leaf, err := createLeaf(chain, sct, embedded) + if err != nil { + return emptyHash, err + } + return ct.LeafHashForLeaf(leaf) +} + +// VerifySCT takes the public key of a Certificate Transparency Log, a +// certificate chain, and an SCT and verifies whether the SCT is a valid SCT for +// the certificate at chain[0], signed by the Log that the public key belongs +// to. If the SCT does not verify, an error will be returned. +// +// This function can be used with three different types of leaf certificate: +// - X.509 Certificate: +// If using this function to verify an SCT for a normal X.509 certificate +// then it is enough to just provide the end entity certificate in chain. +// This case assumes that the SCT being provided is not embedded within +// the leaf certificate provided, i.e. the certificate is what was +// submitted to the Certificate Transparency Log in order to obtain the +// SCT. For this case, set embedded to false. +// - Precertificate: +// If using this function to verify an SCT for a precertificate then the +// issuing certificate must also be provided in chain. The precertificate +// should be at chain[0], and its issuer at chain[1]. For this case, set +// embedded to false. +// - X.509 Certificate containing the SCT embedded within it: +// If the SCT you wish to verify is embedded within the certificate you +// are providing at chain[0], set embedded to true. VerifySCT will +// verify the provided SCT by building the corresponding precertificate. +// VerifySCT will return an error if the provided SCT cannot be found +// embedded within chain[0]. As with the precertificate case, the issuing +// certificate must also be provided in chain. The certificate containing +// the embedded SCT should be at chain[0], and its issuer at chain[1]. +func VerifySCT(pubKey crypto.PublicKey, chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) error { + s, err := ct.NewSignatureVerifier(pubKey) + if err != nil { + return fmt.Errorf("error creating signature verifier: %s", err) + } + + return VerifySCTWithVerifier(s, chain, sct, embedded) +} + +// VerifySCTWithVerifier takes a ct.SignatureVerifier, a certificate chain, and +// an SCT and verifies whether the SCT is a valid SCT for the certificate at +// chain[0], signed by the Log whose public key was used to set up the +// ct.SignatureVerifier. If the SCT does not verify, an error will be returned. +// +// This function can be used with three different types of leaf certificate: +// - X.509 Certificate: +// If using this function to verify an SCT for a normal X.509 certificate +// then it is enough to just provide the end entity certificate in chain. +// This case assumes that the SCT being provided is not embedded within +// the leaf certificate provided, i.e. the certificate is what was +// submitted to the Certificate Transparency Log in order to obtain the +// SCT. For this case, set embedded to false. +// - Precertificate: +// If using this function to verify an SCT for a precertificate then the +// issuing certificate must also be provided in chain. 
The precertificate +// should be at chain[0], and its issuer at chain[1]. For this case, set +// embedded to false. +// - X.509 Certificate containing the SCT embedded within it: +// If the SCT you wish to verify is embedded within the certificate you +// are providing at chain[0], set embedded to true. VerifySCT will +// verify the provided SCT by building the corresponding precertificate. +// VerifySCT will return an error if the provided SCT cannot be found +// embedded within chain[0]. As with the precertificate case, the issuing +// certificate must also be provided in chain. The certificate containing +// the embedded SCT should be at chain[0], and its issuer at chain[1]. +func VerifySCTWithVerifier(sv *ct.SignatureVerifier, chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) error { + if sv == nil { + return errors.New("ct.SignatureVerifier is nil") + } + + leaf, err := createLeaf(chain, sct, embedded) + if err != nil { + return err + } + + return sv.VerifySCTSignature(*sct, ct.LogEntry{Leaf: *leaf}) +} + +func createLeaf(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) (*ct.MerkleTreeLeaf, error) { + if len(chain) == 0 { + return nil, errors.New("chain is empty") + } + if sct == nil { + return nil, errors.New("sct is nil") + } + + if embedded { + sctPresent, err := ContainsSCT(chain[0], sct) + if err != nil { + return nil, fmt.Errorf("error checking for SCT in leaf certificate: %s", err) + } + if !sctPresent { + return nil, errors.New("SCT provided is not embedded within leaf certificate") + } + } + + certType := ct.X509LogEntryType + if chain[0].IsPrecertificate() || embedded { + certType = ct.PrecertLogEntryType + } + + var leaf *ct.MerkleTreeLeaf + var err error + if embedded { + leaf, err = ct.MerkleTreeLeafForEmbeddedSCT(chain, sct.Timestamp) + } else { + leaf, err = ct.MerkleTreeLeafFromChain(chain, certType, sct.Timestamp) + } + if err != nil { + return nil, fmt.Errorf("error creating MerkleTreeLeaf: %s", err) + } + return leaf, nil +} + +// ContainsSCT checks to see whether the given SCT is embedded within the given +// certificate. +func ContainsSCT(cert *x509.Certificate, sct *ct.SignedCertificateTimestamp) (bool, error) { + if cert == nil || sct == nil { + return false, nil + } + + sctBytes, err := tls.Marshal(*sct) + if err != nil { + return false, fmt.Errorf("error tls.Marshalling SCT: %s", err) + } + for _, s := range cert.SCTList.SCTList { + if bytes.Equal(sctBytes, s.Val) { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go b/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go new file mode 100644 index 0000000000..83d9739c7e --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go @@ -0,0 +1,174 @@ +// Copyright 2018 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
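+
+// A minimal usage sketch for the helpers above: verifying an SCT embedded in
+// a leaf certificate, where chain holds the parsed leaf at [0] and its issuer
+// at [1], and logKey is the log's parsed public key (all assumed inputs):
+//
+//	if err := VerifySCT(logKey, chain, sct, true); err != nil {
+//		// SCT did not verify for this chain
+//	}
+//	b64Hash, err := LeafHashB64(chain, sct, true) // for get-proof-by-hash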
+ +package ctutil + +import ( + "context" + "crypto/sha256" + "fmt" + "net/http" + "strings" + "sync" + "time" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/client" + "github.com/google/certificate-transparency-go/jsonclient" + "github.com/google/certificate-transparency-go/loglist3" + "github.com/google/certificate-transparency-go/x509" + "github.com/transparency-dev/merkle/proof" + "github.com/transparency-dev/merkle/rfc6962" +) + +// LogInfo holds the objects needed to perform per-log verification and +// validation of SCTs. +type LogInfo struct { + Description string + Client client.CheckLogClient + MMD time.Duration + Verifier *ct.SignatureVerifier + PublicKey []byte + + mu sync.RWMutex + lastSTH *ct.SignedTreeHead +} + +// NewLogInfo builds a LogInfo object based on a log list entry. +func NewLogInfo(log *loglist3.Log, hc *http.Client) (*LogInfo, error) { + url := log.URL + if !strings.HasPrefix(url, "https://") { + url = "https://" + url + } + lc, err := client.New(url, hc, jsonclient.Options{PublicKeyDER: log.Key, UserAgent: "ct-go-logclient"}) + if err != nil { + return nil, fmt.Errorf("failed to create client for log %q: %v", log.Description, err) + } + return newLogInfo(log, lc) +} + +func newLogInfo(log *loglist3.Log, lc client.CheckLogClient) (*LogInfo, error) { + logKey, err := x509.ParsePKIXPublicKey(log.Key) + if err != nil { + return nil, fmt.Errorf("failed to parse public key data for log %q: %v", log.Description, err) + } + verifier, err := ct.NewSignatureVerifier(logKey) + if err != nil { + return nil, fmt.Errorf("failed to build verifier log %q: %v", log.Description, err) + } + mmd := time.Duration(log.MMD) * time.Second + return &LogInfo{ + Description: log.Description, + Client: lc, + MMD: mmd, + Verifier: verifier, + PublicKey: log.Key, + }, nil +} + +// LogInfoByHash holds LogInfo objects index by the SHA-256 hash of the log's public key. +type LogInfoByHash map[[sha256.Size]byte]*LogInfo + +// LogInfoByKeyHash builds a map of LogInfo objects indexed by their key hashes. +func LogInfoByKeyHash(ll *loglist3.LogList, hc *http.Client) (LogInfoByHash, error) { + return logInfoByKeyHash(ll, hc, NewLogInfo) +} + +func logInfoByKeyHash(ll *loglist3.LogList, hc *http.Client, infoFactory func(*loglist3.Log, *http.Client) (*LogInfo, error)) (map[[sha256.Size]byte]*LogInfo, error) { + result := make(map[[sha256.Size]byte]*LogInfo) + for _, operator := range ll.Operators { + for _, log := range operator.Logs { + h := sha256.Sum256(log.Key) + li, err := infoFactory(log, hc) + if err != nil { + return nil, err + } + result[h] = li + } + } + return result, nil +} + +// LastSTH returns the last STH known for the log. +func (li *LogInfo) LastSTH() *ct.SignedTreeHead { + li.mu.RLock() + defer li.mu.RUnlock() + return li.lastSTH +} + +// SetSTH sets the last STH known for the log. +func (li *LogInfo) SetSTH(sth *ct.SignedTreeHead) { + li.mu.Lock() + defer li.mu.Unlock() + li.lastSTH = sth +} + +// VerifySCTSignature checks the signature in the SCT matches the given leaf (adjusted for the +// timestamp in the SCT) and log. 
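+// The leaf's timestamp is overwritten with the SCT's timestamp before
+// verification, since the SCT signature covers the timestamp assigned by the
+// log rather than whatever the reconstructed leaf carries.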
+func (li *LogInfo) VerifySCTSignature(sct ct.SignedCertificateTimestamp, leaf ct.MerkleTreeLeaf) error { + leaf.TimestampedEntry.Timestamp = sct.Timestamp + if err := li.Verifier.VerifySCTSignature(sct, ct.LogEntry{Leaf: leaf}); err != nil { + return fmt.Errorf("failed to verify SCT signature from log %q: %v", li.Description, err) + } + return nil +} + +// VerifyInclusionLatest checks that the given Merkle tree leaf, adjusted for the provided timestamp, +// is present in the latest known tree size of the log. If no tree size for the log is known, it will +// be queried. On success, returns the index of the leaf in the log. +func (li *LogInfo) VerifyInclusionLatest(ctx context.Context, leaf ct.MerkleTreeLeaf, timestamp uint64) (int64, error) { + sth := li.LastSTH() + if sth == nil { + var err error + sth, err = li.Client.GetSTH(ctx) + if err != nil { + return -1, fmt.Errorf("failed to get current STH for %q log: %v", li.Description, err) + } + li.SetSTH(sth) + } + return li.VerifyInclusionAt(ctx, leaf, timestamp, sth.TreeSize, sth.SHA256RootHash[:]) +} + +// VerifyInclusion checks that the given Merkle tree leaf, adjusted for the provided timestamp, +// is present in the current tree size of the log. On success, returns the index of the leaf +// in the log. +func (li *LogInfo) VerifyInclusion(ctx context.Context, leaf ct.MerkleTreeLeaf, timestamp uint64) (int64, error) { + sth, err := li.Client.GetSTH(ctx) + if err != nil { + return -1, fmt.Errorf("failed to get current STH for %q log: %v", li.Description, err) + } + li.SetSTH(sth) + return li.VerifyInclusionAt(ctx, leaf, timestamp, sth.TreeSize, sth.SHA256RootHash[:]) +} + +// VerifyInclusionAt checks that the given Merkle tree leaf, adjusted for the provided timestamp, +// is present in the given tree size & root hash of the log. On success, returns the index of the +// leaf in the log. +func (li *LogInfo) VerifyInclusionAt(ctx context.Context, leaf ct.MerkleTreeLeaf, timestamp, treeSize uint64, rootHash []byte) (int64, error) { + leaf.TimestampedEntry.Timestamp = timestamp + leafHash, err := ct.LeafHashForLeaf(&leaf) + if err != nil { + return -1, fmt.Errorf("failed to create leaf hash: %v", err) + } + + rsp, err := li.Client.GetProofByHash(ctx, leafHash[:], treeSize) + if err != nil { + return -1, fmt.Errorf("failed to GetProofByHash(sct,size=%d): %v", treeSize, err) + } + + if err := proof.VerifyInclusion(rfc6962.DefaultHasher, uint64(rsp.LeafIndex), treeSize, leafHash[:], rsp.AuditPath, rootHash); err != nil { + return -1, fmt.Errorf("failed to verify inclusion proof at size %d: %v", treeSize, err) + } + return rsp.LeafIndex, nil +} diff --git a/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go b/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go new file mode 100644 index 0000000000..30932f30d1 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go @@ -0,0 +1,72 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonclient + +import ( + "sync" + "time" +) + +type backoff struct { + mu sync.RWMutex + multiplier uint + notBefore time.Time +} + +const ( + // maximum backoff is 2^(maxMultiplier-1) = 128 seconds + maxMultiplier = 8 +) + +func (b *backoff) set(override *time.Duration) time.Duration { + b.mu.Lock() + defer b.mu.Unlock() + if b.notBefore.After(time.Now()) { + if override != nil { + // If existing backoff is set but override would be longer than + // it then set it to that. + notBefore := time.Now().Add(*override) + if notBefore.After(b.notBefore) { + b.notBefore = notBefore + } + } + return time.Until(b.notBefore) + } + var wait time.Duration + if override != nil { + wait = *override + } else { + if b.multiplier < maxMultiplier { + b.multiplier++ + } + wait = time.Second * time.Duration(1<<(b.multiplier-1)) + } + b.notBefore = time.Now().Add(wait) + return wait +} + +func (b *backoff) decreaseMultiplier() { + b.mu.Lock() + defer b.mu.Unlock() + if b.multiplier > 0 { + b.multiplier-- + } +} + +func (b *backoff) until() time.Time { + b.mu.RLock() + defer b.mu.RUnlock() + return b.notBefore +} diff --git a/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go b/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go new file mode 100644 index 0000000000..1dee4cb601 --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go @@ -0,0 +1,335 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package jsonclient provides a simple client for fetching and parsing +// JSON CT structures from a log. +package jsonclient + +import ( + "bytes" + "context" + "crypto" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "math/rand" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/x509" + "golang.org/x/net/context/ctxhttp" + "k8s.io/klog/v2" +) + +const maxJitter = 250 * time.Millisecond + +type backoffer interface { + // set adjusts/increases the current backoff interval (typically on retryable failure); + // if the optional parameter is provided, this will be used as the interval if it is greater + // than the currently set interval. Returns the current wait period so that it can be + // logged along with any error message. + set(*time.Duration) time.Duration + // decreaseMultiplier reduces the current backoff multiplier, typically on success. + decreaseMultiplier() + // until returns the time until which the client should wait before making a request, + // it may be in the past in which case it should be ignored. + until() time.Time +} + +// JSONClient provides common functionality for interacting with a JSON server +// that uses cryptographic signatures. +type JSONClient struct { + uri string // the base URI of the server. e.g. 
https://ct.googleapis.com/pilot + httpClient *http.Client // used to interact with the server via HTTP + Verifier *ct.SignatureVerifier // nil for no verification (e.g. no public key available) + logger Logger // interface to use for logging warnings and errors + backoff backoffer // object used to store and calculate backoff information + userAgent string // If set, this is sent as the UserAgent header. +} + +// Logger is a simple logging interface used to log internal errors and warnings +type Logger interface { + // Printf formats and logs a message + Printf(string, ...interface{}) +} + +// Options are the options for creating a new JSONClient. +type Options struct { + // Interface to use for logging warnings and errors, if nil the + // standard library log package will be used. + Logger Logger + // PEM format public key to use for signature verification. + PublicKey string + // DER format public key to use for signature verification. + PublicKeyDER []byte + // UserAgent, if set, will be sent as the User-Agent header with each request. + UserAgent string +} + +// ParsePublicKey parses and returns the public key contained in opts. +// If both opts.PublicKey and opts.PublicKeyDER are set, PublicKeyDER is used. +// If neither is set, nil will be returned. +func (opts *Options) ParsePublicKey() (crypto.PublicKey, error) { + if len(opts.PublicKeyDER) > 0 { + return x509.ParsePKIXPublicKey(opts.PublicKeyDER) + } + + if opts.PublicKey != "" { + pubkey, _ /* keyhash */, rest, err := ct.PublicKeyFromPEM([]byte(opts.PublicKey)) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, errors.New("extra data found after PEM key decoded") + } + return pubkey, nil + } + + return nil, nil +} + +type basicLogger struct{} + +func (bl *basicLogger) Printf(msg string, args ...interface{}) { + log.Printf(msg, args...) +} + +// RspError represents an error that occurred when processing a response from a server, +// and also includes key details from the http.Response that triggered the error. +type RspError struct { + Err error + StatusCode int + Body []byte +} + +// Error formats the RspError instance, focusing on the error. +func (e RspError) Error() string { + return e.Err.Error() +} + +// New constructs a new JSONClient instance, for the given base URI, using the +// given http.Client object (if provided) and the Options object. +// If opts does not specify a public key, signatures will not be verified. +func New(uri string, hc *http.Client, opts Options) (*JSONClient, error) { + pubkey, err := opts.ParsePublicKey() + if err != nil { + return nil, fmt.Errorf("invalid public key: %v", err) + } + + var verifier *ct.SignatureVerifier + if pubkey != nil { + var err error + verifier, err = ct.NewSignatureVerifier(pubkey) + if err != nil { + return nil, err + } + } + + if hc == nil { + hc = new(http.Client) + } + logger := opts.Logger + if logger == nil { + logger = &basicLogger{} + } + return &JSONClient{ + uri: strings.TrimRight(uri, "/"), + httpClient: hc, + Verifier: verifier, + logger: logger, + backoff: &backoff{}, + userAgent: opts.UserAgent, + }, nil +} + +// BaseURI returns the base URI that the JSONClient makes queries to. +func (c *JSONClient) BaseURI() string { + return c.uri +} + +// GetAndParse makes a HTTP GET call to the given path, and attempts to parse +// the response as a JSON representation of the rsp structure. Returns the +// http.Response, the body of the response, and an error (which may be of +// type RspError if the HTTP response was available).
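+//
+// A minimal sketch, mirroring how the CT log client uses this method:
+//
+//	var resp ct.GetSTHResponse
+//	httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHPath, nil, &resp)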
+func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[string]string, rsp interface{}) (*http.Response, []byte, error) { + if ctx == nil { + return nil, nil, errors.New("context.Context required") + } + // Build a GET request with URL-encoded parameters. + vals := url.Values{} + for k, v := range params { + vals.Add(k, v) + } + fullURI := fmt.Sprintf("%s%s?%s", c.uri, path, vals.Encode()) + klog.V(2).Infof("GET %s", fullURI) + httpReq, err := http.NewRequest(http.MethodGet, fullURI, nil) + if err != nil { + return nil, nil, err + } + if len(c.userAgent) != 0 { + httpReq.Header.Set("User-Agent", c.userAgent) + } + + httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq) + if err != nil { + return nil, nil, err + } + + // Read everything now so http.Client can reuse the connection. + body, err := io.ReadAll(httpRsp.Body) + if err := httpRsp.Body.Close(); err != nil { + return nil, nil, err + } + if err != nil { + return nil, nil, RspError{Err: fmt.Errorf("failed to read response body: %v", err), StatusCode: httpRsp.StatusCode, Body: body} + } + + if httpRsp.StatusCode != http.StatusOK { + return nil, nil, RspError{Err: fmt.Errorf("got HTTP Status %q", httpRsp.Status), StatusCode: httpRsp.StatusCode, Body: body} + } + + if err := json.NewDecoder(bytes.NewReader(body)).Decode(rsp); err != nil { + return nil, nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + + return httpRsp, body, nil +} + +// PostAndParse makes a HTTP POST call to the given path, including the request +// parameters, and attempts to parse the response as a JSON representation of +// the rsp structure. Returns the http.Response, the body of the response, and +// an error (which may be of type RspError if the HTTP response was available). +func (c *JSONClient) PostAndParse(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) { + if ctx == nil { + return nil, nil, errors.New("context.Context required") + } + // Build a POST request with JSON body. + postBody, err := json.Marshal(req) + if err != nil { + return nil, nil, err + } + fullURI := fmt.Sprintf("%s%s", c.uri, path) + klog.V(2).Infof("POST %s", fullURI) + httpReq, err := http.NewRequest(http.MethodPost, fullURI, bytes.NewReader(postBody)) + if err != nil { + return nil, nil, err + } + if len(c.userAgent) != 0 { + httpReq.Header.Set("User-Agent", c.userAgent) + } + httpReq.Header.Set("Content-Type", "application/json") + + httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq) + + // Read all of the body, if there is one, so that the http.Client can do Keep-Alive. 
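+ // The transport error from ctxhttp.Do is checked only after any available
+ // body has been captured, so that the body can be attached to the RspError
+ // returned below.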
+ var body []byte + if httpRsp != nil { + body, err = io.ReadAll(httpRsp.Body) + if err := httpRsp.Body.Close(); err != nil { + return nil, nil, err + } + } + if err != nil { + if httpRsp != nil { + return nil, nil, RspError{StatusCode: httpRsp.StatusCode, Body: body, Err: err} + } + return nil, nil, err + } + if httpRsp.Request.Method != http.MethodPost { + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Redirections#permanent_redirections + return nil, nil, fmt.Errorf("POST request to %q was converted to %s request to %q", fullURI, httpRsp.Request.Method, httpRsp.Request.URL) + } + + if httpRsp.StatusCode == http.StatusOK { + if err = json.Unmarshal(body, &rsp); err != nil { + return nil, nil, RspError{StatusCode: httpRsp.StatusCode, Body: body, Err: err} + } + } + return httpRsp, body, nil +} + +// waitForBackoff blocks until the defined backoff interval or context has expired, if the returned +// not before time is in the past it returns immediately. +func (c *JSONClient) waitForBackoff(ctx context.Context) error { + dur := time.Until(c.backoff.until().Add(time.Millisecond * time.Duration(rand.Intn(int(maxJitter.Seconds()*1000))))) + if dur < 0 { + dur = 0 + } + backoffTimer := time.NewTimer(dur) + select { + case <-ctx.Done(): + return ctx.Err() + case <-backoffTimer.C: + } + return nil +} + +// PostAndParseWithRetry makes a HTTP POST call, but retries (with backoff) on +// retryable errors; the caller should set a deadline on the provided context +// to prevent infinite retries. Return values are as for PostAndParse. +func (c *JSONClient) PostAndParseWithRetry(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) { + if ctx == nil { + return nil, nil, errors.New("context.Context required") + } + for { + httpRsp, body, err := c.PostAndParse(ctx, path, req, rsp) + if err != nil { + // Don't retry context errors. 
+			if err == context.Canceled || err == context.DeadlineExceeded {
+				return nil, nil, err
+			}
+			wait := c.backoff.set(nil)
+			c.logger.Printf("Request to %s failed, backing off %s: %s", c.uri, wait, err)
+		} else {
+			switch {
+			case httpRsp.StatusCode == http.StatusOK:
+				return httpRsp, body, nil
+			case httpRsp.StatusCode == http.StatusRequestTimeout:
+				// Request timeout; retry immediately.
+				c.logger.Printf("Request to %s timed out, retrying immediately", c.uri)
+			case httpRsp.StatusCode == http.StatusServiceUnavailable:
+				fallthrough
+			case httpRsp.StatusCode == http.StatusTooManyRequests:
+				var backoff *time.Duration
+				// Retry-After may be either a number of seconds as an int or an RFC 1123
+				// date string (RFC 7231 Section 7.1.3).
+				if retryAfter := httpRsp.Header.Get("Retry-After"); retryAfter != "" {
+					if seconds, err := strconv.Atoi(retryAfter); err == nil {
+						b := time.Duration(seconds) * time.Second
+						backoff = &b
+					} else if date, err := time.Parse(time.RFC1123, retryAfter); err == nil {
+						b := time.Until(date)
+						backoff = &b
+					}
+				}
+				wait := c.backoff.set(backoff)
+				c.logger.Printf("Request to %s failed, backing off for %s: got HTTP status %s", c.uri, wait, httpRsp.Status)
+			default:
+				return nil, nil, RspError{
+					StatusCode: httpRsp.StatusCode,
+					Body:       body,
+					Err:        fmt.Errorf("got HTTP status %q", httpRsp.Status)}
+			}
+		}
+		if err := c.waitForBackoff(ctx); err != nil {
+			return nil, nil, err
+		}
+	}
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go b/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go
new file mode 100644
index 0000000000..34949be057
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go
@@ -0,0 +1,129 @@
+// Copyright 2022 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package loglist3
+
+import (
+	"github.com/google/certificate-transparency-go/x509"
+	"github.com/google/certificate-transparency-go/x509util"
+	"k8s.io/klog/v2"
+)
+
+// LogRoots maps Log-URLs (stated at LogList) to the pools of their accepted
+// root-certificates.
+type LogRoots map[string]*x509util.PEMCertPool
+
+// Compatible creates a new LogList containing only Logs matching the temporal,
+// root-acceptance and Log-status conditions.
+func (ll *LogList) Compatible(cert *x509.Certificate, certRoot *x509.Certificate, roots LogRoots) LogList {
+	active := ll.TemporallyCompatible(cert)
+	// Do not check root compatibility if no certificate root was provided.
+	if certRoot == nil {
+		return active
+	}
+	return active.RootCompatible(certRoot, roots)
+}
+
+// SelectByStatus creates a new LogList containing only those logs from the
+// original whose status matches one of the provided values.
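+//
+// An illustrative (non-upstream) call that keeps only logs currently
+// accepted by user agents might be:
+//
+//	usable := ll.SelectByStatus([]LogStatus{UsableLogStatus, QualifiedLogStatus})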
+func (ll *LogList) SelectByStatus(lstats []LogStatus) LogList {
+	var active LogList
+	for _, op := range ll.Operators {
+		activeOp := *op
+		activeOp.Logs = []*Log{}
+		for _, l := range op.Logs {
+			for _, lstat := range lstats {
+				if l.State.LogStatus() == lstat {
+					activeOp.Logs = append(activeOp.Logs, l)
+					break
+				}
+			}
+		}
+		if len(activeOp.Logs) > 0 {
+			active.Operators = append(active.Operators, &activeOp)
+		}
+	}
+	return active
+}
+
+// RootCompatible creates a new LogList containing only the logs of the original
+// LogList that are compatible with the provided cert, according to
+// the passed-in collection of per-log roots. Logs that are missing from
+// the collection are treated as always compatible and included, even if
+// an empty cert root is passed in.
+// The cert root, when provided, is expected to be a CA certificate.
+func (ll *LogList) RootCompatible(certRoot *x509.Certificate, roots LogRoots) LogList {
+	var compatible LogList
+
+	// Check whether the root is a CA cert.
+	if certRoot != nil && !certRoot.IsCA {
+		klog.Warningf("Compatible method expects a fully rooted chain, but the last cert of the provided chain is not a root")
+		return compatible
+	}
+
+	for _, op := range ll.Operators {
+		compatibleOp := *op
+		compatibleOp.Logs = []*Log{}
+		for _, l := range op.Logs {
+			// If the root set is not defined, we treat the Log as compatible,
+			// assuming no knowledge of its roots.
+			if _, ok := roots[l.URL]; !ok {
+				compatibleOp.Logs = append(compatibleOp.Logs, l)
+				continue
+			}
+
+			if certRoot == nil {
+				continue
+			}
+
+			// Check that the root is accepted.
+			if roots[l.URL].Included(certRoot) {
+				compatibleOp.Logs = append(compatibleOp.Logs, l)
+			}
+		}
+		if len(compatibleOp.Logs) > 0 {
+			compatible.Operators = append(compatible.Operators, &compatibleOp)
+		}
+	}
+	return compatible
+}
+
+// TemporallyCompatible creates a new LogList containing only the logs of the
+// original LogList that are compatible with the provided cert, according to
+// NotAfter and TemporalInterval matching.
+// Returns an empty LogList if a nil cert is provided.
+func (ll *LogList) TemporallyCompatible(cert *x509.Certificate) LogList {
+	var compatible LogList
+	if cert == nil {
+		return compatible
+	}
+
+	for _, op := range ll.Operators {
+		compatibleOp := *op
+		compatibleOp.Logs = []*Log{}
+		for _, l := range op.Logs {
+			if l.TemporalInterval == nil {
+				compatibleOp.Logs = append(compatibleOp.Logs, l)
+				continue
+			}
+			if cert.NotAfter.Before(l.TemporalInterval.EndExclusive) && (cert.NotAfter.After(l.TemporalInterval.StartInclusive) || cert.NotAfter.Equal(l.TemporalInterval.StartInclusive)) {
+				compatibleOp.Logs = append(compatibleOp.Logs, l)
+			}
+		}
+		if len(compatibleOp.Logs) > 0 {
+			compatible.Operators = append(compatible.Operators, &compatibleOp)
+		}
+	}
+	return compatible
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go b/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go
new file mode 100644
index 0000000000..c81726515c
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go
@@ -0,0 +1,391 @@
+// Copyright 2022 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package loglist3 allows parsing and searching of the master CT Log list. +// It expects the log list to conform to the v3 schema. +package loglist3 + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "regexp" + "strings" + "time" + "unicode" + + "github.com/google/certificate-transparency-go/tls" +) + +const ( + // LogListURL has the master URL for Google Chrome's log list. + LogListURL = "https://www.gstatic.com/ct/log_list/v3/log_list.json" + // LogListSignatureURL has the URL for the signature over Google Chrome's log list. + LogListSignatureURL = "https://www.gstatic.com/ct/log_list/v3/log_list.sig" + // AllLogListURL has the URL for the list of all known logs (which isn't signed). + AllLogListURL = "https://www.gstatic.com/ct/log_list/v3/all_logs_list.json" +) + +// Manually mapped from https://www.gstatic.com/ct/log_list/v3/log_list_schema.json + +// LogList holds a collection of CT logs, grouped by operator. +type LogList struct { + // IsAllLogs is set to true if the list contains all known logs, not + // only usable ones. + IsAllLogs bool `json:"is_all_logs,omitempty"` + // Version is the version of the log list. + Version string `json:"version,omitempty"` + // LogListTimestamp is the time at which the log list was published. + LogListTimestamp time.Time `json:"log_list_timestamp,omitempty"` + // Operators is a list of CT log operators and the logs they operate. + Operators []*Operator `json:"operators"` +} + +// Operator holds a collection of CT logs run by the same organisation. +// It also provides information about that organisation, e.g. contact details. +type Operator struct { + // Name is the name of the CT log operator. + Name string `json:"name"` + // Email lists the email addresses that can be used to contact this log + // operator. + Email []string `json:"email"` + // Logs is a list of CT logs run by this operator. + Logs []*Log `json:"logs"` +} + +// Log describes a single CT log. +type Log struct { + // Description is a human-readable string that describes the log. + Description string `json:"description,omitempty"` + // LogID is the SHA-256 hash of the log's public key. + LogID []byte `json:"log_id"` + // Key is the public key with which signatures can be verified. + Key []byte `json:"key"` + // URL is the address of the HTTPS API. + URL string `json:"url"` + // DNS is the address of the DNS API. + DNS string `json:"dns,omitempty"` + // MMD is the Maximum Merge Delay, in seconds. All submitted + // certificates must be incorporated into the log within this time. + MMD int32 `json:"mmd"` + // PreviousOperators is a list of previous operators and the timestamp + // of when they stopped running the log. + PreviousOperators []*PreviousOperator `json:"previous_operators,omitempty"` + // State is the current state of the log, from the perspective of the + // log list distributor. + State *LogStates `json:"state,omitempty"` + // TemporalInterval, if set, indicates that this log only accepts + // certificates with a NotAfter date in this time range. 
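+	// The range is half-open: [StartInclusive, EndExclusive).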
+	TemporalInterval *TemporalInterval `json:"temporal_interval,omitempty"`
+	// Type indicates the purpose of this log, e.g. "test" or "prod".
+	Type string `json:"log_type,omitempty"`
+}
+
+// PreviousOperator holds information about a log operator and the time at which
+// they stopped running a log.
+type PreviousOperator struct {
+	// Name is the name of the CT log operator.
+	Name string `json:"name"`
+	// EndTime is the time at which the operator stopped running a log.
+	EndTime time.Time `json:"end_time"`
+}
+
+// TemporalInterval is a time range.
+type TemporalInterval struct {
+	// StartInclusive is the beginning of the time range.
+	StartInclusive time.Time `json:"start_inclusive"`
+	// EndExclusive is just after the end of the time range.
+	EndExclusive time.Time `json:"end_exclusive"`
+}
+
+// LogStatus indicates the status of a Log.
+type LogStatus int
+
+// LogStatus values
+const (
+	UndefinedLogStatus LogStatus = iota
+	PendingLogStatus
+	QualifiedLogStatus
+	UsableLogStatus
+	ReadOnlyLogStatus
+	RetiredLogStatus
+	RejectedLogStatus
+)
+
+//go:generate stringer -type=LogStatus
+
+// LogStates are the states that a CT log can be in, from the perspective of a
+// user agent. Only one should be set - this is the current state.
+type LogStates struct {
+	// Pending indicates that the log is in the "pending" state.
+	Pending *LogState `json:"pending,omitempty"`
+	// Qualified indicates that the log is in the "qualified" state.
+	Qualified *LogState `json:"qualified,omitempty"`
+	// Usable indicates that the log is in the "usable" state.
+	Usable *LogState `json:"usable,omitempty"`
+	// ReadOnly indicates that the log is in the "readonly" state.
+	ReadOnly *ReadOnlyLogState `json:"readonly,omitempty"`
+	// Retired indicates that the log is in the "retired" state.
+	Retired *LogState `json:"retired,omitempty"`
+	// Rejected indicates that the log is in the "rejected" state.
+	Rejected *LogState `json:"rejected,omitempty"`
+}
+
+// LogState contains details on the current state of a CT log.
+type LogState struct {
+	// Timestamp is the time when the state began.
+	Timestamp time.Time `json:"timestamp"`
+}
+
+// ReadOnlyLogState contains details on the current state of a read-only CT log.
+type ReadOnlyLogState struct {
+	LogState
+	// FinalTreeHead is the root hash and tree size at which the CT log was
+	// made read-only. This should never change while the log is read-only.
+	FinalTreeHead TreeHead `json:"final_tree_head"`
+}
+
+// TreeHead is the root hash and tree size of a CT log.
+type TreeHead struct {
+	// SHA256RootHash is the root hash of the CT log's Merkle tree.
+	SHA256RootHash []byte `json:"sha256_root_hash"`
+	// TreeSize is the size of the CT log's Merkle tree.
+	TreeSize int64 `json:"tree_size"`
+}
+
+// LogStatus returns the LogStatus enum value for this descriptive struct.
+func (ls *LogStates) LogStatus() LogStatus {
+	switch {
+	case ls == nil:
+		return UndefinedLogStatus
+	case ls.Pending != nil:
+		return PendingLogStatus
+	case ls.Qualified != nil:
+		return QualifiedLogStatus
+	case ls.Usable != nil:
+		return UsableLogStatus
+	case ls.ReadOnly != nil:
+		return ReadOnlyLogStatus
+	case ls.Retired != nil:
+		return RetiredLogStatus
+	case ls.Rejected != nil:
+		return RejectedLogStatus
+	default:
+		return UndefinedLogStatus
+	}
+}
+
+// String returns the printable name of the state.
+func (ls *LogStates) String() string {
+	return ls.LogStatus().String()
+}
+
+// Active picks the currently set state. If multiple states are set (not expected), it picks one of them.
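+//
+// Illustrative use (names assumed, not from upstream docs):
+//
+//	state, roState := log.State.Active()
+//	if roState != nil {
+//		// The log is read-only; roState.FinalTreeHead is fixed.
+//	}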
+func (ls *LogStates) Active() (*LogState, *ReadOnlyLogState) {
+	if ls == nil {
+		return nil, nil
+	}
+	switch {
+	case ls.Pending != nil:
+		return ls.Pending, nil
+	case ls.Qualified != nil:
+		return ls.Qualified, nil
+	case ls.Usable != nil:
+		return ls.Usable, nil
+	case ls.ReadOnly != nil:
+		return nil, ls.ReadOnly
+	case ls.Retired != nil:
+		return ls.Retired, nil
+	case ls.Rejected != nil:
+		return ls.Rejected, nil
+	default:
+		return nil, nil
+	}
+}
+
+// GoogleOperated reports whether the Operator is considered to be Google.
+func (op *Operator) GoogleOperated() bool {
+	for _, email := range op.Email {
+		if strings.Contains(email, "google-ct-logs@googlegroups") {
+			return true
+		}
+	}
+	return false
+}
+
+// NewFromJSON creates a LogList from JSON-encoded data.
+func NewFromJSON(llData []byte) (*LogList, error) {
+	var ll LogList
+	if err := json.Unmarshal(llData, &ll); err != nil {
+		return nil, fmt.Errorf("failed to parse log list: %v", err)
+	}
+	return &ll, nil
+}
+
+// NewFromSignedJSON creates a LogList from JSON-encoded data, checking a
+// signature along the way. The signature should be provided as raw
+// signature bytes.
+func NewFromSignedJSON(llData, rawSig []byte, pubKey crypto.PublicKey) (*LogList, error) {
+	var sigAlgo tls.SignatureAlgorithm
+	switch pkType := pubKey.(type) {
+	case *rsa.PublicKey:
+		sigAlgo = tls.RSA
+	case *ecdsa.PublicKey:
+		sigAlgo = tls.ECDSA
+	default:
+		return nil, fmt.Errorf("unsupported public key type %v", pkType)
+	}
+	tlsSig := tls.DigitallySigned{
+		Algorithm: tls.SignatureAndHashAlgorithm{
+			Hash:      tls.SHA256,
+			Signature: sigAlgo,
+		},
+		Signature: rawSig,
+	}
+	if err := tls.VerifySignature(pubKey, llData, tlsSig); err != nil {
+		return nil, fmt.Errorf("failed to verify signature: %v", err)
+	}
+	return NewFromJSON(llData)
+}
+
+// FindLogByName returns all logs whose names contain the given string.
+func (ll *LogList) FindLogByName(name string) []*Log {
+	name = strings.ToLower(name)
+	var results []*Log
+	for _, op := range ll.Operators {
+		for _, log := range op.Logs {
+			if strings.Contains(strings.ToLower(log.Description), name) {
+				results = append(results, log)
+			}
+		}
+	}
+	return results
+}
+
+// FindLogByURL finds the log with the given URL.
+func (ll *LogList) FindLogByURL(url string) *Log {
+	for _, op := range ll.Operators {
+		for _, log := range op.Logs {
+			// Ignore trailing slashes.
+			if strings.TrimRight(log.URL, "/") == strings.TrimRight(url, "/") {
+				return log
+			}
+		}
+	}
+	return nil
+}
+
+// FindLogByKeyHash finds the log with the given key hash.
+func (ll *LogList) FindLogByKeyHash(keyhash [sha256.Size]byte) *Log {
+	for _, op := range ll.Operators {
+		for _, log := range op.Logs {
+			if bytes.Equal(log.LogID, keyhash[:]) {
+				return log
+			}
+		}
+	}
+	return nil
+}
+
+// FindLogByKeyHashPrefix finds all logs whose key hash starts with the prefix.
+func (ll *LogList) FindLogByKeyHashPrefix(prefix string) []*Log {
+	var results []*Log
+	for _, op := range ll.Operators {
+		for _, log := range op.Logs {
+			hh := hex.EncodeToString(log.LogID[:])
+			if strings.HasPrefix(hh, prefix) {
+				results = append(results, log)
+			}
+		}
+	}
+	return results
+}
+
+// FindLogByKey finds the log with the given DER-encoded key.
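+// For example (illustrative; keyB64 is a hypothetical base64 key string
+// taken from the JSON list):
+//
+//	der, err := base64.StdEncoding.DecodeString(keyB64)
+//	if err == nil {
+//		log := ll.FindLogByKey(der)
+//		_ = log // nil if no log uses this key
+//	}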
+func (ll *LogList) FindLogByKey(key []byte) *Log {
+	for _, op := range ll.Operators {
+		for _, log := range op.Logs {
+			if bytes.Equal(log.Key[:], key) {
+				return log
+			}
+		}
+	}
+	return nil
+}
+
+var hexDigits = regexp.MustCompile("^[0-9a-fA-F]+$")
+
+// FuzzyFindLog tries to find logs that match the given input, whose format
+// is unspecified. This generally returns a single log, but if text input
+// that matches multiple log descriptions is provided, then multiple logs
+// may be returned.
+func (ll *LogList) FuzzyFindLog(input string) []*Log {
+	input = strings.Trim(input, " \t")
+	if logs := ll.FindLogByName(input); len(logs) > 0 {
+		return logs
+	}
+	if log := ll.FindLogByURL(input); log != nil {
+		return []*Log{log}
+	}
+	// Try assuming the input is binary data of some form. First base64:
+	if data, err := base64.StdEncoding.DecodeString(input); err == nil {
+		if len(data) == sha256.Size {
+			var hash [sha256.Size]byte
+			copy(hash[:], data)
+			if log := ll.FindLogByKeyHash(hash); log != nil {
+				return []*Log{log}
+			}
+		}
+		if log := ll.FindLogByKey(data); log != nil {
+			return []*Log{log}
+		}
+	}
+	// Now hex, but strip all internal whitespace first.
+	input = stripInternalSpace(input)
+	if data, err := hex.DecodeString(input); err == nil {
+		if len(data) == sha256.Size {
+			var hash [sha256.Size]byte
+			copy(hash[:], data)
+			if log := ll.FindLogByKeyHash(hash); log != nil {
+				return []*Log{log}
+			}
+		}
+		if log := ll.FindLogByKey(data); log != nil {
+			return []*Log{log}
+		}
+	}
+	// Finally, allow hex strings with an odd number of digits.
+	if hexDigits.MatchString(input) {
+		if logs := ll.FindLogByKeyHashPrefix(input); len(logs) > 0 {
+			return logs
+		}
+	}
+
+	return nil
+}
+
+func stripInternalSpace(input string) string {
+	return strings.Map(func(r rune) rune {
+		if !unicode.IsSpace(r) {
+			return r
+		}
+		return -1
+	}, input)
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go b/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go
new file mode 100644
index 0000000000..84c7bbdf4e
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go
@@ -0,0 +1,29 @@
+// Code generated by "stringer -type=LogStatus"; DO NOT EDIT.
+
+package loglist3
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[UndefinedLogStatus-0] + _ = x[PendingLogStatus-1] + _ = x[QualifiedLogStatus-2] + _ = x[UsableLogStatus-3] + _ = x[ReadOnlyLogStatus-4] + _ = x[RetiredLogStatus-5] + _ = x[RejectedLogStatus-6] +} + +const _LogStatus_name = "UndefinedLogStatusPendingLogStatusQualifiedLogStatusUsableLogStatusReadOnlyLogStatusRetiredLogStatusRejectedLogStatus" + +var _LogStatus_index = [...]uint8{0, 18, 34, 52, 67, 84, 100, 117} + +func (i LogStatus) String() string { + if i < 0 || i >= LogStatus(len(_LogStatus_index)-1) { + return "LogStatus(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _LogStatus_name[_LogStatus_index[i]:_LogStatus_index[i+1]] +} diff --git a/vendor/github.com/google/certificate-transparency-go/types.go b/vendor/github.com/google/certificate-transparency-go/types.go index b6af606bff..2a96f6a09f 100644 --- a/vendor/github.com/google/certificate-transparency-go/types.go +++ b/vendor/github.com/google/certificate-transparency-go/types.go @@ -248,6 +248,21 @@ type CertificateChain struct { Entries []ASN1Cert `tls:"minlen:0,maxlen:16777215"` } +// PrecertChainEntryHash is an extended PrecertChainEntry type with the +// IssuanceChainHash field added to store the hash of the +// CertificateChain field of PrecertChainEntry. +type PrecertChainEntryHash struct { + PreCertificate ASN1Cert `tls:"minlen:1,maxlen:16777215"` + IssuanceChainHash []byte `tls:"minlen:0,maxlen:256"` +} + +// CertificateChainHash is an extended CertificateChain type with the +// IssuanceChainHash field added to store the hash of the +// Entries field of CertificateChain. +type CertificateChainHash struct { + IssuanceChainHash []byte `tls:"minlen:0,maxlen:256"` +} + // JSONDataEntry holds arbitrary data. type JSONDataEntry struct { Data []byte `tls:"minlen:0,maxlen:1677215"` diff --git a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go index 16278a1d99..fcd049de92 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/common/common.proto @@ -145,8 +145,8 @@ type Identity struct { // *Identity_SpiffeId // *Identity_Hostname // *Identity_Uid - // *Identity_MdbUsername - // *Identity_GaiaId + // *Identity_Username + // *Identity_GcpId IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` // Additional identity-specific attributes. 
Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -212,16 +212,16 @@ func (x *Identity) GetUid() string { return "" } -func (x *Identity) GetMdbUsername() string { - if x, ok := x.GetIdentityOneof().(*Identity_MdbUsername); ok { - return x.MdbUsername +func (x *Identity) GetUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_Username); ok { + return x.Username } return "" } -func (x *Identity) GetGaiaId() string { - if x, ok := x.GetIdentityOneof().(*Identity_GaiaId); ok { - return x.GaiaId +func (x *Identity) GetGcpId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GcpId); ok { + return x.GcpId } return "" } @@ -252,14 +252,14 @@ type Identity_Uid struct { Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` } -type Identity_MdbUsername struct { - // The MDB username of a connection endpoint. - MdbUsername string `protobuf:"bytes,5,opt,name=mdb_username,json=mdbUsername,proto3,oneof"` +type Identity_Username struct { + // The username of a connection endpoint. + Username string `protobuf:"bytes,5,opt,name=username,proto3,oneof"` } -type Identity_GaiaId struct { - // The Gaia ID of a connection endpoint. - GaiaId string `protobuf:"bytes,6,opt,name=gaia_id,json=gaiaId,proto3,oneof"` +type Identity_GcpId struct { + // The GCP ID of a connection endpoint. + GcpId string `protobuf:"bytes,6,opt,name=gcp_id,json=gcpId,proto3,oneof"` } func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} @@ -268,9 +268,9 @@ func (*Identity_Hostname) isIdentity_IdentityOneof() {} func (*Identity_Uid) isIdentity_IdentityOneof() {} -func (*Identity_MdbUsername) isIdentity_IdentityOneof() {} +func (*Identity_Username) isIdentity_IdentityOneof() {} -func (*Identity_GaiaId) isIdentity_IdentityOneof() {} +func (*Identity_GcpId) isIdentity_IdentityOneof() {} var File_internal_proto_common_common_proto protoreflect.FileDescriptor @@ -278,38 +278,37 @@ var file_internal_proto_common_common_proto_rawDesc = []byte{ 0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xb1, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, + 0xa8, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x23, 0x0a, - 0x0c, 0x6d, 0x64, 0x62, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x64, 0x62, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x07, 0x67, 0x61, 0x69, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x67, 0x61, 0x69, 0x61, 0x49, 0x64, 0x12, 0x43, 0x0a, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 
0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, - 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, - 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, - 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, - 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, - 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, - 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x1c, 0x0a, + 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x06, 0x67, + 0x63, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x67, + 0x63, 0x70, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, + 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 
0x10, + 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, + 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, + 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, + 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, + 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -326,7 +325,7 @@ func file_internal_proto_common_common_proto_rawDescGZIP() []byte { var file_internal_proto_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_internal_proto_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_internal_proto_common_common_proto_goTypes = []interface{}{ +var file_internal_proto_common_common_proto_goTypes = []any{ (Ciphersuite)(0), // 0: s2a.proto.Ciphersuite (TLSVersion)(0), // 1: s2a.proto.TLSVersion (*Identity)(nil), // 2: s2a.proto.Identity @@ -347,7 +346,7 @@ func file_internal_proto_common_common_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Identity); i { case 0: return &v.state @@ -360,12 +359,12 @@ func file_internal_proto_common_common_proto_init() { } } } - file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []any{ (*Identity_SpiffeId)(nil), (*Identity_Hostname)(nil), (*Identity_Uid)(nil), - (*Identity_MdbUsername)(nil), - (*Identity_GaiaId)(nil), + (*Identity_Username)(nil), + (*Identity_GcpId)(nil), } type x struct{} out := protoimpl.TypeBuilder{ diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go index f4f763ae10..2af3ee3dc1 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/s2a_context/s2a_context.proto @@ -209,7 +209,7 @@ func file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP() []byte { } var file_internal_proto_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []interface{}{ +var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []any{ (*S2AContext)(nil), // 0: s2a.proto.S2AContext (common_go_proto.TLSVersion)(0), // 1: s2a.proto.TLSVersion (common_go_proto.Ciphersuite)(0), // 2: s2a.proto.Ciphersuite @@ -233,7 +233,7 @@ func file_internal_proto_s2a_context_s2a_context_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*S2AContext); i { case 0: return &v.state diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go index 0a86ebee59..8919232fd8 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/s2a/s2a.proto @@ -1171,7 +1171,7 @@ func file_internal_proto_s2a_s2a_proto_rawDescGZIP() []byte { } var file_internal_proto_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 10) -var file_internal_proto_s2a_s2a_proto_goTypes = []interface{}{ +var file_internal_proto_s2a_s2a_proto_goTypes = []any{ (*AuthenticationMechanism)(nil), // 0: s2a.proto.AuthenticationMechanism (*ClientSessionStartReq)(nil), // 1: s2a.proto.ClientSessionStartReq (*ServerSessionStartReq)(nil), // 2: s2a.proto.ServerSessionStartReq @@ -1226,7 +1226,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AuthenticationMechanism); i { case 0: return &v.state @@ -1238,7 +1238,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientSessionStartReq); i { case 0: return &v.state @@ -1250,7 +1250,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerSessionStartReq); i { case 0: return &v.state @@ -1262,7 +1262,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*SessionNextReq); i { case 0: return &v.state @@ -1274,7 +1274,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - 
file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ResumptionTicketReq); i { case 0: return &v.state @@ -1286,7 +1286,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*SessionReq); i { case 0: return &v.state @@ -1298,7 +1298,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*SessionState); i { case 0: return &v.state @@ -1310,7 +1310,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*SessionResult); i { case 0: return &v.state @@ -1322,7 +1322,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*SessionStatus); i { case 0: return &v.state @@ -1334,7 +1334,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*SessionResp); i { case 0: return &v.state @@ -1347,10 +1347,10 @@ func file_internal_proto_s2a_s2a_proto_init() { } } } - file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []any{ (*AuthenticationMechanism_Token)(nil), } - file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []any{ (*SessionReq_ClientStart)(nil), (*SessionReq_ServerStart)(nil), (*SessionReq_Next)(nil), diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go index 0fa582fc87..8fac3841be 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v3.21.12 // source: internal/proto/s2a/s2a.proto @@ -29,8 +29,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. 
+const _ = grpc.SupportPackageIsVersion8 const ( S2AService_SetUpSession_FullMethodName = "/s2a.proto.S2AService/SetUpSession" @@ -61,11 +61,12 @@ func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { } func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { - stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &s2AServiceSetUpSessionClient{stream} + x := &s2AServiceSetUpSessionClient{ClientStream: stream} return x, nil } @@ -129,7 +130,7 @@ func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { } func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{ServerStream: stream}) } type S2AService_SetUpSessionServer interface { diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go index c84bed9774..e9aa5d14c0 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/common/common.proto @@ -256,62 +256,218 @@ func (AlpnProtocol) EnumDescriptor() ([]byte, []int) { return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{3} } +type Identity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to IdentityOneof: + // + // *Identity_SpiffeId + // *Identity_Hostname + // *Identity_Uid + // *Identity_Username + // *Identity_GcpId + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional identity-specific attributes. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Identity) Reset() { + *x = Identity{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_common_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identity) ProtoMessage() {} + +func (x *Identity) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_common_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identity.ProtoReflect.Descriptor instead. 
+func (*Identity) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{0} +} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (x *Identity) GetSpiffeId() string { + if x, ok := x.GetIdentityOneof().(*Identity_SpiffeId); ok { + return x.SpiffeId + } + return "" +} + +func (x *Identity) GetHostname() string { + if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (x *Identity) GetUid() string { + if x, ok := x.GetIdentityOneof().(*Identity_Uid); ok { + return x.Uid + } + return "" +} + +func (x *Identity) GetUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_Username); ok { + return x.Username + } + return "" +} + +func (x *Identity) GetGcpId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GcpId); ok { + return x.GcpId + } + return "" +} + +func (x *Identity) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_SpiffeId struct { + // The SPIFFE ID of a connection endpoint. + SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3,oneof"` +} + +type Identity_Hostname struct { + // The hostname of a connection endpoint. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +type Identity_Uid struct { + // The UID of a connection endpoint. + Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` +} + +type Identity_Username struct { + // The username of a connection endpoint. + Username string `protobuf:"bytes,5,opt,name=username,proto3,oneof"` +} + +type Identity_GcpId struct { + // The GCP ID of a connection endpoint. 
+ GcpId string `protobuf:"bytes,6,opt,name=gcp_id,json=gcpId,proto3,oneof"` +} + +func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +func (*Identity_Uid) isIdentity_IdentityOneof() {} + +func (*Identity_Username) isIdentity_IdentityOneof() {} + +func (*Identity_GcpId) isIdentity_IdentityOneof() {} + var File_internal_proto_v2_common_common_proto protoreflect.FileDescriptor var file_internal_proto_v2_common_common_proto_rawDesc = []byte{ 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, - 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, - 0x55, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, - 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, - 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, - 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, - 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, - 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x22, 0xab, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, + 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, + 0x75, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x17, 0x0a, 0x06, 0x67, 0x63, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x05, 0x67, 0x63, 0x70, 0x49, 0x64, 0x12, 0x46, 0x0a, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x73, 
0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, + 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, + 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, + 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, + 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, + 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, - 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, - 0x33, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, - 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, - 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, - 0x5f, 0x31, 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, - 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, - 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, - 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, - 0x31, 0x5f, 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, - 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, - 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 
0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, - 0x2a, 0x79, 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x16, 0x0a, 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, - 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, - 0x12, 0x19, 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, - 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, 0x48, + 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, + 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, + 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, + 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, + 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, 0x33, 0x43, + 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, + 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, + 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, + 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, + 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, 0x12, 0x13, + 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, + 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 
0x4e, 0x45, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x79, + 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1d, + 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, + 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x47, + 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, + 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, 0x12, 0x19, + 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, + 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -327,18 +483,22 @@ func file_internal_proto_v2_common_common_proto_rawDescGZIP() []byte { } var file_internal_proto_v2_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_internal_proto_v2_common_common_proto_goTypes = []interface{}{ +var file_internal_proto_v2_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_internal_proto_v2_common_common_proto_goTypes = []any{ (Ciphersuite)(0), // 0: s2a.proto.v2.Ciphersuite (TLSVersion)(0), // 1: s2a.proto.v2.TLSVersion (ConnectionSide)(0), // 2: s2a.proto.v2.ConnectionSide (AlpnProtocol)(0), // 3: s2a.proto.v2.AlpnProtocol + (*Identity)(nil), // 4: s2a.proto.v2.Identity + nil, // 5: s2a.proto.v2.Identity.AttributesEntry } var file_internal_proto_v2_common_common_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 5, // 0: s2a.proto.v2.Identity.attributes:type_name -> s2a.proto.v2.Identity.AttributesEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_internal_proto_v2_common_common_proto_init() } @@ -346,19 +506,41 @@ func file_internal_proto_v2_common_common_proto_init() { if File_internal_proto_v2_common_common_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_internal_proto_v2_common_common_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + } + file_internal_proto_v2_common_common_proto_msgTypes[0].OneofWrappers = []any{ + (*Identity_SpiffeId)(nil), + (*Identity_Hostname)(nil), + (*Identity_Uid)(nil), + (*Identity_Username)(nil), + (*Identity_GcpId)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_internal_proto_v2_common_common_proto_rawDesc, NumEnums: 4, - NumMessages: 0, + NumMessages: 2, NumExtensions: 0, NumServices: 0, }, GoTypes: file_internal_proto_v2_common_common_proto_goTypes, DependencyIndexes: file_internal_proto_v2_common_common_proto_depIdxs, EnumInfos: file_internal_proto_v2_common_common_proto_enumTypes, + MessageInfos: file_internal_proto_v2_common_common_proto_msgTypes, }.Build() File_internal_proto_v2_common_common_proto = out.File file_internal_proto_v2_common_common_proto_rawDesc = nil diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go index b7fd871c7a..418331a4bd 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go @@ -14,14 +14,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/s2a_context/s2a_context.proto package s2a_context_go_proto import ( - common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -64,7 +64,7 @@ type S2AContext struct { // certificate chain was NOT validated successfully. PeerCertificateChainFingerprints []string `protobuf:"bytes,4,rep,name=peer_certificate_chain_fingerprints,json=peerCertificateChainFingerprints,proto3" json:"peer_certificate_chain_fingerprints,omitempty"` // The local identity used during session setup. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,9,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` // The SHA256 hash of the DER-encoding of the local leaf certificate used in // the handshake. 
LocalLeafCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_leaf_cert_fingerprint,json=localLeafCertFingerprint,proto3" json:"local_leaf_cert_fingerprint,omitempty"` @@ -151,35 +151,36 @@ var file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = []byte{ 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, - 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, - 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, - 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x66, 0x43, - 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x61, 0x66, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x44, 0x6e, 0x73, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, - 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, - 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, - 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x5f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, + 0x66, 0x65, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, + 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, + 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, + 0x44, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, + 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, + 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, + 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, + 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, + 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, + 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -195,12 +196,12 @@ func file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP() []byte { } var file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []interface{}{ +var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []any{ (*S2AContext)(nil), // 0: s2a.proto.v2.S2AContext - (*common_go_proto.Identity)(nil), // 1: s2a.proto.Identity + (*common_go_proto.Identity)(nil), // 1: s2a.proto.v2.Identity } var file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = []int32{ - 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.Identity + 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.v2.Identity 1, // [1:1] is the sub-list for method 
output_type 1, // [1:1] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name @@ -214,7 +215,7 @@ func file_internal_proto_v2_s2a_context_s2a_context_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*S2AContext); i { case 0: return &v.state diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go index e843450c7e..548f31da2d 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/s2a/s2a.proto package s2a_go_proto import ( - common_go_proto1 "github.com/google/s2a-go/internal/proto/common_go_proto" common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2a_context_go_proto "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -292,6 +291,12 @@ const ( // The connect-to-Google verification mode uses the trust bundle for // connecting to Google, e.g. *.mtls.googleapis.com endpoints. ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ValidatePeerCertificateChainReq_VerificationMode = 2 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_3 ValidatePeerCertificateChainReq_VerificationMode = 3 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 ValidatePeerCertificateChainReq_VerificationMode = 4 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 ValidatePeerCertificateChainReq_VerificationMode = 5 ) // Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. @@ -300,11 +305,17 @@ var ( 0: "UNSPECIFIED", 1: "SPIFFE", 2: "CONNECT_TO_GOOGLE", + 3: "RESERVED_CUSTOM_VERIFICATION_MODE_3", + 4: "RESERVED_CUSTOM_VERIFICATION_MODE_4", + 5: "RESERVED_CUSTOM_VERIFICATION_MODE_5", } ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ - "UNSPECIFIED": 0, - "SPIFFE": 1, - "CONNECT_TO_GOOGLE": 2, + "UNSPECIFIED": 0, + "SPIFFE": 1, + "CONNECT_TO_GOOGLE": 2, + "RESERVED_CUSTOM_VERIFICATION_MODE_3": 3, + "RESERVED_CUSTOM_VERIFICATION_MODE_4": 4, + "RESERVED_CUSTOM_VERIFICATION_MODE_5": 5, } ) @@ -454,7 +465,7 @@ type AuthenticationMechanism struct { // mechanism. Otherwise, S2A assumes that the authentication mechanism is // associated with the default identity. If the default identity cannot be // determined, the request is rejected. 
- Identity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + Identity *common_go_proto.Identity `protobuf:"bytes,3,opt,name=identity,proto3" json:"identity,omitempty"` // Types that are assignable to MechanismOneof: // // *AuthenticationMechanism_Token @@ -493,7 +504,7 @@ func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{1} } -func (x *AuthenticationMechanism) GetIdentity() *common_go_proto1.Identity { +func (x *AuthenticationMechanism) GetIdentity() *common_go_proto.Identity { if x != nil { return x.Identity } @@ -1185,7 +1196,7 @@ type SessionReq struct { // identity is not populated, S2A will try to deduce the managed identity to // use from the SNI extension. If that also fails, S2A uses the default // identity (if one exists). - LocalIdentity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,7,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` // The authentication mechanisms that the application wishes to use to // authenticate to S2A, ordered by preference. S2A will always use the first // authentication mechanism that matches the managed identity. @@ -1231,7 +1242,7 @@ func (*SessionReq) Descriptor() ([]byte, []int) { return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{11} } -func (x *SessionReq) GetLocalIdentity() *common_go_proto1.Identity { +func (x *SessionReq) GetLocalIdentity() *common_go_proto.Identity { if x != nil { return x.LocalIdentity } @@ -1790,358 +1801,365 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, - 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, - 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 
0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x75, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, - 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x36, 0x0a, 0x06, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, - 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x64, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, - 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6c, - 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a, - 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, - 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x1a, 0xcf, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, + 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, 0x41, 0x6c, 0x70, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, + 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x22, 0x7e, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x32, 0x0a, 0x08, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, 0x68, + 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x22, 0x36, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, + 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, + 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, + 0x17, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, - 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, - 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, - 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, 0x06, 0x0a, 0x16, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 
0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, - 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, - 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x93, 0x01, 0x0a, 0x1a, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, - 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x65, 0x61, 0x64, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65, - 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x65, 0x61, 0x64, 0x12, 0x39, - 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, - 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x2e, 0x0a, 0x2a, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, - 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x29, 0x0a, 0x25, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 
0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, - 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x52, 0x45, 0x51, 0x55, 0x45, - 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, - 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, - 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, - 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, - 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, - 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, - 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x03, 0x0a, 0x1d, - 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x5d, 0x0a, - 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72, + 0x6f, 0x6e, 0x12, 0x78, 0x0a, 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcf, 0x02, 0x0a, + 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 
0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, + 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, + 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, + 0x06, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, + 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, + 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, + 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, + 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, + 0x93, 0x01, 0x0a, 0x1a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, + 0x72, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, + 0x61, 0x65, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, + 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, + 0x65, 0x61, 0x64, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, + 0x02, 0x0a, 0x18, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, + 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, + 0x01, 0x12, 0x2e, 0x0a, 0x2a, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, + 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, + 0x02, 0x12, 0x29, 0x0a, 0x25, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, + 0x41, 0x4e, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, + 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, + 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, + 0x45, 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, + 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0xb0, 0x03, 0x0a, 0x1d, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x5d, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x52, + 0x65, 0x71, 0x2e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x51, 0x0a, 0x13, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, + 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x52, 0x12, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, + 0x61, 0x32, 0x35, 0x36, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, + 0x61, 0x33, 0x38, 0x34, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, + 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, + 0x31, 0x32, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x08, 0x0a, 0x04, 0x53, 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, + 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x22, 0x3d, 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x13, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, - 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, - 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x44, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x5f, - 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, - 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, - 0x73, 0x68, 
0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x44, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, - 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, - 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3d, - 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, - 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, - 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, - 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, 0x4f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, - 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf8, 0x04, 0x0a, 0x1f, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, - 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x22, 0xe7, 0x01, 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, + 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, + 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, + 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, + 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf4, + 0x05, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x52, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x3e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, + 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, - 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, - 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 
0x65, 0x65, 0x72, 0x12, 0x5b, - 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, - 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, - 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0a, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x25, 0x73, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x74, - 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, - 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x46, - 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, - 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, - 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, - 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 
0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, - 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, - 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0x97, 0x05, 0x0a, 0x0a, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, - 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, - 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, - 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, - 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, - 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, - 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, - 0x6f, 0x70, 0x65, 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, + 0x65, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, + 0x1a, 0x39, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x51, 0x0a, 0x25, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, + 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x22, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, + 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x22, 0xc1, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, + 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, + 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x12, 0x27, 0x0a, 0x23, + 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, + 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, + 0x45, 0x5f, 0x33, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 
0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x34, 0x10, 0x04, 0x12, 0x27, + 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, + 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, + 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0xa0, 0x05, 0x0a, 0x0a, 0x53, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, + 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, + 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, + 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, + 0x73, 0x6d, 0x52, 0x18, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, + 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, + 0x77, 0x0a, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x72, 0x65, 0x71, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, + 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x71, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, - 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, - 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 
0x17, - 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, - 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, + 0x65, 0x71, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, - 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, - 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x68, 0x61, 0x69, 
0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, + 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xb4, 0x04, + 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, + 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, + 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, + 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, + 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 
0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, + 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, + 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, + 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, - 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, - 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, - 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, - 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, - 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, - 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, - 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, - 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, - 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, - 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, - 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, + 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, + 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, + 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, + 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, + 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, + 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, + 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, + 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 
0x53, 0x48, 0x41, 0x32, 0x35, + 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, - 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, - 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, - 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, - 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, - 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, - 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, + 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, - 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, + 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, + 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -2158,7 +2176,7 @@ func file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP() []byte { var file_internal_proto_v2_s2a_s2a_proto_enumTypes = make([]protoimpl.EnumInfo, 6) var file_internal_proto_v2_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 17) -var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ +var file_internal_proto_v2_s2a_s2a_proto_goTypes = []any{ (SignatureAlgorithm)(0), // 0: s2a.proto.v2.SignatureAlgorithm 
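The regenerated hunks here swap []interface{} for []any. Since Go 1.18, any is a predeclared alias for interface{} (not a new type), so the regenerated registration tables behave identically. A minimal standalone sketch, not part of this patch, showing the two spellings name the same type:

package main

import "fmt"

func main() {
	// any is an alias, so assignment between the two spellings involves
	// no conversion, and comparison sees identical dynamic types.
	var a any = 42
	var i interface{} = a
	fmt.Println(a == i) // true
}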
(GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)(0), // 1: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate (OffloadPrivateKeyOperationReq_PrivateKeyOperation)(0), // 2: s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation @@ -2183,7 +2201,7 @@ var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ (*ValidatePeerCertificateChainReq_ClientPeer)(nil), // 21: s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer (*ValidatePeerCertificateChainReq_ServerPeer)(nil), // 22: s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer (common_go_proto.AlpnProtocol)(0), // 23: s2a.proto.v2.AlpnProtocol - (*common_go_proto1.Identity)(nil), // 24: s2a.proto.Identity + (*common_go_proto.Identity)(nil), // 24: s2a.proto.v2.Identity (common_go_proto.ConnectionSide)(0), // 25: s2a.proto.v2.ConnectionSide (*s2a_context_go_proto.S2AContext)(nil), // 26: s2a.proto.v2.S2AContext (common_go_proto.TLSVersion)(0), // 27: s2a.proto.v2.TLSVersion @@ -2191,7 +2209,7 @@ var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ } var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ 23, // 0: s2a.proto.v2.AlpnPolicy.alpn_protocols:type_name -> s2a.proto.v2.AlpnProtocol - 24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity + 24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.v2.Identity 25, // 2: s2a.proto.v2.GetTlsConfigurationReq.connection_side:type_name -> s2a.proto.v2.ConnectionSide 19, // 3: s2a.proto.v2.GetTlsConfigurationResp.client_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration 20, // 4: s2a.proto.v2.GetTlsConfigurationResp.server_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration @@ -2203,7 +2221,7 @@ var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ 22, // 10: s2a.proto.v2.ValidatePeerCertificateChainReq.server_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer 5, // 11: s2a.proto.v2.ValidatePeerCertificateChainResp.validation_result:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult 26, // 12: s2a.proto.v2.ValidatePeerCertificateChainResp.context:type_name -> s2a.proto.v2.S2AContext - 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.Identity + 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.v2.Identity 7, // 14: s2a.proto.v2.SessionReq.authentication_mechanisms:type_name -> s2a.proto.v2.AuthenticationMechanism 9, // 15: s2a.proto.v2.SessionReq.get_tls_configuration_req:type_name -> s2a.proto.v2.GetTlsConfigurationReq 11, // 16: s2a.proto.v2.SessionReq.offload_private_key_operation_req:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq @@ -2238,7 +2256,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AlpnPolicy); i { case 0: return &v.state @@ -2250,7 +2268,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*AuthenticationMechanism); i { case 0: return &v.state @@ -2262,7 +2280,7 @@ func 
file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Status); i { case 0: return &v.state @@ -2274,7 +2292,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationReq); i { case 0: return &v.state @@ -2286,7 +2304,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationResp); i { case 0: return &v.state @@ -2298,7 +2316,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*OffloadPrivateKeyOperationReq); i { case 0: return &v.state @@ -2310,7 +2328,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*OffloadPrivateKeyOperationResp); i { case 0: return &v.state @@ -2322,7 +2340,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*OffloadResumptionKeyOperationReq); i { case 0: return &v.state @@ -2334,7 +2352,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*OffloadResumptionKeyOperationResp); i { case 0: return &v.state @@ -2346,7 +2364,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainReq); i { case 0: return &v.state @@ -2358,7 +2376,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainResp); i { case 0: return &v.state @@ -2370,7 +2388,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*SessionReq); i { case 0: return &v.state @@ -2382,7 +2400,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return 
nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*SessionResp); i { case 0: return &v.state @@ -2394,7 +2412,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationResp_ClientTlsConfiguration); i { case 0: return &v.state @@ -2406,7 +2424,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationResp_ServerTlsConfiguration); i { case 0: return &v.state @@ -2418,7 +2436,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainReq_ClientPeer); i { case 0: return &v.state @@ -2430,7 +2448,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainReq_ServerPeer); i { case 0: return &v.state @@ -2443,30 +2461,30 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { } } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []any{ (*AuthenticationMechanism_Token)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []any{ (*GetTlsConfigurationResp_ClientTlsConfiguration_)(nil), (*GetTlsConfigurationResp_ServerTlsConfiguration_)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []any{ (*OffloadPrivateKeyOperationReq_RawBytes)(nil), (*OffloadPrivateKeyOperationReq_Sha256Digest)(nil), (*OffloadPrivateKeyOperationReq_Sha384Digest)(nil), (*OffloadPrivateKeyOperationReq_Sha512Digest)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []any{ (*ValidatePeerCertificateChainReq_ClientPeer_)(nil), (*ValidatePeerCertificateChainReq_ServerPeer_)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []any{ (*SessionReq_GetTlsConfigurationReq)(nil), (*SessionReq_OffloadPrivateKeyOperationReq)(nil), (*SessionReq_OffloadResumptionKeyOperationReq)(nil), (*SessionReq_ValidatePeerCertificateChainReq)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []any{ (*SessionResp_GetTlsConfigurationResp)(nil), (*SessionResp_OffloadPrivateKeyOperationResp)(nil), 
(*SessionResp_OffloadResumptionKeyOperationResp)(nil), diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go index 2566df6c30..c93f75a78b 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v3.21.12 // source: internal/proto/v2/s2a/s2a.proto @@ -29,8 +29,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( S2AService_SetUpSession_FullMethodName = "/s2a.proto.v2.S2AService/SetUpSession" @@ -54,11 +54,12 @@ func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { } func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { - stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &s2AServiceSetUpSessionClient{stream} + x := &s2AServiceSetUpSessionClient{ClientStream: stream} return x, nil } @@ -115,7 +116,7 @@ func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { } func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{ServerStream: stream}) } type S2AService_SetUpSessionServer interface { diff --git a/vendor/github.com/google/s2a-go/internal/record/record.go b/vendor/github.com/google/s2a-go/internal/record/record.go index c60515510a..e76509ef01 100644 --- a/vendor/github.com/google/s2a-go/internal/record/record.go +++ b/vendor/github.com/google/s2a-go/internal/record/record.go @@ -378,11 +378,6 @@ func (p *conn) Read(b []byte) (n int, err error) { if len(p.handshakeBuf) > 0 { return 0, errors.New("application data received while processing fragmented handshake messages") } - if p.ticketState == receivingTickets { - p.ticketState = notReceivingTickets - grpclog.Infof("Sending session tickets to S2A.") - p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) - } case alert: return 0, p.handleAlertMessage() case handshake: @@ -500,17 +495,7 @@ func (p *conn) buildRecord(plaintext []byte, recordType byte, recordStartIndex i } func (p *conn) Close() error { - p.readMutex.Lock() - defer p.readMutex.Unlock() - p.writeMutex.Lock() - defer p.writeMutex.Unlock() - // If p.ticketState is equal to notReceivingTickets, then S2A has - // been sent a flight of session tickets, and we must wait for the - // call to S2A to complete before closing the record protocol. 
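In the s2a_grpc.pb.go hunks above, regenerating with protoc-gen-go-grpc v1.4.0 raises the compile-time floor to gRPC-Go v1.62.0 (SupportPackageIsVersion8), switches the stream wrappers to named-field initialization (ClientStream: stream), and prepends grpc.StaticMethod() to the call options, which tells interceptors and stats handlers that the method name is a compile-time constant (safe to use, for example, as a metrics label). A sketch of that client-stream pattern, assuming it sits alongside the generated s2a_go_proto package:

package s2a_go_proto

import (
	"context"

	"google.golang.org/grpc"
)

// newSetUpSessionStream mirrors the generated SetUpSession client shown in
// the hunk above: StaticMethod is prepended, so caller-supplied options,
// applied later, still take precedence.
func newSetUpSessionStream(ctx context.Context, cc grpc.ClientConnInterface, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	return cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, cOpts...)
}

The record.go hunks that follow drop the session-ticket forwarding path entirely: NewSessionTicket handshake messages are now ignored on receipt, and Close() returns immediately instead of waiting on the callComplete channel.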
- if p.ticketState == notReceivingTickets { - <-p.callComplete - grpclog.Infof("Safe to close the connection because sending tickets to S2A is (already) complete.") - } + // Close the connection immediately. return p.Conn.Close() } @@ -663,7 +648,7 @@ func (p *conn) handleHandshakeMessage() error { // Several handshake messages may be coalesced into a single record. // Continue reading them until the handshake buffer is empty. for len(p.handshakeBuf) > 0 { - handshakeMsgType, msgLen, msg, rawMsg, ok := p.parseHandshakeMsg() + handshakeMsgType, msgLen, msg, _, ok := p.parseHandshakeMsg() if !ok { // The handshake could not be fully parsed, so read in another // record and try again later. @@ -681,20 +666,7 @@ func (p *conn) handleHandshakeMessage() error { return err } case tlsHandshakeNewSessionTicketType: - // Ignore tickets that are received after a batch of tickets has - // been sent to S2A. - if p.ticketState == notReceivingTickets { - continue - } - if p.ticketState == ticketsNotYetReceived { - p.ticketState = receivingTickets - } - p.sessionTickets = append(p.sessionTickets, rawMsg) - if len(p.sessionTickets) == maxAllowedTickets { - p.ticketState = notReceivingTickets - grpclog.Infof("Sending session tickets to S2A.") - p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) - } + // Do nothing for session ticket. default: return errors.New("unknown handshake message type") } diff --git a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go index ec96ba3b6a..4057e70c8a 100644 --- a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go +++ b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go @@ -23,7 +23,8 @@ import ( "fmt" "os" - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ) const ( @@ -37,7 +38,7 @@ type AccessTokenManager interface { DefaultToken() (token string, err error) // Token returns a token that an application with local identity equal to // identity must use to authenticate to S2A. - Token(identity *commonpb.Identity) (token string, err error) + Token(identity interface{}) (token string, err error) } type singleTokenAccessTokenManager struct { @@ -65,6 +66,14 @@ func (m *singleTokenAccessTokenManager) DefaultToken() (string, error) { } // Token always returns the token managed by the singleTokenAccessTokenManager. -func (m *singleTokenAccessTokenManager) Token(*commonpb.Identity) (string, error) { +func (m *singleTokenAccessTokenManager) Token(identity interface{}) (string, error) { + switch v := identity.(type) { + case *commonpbv1.Identity: + // valid type. + case *commonpb.Identity: + // valid type. 
+ default: + return "", fmt.Errorf("Incorrect identity type: %v", v) + } return m.token, nil }
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der deleted file mode 100644 index 958f3cfaddf3645fa6c0578b5b6955d65ac4c172..0000000000000000000000000000000000000000 GIT binary patch (base85 payload omitted)
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der deleted file mode 100644 index d2817641bafb022339926786ab85b545f40ac665..0000000000000000000000000000000000000000 GIT binary patch (base85 payload omitted)
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der deleted file mode 100644 index d8c3710c85f9ff41ddfc709924c866350a727a4f..0000000000000000000000000000000000000000 GIT binary patch (base85 payload omitted)
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der deleted file mode 100644 index dae619c097512f20d09d2054c63fc0f715d7be24..0000000000000000000000000000000000000000 GIT binary patch (base85 payload omitted)
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der deleted file mode 100644 index ce7f8d31d6802c7e68c188af8797c3a063894857..0000000000000000000000000000000000000000 GIT binary patch (base85 payload omitted)
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der deleted file mode 100644 index 04b0d73600b72f80a03943d41973b279db9e8b32..0000000000000000000000000000000000000000 GIT binary patch (base85 payload omitted)
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem deleted file mode 100644 index 493a5a2648..0000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ (-24 lines: test certificate PEM omitted)
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem deleted file mode 100644 index 55a7f10c74..0000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ (-27 lines: test RSA private key PEM omitted)
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der deleted file mode 100644 index 04b0d73600b72f80a03943d41973b279db9e8b32..0000000000000000000000000000000000000000 GIT binary patch (base85 payload omitted)
[diff header truncated] + if len(d.GetDigest()) > 0 { + for alg, digest := range d.GetDigest() { + + // Per https://github.com/in-toto/attestation/blob/main/spec/v1/digest_set.md + // check encoding and length for supported algorithms; + // use of custom, unsupported algorithms is allowed and does not generate validation errors. + supported, size := isSupportedFixedSizeAlgorithm(alg) + if supported { + // the in-toto spec expects a hex-encoded string in DigestSets for supported algorithms + hashBytes, err := hex.DecodeString(digest) + + if err != nil { + return fmt.Errorf("%w (%s: %s)", ErrInvalidDigestEncoding, alg, digest) + } + + // check the length of the digest + if len(hashBytes) != size { + return fmt.Errorf("%w: got %d bytes, want %d bytes (%s: %s)", ErrIncorrectDigestLength, len(hashBytes), size, alg, digest) + } + } + } + } + + return nil +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go new file mode 100644 index 0000000000..3e59869b10 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go @@ -0,0 +1,233 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.1 +// protoc v4.24.4 +// source: in_toto_attestation/v1/resource_descriptor.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date.
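The digest validation above hex-decodes each DigestSet entry and checks the decoded byte length against the algorithm's expected size, while letting custom algorithm names pass through unvalidated. A standalone sketch of the same check for one algorithm; the helper name and error strings are illustrative, not the library's:

package main

import (
	"encoding/hex"
	"fmt"
)

// checkSHA256 applies the rule above for sha256: the value must be valid
// hex and decode to exactly 32 bytes.
func checkSHA256(digest string) error {
	raw, err := hex.DecodeString(digest)
	if err != nil {
		return fmt.Errorf("invalid digest encoding (sha256: %s)", digest)
	}
	if len(raw) != 32 {
		return fmt.Errorf("incorrect digest length: got %d bytes, want 32 (sha256: %s)", len(raw), digest)
	}
	return nil
}

func main() {
	// SHA-256 of the empty string: 64 hex characters, 32 bytes decoded.
	fmt.Println(checkSHA256("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")) // <nil>
	fmt.Println(checkSHA256("abc")) // error: odd-length hex string
}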
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of the in-toto v1 ResourceDescriptor. +// https://github.com/in-toto/attestation/blob/main/spec/v1/resource_descriptor.md +// Validation of all fields is left to the users of this proto. +type ResourceDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"` + Digest map[string]string `protobuf:"bytes,3,rep,name=digest,proto3" json:"digest,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Content []byte `protobuf:"bytes,4,opt,name=content,proto3" json:"content,omitempty"` + DownloadLocation string `protobuf:"bytes,5,opt,name=download_location,json=downloadLocation,proto3" json:"download_location,omitempty"` + MediaType string `protobuf:"bytes,6,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // Per the Struct protobuf spec, this type corresponds to + // a JSON Object, which is truly a map under the hood. + // So, the Struct a) is still consistent with our specification for + // the `annotations` field, and b) has native support in some language + // bindings making their use easier in implementations. + // See: https://pkg.go.dev/google.golang.org/protobuf/types/known/structpb#Struct + Annotations *structpb.Struct `protobuf:"bytes,7,opt,name=annotations,proto3" json:"annotations,omitempty"` +} + +func (x *ResourceDescriptor) Reset() { + *x = ResourceDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceDescriptor) ProtoMessage() {} + +func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead. 
+func (*ResourceDescriptor) Descriptor() ([]byte, []int) { + return file_in_toto_attestation_v1_resource_descriptor_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceDescriptor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ResourceDescriptor) GetUri() string { + if x != nil { + return x.Uri + } + return "" +} + +func (x *ResourceDescriptor) GetDigest() map[string]string { + if x != nil { + return x.Digest + } + return nil +} + +func (x *ResourceDescriptor) GetContent() []byte { + if x != nil { + return x.Content + } + return nil +} + +func (x *ResourceDescriptor) GetDownloadLocation() string { + if x != nil { + return x.DownloadLocation + } + return "" +} + +func (x *ResourceDescriptor) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *ResourceDescriptor) GetAnnotations() *structpb.Struct { + if x != nil { + return x.Annotations + } + return nil +} + +var File_in_toto_attestation_v1_resource_descriptor_proto protoreflect.FileDescriptor + +var file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x30, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x16, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, 0x61, 0x74, 0x74, 0x65, + 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe6, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x4e, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, + 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x64, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, + 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x77, 0x6e, + 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, + 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 
0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x47, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x69, + 0x6e, 0x74, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x69, 0x6e, 0x2d, 0x74, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescOnce sync.Once + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData = file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc +) + +func file_in_toto_attestation_v1_resource_descriptor_proto_rawDescGZIP() []byte { + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescOnce.Do(func() { + file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData) + }) + return file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData +} + +var file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = []interface{}{ + (*ResourceDescriptor)(nil), // 0: in_toto_attestation.v1.ResourceDescriptor + nil, // 1: in_toto_attestation.v1.ResourceDescriptor.DigestEntry + (*structpb.Struct)(nil), // 2: google.protobuf.Struct +} +var file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs = []int32{ + 1, // 0: in_toto_attestation.v1.ResourceDescriptor.digest:type_name -> in_toto_attestation.v1.ResourceDescriptor.DigestEntry + 2, // 1: in_toto_attestation.v1.ResourceDescriptor.annotations:type_name -> google.protobuf.Struct + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_in_toto_attestation_v1_resource_descriptor_proto_init() } +func file_in_toto_attestation_v1_resource_descriptor_proto_init() { + if File_in_toto_attestation_v1_resource_descriptor_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_in_toto_attestation_v1_resource_descriptor_proto_goTypes, + DependencyIndexes: file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs, + MessageInfos: 
file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes, + }.Build() + File_in_toto_attestation_v1_resource_descriptor_proto = out.File + file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc = nil + file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = nil + file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs = nil +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.go b/vendor/github.com/in-toto/attestation/go/v1/statement.go new file mode 100644 index 0000000000..f63d5f0d74 --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/statement.go @@ -0,0 +1,50 @@ +/* +Wrapper APIs for in-toto attestation Statement layer protos. +*/ + +package v1 + +import "errors" + +const StatementTypeUri = "https://in-toto.io/Statement/v1" + +var ( + ErrInvalidStatementType = errors.New("wrong statement type") + ErrSubjectRequired = errors.New("at least one subject required") + ErrDigestRequired = errors.New("at least one digest required") + ErrPredicateTypeRequired = errors.New("predicate type required") + ErrPredicateRequired = errors.New("predicate object required") +) + +func (s *Statement) Validate() error { + if s.GetType() != StatementTypeUri { + return ErrInvalidStatementType + } + + if s.GetSubject() == nil || len(s.GetSubject()) == 0 { + return ErrSubjectRequired + } + + // check all resource descriptors in the subject + subject := s.GetSubject() + for _, rd := range subject { + if err := rd.Validate(); err != nil { + return err + } + + // v1 statements require the digest to be set in the subject + if len(rd.GetDigest()) == 0 { + return ErrDigestRequired + } + } + + if s.GetPredicateType() == "" { + return ErrPredicateTypeRequired + } + + if s.GetPredicate() == nil { + return ErrPredicateRequired + } + + return nil +} diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go new file mode 100644 index 0000000000..a2bd2c2d7d --- /dev/null +++ b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go @@ -0,0 +1,197 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.1 +// protoc v4.24.4 +// source: in_toto_attestation/v1/statement.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of the in-toto v1 Statement. +// https://github.com/in-toto/attestation/tree/main/spec/v1 +// Validation of all fields is left to the users of this proto. 
+type Statement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Expected to always be "https://in-toto.io/Statement/v1" + Type string `protobuf:"bytes,1,opt,name=type,json=_type,proto3" json:"type,omitempty"` + Subject []*ResourceDescriptor `protobuf:"bytes,2,rep,name=subject,proto3" json:"subject,omitempty"` + PredicateType string `protobuf:"bytes,3,opt,name=predicate_type,json=predicateType,proto3" json:"predicate_type,omitempty"` + Predicate *structpb.Struct `protobuf:"bytes,4,opt,name=predicate,proto3" json:"predicate,omitempty"` +} + +func (x *Statement) Reset() { + *x = Statement{} + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Statement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Statement) ProtoMessage() {} + +func (x *Statement) ProtoReflect() protoreflect.Message { + mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Statement.ProtoReflect.Descriptor instead. +func (*Statement) Descriptor() ([]byte, []int) { + return file_in_toto_attestation_v1_statement_proto_rawDescGZIP(), []int{0} +} + +func (x *Statement) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Statement) GetSubject() []*ResourceDescriptor { + if x != nil { + return x.Subject + } + return nil +} + +func (x *Statement) GetPredicateType() string { + if x != nil { + return x.PredicateType + } + return "" +} + +func (x *Statement) GetPredicate() *structpb.Struct { + if x != nil { + return x.Predicate + } + return nil +} + +var File_in_toto_attestation_v1_statement_proto protoreflect.FileDescriptor + +var file_in_toto_attestation_v1_statement_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, + 0x6f, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x30, + 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xc4, 0x01, 0x0a, 0x09, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x13, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x44, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x69, 0x6e, 0x5f, 0x74, 0x6f, 0x74, 0x6f, 0x5f, 0x61, + 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 
0x72, + 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x35, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x09, 0x70, 0x72, + 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x47, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x74, 0x74, 0x65, + 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x6e, 0x2d, 0x74, 0x6f, 0x74, 0x6f, 0x2f, 0x61, + 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_in_toto_attestation_v1_statement_proto_rawDescOnce sync.Once + file_in_toto_attestation_v1_statement_proto_rawDescData = file_in_toto_attestation_v1_statement_proto_rawDesc +) + +func file_in_toto_attestation_v1_statement_proto_rawDescGZIP() []byte { + file_in_toto_attestation_v1_statement_proto_rawDescOnce.Do(func() { + file_in_toto_attestation_v1_statement_proto_rawDescData = protoimpl.X.CompressGZIP(file_in_toto_attestation_v1_statement_proto_rawDescData) + }) + return file_in_toto_attestation_v1_statement_proto_rawDescData +} + +var file_in_toto_attestation_v1_statement_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_in_toto_attestation_v1_statement_proto_goTypes = []interface{}{ + (*Statement)(nil), // 0: in_toto_attestation.v1.Statement + (*ResourceDescriptor)(nil), // 1: in_toto_attestation.v1.ResourceDescriptor + (*structpb.Struct)(nil), // 2: google.protobuf.Struct +} +var file_in_toto_attestation_v1_statement_proto_depIdxs = []int32{ + 1, // 0: in_toto_attestation.v1.Statement.subject:type_name -> in_toto_attestation.v1.ResourceDescriptor + 2, // 1: in_toto_attestation.v1.Statement.predicate:type_name -> google.protobuf.Struct + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_in_toto_attestation_v1_statement_proto_init() } +func file_in_toto_attestation_v1_statement_proto_init() { + if File_in_toto_attestation_v1_statement_proto != nil { + return + } + file_in_toto_attestation_v1_resource_descriptor_proto_init() + if !protoimpl.UnsafeEnabled { + file_in_toto_attestation_v1_statement_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Statement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_in_toto_attestation_v1_statement_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_in_toto_attestation_v1_statement_proto_goTypes, + DependencyIndexes: 
file_in_toto_attestation_v1_statement_proto_depIdxs, + MessageInfos: file_in_toto_attestation_v1_statement_proto_msgTypes, + }.Build() + File_in_toto_attestation_v1_statement_proto = out.File + file_in_toto_attestation_v1_statement_proto_rawDesc = nil + file_in_toto_attestation_v1_statement_proto_goTypes = nil + file_in_toto_attestation_v1_statement_proto_depIdxs = nil +} diff --git a/vendor/github.com/letsencrypt/boulder/core/objects.go b/vendor/github.com/letsencrypt/boulder/core/objects.go index 64df4a8d8c..c01f551abd 100644 --- a/vendor/github.com/letsencrypt/boulder/core/objects.go +++ b/vendor/github.com/letsencrypt/boulder/core/objects.go @@ -157,58 +157,44 @@ type ValidationRecord struct { UsedRSAKEX bool `json:"-"` } -func looksLikeKeyAuthorization(str string) error { - parts := strings.Split(str, ".") - if len(parts) != 2 { - return fmt.Errorf("Invalid key authorization: does not look like a key authorization") - } else if !LooksLikeAToken(parts[0]) { - return fmt.Errorf("Invalid key authorization: malformed token") - } else if !LooksLikeAToken(parts[1]) { - // Thumbprints have the same syntax as tokens in boulder - // Both are base64-encoded and 32 octets - return fmt.Errorf("Invalid key authorization: malformed key thumbprint") - } - return nil -} - // Challenge is an aggregate of all data needed for any challenges. // // Rather than define individual types for different types of // challenge, we just throw all the elements into one bucket, // together with the common metadata elements. type Challenge struct { - // The type of challenge + // Type is the type of challenge encoded in this object. Type AcmeChallenge `json:"type"` - // The status of this challenge - Status AcmeStatus `json:"status,omitempty"` + // URL is the URL to which a response can be posted. Required for all types. + URL string `json:"url,omitempty"` - // Contains the error that occurred during challenge validation, if any - Error *probs.ProblemDetails `json:"error,omitempty"` + // Status is the status of this challenge. Required for all types. + Status AcmeStatus `json:"status,omitempty"` - // A URI to which a response can be POSTed - URI string `json:"uri,omitempty"` + // Validated is the time at which the server validated the challenge. Required + // if status is valid. + Validated *time.Time `json:"validated,omitempty"` - // For the V2 API the "URI" field is deprecated in favour of URL. - URL string `json:"url,omitempty"` + // Error contains the error that occurred during challenge validation, if any. + // If set, the Status must be "invalid". + Error *probs.ProblemDetails `json:"error,omitempty"` - // Used by http-01, tls-sni-01, tls-alpn-01 and dns-01 challenges + // Token is a random value that uniquely identifies the challenge. It is used + // by all current challenges (http-01, tls-alpn-01, and dns-01). Token string `json:"token,omitempty"` - // The expected KeyAuthorization for validation of the challenge. Populated by - // the RA prior to passing the challenge to the VA. For legacy reasons this - // field is called "ProvidedKeyAuthorization" because it was initially set by - // the content of the challenge update POST from the client. It is no longer - // set that way and should be renamed to "KeyAuthorization". - // TODO(@cpu): Rename `ProvidedKeyAuthorization` to `KeyAuthorization`. + // ProvidedKeyAuthorization used to carry the expected key authorization from + // the RA to the VA. However, since this field is never presented to the user + // via the ACME API, it should not be on this type. 
+ // + // Deprecated: use vapb.PerformValidationRequest.ExpectedKeyAuthorization instead. + // TODO(#7514): Remove this. ProvidedKeyAuthorization string `json:"keyAuthorization,omitempty"` // Contains information about URLs used or redirected to and IPs resolved and // used ValidationRecord []ValidationRecord `json:"validationRecord,omitempty"` - // The time at which the server validated the challenge. Required by - // RFC8555 if status is valid. - Validated *time.Time `json:"validated,omitempty"` } // ExpectedKeyAuthorization computes the expected KeyAuthorization value for @@ -273,43 +259,18 @@ func (ch Challenge) RecordsSane() bool { return true } -// CheckConsistencyForClientOffer checks the fields of a challenge object before it is -// given to the client. -func (ch Challenge) CheckConsistencyForClientOffer() error { - err := ch.checkConsistency() - if err != nil { - return err - } - - // Before completion, the key authorization field should be empty - if ch.ProvidedKeyAuthorization != "" { - return fmt.Errorf("A response to this challenge was already submitted.") - } - return nil -} - -// CheckConsistencyForValidation checks the fields of a challenge object before it is -// given to the VA. -func (ch Challenge) CheckConsistencyForValidation() error { - err := ch.checkConsistency() - if err != nil { - return err - } - - // If the challenge is completed, then there should be a key authorization - return looksLikeKeyAuthorization(ch.ProvidedKeyAuthorization) -} - -// checkConsistency checks the sanity of a challenge object before issued to the client. -func (ch Challenge) checkConsistency() error { +// CheckPending ensures that a challenge object is pending and has a token. +// This is used before offering the challenge to the client, and before actually +// validating a challenge. +func (ch Challenge) CheckPending() error { if ch.Status != StatusPending { - return fmt.Errorf("The challenge is not pending.") + return fmt.Errorf("challenge is not pending") } - // There always needs to be a token - if !LooksLikeAToken(ch.Token) { - return fmt.Errorf("The token is missing.") + if !looksLikeAToken(ch.Token) { + return fmt.Errorf("token is missing or malformed") } + return nil } diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go index 31f6d2fcfa..641521f169 100644 --- a/vendor/github.com/letsencrypt/boulder/core/util.go +++ b/vendor/github.com/letsencrypt/boulder/core/util.go @@ -76,9 +76,9 @@ func NewToken() string { var tokenFormat = regexp.MustCompile(`^[\w-]{43}$`) -// LooksLikeAToken checks whether a string represents a 32-octet value in +// looksLikeAToken checks whether a string represents a 32-octet value in // the URL-safe base64 alphabet. -func LooksLikeAToken(token string) bool { +func looksLikeAToken(token string) bool { return tokenFormat.MatchString(token) } diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go index 087a018123..04a075d35b 100644 --- a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go +++ b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go @@ -39,6 +39,9 @@ var ( ) type Config struct { + // AllowedKeys enables or disables specific key algorithms and sizes. If + // nil, defaults to just those keys allowed by the Let's Encrypt CPS. + AllowedKeys *AllowedKeys // WeakKeyFile is the path to a JSON file containing truncated modulus hashes // of known weak RSA keys. 
If this config value is empty, then RSA modulus
+	// hash checking will be disabled.
@@ -54,6 +57,40 @@ type Config struct {
 	FermatRounds int
 }
 
+// AllowedKeys is a map of six specific key algorithm and size combinations to
+// booleans indicating whether keys of that type are considered good.
+type AllowedKeys struct {
+	// Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple
+	// of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes
+	// Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which
+	// have a known method to easily compute their private key, such as Debian Weak
+	// Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at
+	// common key sizes, so we restrict all issuance to those common key sizes.
+	RSA2048 bool
+	RSA3072 bool
+	RSA4096 bool
+	// Baseline Requirements, Section 6.1.5 requires that ECDSA keys be valid
+	// points on the NIST P-256, P-384, or P-521 elliptic curves.
+	ECDSAP256 bool
+	ECDSAP384 bool
+	ECDSAP521 bool
+}
+
+// LetsEncryptCPS encodes the five key algorithms and sizes allowed by the Let's
+// Encrypt CPS CV-SSL Subscriber Certificate Profile: RSA 2048, RSA 3072, RSA
+// 4096, ECDSA P-256, and ECDSA P-384.
+// https://github.com/letsencrypt/cp-cps/blob/main/CP-CPS.md#dv-ssl-subscriber-certificate
+// If this is ever changed, the CP/CPS MUST be changed first.
+func LetsEncryptCPS() AllowedKeys {
+	return AllowedKeys{
+		RSA2048:   true,
+		RSA3072:   true,
+		RSA4096:   true,
+		ECDSAP256: true,
+		ECDSAP384: true,
+	}
+}
+
 // ErrBadKey represents an error with a key. It is distinct from the various
 // ways in which an ACME request can have an erroneous key (BadPublicKeyError,
 // BadCSRError) because this library is used to check both JWS signing keys and
@@ -74,28 +111,29 @@ type BlockedKeyCheckFunc func(ctx context.Context, keyHash []byte) (bool, error)
 // KeyPolicy determines which types of key may be used with various boulder
 // operations.
 type KeyPolicy struct {
-	AllowRSA           bool // Whether RSA keys should be allowed.
-	AllowECDSANISTP256 bool // Whether ECDSA NISTP256 keys should be allowed.
-	AllowECDSANISTP384 bool // Whether ECDSA NISTP384 keys should be allowed.
-	weakRSAList        *WeakRSAKeys
-	blockedList        *blockedKeys
-	fermatRounds       int
-	blockedCheck       BlockedKeyCheckFunc
+	allowedKeys  AllowedKeys
+	weakRSAList  *WeakRSAKeys
+	blockedList  *blockedKeys
+	fermatRounds int
+	blockedCheck BlockedKeyCheckFunc
 }
 
-// NewKeyPolicy returns a KeyPolicy that allows RSA, ECDSA256 and ECDSA384.
-// weakKeyFile contains the path to a JSON file containing truncated modulus
-// hashes of known weak RSA keys. If this argument is empty RSA modulus hash
-// checking will be disabled. blockedKeyFile contains the path to a YAML file
-// containing Base64 encoded SHA256 hashes of pkix subject public keys that
-// should be blocked. If this argument is empty then no blocked key checking is
-// performed.
-func NewKeyPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) {
+// NewPolicy returns a key policy based on the given configuration, with sane
+// defaults. If the config's AllowedKeys is nil, the LetsEncryptCPS AllowedKeys
+// is used. If the config's WeakKeyFile or BlockedKeyFile paths are empty, those
+// checks are disabled. If the config's FermatRounds is 0, Fermat Factorization
+// is disabled.
+func NewPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) { + if config == nil { + config = &Config{} + } kp := KeyPolicy{ - AllowRSA: true, - AllowECDSANISTP256: true, - AllowECDSANISTP384: true, - blockedCheck: bkc, + blockedCheck: bkc, + } + if config.AllowedKeys == nil { + kp.allowedKeys = LetsEncryptCPS() + } else { + kp.allowedKeys = *config.AllowedKeys } if config.WeakKeyFile != "" { keyList, err := LoadWeakRSASuffixes(config.WeakKeyFile) @@ -264,44 +302,30 @@ func (policy *KeyPolicy) goodCurve(c elliptic.Curve) (err error) { // Simply use a whitelist for now. params := c.Params() switch { - case policy.AllowECDSANISTP256 && params == elliptic.P256().Params(): + case policy.allowedKeys.ECDSAP256 && params == elliptic.P256().Params(): + return nil + case policy.allowedKeys.ECDSAP384 && params == elliptic.P384().Params(): return nil - case policy.AllowECDSANISTP384 && params == elliptic.P384().Params(): + case policy.allowedKeys.ECDSAP521 && params == elliptic.P521().Params(): return nil default: return badKey("ECDSA curve %v not allowed", params.Name) } } -// Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple -// of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes -// Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which -// have a known method to easily compute their private key, such as Debian Weak -// Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at -// common key sizes, so we restrict all issuance to those common key sizes. -var acceptableRSAKeySizes = map[int]bool{ - 2048: true, - 3072: true, - 4096: true, -} - // GoodKeyRSA determines if a RSA pubkey meets our requirements -func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) { - if !policy.AllowRSA { - return badKey("RSA keys are not allowed") +func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) error { + modulus := key.N + + err := policy.goodRSABitLen(key) + if err != nil { + return err } + if policy.weakRSAList != nil && policy.weakRSAList.Known(key) { return badKey("key is on a known weak RSA key list") } - modulus := key.N - - // See comment on acceptableRSAKeySizes above. - modulusBitLen := modulus.BitLen() - if !acceptableRSAKeySizes[modulusBitLen] { - return badKey("key size not supported: %d", modulusBitLen) - } - // Rather than support arbitrary exponents, which significantly increases // the size of the key space we allow, we restrict E to the defacto standard // RSA exponent 65537. There is no specific standards document that specifies @@ -341,6 +365,21 @@ func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) { return nil } +func (policy *KeyPolicy) goodRSABitLen(key *rsa.PublicKey) error { + // See comment on AllowedKeys above. + modulusBitLen := key.N.BitLen() + switch { + case modulusBitLen == 2048 && policy.allowedKeys.RSA2048: + return nil + case modulusBitLen == 3072 && policy.allowedKeys.RSA3072: + return nil + case modulusBitLen == 4096 && policy.allowedKeys.RSA4096: + return nil + default: + return badKey("key size not supported: %d", modulusBitLen) + } +} + // Returns true iff integer i is divisible by any of the primes in smallPrimes. // // Short circuits; execution time is dependent on i. 
Do not use this on secret @@ -400,7 +439,7 @@ func checkPrimeFactorsTooClose(n *big.Int, rounds int) error { b2 := new(big.Int) b2.Mul(a, a).Sub(b2, n) - for i := 0; i < rounds; i++ { + for range rounds { // To see if b2 is a perfect square, we take its square root, square that, // and check to see if we got the same result back. bb.Sqrt(b2).Mul(bb, bb) diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/client.go b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/client.go index 72341b7068..12e58e503a 100644 --- a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/client.go +++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/client.go @@ -1,6 +1,7 @@ package acr import ( + "github.com/sirupsen/logrus" "time" ) @@ -12,14 +13,14 @@ type Credentials struct { ExpireTime time.Time } -func (c *Client) GetCredentials(serverURL string) (*Credentials, error) { +func (c *Client) GetCredentials(serverURL string, logger *logrus.Logger) (*Credentials, error) { registry, err := parseServerURL(serverURL) if err != nil { return nil, err } if registry.IsEE { - client, err := newEEClient(registry.Region) + client, err := newEEClient(registry.Region, logger) if err != nil { return nil, err } @@ -33,7 +34,7 @@ func (c *Client) GetCredentials(serverURL string) (*Credentials, error) { return client.getCredentials(registry.InstanceId) } - client, err := newPersonClient(registry.Region) + client, err := newPersonClient(registry.Region, logger) if err != nil { return nil, err } diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/ee.go b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/ee.go index d92095c82c..363e2a0b7f 100644 --- a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/ee.go +++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/ee.go @@ -2,6 +2,7 @@ package acr import ( "fmt" + "github.com/sirupsen/logrus" "time" cr2018 "github.com/alibabacloud-go/cr-20181201/client" @@ -14,8 +15,8 @@ type eeClient struct { client *cr2018.Client } -func newEEClient(region string) (*eeClient, error) { - cred, err := getOpenapiAuth() +func newEEClient(region string, logger *logrus.Logger) (*eeClient, error) { + cred, err := getOpenapiAuth(logger) if err != nil { return nil, err } @@ -46,11 +47,13 @@ func (c *eeClient) getInstanceId(instanceName string) (string, error) { return "", fmt.Errorf("get ACR EE instance id for name %q failed: %s", instanceName, resp.Body.String()) } instances := resp.Body.Instances - if len(instances) == 0 { - return "", fmt.Errorf("get ACR EE instance id for name %q failed: instance name is not found", instanceName) + for _, item := range instances { + if tea.StringValue(item.InstanceName) == instanceName { + return tea.StringValue(item.InstanceId), nil + } } - return tea.StringValue(instances[0].InstanceId), nil + return "", fmt.Errorf("get ACR EE instance id for name %q failed: instance name is not found", instanceName) } func (c *eeClient) getCredentials(instanceId string) (*Credentials, error) { @@ -73,7 +76,7 @@ func (c *eeClient) getCredentials(instanceId string) (*Credentials, error) { cred := &Credentials{ UserName: tea.StringValue(resp.Body.TempUsername), Password: tea.StringValue(resp.Body.AuthorizationToken), - ExpireTime: expTime, + ExpireTime: expTime.Add(-time.Minute), } return cred, nil } diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/openapiauth.go 
b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/openapiauth.go index 696322342d..346b74d5e6 100644 --- a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/openapiauth.go +++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/openapiauth.go @@ -1,17 +1,26 @@ package acr import ( + "github.com/sirupsen/logrus" "os" "path/filepath" + "time" - "github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper" + "github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider" "github.com/aliyun/credentials-go/credentials" - "github.com/mozillazg/docker-credential-acr-helper/pkg/version" ) var defaultProfilePath = filepath.Join("~", ".alibabacloud", "credentials") -func getOpenapiAuth() (credentials.Credential, error) { +type credentialForV2SDK struct { + *provider.CredentialForV2SDK +} + +type logWrapper struct { + logger *logrus.Logger +} + +func getOpenapiAuth(logger *logrus.Logger) (credentials.Credential, error) { profilePath := defaultProfilePath if os.Getenv(credentials.ENVCredentialFile) != "" { profilePath = os.Getenv(credentials.ENVCredentialFile) @@ -20,18 +29,57 @@ func getOpenapiAuth() (credentials.Credential, error) { if err == nil { if _, err := os.Stat(path); err == nil { _ = os.Setenv(credentials.ENVCredentialFile, path) + return credentials.NewCredential(nil) } } - var conf *credentials.Config - if helper.HaveOidcCredentialRequiredEnv() { - return helper.NewOidcCredential(version.ProjectName) + cp := provider.NewDefaultChainProvider(provider.DefaultChainProviderOptions{ + Logger: &logWrapper{logger: logger}, + }) + cred := &credentialForV2SDK{ + CredentialForV2SDK: provider.NewCredentialForV2SDK(cp, provider.CredentialForV2SDKOptions{ + CredentialRetrievalTimeout: time.Second * 30, + Logger: &logWrapper{logger: logger}, + }), } - cred, err := credentials.NewCredential(conf) return cred, err } +func (c *credentialForV2SDK) GetCredential() (*credentials.CredentialModel, error) { + ak, err := c.GetAccessKeyId() + if err != nil { + return nil, err + } + sk, err := c.GetAccessKeySecret() + if err != nil { + return nil, err + } + token, err := c.GetSecurityToken() + if err != nil { + return nil, err + } + return &credentials.CredentialModel{ + AccessKeyId: ak, + AccessKeySecret: sk, + SecurityToken: token, + BearerToken: nil, + Type: c.GetType(), + }, err +} + +func (l *logWrapper) Info(msg string) { + l.logger.Debug(msg) +} + +func (l *logWrapper) Debug(msg string) { + l.logger.Debug(msg) +} + +func (l *logWrapper) Error(err error, msg string) { + l.logger.WithError(err).Error(msg) +} + func expandPath(path string) (string, error) { if len(path) > 0 && path[0] == '~' { home, err := os.UserHomeDir() diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/person.go b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/person.go index fdf05e1d72..3106245bdc 100644 --- a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/person.go +++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/person.go @@ -2,6 +2,7 @@ package acr import ( "fmt" + "github.com/sirupsen/logrus" "time" cr2016 "github.com/alibabacloud-go/cr-20160607/client" @@ -15,8 +16,8 @@ type personClient struct { client *cr2016.Client } -func newPersonClient(region string) (*personClient, error) { - cred, err := getOpenapiAuth() +func newPersonClient(region string, logger *logrus.Logger) (*personClient, error) { + cred, err := getOpenapiAuth(logger) if err != nil { return nil, err } 
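The hunks above thread a `*logrus.Logger` from the credential helper down through `GetCredentials`, `newEEClient`/`newPersonClient`, and `getOpenapiAuth`, where `logWrapper` adapts it to the ack-ram-tool provider's logger interface. For reference, here is a minimal, hypothetical caller sketch of the new signature; only the `GetCredentials(serverURL, logger)` shape comes from the diff, while the zero-value `acr.Client` and the sample registry URL are assumptions for illustration:

```go
package main

import (
	"fmt"

	"github.com/mozillazg/docker-credential-acr-helper/pkg/acr"
	"github.com/sirupsen/logrus"
)

func main() {
	// The logWrapper shown above forwards the provider chain's Info and Debug
	// messages to logrus at Debug level, so raise verbosity to see the
	// credential-resolution steps.
	logger := logrus.New()
	logger.SetLevel(logrus.DebugLevel)

	client := &acr.Client{} // assumed zero-value construction, as in credhelper
	cred, err := client.GetCredentials("example.cn-hangzhou.cr.aliyuncs.com", logger)
	if err != nil {
		logger.WithError(err).Fatal("resolving ACR credentials failed")
	}

	// Per the ee.go hunk above, ExpireTime is now reported one minute early,
	// giving callers a refresh buffer before the token actually lapses.
	fmt.Println(cred.UserName, cred.ExpireTime)
}
```

Passing the logger explicitly, rather than relying on a package-level logger, keeps the provider chain's output attached to whatever writer the helper was configured with (see `WithLoggerOut` in the credhelper diff below).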
diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/credhelper/helper.go b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/credhelper/helper.go index 5bf772ea30..c4f83bf044 100644 --- a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/credhelper/helper.go +++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/credhelper/helper.go @@ -34,7 +34,7 @@ func (a *ACRHelper) WithLoggerOut(w io.Writer) *ACRHelper { func (a *ACRHelper) Get(serverURL string) (string, string, error) { // TODO: add cache - cred, err := a.client.GetCredentials(serverURL) + cred, err := a.client.GetCredentials(serverURL, a.logger) if err != nil { a.logger.WithField("name", version.ProjectName). WithField("serverURL", serverURL). diff --git a/vendor/github.com/mpvl/unique/.gitignore b/vendor/github.com/mpvl/unique/.gitignore deleted file mode 100644 index daf913b1b3..0000000000 --- a/vendor/github.com/mpvl/unique/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/mpvl/unique/unique.go b/vendor/github.com/mpvl/unique/unique.go deleted file mode 100644 index d98a54e681..0000000000 --- a/vendor/github.com/mpvl/unique/unique.go +++ /dev/null @@ -1,98 +0,0 @@ -// Package unique provides primitives for finding unique elements of types that -// implement sort.Interface. -package unique - -import "sort" - -// Types that implement unique.Interface can have duplicate elements removed by -// the functionality in this package. -type Interface interface { - sort.Interface - - // Truncate reduces the length to the first n elements. - Truncate(n int) -} - -// Unique removes duplicate elements from data. It assumes sort.IsSorted(data). -func Unique(data Interface) { - data.Truncate(ToFront(data)) -} - -// ToFront reports the number of unique elements of data which it moves to the -// first n positions. It assumes sort.IsSorted(data). -func ToFront(data sort.Interface) (n int) { - n = data.Len() - if n == 0 { - return - } - k := 0 - for i := 1; i < n; i++ { - if data.Less(k, i) { - k++ - data.Swap(k, i) - } - } - return k + 1 -} - -// Sort sorts and removes duplicate entries from data. -func Sort(data Interface) { - sort.Sort(data) - Unique(data) -} - -// IsUniqued reports whether the elements in data are sorted and unique. -func IsUniqued(data sort.Interface) bool { - n := data.Len() - for i := n - 1; i > 0; i-- { - if !data.Less(i-1, i) { - return false - } - } - return true -} - -// Float64Slice attaches the methods of Interface to []float64. -type Float64Slice struct{ P *[]float64 } - -func (p Float64Slice) Len() int { return len(*p.P) } -func (p Float64Slice) Swap(i, j int) { (*p.P)[i], (*p.P)[j] = (*p.P)[j], (*p.P)[i] } -func (p Float64Slice) Less(i, j int) bool { return (*p.P)[i] < (*p.P)[j] } -func (p Float64Slice) Truncate(n int) { *p.P = (*p.P)[:n] } - -// Float64s removes duplicate elements from a sorted slice of float64s. -func Float64s(a *[]float64) { Unique(Float64Slice{a}) } - -// Float64sAreUnique tests whether a slice of float64s is sorted and its -// elements are unique. -func Float64sAreUnique(a []float64) bool { return IsUniqued(sort.Float64Slice(a)) } - -// IntSlice attaches the methods of Interface to []int. 
-type IntSlice struct{ P *[]int }
-
-func (p IntSlice) Len() int           { return len(*p.P) }
-func (p IntSlice) Swap(i, j int)      { (*p.P)[i], (*p.P)[j] = (*p.P)[j], (*p.P)[i] }
-func (p IntSlice) Less(i, j int) bool { return (*p.P)[i] < (*p.P)[j] }
-func (p IntSlice) Truncate(n int)     { *p.P = (*p.P)[:n] }
-
-// Ints removes duplicate elements from a sorted slice of ints.
-func Ints(a *[]int) { Unique(IntSlice{a}) }
-
-// IntsAreUnique tests whether a slice of ints is sorted and its elements are
-// unique.
-func IntsAreUnique(a []int) bool { return IsUniqued(sort.IntSlice(a)) }
-
-// StringSlice attaches the methods of Interface to []string.
-type StringSlice struct{ P *[]string }
-
-func (p StringSlice) Len() int           { return len(*p.P) }
-func (p StringSlice) Swap(i, j int)      { (*p.P)[i], (*p.P)[j] = (*p.P)[j], (*p.P)[i] }
-func (p StringSlice) Less(i, j int) bool { return (*p.P)[i] < (*p.P)[j] }
-func (p StringSlice) Truncate(n int)     { *p.P = (*p.P)[:n] }
-
-// Strings removes duplicate elements from a sorted slice of strings.
-func Strings(a *[]string) { Unique(StringSlice{a}) }
-
-// StringsAreUnique tests whether a slice of strings is sorted and its elements
-// are unique.
-func StringsAreUnique(a []string) bool { return IsUniqued(sort.StringSlice(a)) }
diff --git a/vendor/github.com/oleiade/reflections/.gitignore b/vendor/github.com/oleiade/reflections/.gitignore
index 00268614f0..20682a2f7f 100644
--- a/vendor/github.com/oleiade/reflections/.gitignore
+++ b/vendor/github.com/oleiade/reflections/.gitignore
@@ -20,3 +20,5 @@ _cgo_export.*
 _testmain.go
 
 *.exe
+
+styles/
\ No newline at end of file
diff --git a/vendor/github.com/oleiade/reflections/.golangci.yml b/vendor/github.com/oleiade/reflections/.golangci.yml
new file mode 100644
index 0000000000..9e8e5fa68e
--- /dev/null
+++ b/vendor/github.com/oleiade/reflections/.golangci.yml
@@ -0,0 +1,76 @@
+# v1.47.2
+# Please don't remove the first line. It is used in CI to determine the golangci version
+run:
+  deadline: 5m
+
+issues:
+  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
+  max-issues-per-linter: 0
+  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
+  max-same-issues: 0
+
+  # We want to try and improve the comments in the k6 codebase, so individual
+  # non-golint items from the default exclusion list will gradually be added
+  # to the exclude-rules below
+  exclude-use-default: false
+
+  exclude-rules:
+    # Exclude duplicate code and function length and complexity checking in test
+    # files (due to common repeats and long functions in test code)
+    - path: _(test|gen)\.go
+      linters:
+        - cyclop
+        - dupl
+        - gocognit
+        - funlen
+        - lll
+    - linters:
+        - paralleltest # false positive: https://github.com/kunwardeep/paralleltest/issues/8.
+ text: "does not use range value in test Run" + +linters-settings: + exhaustive: + default-signifies-exhaustive: true + govet: + enable-all: true + disable: + - fieldalignment + cyclop: + max-complexity: 25 + dupl: + threshold: 150 + goconst: + min-len: 10 + min-occurrences: 4 + funlen: + lines: 80 + statements: 60 + +linters: + enable-all: true + disable: + - depguard + - nlreturn + - gci + - gochecknoinits + - godot + - godox + - gomodguard + - testpackage + - wsl + - gomnd + - err113 + - goheader + - thelper + - gocyclo # replaced by cyclop since it also calculates the package complexity + - wrapcheck # a little bit too much for k6, maybe after https://github.com/tomarrell/wrapcheck/issues/2 is fixed + - varnamelen + - ireturn + - tagliatelle + - exhaustruct + - execinquery + - maintidx + - grouper + - decorder + - nonamedreturns + fast: false \ No newline at end of file diff --git a/vendor/github.com/oleiade/reflections/.vale.ini b/vendor/github.com/oleiade/reflections/.vale.ini new file mode 100644 index 0000000000..133bc22a56 --- /dev/null +++ b/vendor/github.com/oleiade/reflections/.vale.ini @@ -0,0 +1,9 @@ +StylesPath = styles + +MinAlertLevel = suggestion +Vocab = Lane + +Packages = Google, proselint, write-good + +[*] +BasedOnStyles = Vale, Google, proselint, write-good diff --git a/vendor/github.com/oleiade/reflections/README.md b/vendor/github.com/oleiade/reflections/README.md index 8f6b38d705..b7f264c06a 100644 --- a/vendor/github.com/oleiade/reflections/README.md +++ b/vendor/github.com/oleiade/reflections/README.md @@ -1,235 +1,256 @@ -Reflections -=========== +# Reflections -Package reflections provides high level abstractions above the golang reflect library. +[![MIT License](https://img.shields.io/badge/License-MIT-green.svg)](https://choosealicense.com/licenses/mit/) +[![Build Status](https://github.com/oleiade/reflections/actions/workflows/go.yml/badge.svg)](https://github.com/oleiade/reflections/actions/workflows/go.yml) +[![Go Documentation](https://pkg.go.dev/badge/github.com/oleiade/reflections)](https://pkg.go.dev/github.com/oleiade/reflections) +[![Go Report Card](https://goreportcard.com/badge/github.com/oleiade/reflections)](https://goreportcard.com/report/github.com/oleiade/reflections) +![Go Version](https://img.shields.io/github/go-mod/go-version/oleiade/reflections) -Reflect library is very low-level and can be quite complex when it comes to do simple things like accessing a structure field value, a field tag... +The `reflections` library provides high-level abstractions on top of the go language standard `reflect` library. -The purpose of reflections package is to make developers life easier when it comes to introspect structures at runtime. -Its API is inspired from python language (getattr, setattr, hasattr...) and provides a simplified access to structure fields and tags. +In practice, the `reflect` library's API proves somewhat low-level and un-intuitive. Using it can turn out pretty complex, daunting, and scary, especially when doing simple things like accessing a structure field value, a field tag, etc. -*Reflections is an open source library under the MIT license. Any hackers are welcome to supply ideas, features requests, patches, pull requests and so on: see [Contribute]()* +The `reflections` package aims to make developers' life easier when it comes to introspect struct values at runtime. 
Its API takes inspiration from the Python language's `getattr`, `setattr`, and `hasattr` set of methods and provides simplified access to structure fields and tags.
 
-*Reflections is an open source library under the MIT license. Any hackers are welcome to supply ideas, features requests, patches, pull requests and so on: see [Contribute]()*
+- [Reflections](#reflections)
+  - [Documentation](#documentation)
+  - [Usage](#usage)
+    - [`GetField`](#getfield)
+    - [`GetFieldKind`](#getfieldkind)
+    - [`GetFieldType`](#getfieldtype)
+    - [`GetFieldTag`](#getfieldtag)
+    - [`HasField`](#hasfield)
+    - [`Fields`](#fields)
+    - [`Items`](#items)
+    - [`Tags`](#tags)
+    - [`GetFieldNameByTagValue`](#getfieldnamebytagvalue)
+  - [Important notes](#important-notes)
+  - [Contribute](#contribute)
 
-#### Documentation
+## Documentation
 
-Documentation is available at http://godoc.org/github.com/oleiade/reflections
+Head to the [documentation](https://pkg.go.dev/github.com/oleiade/reflections) to get more details on the library's API.
 
 ## Usage
 
-#### Accessing structure fields
-
-##### GetField
+### `GetField`
 
-*GetField* returns the content of a structure field. It can be very usefull when
-you'd wanna iterate over a struct specific fields values for example. You can whether
-provide *GetField* a structure or a pointer to structure as first argument.
+`GetField` returns the content of a structure field. For example, it proves beneficial when you want to iterate over struct-specific field values. You can provide `GetField` a structure or a pointer to a struct as the first argument.
 
 ```go
-	s := MyStruct {
-		FirstField: "first value",
-		SecondField: 2,
-		ThirdField: "third value",
-	}
-
-	fieldsToExtract := []string{"FirstField", "ThirdField"}
-
-	for _, fieldName := range fieldsToExtract {
-		value, err := reflections.GetField(s, fieldName)
-		DoWhatEverWithThatValue(value)
-	}
+s := MyStruct {
+	FirstField: "first value",
+	SecondField: 2,
+	ThirdField: "third value",
+}
+
+fieldsToExtract := []string{"FirstField", "ThirdField"}
+
+for _, fieldName := range fieldsToExtract {
+	value, err := reflections.GetField(s, fieldName)
+	DoWhatEverWithThatValue(value)
+}
 ```
 
-##### GetFieldKind
+### `GetFieldKind`
 
-*GetFieldKind* returns the [reflect.Kind](http://golang.org/src/pkg/reflect/type.go?s=6916:6930#L189) of a structure field. It can be used to operate type assertion over a structure fields at runtime. You can whether provide *GetFieldKind* a structure or a pointer to structure as first argument.
+`GetFieldKind` returns the [`reflect.Kind`](http://golang.org/src/pkg/reflect/type.go?s=6916:6930#L189) of a structure field. You can use it to operate type assertion over a structure field at runtime. You can provide `GetFieldKind` a structure or a pointer to structure as the first argument.
 
 ```go
-	s := MyStruct{
-		FirstField: "first value",
-		SecondField: 2,
-		ThirdField: "third value",
-	}
-
-	var firstFieldKind reflect.String
-	var secondFieldKind reflect.Int
-	var err error
-
-	firstFieldKind, err = GetFieldKind(s, "FirstField")
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	secondFieldKind, err = GetFieldKind(s, "SecondField")
-	if err != nil {
-		log.Fatal(err)
-	}
+s := MyStruct{
+	FirstField: "first value",
+	SecondField: 2,
+	ThirdField: "third value",
+}
+
+var firstFieldKind reflect.Kind
+var secondFieldKind reflect.Kind
+var err error
+
+firstFieldKind, err = reflections.GetFieldKind(s, "FirstField")
+if err != nil {
+	log.Fatal(err)
+}
+
+secondFieldKind, err = reflections.GetFieldKind(s, "SecondField")
+if err != nil {
+	log.Fatal(err)
+}
 ```
 
-##### GetFieldType
+### `GetFieldType`
 
-*GetFieldType* returns the string literal of a structure field type. It can be used to operate type assertion over a structure fields at runtime. You can whether provide *GetFieldType* a structure or a pointer to structure as first argument.
+`GetFieldType` returns the string literal of a structure field type. You can use it to operate type assertion over a structure field at runtime. You can provide `GetFieldType` a structure or a pointer to structure as the first argument.
 
 ```go
-	s := MyStruct{
-		FirstField: "first value",
-		SecondField: 2,
-		ThirdField: "third value",
-	}
-
-	var firstFieldKind string
-	var secondFieldKind string
-	var err error
-
-	firstFieldKind, err = GetFieldType(s, "FirstField")
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	secondFieldKind, err = GetFieldType(s, "SecondField")
-	if err != nil {
-		log.Fatal(err)
-	}
+s := MyStruct{
+	FirstField: "first value",
+	SecondField: 2,
+	ThirdField: "third value",
+}
+
+var firstFieldType string
+var secondFieldType string
+var err error
+
+firstFieldType, err = reflections.GetFieldType(s, "FirstField")
+if err != nil {
+	log.Fatal(err)
+}
+
+secondFieldType, err = reflections.GetFieldType(s, "SecondField")
+if err != nil {
+	log.Fatal(err)
+}
 ```
 
-##### GetFieldTag
-
-*GetFieldTag* extracts a specific structure field tag. You can whether provide *GetFieldTag* a structure or a pointer to structure as first argument.
+### `GetFieldTag`
 
+`GetFieldTag` extracts a specific structure field tag. You can provide `GetFieldTag` a structure or a pointer to structure as the first argument.
 
 ```go
-	s := MyStruct{}
-
-	tag, err := reflections.GetFieldTag(s, "FirstField", "matched")
-	if err != nil {
-		log.Fatal(err)
-	}
-	fmt.Println(tag)
-
-	tag, err = reflections.GetFieldTag(s, "ThirdField", "unmatched")
-	if err != nil {
-		log.Fatal(err)
-	}
-	fmt.Println(tag)
+s := MyStruct{}
+
+tag, err := reflections.GetFieldTag(s, "FirstField", "matched")
+if err != nil {
+	log.Fatal(err)
+}
+fmt.Println(tag)
+
+tag, err = reflections.GetFieldTag(s, "ThirdField", "unmatched")
+if err != nil {
+	log.Fatal(err)
+}
+fmt.Println(tag)
 ```
 
-##### HasField
-
-*HasField* asserts a field exists through structure. You can whether provide *HasField* a structure or a pointer to structure as first argument.
+### `HasField`
 
+`HasField` asserts a field exists through the structure. You can provide `HasField` a struct or a pointer to a struct as the first argument.
 
 ```go
-	s := MyStruct {
-		FirstField: "first value",
-		SecondField: 2,
-		ThirdField: "third value",
-	}
+s := MyStruct {
+	FirstField: "first value",
+	SecondField: 2,
+	ThirdField: "third value",
+}
 
-	// has == true
-	has, _ := reflections.HasField(s, "FirstField")
+// has == true
+has, _ := reflections.HasField(s, "FirstField")
 
-	// has == false
-	has, _ := reflections.HasField(s, "FourthField")
+// has == false
+has, _ = reflections.HasField(s, "FourthField")
 ```
 
-##### Fields
-
-*Fields* returns the list of a structure field names, so you can access or modify them later on. You can whether provide *Fields* a structure or a pointer to structure as first argument.
+### `Fields`
 
+`Fields` returns the list of structure field names so that you can access or update them later. You can provide `Fields` with a struct or a pointer to a struct as the first argument.
 
 ```go
-	s := MyStruct {
-		FirstField: "first value",
-		SecondField: 2,
-		ThirdField: "third value",
-	}
-
-	var fields []string
-
-	// Fields will list every structure exportable fields.
- // Here, it's content would be equal to: - // []string{"FirstField", "SecondField", "ThirdField"} - fields, _ = reflections.Fields(s) +s := MyStruct { + FirstField: "first value", + SecondField: 2, + ThirdField: "third value", +} + +var fields []string + +// Fields will list every structure exportable fields. +// Here, it's content would be equal to: +// []string{"FirstField", "SecondField", "ThirdField"} +fields, _ = reflections.Fields(s) ``` -##### Items - -*Items* returns the structure's field name to values map. You can whether provide *Items* a structure or a pointer to structure as first argument. +### `Items` +`Items` returns the structure's field name to the values map. You can provide `Items` with a struct or a pointer to structure as the first argument. ```go - s := MyStruct { - FirstField: "first value", - SecondField: 2, - ThirdField: "third value", - } +s := MyStruct { + FirstField: "first value", + SecondField: 2, + ThirdField: "third value", +} - var structItems map[string]interface{} +var structItems map[string]interface{} - // Items will return a field name to - // field value map - structItems, _ = reflections.Items(s) +// Items will return a field name to +// field value map +structItems, _ = reflections.Items(s) ``` -##### Tags - -*Tags* returns the structure's fields tag with the provided key. You can whether provide *Tags* a structure or a pointer to structure as first argument. +### `Tags` +`Tags` returns the structure's fields tag with the provided key. You can provide `Tags` with a struct or a pointer to a struct as the first argument. ```go - s := MyStruct { - FirstField: "first value", `matched:"first tag"` - SecondField: 2, `matched:"second tag"` - ThirdField: "third value", `unmatched:"third tag"` - } - - var structTags map[string]string - - // Tags will return a field name to tag content - // map. Nota that only field with the tag name - // you've provided which will be matched. - // Here structTags will contain: - // { - // "FirstField": "first tag", - // "SecondField": "second tag", - // } - structTags, _ = reflections.Tags(s, "matched") +s := MyStruct { + FirstField: "first value", `matched:"first tag"` + SecondField: 2, `matched:"second tag"` + ThirdField: "third value", `unmatched:"third tag"` +} + +var structTags map[string]string + +// Tags will return a field name to tag content +// map. N.B that only field with the tag name +// you've provided will be matched. +// Here structTags will contain: +// { +// "FirstField": "first tag", +// "SecondField": "second tag", +// } +structTags, _ = reflections.Tags(s, "matched") ``` -#### Set a structure field value +### `SetField` -*SetField* update's a structure's field value with the one provided. Note that -unexported fields cannot be set, and that field type and value type have to match. +`SetField` updates a structure's field value with the one provided. Note that you can't set un-exported fields and that the field and value types must match. ```go - s := MyStruct { - FirstField: "first value", - SecondField: 2, - ThirdField: "third value", - } - - // In order to be able to set the structure's values, - // a pointer to it has to be passed to it. 
- _ := reflections.SetField(&s, "FirstField", "new value")
-
- // If you try to set a field's value using the wrong type,
- // an error will be returned
- err := reflection.SetField(&s, "FirstField", 123) // err != nil
+s := MyStruct {
+ FirstField: "first value",
+ SecondField: 2,
+ ThirdField: "third value",
+}
+
+// To be able to set the structure's values,
+// it must be passed as a pointer.
+_ = reflections.SetField(&s, "FirstField", "new value")
+
+// If you try to set a field's value using the wrong type,
+// an error will be returned
+err := reflections.SetField(&s, "FirstField", 123) // err != nil
```

-## Important notes
+### `GetFieldNameByTagValue`

-* **unexported fields** cannot be accessed or set using reflections library: the golang reflect library intentionaly prohibits unexported fields values access or modifications.
+`GetFieldNameByTagValue` looks up a field with a matching `{tagKey}:"{tagValue}"` tag in the provided `obj` item.
+If `obj` is neither a `struct` nor a `pointer` to one, or if it has no field tagged with the `tagKey` and the matching `tagValue`, this function returns an error.
+```go
+s := MyStruct {
+ FirstField: "first value", `matched:"first tag"`
+ SecondField: 2, `matched:"second tag"`
+ ThirdField: "third value", `unmatched:"third tag"`
+}
+
+// Getting the field name from an external source such as JSON would be a headache to map manually,
+// so we look it up directly from the struct tag.
+// returns fieldName = "FirstField"
+fieldName, _ := reflections.GetFieldNameByTagValue(s, "matched", "first tag")
+
+// later we can call GetField(s, fieldName)
+```

-## Contribute
-* Check for open issues or open a fresh issue to start a discussion around a feature idea or a bug.
-* Fork `the repository`_ on GitHub to start making your changes to the **master** branch (or branch off of it).
-* Write tests which shows that the bug was fixed or that the feature works as expected.
-* Send a pull request and bug the maintainer until it gets merged and published. :) Make sure to add yourself to AUTHORS_.
+## Important notes

-[the repository](http://github.com/oleiade/reflections)
-[AUTHORS](https://github.com/oleiade/reflections/blob/master/AUTHORS.md)
+- **Unexported fields** can't be accessed or set using the `reflections` library. The Go standard `reflect` library intentionally prohibits reading or modifying unexported field values.

+## Contribute

-[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/oleiade/reflections/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
+- Check for open issues or open a new issue to start a discussion around a feature idea or a bug.
+- Fork [the repository](http://github.com/oleiade/reflections) on GitHub to start making your changes to the **master** branch, or branch off of it.
+- Write tests showing that the bug was fixed or the feature works as expected.
+- Send a pull request and bug the maintainer until it gets merged and published. :) Make sure to add yourself to [`AUTHORS`](https://github.com/oleiade/reflections/blob/master/AUTHORS.md).
diff --git a/vendor/github.com/oleiade/reflections/reflections.go b/vendor/github.com/oleiade/reflections/reflections.go
index b25b423bb1..90c0ac298d 100644
--- a/vendor/github.com/oleiade/reflections/reflections.go
+++ b/vendor/github.com/oleiade/reflections/reflections.go
@@ -1,16 +1,18 @@
-// Copyright (c) 2013 Théo Crevon
+// Copyright © 2013 Théo Crevon
//
// See the file LICENSE for copying permission.
-/*
-Package reflections provides high level abstractions above the
-reflect library.
-
-Reflect library is very low-level and as can be quite complex when it comes to do simple things like accessing a structure field value, a field tag...
-
-The purpose of reflections package is to make developers life easier when it comes to introspect structures at runtime.
-It's API is freely inspired from python language (getattr, setattr, hasattr...) and provides a simplified access to structure fields and tags.
-*/
+// Package reflections provides high level abstractions over the Go standard [reflect] library.
+//
+// In practice, the `reflect` library's API proves somewhat low-level and unintuitive.
+// Using it can turn out pretty complex and daunting, even when doing simple
+// things like accessing a structure field value, a field tag, etc.
+//
+// The `reflections` package aims to make developers' lives easier when introspecting
+// struct values at runtime. Its API takes inspiration from the Python language's `getattr`, `setattr`, and `hasattr` set
+// of methods and provides simplified access to structure fields and tags.
+//
+// [reflect]: http://golang.org/pkg/reflect/
package reflections

import (
@@ -19,61 +21,68 @@ import (
"reflect"
)

-// GetField returns the value of the provided obj field. obj can whether
-// be a structure or pointer to structure.
+// ErrUnsupportedType indicates that the provided type doesn't support the requested reflection operation.
+var ErrUnsupportedType = errors.New("unsupported type")
+
+// ErrUnexportedField indicates that an operation failed as a result of
+// applying to a non-exported struct field.
+var ErrUnexportedField = errors.New("unexported field")
+
+// GetField returns the value of the provided obj field.
+// The `obj` can either be a structure or pointer to structure.
func GetField(obj interface{}, name string) (interface{}, error) {
- if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) {
- return nil, errors.New("Cannot use GetField on a non-struct interface")
+ if !isSupportedType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) {
+ return nil, fmt.Errorf("cannot use GetField on a non-struct object: %w", ErrUnsupportedType)
}

objValue := reflectValue(obj)
field := objValue.FieldByName(name)

if !field.IsValid() {
- return nil, fmt.Errorf("No such field: %s in obj", name)
+ return nil, fmt.Errorf("no such field: %s in obj", name)
}

return field.Interface(), nil
}

-// GetFieldKind returns the kind of the provided obj field. obj can whether
-// be a structure or pointer to structure.
+// GetFieldKind returns the kind of the provided obj field.
+// The `obj` can either be a structure or pointer to structure.
func GetFieldKind(obj interface{}, name string) (reflect.Kind, error) {
- if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) {
- return reflect.Invalid, errors.New("Cannot use GetField on a non-struct interface")
+ if !isSupportedType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) {
+ return reflect.Invalid, fmt.Errorf("cannot use GetFieldKind on a non-struct interface: %w", ErrUnsupportedType)
}

objValue := reflectValue(obj)
field := objValue.FieldByName(name)

if !field.IsValid() {
- return reflect.Invalid, fmt.Errorf("No such field: %s in obj", name)
+ return reflect.Invalid, fmt.Errorf("no such field: %s in obj", name)
}

return field.Type().Kind(), nil
}

-// GetFieldType returns the kind of the provided obj field. obj can whether
-// be a structure or pointer to structure.
+// GetFieldType returns the type string of the provided obj field.
+// The `obj` can either be a structure or pointer to structure. func GetFieldType(obj interface{}, name string) (string, error) { - if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) { - return "", errors.New("Cannot use GetField on a non-struct interface") + if !isSupportedType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) { + return "", fmt.Errorf("cannot use GetFieldType on a non-struct interface: %w", ErrUnsupportedType) } objValue := reflectValue(obj) field := objValue.FieldByName(name) if !field.IsValid() { - return "", fmt.Errorf("No such field: %s in obj", name) + return "", fmt.Errorf("no such field: %s in obj", name) } return field.Type().String(), nil } -// GetFieldTag returns the provided obj field tag value. obj can whether -// be a structure or pointer to structure. +// GetFieldTag returns the provided obj field tag value. +// The `obj` parameter can either be a structure or pointer to structure. func GetFieldTag(obj interface{}, fieldName, tagKey string) (string, error) { - if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) { - return "", errors.New("Cannot use GetField on a non-struct interface") + if !isSupportedType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) { + return "", fmt.Errorf("cannot use GetFieldTag on a non-struct interface: %w", ErrUnsupportedType) } objValue := reflectValue(obj) @@ -81,37 +90,59 @@ func GetFieldTag(obj interface{}, fieldName, tagKey string) (string, error) { field, ok := objType.FieldByName(fieldName) if !ok { - return "", fmt.Errorf("No such field: %s in obj", fieldName) + return "", fmt.Errorf("no such field: %s in obj", fieldName) } if !isExportableField(field) { - return "", errors.New("Cannot GetFieldTag on a non-exported struct field") + return "", fmt.Errorf("cannot GetFieldTag on a non-exported struct field: %w", ErrUnexportedField) } return field.Tag.Get(tagKey), nil } -// SetField sets the provided obj field with provided value. obj param has -// to be a pointer to a struct, otherwise it will soundly fail. Provided -// value type should match with the struct field you're trying to set. +// GetFieldNameByTagValue looks up a field with a matching `{tagKey}:"{tagValue}"` tag in the provided `obj` item. +// The `obj` parameter must be a `struct`, or a `pointer` to one. If the `obj` parameter doesn't have a field tagged +// with the `tagKey`, and the matching `tagValue`, this function returns an error. +func GetFieldNameByTagValue(obj interface{}, tagKey, tagValue string) (string, error) { + if !isSupportedType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) { + return "", fmt.Errorf("cannot use GetFieldByTag on a non-struct interface: %w", ErrUnsupportedType) + } + + objValue := reflectValue(obj) + objType := objValue.Type() + fieldsCount := objType.NumField() + + for i := range fieldsCount { + structField := objType.Field(i) + if structField.Tag.Get(tagKey) == tagValue { + return structField.Name, nil + } + } + + return "", errors.New("tag doesn't exist in the given struct") +} + +// SetField sets the provided obj field with provided value. +// +// The `obj` parameter must be a pointer to a struct, otherwise it soundly fails. +// The provided `value` type should match with the struct field being set. 
func SetField(obj interface{}, name string, value interface{}) error { // Fetch the field reflect.Value structValue := reflect.ValueOf(obj).Elem() structFieldValue := structValue.FieldByName(name) if !structFieldValue.IsValid() { - return fmt.Errorf("No such field: %s in obj", name) + return fmt.Errorf("no such field: %s in obj", name) } - // If obj field value is not settable an error is thrown if !structFieldValue.CanSet() { - return fmt.Errorf("Cannot set %s field value", name) + return fmt.Errorf("cannot set %s field value", name) } structFieldType := structFieldValue.Type() val := reflect.ValueOf(value) - if structFieldType != val.Type() { - invalidTypeError := errors.New("Provided value type didn't match obj field type") + if !val.Type().AssignableTo(structFieldType) { + invalidTypeError := errors.New("provided value type not assignable to obj field type") return invalidTypeError } @@ -119,11 +150,11 @@ func SetField(obj interface{}, name string, value interface{}) error { return nil } -// HasField checks if the provided field name is part of a struct. obj can whether -// be a structure or pointer to structure. +// HasField checks if the provided `obj` struct has field named `name`. +// The `obj` can either be a structure or pointer to structure. func HasField(obj interface{}, name string) (bool, error) { - if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) { - return false, errors.New("Cannot use GetField on a non-struct interface") + if !isSupportedType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) { + return false, fmt.Errorf("cannot use HasField on a non-struct interface: %w", ErrUnsupportedType) } objValue := reflectValue(obj) @@ -136,21 +167,22 @@ func HasField(obj interface{}, name string) (bool, error) { return true, nil } -// Fields returns the struct fields names list. obj can whether -// be a structure or pointer to structure. +// Fields returns the struct fields names list. +// The `obj` parameter can either be a structure or pointer to structure. func Fields(obj interface{}) ([]string, error) { return fields(obj, false) } -// FieldsDeep returns "flattened" fields (fields from anonymous -// inner structs are treated as normal fields) +// FieldsDeep returns "flattened" fields. +// +// Note that FieldsDeep treats fields from anonymous inner structs as normal fields. func FieldsDeep(obj interface{}) ([]string, error) { return fields(obj, true) } func fields(obj interface{}, deep bool) ([]string, error) { - if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) { - return nil, errors.New("Cannot use GetField on a non-struct interface") + if !isSupportedType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) { + return nil, fmt.Errorf("cannot use fields on a non-struct interface: %w", ErrUnsupportedType) } objValue := reflectValue(obj) @@ -158,40 +190,41 @@ func fields(obj interface{}, deep bool) ([]string, error) { fieldsCount := objType.NumField() var allFields []string - for i := 0; i < fieldsCount; i++ { + for i := range fieldsCount { field := objType.Field(i) if isExportableField(field) { - if deep && field.Anonymous { - fieldValue := objValue.Field(i) - subFields, err := fields(fieldValue.Interface(), deep) - if err != nil { - return nil, fmt.Errorf("Cannot get fields in %s: %s", field.Name, err.Error()) - } - allFields = append(allFields, subFields...) 
- } else {
+ if !deep || !field.Anonymous {
allFields = append(allFields, field.Name)
+ continue
+ }
+
+ fieldValue := objValue.Field(i)
+ subFields, err := fields(fieldValue.Interface(), deep)
+ if err != nil {
+ return nil, fmt.Errorf("cannot get fields in %s: %w", field.Name, err)
}
+ allFields = append(allFields, subFields...)
}
}

return allFields, nil
}

-// Items returns the field - value struct pairs as a map. obj can whether
-// be a structure or pointer to structure.
+// Items returns the field:value struct pairs as a map.
+// The `obj` parameter can either be a structure or pointer to structure.
func Items(obj interface{}) (map[string]interface{}, error) {
return items(obj, false)
}

-// FieldsDeep returns "flattened" items (fields from anonymous
-// inner structs are treated as normal fields)
+// ItemsDeep returns "flattened" items.
+// Note that ItemsDeep will treat fields from anonymous inner structs as normal fields.
func ItemsDeep(obj interface{}) (map[string]interface{}, error) {
return items(obj, true)
}

func items(obj interface{}, deep bool) (map[string]interface{}, error) {
- if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) {
- return nil, errors.New("Cannot use GetField on a non-struct interface")
+ if !isSupportedType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) {
+ return nil, fmt.Errorf("cannot use items on a non-struct interface: %w", ErrUnsupportedType)
}

objValue := reflectValue(obj)
@@ -200,20 +233,23 @@ func items(obj interface{}, deep bool) (map[string]interface{}, error) {

allItems := make(map[string]interface{})

- for i := 0; i < fieldsCount; i++ {
+ for i := range fieldsCount {
field := objType.Field(i)
fieldValue := objValue.Field(i)
+
if isExportableField(field) {
- if deep && field.Anonymous {
- if m, err := items(fieldValue.Interface(), deep); err == nil {
- for k, v := range m {
- allItems[k] = v
- }
- } else {
- return nil, fmt.Errorf("Cannot get items in %s: %s", field.Name, err.Error())
- }
- } else {
+ if !deep || !field.Anonymous {
allItems[field.Name] = fieldValue.Interface()
+ continue
+ }
+
+ m, err := items(fieldValue.Interface(), deep)
+ if err != nil {
+ return nil, fmt.Errorf("cannot get items in %s: %w", field.Name, err)
+ }
+
+ for k, v := range m {
+ allItems[k] = v
}
}
}
@@ -221,21 +257,22 @@ func items(obj interface{}, deep bool) (map[string]interface{}, error) {
return allItems, nil
}

-// Tags lists the struct tag fields. obj can whether
-// be a structure or pointer to structure.
+// Tags lists the struct tag fields.
+// The `obj` can either be a structure or pointer to structure.
func Tags(obj interface{}, key string) (map[string]string, error) {
return tags(obj, key, false)
}

-// FieldsDeep returns "flattened" tags (fields from anonymous
-// inner structs are treated as normal fields)
+// TagsDeep returns "flattened" tags.
+// Note that TagsDeep treats fields from anonymous
+// inner structs as normal fields.
func TagsDeep(obj interface{}, key string) (map[string]string, error) { return tags(obj, key, true) } func tags(obj interface{}, key string, deep bool) (map[string]string, error) { - if !hasValidType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) { - return nil, errors.New("Cannot use GetField on a non-struct interface") + if !isSupportedType(obj, []reflect.Kind{reflect.Struct, reflect.Ptr}) { + return nil, fmt.Errorf("cannot use tags on a non-struct interface: %w", ErrUnsupportedType) } objValue := reflectValue(obj) @@ -244,20 +281,22 @@ func tags(obj interface{}, key string, deep bool) (map[string]string, error) { allTags := make(map[string]string) - for i := 0; i < fieldsCount; i++ { + for i := range fieldsCount { structField := objType.Field(i) if isExportableField(structField) { - if deep && structField.Anonymous { - fieldValue := objValue.Field(i) - if m, err := tags(fieldValue.Interface(), key, deep); err == nil { - for k, v := range m { - allTags[k] = v - } - } else { - return nil, fmt.Errorf("Cannot get items in %s: %s", structField.Name, err.Error()) - } - } else { + if !deep || !structField.Anonymous { allTags[structField.Name] = structField.Tag.Get(key) + continue + } + + fieldValue := objValue.Field(i) + m, err := tags(fieldValue.Interface(), key, deep) + if err != nil { + return nil, fmt.Errorf("cannot get items in %s: %w", structField.Name, err) + } + + for k, v := range m { + allTags[k] = v } } } @@ -282,7 +321,7 @@ func isExportableField(field reflect.StructField) bool { return field.PkgPath == "" } -func hasValidType(obj interface{}, types []reflect.Kind) bool { +func isSupportedType(obj interface{}, types []reflect.Kind) bool { for _, t := range types { if reflect.TypeOf(obj).Kind() == t { return true @@ -291,11 +330,3 @@ func hasValidType(obj interface{}, types []reflect.Kind) bool { return false } - -func isStruct(obj interface{}) bool { - return reflect.TypeOf(obj).Kind() == reflect.Struct -} - -func isPointer(obj interface{}) bool { - return reflect.TypeOf(obj).Kind() == reflect.Ptr -} diff --git a/vendor/github.com/open-policy-agent/opa/ast/annotations.go b/vendor/github.com/open-policy-agent/opa/ast/annotations.go index 1868685542..7d09379fd5 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/annotations.go +++ b/vendor/github.com/open-policy-agent/opa/ast/annotations.go @@ -417,7 +417,7 @@ func (a *Annotations) Copy(node Node) *Annotations { return &cpy } -// toObject constructs an AST Object from a. +// toObject constructs an AST Object from the annotation. func (a *Annotations) toObject() (*Object, *Error) { obj := NewObject() @@ -509,6 +509,34 @@ func (a *Annotations) toObject() (*Object, *Error) { return &obj, nil } +func attachRuleAnnotations(mod *Module) { + // make a copy of the annotations + cpy := make([]*Annotations, len(mod.Annotations)) + for i, a := range mod.Annotations { + cpy[i] = a.Copy(a.node) + } + + for _, rule := range mod.Rules { + var j int + var found bool + for i, a := range cpy { + if rule.Ref().Equal(a.GetTargetPath()) { + if a.Scope == annotationScopeDocument { + rule.Annotations = append(rule.Annotations, a) + } else if a.Scope == annotationScopeRule && rule.Loc().Row > a.Location.Row { + j = i + found = true + rule.Annotations = append(rule.Annotations, a) + } + } + } + + if found && j < len(cpy) { + cpy = append(cpy[:j], cpy[j+1:]...) 
+ } + } +} + func attachAnnotationsNodes(mod *Module) Errors { var errs Errors @@ -528,7 +556,11 @@ func attachAnnotationsNodes(mod *Module) Errors { if a.Scope == "" { switch a.node.(type) { case *Rule: - a.Scope = annotationScopeRule + if a.Entrypoint { + a.Scope = annotationScopeDocument + } else { + a.Scope = annotationScopeRule + } case *Package: a.Scope = annotationScopePackage case *Import: @@ -568,8 +600,9 @@ func validateAnnotationScopeAttachment(a *Annotations) *Error { } func validateAnnotationEntrypointAttachment(a *Annotations) *Error { - if a.Entrypoint && !(a.Scope == annotationScopeRule || a.Scope == annotationScopePackage) { - return NewError(ParseErr, a.Loc(), "annotation entrypoint applied to non-rule or package scope '%v'", a.Scope) + if a.Entrypoint && !(a.Scope == annotationScopeDocument || a.Scope == annotationScopePackage) { + return NewError( + ParseErr, a.Loc(), "annotation entrypoint applied to non-document or package scope '%v'", a.Scope) } return nil } @@ -599,9 +632,8 @@ func (a *AuthorAnnotation) String() string { return a.Name } else if len(a.Name) == 0 { return fmt.Sprintf("<%s>", a.Email) - } else { - return fmt.Sprintf("%s <%s>", a.Name, a.Email) } + return fmt.Sprintf("%s <%s>", a.Name, a.Email) } // Copy returns a deep copy of rr. diff --git a/vendor/github.com/open-policy-agent/opa/ast/builtins.go b/vendor/github.com/open-policy-agent/opa/ast/builtins.go index 722e3f2ef6..f54d91d317 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/builtins.go +++ b/vendor/github.com/open-policy-agent/opa/ast/builtins.go @@ -120,6 +120,7 @@ var DefaultBuiltins = [...]*Builtin{ Lower, Upper, Contains, + StringCount, StartsWith, EndsWith, Split, @@ -142,6 +143,7 @@ var DefaultBuiltins = [...]*Builtin{ // Encoding JSONMarshal, + JSONMarshalWithOptions, JSONUnmarshal, JSONIsValid, Base64Encode, @@ -207,6 +209,7 @@ var DefaultBuiltins = [...]*Builtin{ // Crypto CryptoX509ParseCertificates, CryptoX509ParseAndVerifyCertificates, + CryptoX509ParseAndVerifyCertificatesWithOptions, CryptoMd5, CryptoSha1, CryptoSha256, @@ -1107,6 +1110,19 @@ var Contains = &Builtin{ Categories: stringsCat, } +var StringCount = &Builtin{ + Name: "strings.count", + Description: "Returns the number of non-overlapping instances of a substring in a string.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.S).Description("string to search in"), + types.Named("substring", types.S).Description("substring to look for"), + ), + types.Named("output", types.N).Description("count of occurrences, `0` if not found"), + ), + Categories: stringsCat, +} + var StartsWith = &Builtin{ Name: "startswith", Description: "Returns true if the search string begins with the base string.", @@ -1165,7 +1181,7 @@ var Split = &Builtin{ types.Named("x", types.S).Description("string that is split"), types.Named("delimiter", types.S).Description("delimiter used for splitting"), ), - types.Named("ys", types.NewArray(nil, types.S)).Description("splitted parts"), + types.Named("ys", types.NewArray(nil, types.S)).Description("split parts"), ), Categories: stringsCat, } @@ -1231,7 +1247,7 @@ var Trim = &Builtin{ var TrimLeft = &Builtin{ Name: "trim_left", - Description: "Returns `value` with all leading instances of the `cutset` chartacters removed.", + Description: "Returns `value` with all leading instances of the `cutset` characters removed.", Decl: types.NewFunction( types.Args( types.Named("value", types.S).Description("string to trim"), @@ -1257,7 +1273,7 @@ var TrimPrefix = &Builtin{ var TrimRight = 
&Builtin{
Name: "trim_right",
- Description: "Returns `value` with all trailing instances of the `cutset` chartacters removed.",
+ Description: "Returns `value` with all trailing instances of the `cutset` characters removed.",
Decl: types.NewFunction(
types.Args(
types.Named("value", types.S).Description("string to trim"),
@@ -1340,7 +1356,7 @@ var RenderTemplate = &Builtin{
// Marked non-deterministic because it relies on RNG internally.
var RandIntn = &Builtin{
Name: "rand.intn",
- Description: "Returns a random integer between `0` and `n` (`n` exlusive). If `n` is `0`, then `y` is always `0`. For any given argument pair (`str`, `n`), the output will be consistent throughout a query evaluation.",
+ Description: "Returns a random integer between `0` and `n` (`n` exclusive). If `n` is `0`, then `y` is always `0`. For any given argument pair (`str`, `n`), the output will be consistent throughout a query evaluation.",
Decl: types.NewFunction(
types.Args(
types.Named("str", types.S),
@@ -1706,6 +1722,27 @@ var JSONMarshal = &Builtin{
Categories: encoding,
}

+var JSONMarshalWithOptions = &Builtin{
+ Name: "json.marshal_with_options",
+ Description: "Serializes the input term to JSON, with additional formatting options via the `opts` parameter. " +
+ "`opts` accepts keys `pretty` (enable multi-line/formatted JSON), `prefix` (string to prefix lines with, default empty string) and `indent` (string to indent with, default `\\t`).",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A).Description("the term to serialize"),
+ types.Named("opts", types.NewObject(
+ []*types.StaticProperty{
+ types.NewStaticProperty("pretty", types.B),
+ types.NewStaticProperty("indent", types.S),
+ types.NewStaticProperty("prefix", types.S),
+ },
+ types.NewDynamicProperty(types.S, types.A),
+ )).Description("encoding options"),
+ ),
+ types.Named("y", types.S).Description("the JSON string representation of `x`, with configured prefix/indent string(s) as appropriate"),
+ ),
+ Categories: encoding,
+}
+
var JSONUnmarshal = &Builtin{
Name: "json.unmarshal",
Description: "Deserializes the input string.",
@@ -1713,7 +1750,7 @@ var JSONUnmarshal = &Builtin{
types.Args(
types.Named("x", types.S).Description("a JSON string"),
),
- types.Named("y", types.A).Description("the term deseralized from `x`"),
+ types.Named("y", types.A).Description("the term deserialized from `x`"),
),
Categories: encoding,
}
@@ -1877,7 +1914,7 @@ var YAMLUnmarshal = &Builtin{
types.Args(
types.Named("x", types.S).Description("a YAML string"),
),
- types.Named("y", types.A).Description("the term deseralized from `x`"),
+ types.Named("y", types.A).Description("the term deserialized from `x`"),
),
Categories: encoding,
}
@@ -1914,7 +1951,7 @@ var HexDecode = &Builtin{
types.Args(
types.Named("x", types.S).Description("a hex-encoded string"),
),
- types.Named("y", types.S).Description("deseralized from `x`"),
+ types.Named("y", types.S).Description("deserialized from `x`"),
),
Categories: encoding,
}
@@ -2327,6 +2364,31 @@ with all others being treated as intermediates.`,
),
}

+var CryptoX509ParseAndVerifyCertificatesWithOptions = &Builtin{
+ Name: "crypto.x509.parse_and_verify_certificates_with_options",
+ Description: `Returns one or more certificates from the given string containing PEM
+or base64 encoded DER certificates after verifying the supplied certificates form a complete
+certificate chain back to a trusted root. A config option passed as the second argument can
+be used to configure the validation options used.
+
+The first certificate is treated as the root and the last is treated as the leaf,
+with all others being treated as intermediates.`,
+
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"),
+ types.Named("options", types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.S, types.A),
+ )).Description("object containing extra configs to verify the validity of certificates. The `options` object supports four fields, which map to the same fields in the [x509.VerifyOptions struct](https://pkg.go.dev/crypto/x509#VerifyOptions): `DNSName`, `CurrentTime` (nanoseconds since the Unix Epoch as a number), `MaxConstraintComparisons`, and `KeyUsages`. `KeyUsages` is a list and can have the following values: `\"KeyUsageAny\"`, `\"KeyUsageServerAuth\"`, `\"KeyUsageClientAuth\"`, `\"KeyUsageCodeSigning\"`, `\"KeyUsageEmailProtection\"`, `\"KeyUsageIPSECEndSystem\"`, `\"KeyUsageIPSECTunnel\"`, `\"KeyUsageIPSECUser\"`, `\"KeyUsageTimeStamping\"`, `\"KeyUsageOCSPSigning\"`, `\"KeyUsageMicrosoftServerGatedCrypto\"`, `\"KeyUsageNetscapeServerGatedCrypto\"`, `\"KeyUsageMicrosoftCommercialCodeSigning\"`, `\"KeyUsageMicrosoftKernelCodeSigning\"`"),
+ ),
+ types.Named("output", types.NewArray([]types.Type{
+ types.B,
+ types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))),
+ }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"),
+ ),
+}
+
var CryptoX509ParseCertificateRequest = &Builtin{
Name: "crypto.x509.parse_certificate_request",
Description: "Returns a PKCS #10 certificate signing request from the given PEM-encoded PKCS#10 certificate signing request.",
diff --git a/vendor/github.com/open-policy-agent/opa/ast/check.go b/vendor/github.com/open-policy-agent/opa/ast/check.go
index 3723b33cc0..23d1ed8fa1 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/check.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/check.go
@@ -60,7 +60,10 @@ func (tc *typeChecker) copy() *typeChecker {
WithVarRewriter(tc.varRewriter).
WithSchemaSet(tc.ss).
WithAllowNet(tc.allowNet).
- WithInputType(tc.input)
+ WithInputType(tc.input).
+ WithAllowUndefinedFunctionCalls(tc.allowUndefinedFuncs).
+ WithBuiltins(tc.builtins).
+ WithRequiredCapabilities(tc.required)
}

func (tc *typeChecker) WithRequiredCapabilities(c *Capabilities) *typeChecker {
@@ -93,6 +96,8 @@ func (tc *typeChecker) WithInputType(tpe types.Type) *typeChecker {
return tc
}

+// WithAllowUndefinedFunctionCalls sets the type checker to allow references to undefined functions.
+// Additionally, the 'CheckUndefinedFuncs' and 'CheckSafetyRuleBodies' compiler stages are skipped.
func (tc *typeChecker) WithAllowUndefinedFunctionCalls(allow bool) *typeChecker {
tc.allowUndefinedFuncs = allow
return tc
diff --git a/vendor/github.com/open-policy-agent/opa/ast/compile.go b/vendor/github.com/open-policy-agent/opa/ast/compile.go
index d558d4d3f0..c59cfede62 100644
--- a/vendor/github.com/open-policy-agent/opa/ast/compile.go
+++ b/vendor/github.com/open-policy-agent/opa/ast/compile.go
@@ -120,34 +120,35 @@ type Compiler struct {
// Capabliities required by the modules that were compiled.
Required *Capabilities - localvargen *localVarGenerator - moduleLoader ModuleLoader - ruleIndices *util.HashMap - stages []stage - maxErrs int - sorted []string // list of sorted module names - pathExists func([]string) (bool, error) - after map[string][]CompilerStageDefinition - metrics metrics.Metrics - capabilities *Capabilities // user-supplied capabilities - imports map[string][]*Import // saved imports from stripping - builtins map[string]*Builtin // universe of built-in functions - customBuiltins map[string]*Builtin // user-supplied custom built-in functions (deprecated: use capabilities) - unsafeBuiltinsMap map[string]struct{} // user-supplied set of unsafe built-ins functions to block (deprecated: use capabilities) - deprecatedBuiltinsMap map[string]struct{} // set of deprecated, but not removed, built-in functions - enablePrintStatements bool // indicates if print statements should be elided (default) - comprehensionIndices map[*Term]*ComprehensionIndex // comprehension key index - initialized bool // indicates if init() has been called - debug debug.Debug // emits debug information produced during compilation - schemaSet *SchemaSet // user-supplied schemas for input and data documents - inputType types.Type // global input type retrieved from schema set - annotationSet *AnnotationSet // hierarchical set of annotations - strict bool // enforce strict compilation checks - keepModules bool // whether to keep the unprocessed, parse modules (below) - parsedModules map[string]*Module // parsed, but otherwise unprocessed modules, kept track of when keepModules is true - useTypeCheckAnnotations bool // whether to provide annotated information (schemas) to the type checker - allowUndefinedFuncCalls bool // don't error on calls to unknown functions. 
- evalMode CompilerEvalMode + localvargen *localVarGenerator + moduleLoader ModuleLoader + ruleIndices *util.HashMap + stages []stage + maxErrs int + sorted []string // list of sorted module names + pathExists func([]string) (bool, error) + after map[string][]CompilerStageDefinition + metrics metrics.Metrics + capabilities *Capabilities // user-supplied capabilities + imports map[string][]*Import // saved imports from stripping + builtins map[string]*Builtin // universe of built-in functions + customBuiltins map[string]*Builtin // user-supplied custom built-in functions (deprecated: use capabilities) + unsafeBuiltinsMap map[string]struct{} // user-supplied set of unsafe built-ins functions to block (deprecated: use capabilities) + deprecatedBuiltinsMap map[string]struct{} // set of deprecated, but not removed, built-in functions + enablePrintStatements bool // indicates if print statements should be elided (default) + comprehensionIndices map[*Term]*ComprehensionIndex // comprehension key index + initialized bool // indicates if init() has been called + debug debug.Debug // emits debug information produced during compilation + schemaSet *SchemaSet // user-supplied schemas for input and data documents + inputType types.Type // global input type retrieved from schema set + annotationSet *AnnotationSet // hierarchical set of annotations + strict bool // enforce strict compilation checks + keepModules bool // whether to keep the unprocessed, parse modules (below) + parsedModules map[string]*Module // parsed, but otherwise unprocessed modules, kept track of when keepModules is true + useTypeCheckAnnotations bool // whether to provide annotated information (schemas) to the type checker + allowUndefinedFuncCalls bool // don't error on calls to unknown functions. + evalMode CompilerEvalMode // + rewriteTestRulesForTracing bool // rewrite test rules to capture dynamic values for tracing. } // CompilerStage defines the interface for stages in the compiler. @@ -346,6 +347,7 @@ func NewCompiler() *Compiler { {"CheckSafetyRuleBodies", "compile_stage_check_safety_rule_bodies", c.checkSafetyRuleBodies}, {"RewriteEquals", "compile_stage_rewrite_equals", c.rewriteEquals}, {"RewriteDynamicTerms", "compile_stage_rewrite_dynamic_terms", c.rewriteDynamicTerms}, + {"RewriteTestRulesForTracing", "compile_stage_rewrite_test_rules_for_tracing", c.rewriteTestRuleEqualities}, // must run after RewriteDynamicTerms {"CheckRecursion", "compile_stage_check_recursion", c.checkRecursion}, {"CheckTypes", "compile_stage_check_types", c.checkTypes}, // must be run after CheckRecursion {"CheckUnsafeBuiltins", "compile_state_check_unsafe_builtins", c.checkUnsafeBuiltins}, @@ -469,6 +471,13 @@ func (c *Compiler) WithEvalMode(e CompilerEvalMode) *Compiler { return c } +// WithRewriteTestRules enables rewriting test rules to capture dynamic values in local variables, +// so they can be accessed by tracing. +func (c *Compiler) WithRewriteTestRules(rewrite bool) *Compiler { + c.rewriteTestRulesForTracing = rewrite + return c +} + // ParsedModules returns the parsed, unprocessed modules from the compiler. // It is `nil` if keeping modules wasn't enabled via `WithKeepModules(true)`. // The map includes all modules loaded via the ModuleLoader, if one was used. 
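Taken together with the `rewriteTestRuleEqualities` stage in the next hunk, the new `WithRewriteTestRules` option is opt-in and only rewrites rules whose names begin with `test_`. Below is a minimal sketch of enabling it through the public `ast` API; the module source and the `example.rego` map key are illustrative and not part of this patch.

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	// A test rule comparing a virtual document against a literal value;
	// without the rewrite, the value of data.example.p would not be bound
	// to any local variable that tracing could report on.
	module := ast.MustParseModule(`package example

p := {"q": {1, 2, 3}}

test_p { p == {"q": {1, 2, 3}} }
`)

	// Opt in to the new RewriteTestRulesForTracing stage added by this patch.
	compiler := ast.NewCompiler().WithRewriteTestRules(true)

	compiler.Compile(map[string]*ast.Module{"example.rego": module})
	if compiler.Failed() {
		fmt.Println(compiler.Errors)
		return
	}
	fmt.Println(compiler.Modules["example.rego"])
}
```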
@@ -1585,7 +1594,7 @@ func (c *Compiler) compile() { } } - if c.allowUndefinedFuncCalls && s.name == "CheckUndefinedFuncs" { + if c.allowUndefinedFuncCalls && (s.name == "CheckUndefinedFuncs" || s.name == "CheckSafetyRuleBodies") { continue } @@ -2167,6 +2176,43 @@ func (c *Compiler) rewriteDynamicTerms() { } } +// rewriteTestRuleEqualities rewrites equality expressions in test rule bodies to create local vars for statements that would otherwise +// not have their values captured through tracing, such as refs and comprehensions not unified/assigned to a local var. +// For example, given the following module: +// +// package test +// +// p.q contains v if { +// some v in numbers.range(1, 3) +// } +// +// p.r := "foo" +// +// test_rule { +// p == { +// "q": {4, 5, 6} +// } +// } +// +// `p` in `test_rule` resolves to `data.test.p`, which won't be an entry in the virtual-cache and must therefore be calculated after-the-fact. +// If `p` isn't captured in a local var, there is no trivial way to retrieve its value for test reporting. +func (c *Compiler) rewriteTestRuleEqualities() { + if !c.rewriteTestRulesForTracing { + return + } + + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + mod := c.Modules[name] + WalkRules(mod, func(rule *Rule) bool { + if strings.HasPrefix(string(rule.Head.Name), "test_") { + rule.Body = rewriteTestEqualities(f, rule.Body) + } + return false + }) + } +} + func (c *Compiler) parseMetadataBlocks() { // Only parse annotations if rego.metadata built-ins are called regoMetadataCalled := false @@ -2196,6 +2242,8 @@ func (c *Compiler) parseMetadataBlocks() { for _, err := range errs { c.err(err) } + + attachRuleAnnotations(mod) } } } @@ -4192,7 +4240,7 @@ func resolveRefsInRule(globals map[Var]*usedRef, rule *Rule) error { // Object keys cannot be pattern matched so only walk values. case *object: - x.Foreach(func(k, v *Term) { + x.Foreach(func(_, v *Term) { vis.Walk(v) }) @@ -4515,6 +4563,41 @@ func rewriteEquals(x interface{}) (modified bool) { return modified } +func rewriteTestEqualities(f *equalityFactory, body Body) Body { + result := make(Body, 0, len(body)) + for _, expr := range body { + // We can't rewrite negated expressions; if the extracted term is undefined, evaluation would fail before + // reaching the negation check. + if !expr.Negated && !expr.Generated { + switch { + case expr.IsEquality(): + terms := expr.Terms.([]*Term) + result, terms[1] = rewriteDynamicsShallow(expr, f, terms[1], result) + result, terms[2] = rewriteDynamicsShallow(expr, f, terms[2], result) + case expr.IsEvery(): + // We rewrite equalities inside of every-bodies as a fail here will be the cause of the test-rule fail. + // Failures inside other expressions with closures, such as comprehensions, won't cause the test-rule to fail, so we skip those. + every := expr.Terms.(*Every) + every.Body = rewriteTestEqualities(f, every.Body) + } + } + result = appendExpr(result, expr) + } + return result +} + +func rewriteDynamicsShallow(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { + switch term.Value.(type) { + case Ref, *ArrayComprehension, *SetComprehension, *ObjectComprehension: + generated := f.Generate(term) + generated.With = original.With + result.Append(generated) + connectGeneratedExprs(original, generated) + return result, result[len(result)-1].Operand(0) + } + return result, term +} + // rewriteDynamics will rewrite the body so that dynamic terms (i.e., refs and // comprehensions) are bound to vars earlier in the query. 
This translation // results in eager evaluation. @@ -4606,6 +4689,7 @@ func rewriteDynamicsOne(original *Expr, f *equalityFactory, term *Term, result B generated := f.Generate(term) generated.With = original.With result.Append(generated) + connectGeneratedExprs(original, generated) return result, result[len(result)-1].Operand(0) case *Array: for i := 0; i < v.Len(); i++ { @@ -4634,16 +4718,19 @@ func rewriteDynamicsOne(original *Expr, f *equalityFactory, term *Term, result B var extra *Expr v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) result.Append(extra) + connectGeneratedExprs(original, extra) return result, result[len(result)-1].Operand(0) case *SetComprehension: var extra *Expr v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) result.Append(extra) + connectGeneratedExprs(original, extra) return result, result[len(result)-1].Operand(0) case *ObjectComprehension: var extra *Expr v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) result.Append(extra) + connectGeneratedExprs(original, extra) return result, result[len(result)-1].Operand(0) } return result, term @@ -4711,6 +4798,7 @@ func expandExpr(gen *localVarGenerator, expr *Expr) (result []*Expr) { for i := 1; i < len(terms); i++ { var extras []*Expr extras, terms[i] = expandExprTerm(gen, terms[i]) + connectGeneratedExprs(expr, extras...) if len(expr.With) > 0 { for i := range extras { extras[i].With = expr.With @@ -4721,16 +4809,14 @@ func expandExpr(gen *localVarGenerator, expr *Expr) (result []*Expr) { result = append(result, expr) case *Every: var extras []*Expr - if _, ok := terms.Domain.Value.(Call); ok { - extras, terms.Domain = expandExprTerm(gen, terms.Domain) - } else { - term := NewTerm(gen.Generate()).SetLocation(terms.Domain.Location) - eq := Equality.Expr(term, terms.Domain).SetLocation(terms.Domain.Location) - eq.Generated = true - eq.With = expr.With - extras = append(extras, eq) - terms.Domain = term - } + + term := NewTerm(gen.Generate()).SetLocation(terms.Domain.Location) + eq := Equality.Expr(term, terms.Domain).SetLocation(terms.Domain.Location) + eq.Generated = true + eq.With = expr.With + extras = expandExpr(gen, eq) + terms.Domain = term + terms.Body = rewriteExprTermsInBody(gen, terms.Body) result = append(result, extras...) 
result = append(result, expr) @@ -4738,6 +4824,13 @@ func expandExpr(gen *localVarGenerator, expr *Expr) (result []*Expr) { return } +func connectGeneratedExprs(parent *Expr, children ...*Expr) { + for _, child := range children { + child.generatedFrom = parent + parent.generates = append(parent.generates, child) + } +} + func expandExprTerm(gen *localVarGenerator, term *Term) (support []*Expr, output *Term) { output = term switch v := term.Value.(type) { @@ -5492,26 +5585,34 @@ func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr return false, err } + isAllowedUnknownFuncCall := false + if c.allowUndefinedFuncCalls { + switch target.Value.(type) { + case Ref, Var: + isAllowedUnknownFuncCall = true + } + } + switch { case isDataRef(target): ref := target.Value.(Ref) - node := c.RuleTree + targetNode := c.RuleTree for i := 0; i < len(ref)-1; i++ { - child := node.Child(ref[i].Value) + child := targetNode.Child(ref[i].Value) if child == nil { break } else if len(child.Values) > 0 { return false, NewError(CompileErr, target.Loc(), "with keyword cannot partially replace virtual document(s)") } - node = child + targetNode = child } - if node != nil { + if targetNode != nil { // NOTE(sr): at this point in the compiler stages, we don't have a fully-populated // TypeEnv yet -- so we have to make do with this check to see if the replacement // target is a function. It's probably wrong for arity-0 functions, but those are // and edge case anyways. - if child := node.Child(ref[len(ref)-1].Value); child != nil { + if child := targetNode.Child(ref[len(ref)-1].Value); child != nil { for _, v := range child.Values { if len(v.(*Rule).Head.Args) > 0 { if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok { @@ -5521,6 +5622,18 @@ func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr } } } + + // If the with-value is a ref to a function, but not a call, we can't rewrite it + if r, ok := value.Value.(Ref); ok { + // TODO: check that target ref doesn't exist? + if valueNode := c.RuleTree.Find(r); valueNode != nil { + for _, v := range valueNode.Values { + if len(v.(*Rule).Head.Args) > 0 { + return false, nil + } + } + } + } case isInputRef(target): // ok, valid case isBuiltinRefOrVar: @@ -5539,6 +5652,9 @@ func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok { return false, err // err may be nil } + case isAllowedUnknownFuncCall: + // The target isn't a ref to the input doc, data doc, or a known built-in, but it might be a ref to an unknown built-in. 
+ return false, nil default: return false, NewError(TypeErr, target.Location, "with keyword target must reference existing %v, %v, or a function", InputRootDocument, DefaultRootDocument) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/env.go b/vendor/github.com/open-policy-agent/opa/ast/env.go index 784a34c047..c767aafefb 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/env.go +++ b/vendor/github.com/open-policy-agent/opa/ast/env.go @@ -472,7 +472,7 @@ func insertIntoObject(o *types.Object, path Ref, tpe types.Type, env *TypeEnv) ( func (n *typeTreeNode) Leafs() map[*Ref]types.Type { leafs := map[*Ref]types.Type{} - n.children.Iter(func(k, v util.T) bool { + n.children.Iter(func(_, v util.T) bool { collectLeafs(v.(*typeTreeNode), nil, leafs) return false }) @@ -485,7 +485,7 @@ func collectLeafs(n *typeTreeNode, path Ref, leafs map[*Ref]types.Type) { leafs[&nPath] = n.Value() return } - n.children.Iter(func(k, v util.T) bool { + n.children.Iter(func(_, v util.T) bool { collectLeafs(v.(*typeTreeNode), nPath, leafs) return false }) diff --git a/vendor/github.com/open-policy-agent/opa/ast/index.go b/vendor/github.com/open-policy-agent/opa/ast/index.go index e14bb72eef..cb0cbea323 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/index.go +++ b/vendor/github.com/open-policy-agent/opa/ast/index.go @@ -164,7 +164,7 @@ func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) { return result, nil } -func (i *baseDocEqIndex) AllRules(resolver ValueResolver) (*IndexResult, error) { +func (i *baseDocEqIndex) AllRules(_ ValueResolver) (*IndexResult, error) { tr := newTrieTraversalResult() // Walk over the rule trie and accumulate _all_ rules @@ -494,7 +494,7 @@ func (node *trieNode) String() string { func (node *trieNode) append(prio [2]int, rule *Rule) { node.rules = append(node.rules, &ruleNode{prio, rule}) - if node.values != nil { + if node.values != nil && rule.Head.Value != nil { node.values.Add(rule.Head.Value) return } diff --git a/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go b/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go index 97ee8bde01..a0200ac18d 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go +++ b/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go @@ -26,6 +26,7 @@ type Scanner struct { width int errors []Error keywords map[string]tokens.Token + tabs []int regoV1Compatible bool } @@ -37,10 +38,11 @@ type Error struct { // Position represents a point in the scanned source code. type Position struct { - Offset int // start offset in bytes - End int // end offset in bytes - Row int // line number computed in bytes - Col int // column number computed in bytes + Offset int // start offset in bytes + End int // end offset in bytes + Row int // line number computed in bytes + Col int // column number computed in bytes + Tabs []int // positions of any tabs preceding Col } // New returns an initialized scanner that will scan @@ -60,6 +62,7 @@ func New(r io.Reader) (*Scanner, error) { curr: -1, width: 0, keywords: tokens.Keywords(), + tabs: []int{}, } s.next() @@ -156,7 +159,7 @@ func (s *Scanner) WithoutKeywords(kws map[string]tokens.Token) (*Scanner, map[st // for any errors before using the other values. 
func (s *Scanner) Scan() (tokens.Token, Position, string, []Error) { - pos := Position{Offset: s.offset - s.width, Row: s.row, Col: s.col} + pos := Position{Offset: s.offset - s.width, Row: s.row, Col: s.col, Tabs: s.tabs} var tok tokens.Token var lit string @@ -410,8 +413,12 @@ func (s *Scanner) next() { if s.curr == '\n' { s.row++ s.col = 0 + s.tabs = []int{} } else { s.col++ + if s.curr == '\t' { + s.tabs = append(s.tabs, s.col) + } } } diff --git a/vendor/github.com/open-policy-agent/opa/ast/location/location.go b/vendor/github.com/open-policy-agent/opa/ast/location/location.go index 309351a1ed..92226df3f0 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/location/location.go +++ b/vendor/github.com/open-policy-agent/opa/ast/location/location.go @@ -20,6 +20,8 @@ type Location struct { // JSONOptions specifies options for marshaling and unmarshalling of locations JSONOptions astJSON.Options + + Tabs []int `json:"-"` // The column offsets of tabs in the source. } // NewLocation returns a new Location object. diff --git a/vendor/github.com/open-policy-agent/opa/ast/parser.go b/vendor/github.com/open-policy-agent/opa/ast/parser.go index 75dc61a983..0ad15f631b 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/parser.go +++ b/vendor/github.com/open-policy-agent/opa/ast/parser.go @@ -43,6 +43,33 @@ const ( RegoV1 ) +func (v RegoVersion) Int() int { + if v == RegoV1 { + return 1 + } + return 0 +} + +func (v RegoVersion) String() string { + switch v { + case RegoV0: + return "v0" + case RegoV1: + return "v1" + case RegoV0CompatV1: + return "v0v1" + default: + return "unknown" + } +} + +func RegoVersionFromInt(i int) RegoVersion { + if i == 1 { + return RegoV1 + } + return RegoV0 +} + // Note: This state is kept isolated from the parser so that we // can do efficient shallow copies of these values when doing a // save() and restore(). @@ -582,7 +609,12 @@ func (p *Parser) parseImport() *Import { path := imp.Path.Value.(Ref) - if !RootDocumentNames.Contains(path[0]) && !FutureRootDocument.Equal(path[0]) && !RegoRootDocument.Equal(path[0]) { + switch { + case RootDocumentNames.Contains(path[0]): + case FutureRootDocument.Equal(path[0]): + case RegoRootDocument.Equal(path[0]): + default: + p.hint("if this is unexpected, try updating OPA") p.errorf(imp.Path.Location, "unexpected import path, must begin with one of: %v, got: %v", RootDocumentNames.Union(NewSet(FutureRootDocument, RegoRootDocument)), path[0]) @@ -929,12 +961,10 @@ func (p *Parser) parseHead(defaultRule bool) (*Head, bool) { p.illegal("expected rule value term (e.g., %s[%s] = { ... })", name, head.Key) } case tokens.Assign: - s := p.save() p.scan() head.Assign = true head.Value = p.parseTermInfixCall() if head.Value == nil { - p.restore(s) switch { case len(head.Args) > 0: p.illegal("expected function value term (e.g., %s(...) := { ... 
})", name) @@ -2118,6 +2148,7 @@ func (p *Parser) doScan(skipws bool) { p.s.loc.Col = pos.Col p.s.loc.Offset = pos.Offset p.s.loc.Text = p.s.Text(pos.Offset, pos.End) + p.s.loc.Tabs = pos.Tabs for _, err := range errs { p.error(p.s.Loc(), err.Message) @@ -2294,6 +2325,11 @@ func (b *metadataParser) Parse() (*Annotations, error) { b.loc = comment.Location } } + + if match == nil && len(b.comments) > 0 { + b.loc = b.comments[0].Location + } + return nil, augmentYamlError(err, b.comments) } @@ -2361,6 +2397,21 @@ func (b *metadataParser) Parse() (*Annotations, error) { } result.Location = b.loc + + // recreate original text of entire metadata block for location text attribute + sb := strings.Builder{} + sb.WriteString("# METADATA\n") + + lines := bytes.Split(b.buf.Bytes(), []byte{'\n'}) + + for _, line := range lines[:len(lines)-1] { + sb.WriteString("# ") + sb.Write(line) + sb.WriteByte('\n') + } + + result.Location.Text = []byte(strings.TrimSuffix(sb.String(), "\n")) + return &result, nil } @@ -2398,10 +2449,11 @@ func augmentYamlError(err error, comments []*Comment) error { return err } -func unwrapPair(pair map[string]interface{}) (k string, v interface{}) { - for k, v = range pair { +func unwrapPair(pair map[string]interface{}) (string, interface{}) { + for k, v := range pair { + return k, v } - return + return "", nil } var errInvalidSchemaRef = fmt.Errorf("invalid schema reference") @@ -2556,6 +2608,11 @@ var futureKeywords = map[string]tokens.Token{ "if": tokens.If, } +func IsFutureKeyword(s string) bool { + _, ok := futureKeywords[s] + return ok +} + func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]tokens.Token) { path := imp.Path.Value.(Ref) @@ -2616,7 +2673,7 @@ func (p *Parser) regoV1Import(imp *Import) { path := imp.Path.Value.(Ref) if len(path) == 1 || !path[1].Equal(RegoV1CompatibleRef[1]) || len(path) > 2 { - p.errorf(imp.Path.Location, "invalid import, must be `%s`", RegoV1CompatibleRef) + p.errorf(imp.Path.Location, "invalid import `%s`, must be `%s`", path, RegoV1CompatibleRef) return } diff --git a/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go b/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go index 19af82f5b2..afaa1d890c 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go +++ b/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go @@ -713,6 +713,8 @@ func parseModule(filename string, stmts []Statement, comments []*Comment, regoCo return nil, errs } + attachRuleAnnotations(mod) + return mod, nil } diff --git a/vendor/github.com/open-policy-agent/opa/ast/policy.go b/vendor/github.com/open-policy-agent/opa/ast/policy.go index 187bb8d765..f07cf7b376 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/policy.go +++ b/vendor/github.com/open-policy-agent/opa/ast/policy.go @@ -100,7 +100,23 @@ var Wildcard = &Term{Value: Var("_")} var WildcardPrefix = "$" // Keywords contains strings that map to language keywords. -var Keywords = [...]string{ +var Keywords = KeywordsV0 + +var KeywordsV0 = [...]string{ + "not", + "package", + "import", + "as", + "default", + "else", + "with", + "null", + "true", + "false", + "some", +} + +var KeywordsV1 = [...]string{ "not", "package", "import", @@ -112,6 +128,10 @@ var Keywords = [...]string{ "true", "false", "some", + "if", + "contains", + "in", + "every", } // IsKeyword returns true if s is a language keyword. @@ -124,6 +144,26 @@ func IsKeyword(s string) bool { return false } +// IsKeywordInRegoVersion returns true if s is a language keyword. 
+func IsKeywordInRegoVersion(s string, regoVersion RegoVersion) bool { + switch regoVersion { + case RegoV0: + for _, x := range KeywordsV0 { + if x == s { + return true + } + } + case RegoV1, RegoV0CompatV1: + for _, x := range KeywordsV1 { + if x == s { + return true + } + } + } + + return false +} + type ( // Node represents a node in an AST. Nodes may be statements in a policy module // or elements of an ad-hoc query, expression, etc. @@ -185,11 +225,12 @@ type ( // Rule represents a rule as defined in the language. Rules define the // content of documents that represent policy decisions. Rule struct { - Default bool `json:"default,omitempty"` - Head *Head `json:"head"` - Body Body `json:"body"` - Else *Rule `json:"else,omitempty"` - Location *Location `json:"location,omitempty"` + Default bool `json:"default,omitempty"` + Head *Head `json:"head"` + Body Body `json:"body"` + Else *Rule `json:"else,omitempty"` + Location *Location `json:"location,omitempty"` + Annotations []*Annotations `json:"annotations,omitempty"` // Module is a pointer to the module containing this rule. If the rule // was NOT created while parsing/constructing a module, this should be @@ -232,7 +273,9 @@ type ( Negated bool `json:"negated,omitempty"` Location *Location `json:"location,omitempty"` - jsonOptions astJSON.Options + jsonOptions astJSON.Options + generatedFrom *Expr + generates []*Expr } // SomeDecl represents a variable declaration statement. The symbols are variables. @@ -309,8 +352,8 @@ func (mod *Module) Copy() *Module { nodes[mod.Package] = cpy.Package cpy.Annotations = make([]*Annotations, len(mod.Annotations)) - for i := range mod.Annotations { - cpy.Annotations[i] = mod.Annotations[i].Copy(nodes[mod.Annotations[i].node]) + for i, a := range mod.Annotations { + cpy.Annotations[i] = a.Copy(nodes[a.node]) } cpy.Comments = make([]*Comment, len(mod.Comments)) @@ -407,6 +450,12 @@ func (mod *Module) RegoVersion() RegoVersion { return mod.regoVersion } +// SetRegoVersion sets the RegoVersion for the module. +// Note: Setting a rego-version that does not match the module's rego-version might have unintended consequences. +func (mod *Module) SetRegoVersion(v RegoVersion) { + mod.regoVersion = v +} + // NewComment returns a new Comment object. func NewComment(text []byte) *Comment { return &Comment{ @@ -521,7 +570,7 @@ func (pkg *Package) MarshalJSON() ([]byte, error) { } // IsValidImportPath returns an error indicating if the import path is invalid. -// If the import path is invalid, err is nil. +// If the import path is valid, err is nil. 
func IsValidImportPath(v Value) (err error) { switch v := v.(type) { case Var: @@ -657,6 +706,11 @@ func (rule *Rule) Compare(other *Rule) int { if cmp := rule.Body.Compare(other.Body); cmp != 0 { return cmp } + + if cmp := annotationsCompare(rule.Annotations, other.Annotations); cmp != 0 { + return cmp + } + return rule.Else.Compare(other.Else) } @@ -665,6 +719,12 @@ func (rule *Rule) Copy() *Rule { cpy := *rule cpy.Head = rule.Head.Copy() cpy.Body = rule.Body.Copy() + + cpy.Annotations = make([]*Annotations, len(rule.Annotations)) + for i, a := range rule.Annotations { + cpy.Annotations[i] = a.Copy(&cpy) + } + if cpy.Else != nil { cpy.Else = rule.Else.Copy() } @@ -757,6 +817,10 @@ func (rule *Rule) MarshalJSON() ([]byte, error) { } } + if len(rule.Annotations) != 0 { + data["annotations"] = rule.Annotations + } + return json.Marshal(data) } @@ -970,16 +1034,22 @@ func (head *Head) setJSONOptions(opts astJSON.Options) { func (head *Head) MarshalJSON() ([]byte, error) { var loc *Location - if head.jsonOptions.MarshalOptions.IncludeLocation.Head { + includeLoc := head.jsonOptions.MarshalOptions.IncludeLocation + if includeLoc.Head { if head.Location != nil { loc = head.Location } + + for _, term := range head.Reference { + if term.Location != nil { + term.jsonOptions.MarshalOptions.IncludeLocation.Term = includeLoc.Term + } + } } // NOTE(sr): we do this to override the rendering of `head.Reference`. // It's still what'll be used via the default means of encoding/json // for unmarshaling a json object into a Head struct! - // NOTE(charlieegan3): we also need to optionally include the location type h Head return json.Marshal(struct { h @@ -1234,7 +1304,7 @@ func (expr *Expr) Equal(other *Expr) bool { // // 1. Declarations are always less than other expressions. // 2. Preceding expression (by Index) is always less than the other expression. -// 3. Non-negated expressions are always less than than negated expressions. +// 3. Non-negated expressions are always less than negated expressions. // 4. Single term expressions are always less than built-in expressions. // // Otherwise, the expression terms are compared normally. 
If both expressions @@ -1571,6 +1641,46 @@ func NewBuiltinExpr(terms ...*Term) *Expr { return &Expr{Terms: terms} } +func (expr *Expr) CogeneratedExprs() []*Expr { + visited := map[*Expr]struct{}{} + visitCogeneratedExprs(expr, func(e *Expr) bool { + if expr.Equal(e) { + return true + } + if _, ok := visited[e]; ok { + return true + } + visited[e] = struct{}{} + return false + }) + + result := make([]*Expr, 0, len(visited)) + for e := range visited { + result = append(result, e) + } + return result +} + +func (expr *Expr) BaseCogeneratedExpr() *Expr { + if expr.generatedFrom == nil { + return expr + } + return expr.generatedFrom.BaseCogeneratedExpr() +} + +func visitCogeneratedExprs(expr *Expr, f func(*Expr) bool) { + if parent := expr.generatedFrom; parent != nil { + if stop := f(parent); !stop { + visitCogeneratedExprs(parent, f) + } + } + for _, child := range expr.generates { + if stop := f(child); !stop { + visitCogeneratedExprs(child, f) + } + } +} + func (d *SomeDecl) String() string { if call, ok := d.Symbols[0].Value.(Call); ok { if len(call) == 4 { diff --git a/vendor/github.com/open-policy-agent/opa/ast/rego_v1.go b/vendor/github.com/open-policy-agent/opa/ast/rego_v1.go index ea3e907d70..9fa1c6f9b4 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/rego_v1.go +++ b/vendor/github.com/open-policy-agent/opa/ast/rego_v1.go @@ -122,27 +122,65 @@ func checkDeprecatedBuiltinsForCurrentVersion(node interface{}) Errors { return checkDeprecatedBuiltins(deprecatedBuiltins, node) } +type RegoCheckOptions struct { + NoDuplicateImports bool + NoRootDocumentOverrides bool + NoDeprecatedBuiltins bool + NoKeywordsAsRuleNames bool + RequireIfKeyword bool + RequireContainsKeyword bool + RequireRuleBodyOrValue bool +} + +func NewRegoCheckOptions() RegoCheckOptions { + // all options are enabled by default + return RegoCheckOptions{ + NoDuplicateImports: true, + NoRootDocumentOverrides: true, + NoDeprecatedBuiltins: true, + NoKeywordsAsRuleNames: true, + RequireIfKeyword: true, + RequireContainsKeyword: true, + RequireRuleBodyOrValue: true, + } +} + // CheckRegoV1 checks the given module or rule for errors that are specific to Rego v1. // Passing something other than an *ast.Rule or *ast.Module is considered a programming error, and will cause a panic. func CheckRegoV1(x interface{}) Errors { + return CheckRegoV1WithOptions(x, NewRegoCheckOptions()) +} + +func CheckRegoV1WithOptions(x interface{}, opts RegoCheckOptions) Errors { switch x := x.(type) { case *Module: - return checkRegoV1Module(x) + return checkRegoV1Module(x, opts) case *Rule: - return checkRegoV1Rule(x) + return checkRegoV1Rule(x, opts) } panic(fmt.Sprintf("cannot check rego-v1 compatibility on type %T", x)) } -func checkRegoV1Module(module *Module) Errors { +func checkRegoV1Module(module *Module, opts RegoCheckOptions) Errors { var errors Errors - errors = append(errors, checkDuplicateImports([]*Module{module})...) - errors = append(errors, checkRootDocumentOverrides(module)...) - errors = append(errors, checkDeprecatedBuiltinsForCurrentVersion(module)...) + if opts.NoDuplicateImports { + errors = append(errors, checkDuplicateImports([]*Module{module})...) + } + if opts.NoRootDocumentOverrides { + errors = append(errors, checkRootDocumentOverrides(module)...) + } + if opts.NoDeprecatedBuiltins { + errors = append(errors, checkDeprecatedBuiltinsForCurrentVersion(module)...) + } + + for _, rule := range module.Rules { + errors = append(errors, checkRegoV1Rule(rule, opts)...) 
+ } + return errors } -func checkRegoV1Rule(rule *Rule) Errors { +func checkRegoV1Rule(rule *Rule, opts RegoCheckOptions) Errors { t := "rule" if rule.isFunction() { t = "function" @@ -150,13 +188,16 @@ func checkRegoV1Rule(rule *Rule) Errors { var errs Errors - if rule.generatedBody && rule.Head.generatedValue { + if opts.NoKeywordsAsRuleNames && IsKeywordInRegoVersion(rule.Head.Name.String(), RegoV1) { + errs = append(errs, NewError(ParseErr, rule.Location, fmt.Sprintf("%s keyword cannot be used for rule name", rule.Head.Name.String()))) + } + if opts.RequireRuleBodyOrValue && rule.generatedBody && rule.Head.generatedValue { errs = append(errs, NewError(ParseErr, rule.Location, "%s must have value assignment and/or body declaration", t)) } - if rule.Body != nil && !rule.generatedBody && !ruleDeclarationHasKeyword(rule, tokens.If) && !rule.Default { + if opts.RequireIfKeyword && rule.Body != nil && !rule.generatedBody && !ruleDeclarationHasKeyword(rule, tokens.If) && !rule.Default { errs = append(errs, NewError(ParseErr, rule.Location, "`if` keyword is required before %s body", t)) } - if rule.Head.RuleKind() == MultiValue && !ruleDeclarationHasKeyword(rule, tokens.Contains) { + if opts.RequireContainsKeyword && rule.Head.RuleKind() == MultiValue && !ruleDeclarationHasKeyword(rule, tokens.Contains) { errs = append(errs, NewError(ParseErr, rule.Location, "`contains` keyword is required for partial set rules")) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/term.go b/vendor/github.com/open-policy-agent/opa/ast/term.go index bb39c18e3b..4664bc5dac 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/term.go +++ b/vendor/github.com/open-policy-agent/opa/ast/term.go @@ -2633,7 +2633,7 @@ func filterObject(o Value, filter Value) (Value, error) { other = v } - err := iterObj.Iter(func(key *Term, value *Term) error { + err := iterObj.Iter(func(key *Term, _ *Term) error { if other.Get(key) != nil { filteredValue, err := filterObject(v.Get(key).Value, filteredObj.Get(key).Value) if err != nil { @@ -3091,12 +3091,12 @@ func unmarshalTermSlice(s []interface{}) ([]*Term, error) { buf := []*Term{} for _, x := range s { if m, ok := x.(map[string]interface{}); ok { - if t, err := unmarshalTerm(m); err == nil { + t, err := unmarshalTerm(m) + if err == nil { buf = append(buf, t) continue - } else { - return nil, err } + return nil, err } return nil, fmt.Errorf("ast: unable to unmarshal term") } diff --git a/vendor/github.com/open-policy-agent/opa/ast/version_index.json b/vendor/github.com/open-policy-agent/opa/ast/version_index.json index e50dc6748b..718df220f9 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/version_index.json +++ b/vendor/github.com/open-policy-agent/opa/ast/version_index.json @@ -280,6 +280,13 @@ "PreRelease": "", "Metadata": "" }, + "crypto.x509.parse_and_verify_certificates_with_options": { + "Major": 0, + "Minor": 63, + "Patch": 0, + "PreRelease": "", + "Metadata": "" + }, "crypto.x509.parse_certificate_request": { "Major": 0, "Minor": 21, @@ -679,6 +686,13 @@ "PreRelease": "", "Metadata": "" }, + "json.marshal_with_options": { + "Major": 0, + "Minor": 64, + "Patch": 0, + "PreRelease": "", + "Metadata": "" + }, "json.match_schema": { "Major": 0, "Minor": 50, @@ -1106,6 +1120,13 @@ "PreRelease": "", "Metadata": "" }, + "strings.count": { + "Major": 0, + "Minor": 67, + "Patch": 0, + "PreRelease": "", + "Metadata": "" + }, "strings.render_template": { "Major": 0, "Minor": 59, diff --git a/vendor/github.com/open-policy-agent/opa/ast/visit.go 
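A usage sketch for the options plumbing above (not part of the patch; the module source is invented): start from NewRegoCheckOptions and switch off a single check.

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	// A v0-style rule body with no `if` keyword.
	module := ast.MustParseModule("package example\n\nallow { input.x == 1 }\n")

	opts := ast.NewRegoCheckOptions()
	opts.RequireIfKeyword = false // tolerate the missing `if`

	// All other v1 checks still run; this prints nothing for the module above.
	if errs := ast.CheckRegoV1WithOptions(module, opts); len(errs) > 0 {
		fmt.Println(errs)
	}
}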
b/vendor/github.com/open-policy-agent/opa/ast/visit.go index 7b0d3b08e7..d83c31149e 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/visit.go +++ b/vendor/github.com/open-policy-agent/opa/ast/visit.go @@ -352,12 +352,12 @@ func (vis *GenericVisitor) Walk(x interface{}) { vis.Walk(x[i]) } case *object: - x.Foreach(func(k, v *Term) { + x.Foreach(func(k, _ *Term) { vis.Walk(k) vis.Walk(x.Get(k)) }) case Object: - x.Foreach(func(k, v *Term) { + x.Foreach(func(k, _ *Term) { vis.Walk(k) vis.Walk(x.Get(k)) }) @@ -492,12 +492,12 @@ func (vis *BeforeAfterVisitor) Walk(x interface{}) { vis.Walk(x[i]) } case *object: - x.Foreach(func(k, v *Term) { + x.Foreach(func(k, _ *Term) { vis.Walk(k) vis.Walk(x.Get(k)) }) case Object: - x.Foreach(func(k, v *Term) { + x.Foreach(func(k, _ *Term) { vis.Walk(k) vis.Walk(x.Get(k)) }) @@ -579,7 +579,7 @@ func (vis *VarVisitor) Vars() VarSet { func (vis *VarVisitor) visit(v interface{}) bool { if vis.params.SkipObjectKeys { if o, ok := v.(Object); ok { - o.Foreach(func(k, v *Term) { + o.Foreach(func(_, v *Term) { vis.Walk(v) }) return true @@ -741,7 +741,7 @@ func (vis *VarVisitor) Walk(x interface{}) { vis.Walk(x[i]) } case *object: - x.Foreach(func(k, v *Term) { + x.Foreach(func(k, _ *Term) { vis.Walk(k) vis.Walk(x.Get(k)) }) diff --git a/vendor/github.com/open-policy-agent/opa/bundle/bundle.go b/vendor/github.com/open-policy-agent/opa/bundle/bundle.go index 035a1a9c81..816f5535fc 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/bundle.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/bundle.go @@ -15,10 +15,13 @@ import ( "fmt" "io" "net/url" + "os" + "path" "path/filepath" "reflect" "strings" + "github.com/gobwas/glob" "github.com/open-policy-agent/opa/ast" astJSON "github.com/open-policy-agent/opa/ast/json" "github.com/open-policy-agent/opa/format" @@ -120,10 +123,28 @@ func NewFile(name, hash, alg string) FileInfo { // Manifest represents the manifest from a bundle. The manifest may contain // metadata such as the bundle revision. type Manifest struct { - Revision string `json:"revision"` - Roots *[]string `json:"roots,omitempty"` - WasmResolvers []WasmResolver `json:"wasm,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` + Revision string `json:"revision"` + Roots *[]string `json:"roots,omitempty"` + WasmResolvers []WasmResolver `json:"wasm,omitempty"` + // RegoVersion is the global Rego version for the bundle described by this Manifest. + // The Rego version of individual files can be overridden in FileRegoVersions. + // We don't use ast.RegoVersion here, as this iota type's order isn't guaranteed to be stable over time. + // We use a pointer so that we can support hand-made bundles that don't have an explicit version appropriately. + // E.g. in OPA 0.x if --v1-compatible is used when consuming the bundle, and there is no specified version, + // we should default to v1; if --v1-compatible isn't used, we should default to v0. In OPA 1.0, no --x-compatible + // flag and no explicit bundle version should default to v1. + RegoVersion *int `json:"rego_version,omitempty"` + // FileRegoVersions is a map from file paths to Rego versions. + // This allows individual files to override the global Rego version specified by RegoVersion. 
+ FileRegoVersions map[string]int `json:"file_rego_versions,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` + + compiledFileRegoVersions []fileRegoVersion +} + +type fileRegoVersion struct { + path glob.Glob + version int } // WasmResolver maps a wasm module to an entrypoint ref. @@ -150,6 +171,15 @@ func (m *Manifest) AddRoot(r string) { } } +func (m *Manifest) SetRegoVersion(v ast.RegoVersion) { + m.Init() + regoVersion := 0 + if v == ast.RegoV1 { + regoVersion = 1 + } + m.RegoVersion = &regoVersion +} + // Equal returns true if m is semantically equivalent to other. func (m Manifest) Equal(other Manifest) bool { @@ -161,6 +191,19 @@ func (m Manifest) Equal(other Manifest) bool { return false } + if m.RegoVersion == nil && other.RegoVersion != nil { + return false + } + if m.RegoVersion != nil && other.RegoVersion == nil { + return false + } + if m.RegoVersion != nil && other.RegoVersion != nil && *m.RegoVersion != *other.RegoVersion { + return false + } + if !reflect.DeepEqual(m.FileRegoVersions, other.FileRegoVersions) { + return false + } + if !reflect.DeepEqual(m.Metadata, other.Metadata) { return false } @@ -197,7 +240,12 @@ func (m Manifest) Copy() Manifest { func (m Manifest) String() string { m.Init() - return fmt.Sprintf("<revision: %q, roots: %v, wasm: %+v, metadata: %+v>", m.Revision, *m.Roots, m.WasmResolvers, m.Metadata) + if m.RegoVersion != nil { + return fmt.Sprintf("<revision: %q, rego_version: %d, roots: %v, wasm: %+v, metadata: %+v>", + m.Revision, *m.RegoVersion, *m.Roots, m.WasmResolvers, m.Metadata) + } + return fmt.Sprintf("<revision: %q, roots: %v, wasm: %+v, metadata: %+v>", + m.Revision, *m.Roots, m.WasmResolvers, m.Metadata) } func (m Manifest) rootSet() stringSet { @@ -358,10 +406,11 @@ func (m *Manifest) validateAndInjectDefaults(b Bundle) error { // ModuleFile represents a single module contained in a bundle. type ModuleFile struct { - URL string - Path string - Raw []byte - Parsed *ast.Module + URL string + Path string + RelativePath string + Raw []byte + Parsed *ast.Module } // WasmModuleFile represents a single wasm module contained in a bundle. @@ -401,6 +450,7 @@ type Reader struct { name string persist bool regoVersion ast.RegoVersion + followSymlinks bool } // NewReader is deprecated. Use NewCustomReader instead. @@ -489,6 +539,11 @@ func (r *Reader) WithBundleName(name string) *Reader { return r } +func (r *Reader) WithFollowSymlinks(yes bool) *Reader { + r.followSymlinks = yes + return r +} + // WithLazyLoadingMode sets the bundle loading mode. If true, // bundles will be read in lazy mode.
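For reference, a hypothetical .manifest (invented values, not from this patch) combining the two new fields: the bundle is globally Rego v1 while files matching one glob stay on v0.

{
  "revision": "abc123",
  "roots": [""],
  "rego_version": 1,
  "file_rego_versions": {
    "/legacy/*.rego": 0
  }
}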
In this mode, data files in the bundle will not be // deserialized and the check to validate that the bundle data does not contain paths @@ -543,6 +598,7 @@ func (r *Reader) Read() (Bundle, error) { bundle.Data = map[string]interface{}{} } + var modules []ModuleFile for _, f := range descriptors { buf, err := readFile(f, r.sizeLimitBytes) if err != nil { @@ -583,20 +639,14 @@ func (r *Reader) Read() (Bundle, error) { raw = append(raw, Raw{Path: p, Value: bs}) } - r.metrics.Timer(metrics.RegoModuleParse).Start() - module, err := ast.ParseModuleWithOpts(fullPath, buf.String(), r.ParserOptions()) - r.metrics.Timer(metrics.RegoModuleParse).Stop() - if err != nil { - return bundle, err - } - + // Modules are parsed after we've had a chance to read the manifest mf := ModuleFile{ - URL: f.URL(), - Path: fullPath, - Raw: bs, - Parsed: module, + URL: f.URL(), + Path: fullPath, + RelativePath: path, + Raw: bs, } - bundle.Modules = append(bundle.Modules, mf) + modules = append(modules, mf) } else if filepath.Base(path) == WasmFile { bundle.WasmModules = append(bundle.WasmModules, WasmModuleFile{ URL: f.URL(), @@ -656,6 +706,23 @@ func (r *Reader) Read() (Bundle, error) { } } + // Parse modules + popts := r.ParserOptions() + popts.RegoVersion = bundle.RegoVersion(popts.RegoVersion) + for _, mf := range modules { + modulePopts := popts + if modulePopts.RegoVersion, err = bundle.RegoVersionForFile(mf.RelativePath, popts.RegoVersion); err != nil { + return bundle, err + } + r.metrics.Timer(metrics.RegoModuleParse).Start() + mf.Parsed, err = ast.ParseModuleWithOpts(mf.Path, string(mf.Raw), modulePopts) + r.metrics.Timer(metrics.RegoModuleParse).Stop() + if err != nil { + return bundle, err + } + bundle.Modules = append(bundle.Modules, mf) + } + if bundle.Type() == DeltaBundleType { if len(bundle.Data) != 0 { return bundle, fmt.Errorf("delta bundle expected to contain only patch file but data files found") @@ -1012,7 +1079,7 @@ func hashBundleFiles(hash SignatureHasher, b *Bundle) ([]FileInfo, error) { } // FormatModules formats Rego modules -// Modules will be formatted to comply with rego-v1, but Rego compatibility of individual parsed modules will be respected (e.g. if 'rego.v1' is imported). +// Modules will be formatted to comply with rego-v0, but Rego compatibility of individual parsed modules will be respected (e.g. if 'rego.v1' is imported). 
func (b *Bundle) FormatModules(useModulePath bool) error { return b.FormatModulesForRegoVersion(ast.RegoV0, true, useModulePath) } @@ -1022,8 +1089,15 @@ func (b *Bundle) FormatModulesForRegoVersion(version ast.RegoVersion, preserveMo var err error for i, module := range b.Modules { + opts := format.Opts{} + if preserveModuleRegoVersion { + opts.RegoVersion = module.Parsed.RegoVersion() + } else { + opts.RegoVersion = version + } + if module.Raw == nil { - module.Raw, err = format.AstWithOpts(module.Parsed, format.Opts{RegoVersion: version}) + module.Raw, err = format.AstWithOpts(module.Parsed, opts) if err != nil { return err } @@ -1033,13 +1107,6 @@ func (b *Bundle) FormatModulesForRegoVersion(version ast.RegoVersion, preserveMo path = module.Path } - opts := format.Opts{} - if preserveModuleRegoVersion { - opts.RegoVersion = module.Parsed.RegoVersion() - } else { - opts.RegoVersion = version - } - module.Raw, err = format.SourceWithOpts(path, module.Raw, opts) if err != nil { return err @@ -1111,6 +1178,65 @@ func (b *Bundle) ParsedModules(bundleName string) map[string]*ast.Module { return mods } +func (b *Bundle) RegoVersion(def ast.RegoVersion) ast.RegoVersion { + if v := b.Manifest.RegoVersion; v != nil { + if *v == 0 { + return ast.RegoV0 + } else if *v == 1 { + return ast.RegoV1 + } + } + return def +} + +func (b *Bundle) SetRegoVersion(v ast.RegoVersion) { + b.Manifest.SetRegoVersion(v) +} + +// RegoVersionForFile returns the rego-version for the specified file path. +// If there is no defined version for the given path, the default version def is returned. +// If the version does not correspond to ast.RegoV0 or ast.RegoV1, an error is returned. +func (b *Bundle) RegoVersionForFile(path string, def ast.RegoVersion) (ast.RegoVersion, error) { + version, err := b.Manifest.numericRegoVersionForFile(path) + if err != nil { + return def, err + } else if version == nil { + return def, nil + } else if *version == 0 { + return ast.RegoV0, nil + } else if *version == 1 { + return ast.RegoV1, nil + } + return def, fmt.Errorf("unknown bundle rego-version %d for file '%s'", *version, path) +} + +func (m *Manifest) numericRegoVersionForFile(path string) (*int, error) { + var version *int + + if len(m.FileRegoVersions) != len(m.compiledFileRegoVersions) { + m.compiledFileRegoVersions = make([]fileRegoVersion, 0, len(m.FileRegoVersions)) + for pattern, v := range m.FileRegoVersions { + compiled, err := glob.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("failed to compile glob pattern %s: %s", pattern, err) + } + m.compiledFileRegoVersions = append(m.compiledFileRegoVersions, fileRegoVersion{compiled, v}) + } + } + + for _, fv := range m.compiledFileRegoVersions { + if fv.path.Match(path) { + version = &fv.version + break + } + } + + if version == nil { + version = m.RegoVersion + } + return version, nil +} + // Equal returns true if this bundle's contents equal the other bundle's // contents. func (b Bundle) Equal(other Bundle) bool { @@ -1261,13 +1387,33 @@ func mktree(path []string, value interface{}) (map[string]interface{}, error) { // will have an empty revision except in the special case where a single bundle is provided // (and in that case the bundle is just returned unmodified.) func Merge(bundles []*Bundle) (*Bundle, error) { + return MergeWithRegoVersion(bundles, ast.RegoV0, false) +} + +// MergeWithRegoVersion creates a merged bundle from the provided bundles, similar to Merge. 
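A sketch of the resolution order implemented in RegoVersionForFile above (per-file glob first, then the manifest-global version, then the caller's default); the paths and glob are invented:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	var b bundle.Bundle
	b.SetRegoVersion(ast.RegoV1) // manifest-global version: v1
	b.Manifest.FileRegoVersions = map[string]int{"/legacy/*.rego": 0}

	v, _ := b.RegoVersionForFile("/legacy/old.rego", ast.RegoV0)
	fmt.Println(v == ast.RegoV0) // true: the per-file glob wins

	v, _ = b.RegoVersionForFile("/policy/new.rego", ast.RegoV0)
	fmt.Println(v == ast.RegoV1) // true: falls back to the manifest global
}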
+// If more than one bundle is provided, the rego version of the result bundle is set to the provided regoVersion. +// Any Rego files in a bundle of conflicting rego version will be marked in the result's manifest with the rego version +// of its original bundle. If the Rego file already had an overriding rego version, it will be preserved. +// If a single bundle is provided, it will retain any rego version information it already had. If it has none, the +// provided regoVersion will be applied to it. +// If usePath is true, per-file rego-versions will be calculated using the file's ModuleFile.Path; otherwise, the file's +// ModuleFile.URL will be used. +func MergeWithRegoVersion(bundles []*Bundle, regoVersion ast.RegoVersion, usePath bool) (*Bundle, error) { if len(bundles) == 0 { return nil, errors.New("expected at least one bundle") } if len(bundles) == 1 { - return bundles[0], nil + result := bundles[0] + // We respect the bundle rego-version, defaulting to the provided rego version if not set. + result.SetRegoVersion(result.RegoVersion(regoVersion)) + fileRegoVersions, err := bundleRegoVersions(result, result.RegoVersion(regoVersion), usePath) + if err != nil { + return nil, err + } + result.Manifest.FileRegoVersions = fileRegoVersions + return result, nil } var roots []string @@ -1296,8 +1442,24 @@ func Merge(bundles []*Bundle) (*Bundle, error) { result.WasmModules = append(result.WasmModules, b.WasmModules...) result.PlanModules = append(result.PlanModules, b.PlanModules...) + if b.Manifest.RegoVersion != nil || len(b.Manifest.FileRegoVersions) > 0 { + if result.Manifest.FileRegoVersions == nil { + result.Manifest.FileRegoVersions = map[string]int{} + } + + fileRegoVersions, err := bundleRegoVersions(b, regoVersion, usePath) + if err != nil { + return nil, err + } + for k, v := range fileRegoVersions { + result.Manifest.FileRegoVersions[k] = v + } + } } + // We respect the bundle rego-version, defaulting to the provided rego version if not set. + result.SetRegoVersion(result.RegoVersion(regoVersion)) + if result.Data == nil { result.Data = map[string]interface{}{} } @@ -1311,6 +1473,53 @@ func Merge(bundles []*Bundle) (*Bundle, error) { return &result, nil } +func bundleRegoVersions(bundle *Bundle, regoVersion ast.RegoVersion, usePath bool) (map[string]int, error) { + fileRegoVersions := map[string]int{} + + // we drop the bundle-global rego versions and record individual rego versions for each module. + for _, m := range bundle.Modules { + // We fetch rego-version by the path relative to the bundle root, as the complete path of the module might + // contain the path between OPA working directory and the bundle root. + v, err := bundle.RegoVersionForFile(bundleRelativePath(m, usePath), bundle.RegoVersion(regoVersion)) + if err != nil { + return nil, err + } + // only record the rego version if it's different from one applied globally to the result bundle + if v != regoVersion { + // We store the rego version by the absolute path to the bundle root, as this will be the - possibly new - path + // to the module inside the merged bundle. 
+ fileRegoVersions[bundleAbsolutePath(m, usePath)] = v.Int() + } + } + + return fileRegoVersions, nil +} + +func bundleRelativePath(m ModuleFile, usePath bool) string { + p := m.RelativePath + if p == "" { + if usePath { + p = m.Path + } else { + p = m.URL + } + } + return p +} + +func bundleAbsolutePath(m ModuleFile, usePath bool) string { + var p string + if usePath { + p = m.Path + } else { + p = m.URL + } + if !path.IsAbs(p) { + p = "/" + p + } + return path.Clean(p) +} + // RootPathsOverlap takes in two bundle root paths and returns true if they overlap. func RootPathsOverlap(pathA string, pathB string) bool { a := rootPathSegments(pathA) @@ -1465,6 +1674,7 @@ func preProcessBundle(loader DirectoryLoader, skipVerify bool, sizeLimitBytes in } func readFile(f *Descriptor, sizeLimitBytes int64) (bytes.Buffer, error) { + // Case for pre-loaded byte buffers, like those from the tarballLoader. if bb, ok := f.reader.(*bytes.Buffer); ok { _ = f.Close() // always close, even on error @@ -1476,6 +1686,37 @@ func readFile(f *Descriptor, sizeLimitBytes int64) (bytes.Buffer, error) { return *bb, nil } + // Case for *lazyFile readers: + if lf, ok := f.reader.(*lazyFile); ok { + var buf bytes.Buffer + if lf.file == nil { + var err error + if lf.file, err = os.Open(lf.path); err != nil { + return buf, fmt.Errorf("failed to open file %s: %w", f.path, err) + } + } + // Bail out if we can't read the whole file-- there's nothing useful we can do at that point! + fileSize, _ := fstatFileSize(lf.file) + if fileSize > sizeLimitBytes { + return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), fileSize, sizeLimitBytes-1) + } + // Prealloc the buffer for the file read. + buffer := make([]byte, fileSize) + _, err := io.ReadFull(lf.file, buffer) + if err != nil { + return buf, err + } + _ = lf.file.Close() // always close, even on error + + // Note(philipc): Replace the lazyFile reader in the *Descriptor with a + // pointer to the wrapping bytes.Buffer, so that we don't re-read the + // file on disk again by accident. + buf = *bytes.NewBuffer(buffer) + f.reader = &buf + return buf, nil + } + + // Fallback case: var buf bytes.Buffer n, err := f.Read(&buf, sizeLimitBytes) _ = f.Close() // always close, even on error @@ -1489,6 +1730,17 @@ func readFile(f *Descriptor, sizeLimitBytes int64) (bytes.Buffer, error) { return buf, nil } +// Takes an already open file handle and invokes the os.Stat system call on it +// to determine the file's size. Passes any errors from *File.Stat on up to the +// caller. 
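A small wrapper sketch for the merge path above (assumes already-loaded bundles; a bundle whose manifest pins a different rego version keeps per-file overrides in the merged manifest, as bundleRegoVersions shows):

package policy

import (
	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/bundle"
)

// MergeAsV1 merges bundles into a single bundle that is globally Rego v1.
// Modules from bundles pinned to v0 are recorded in FileRegoVersions so
// they keep parsing as v0 inside the merged bundle.
func MergeAsV1(bundles []*bundle.Bundle) (*bundle.Bundle, error) {
	return bundle.MergeWithRegoVersion(bundles, ast.RegoV1, false)
}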
+func fstatFileSize(f *os.File) (int64, error) { + fileInfo, err := f.Stat() + if err != nil { + return 0, err + } + return fileInfo.Size(), nil +} + func normalizePath(p string) string { return filepath.ToSlash(p) } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/file.go b/vendor/github.com/open-policy-agent/opa/bundle/file.go index 62ffeeff5f..80b1a87eb1 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/file.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/file.go @@ -6,6 +6,7 @@ import ( "compress/gzip" "fmt" "io" + "io/fs" "os" "path/filepath" "sort" @@ -126,6 +127,7 @@ type DirectoryLoader interface { WithFilter(filter filter.LoaderFilter) DirectoryLoader WithPathFormat(PathFormat) DirectoryLoader WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader + WithFollowSymlinks(followSymlinks bool) DirectoryLoader } type dirLoader struct { @@ -135,6 +137,7 @@ type dirLoader struct { filter filter.LoaderFilter pathFormat PathFormat maxSizeLimitBytes int64 + followSymlinks bool } // Normalize root directory, ex "./src/bundle" -> "src/bundle" @@ -181,6 +184,12 @@ func (d *dirLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { return d } +// WithFollowSymlinks specifies whether to follow symlinks when loading files from the directory +func (d *dirLoader) WithFollowSymlinks(followSymlinks bool) DirectoryLoader { + d.followSymlinks = followSymlinks + return d +} + func formatPath(fileName string, root string, pathFormat PathFormat) string { switch pathFormat { case SlashRooted: @@ -211,8 +220,12 @@ func (d *dirLoader) NextFile() (*Descriptor, error) { // build a list of all files we will iterate over and read, but only one time if d.files == nil { d.files = []string{} - err := filepath.Walk(d.root, func(path string, info os.FileInfo, err error) error { - if info != nil && info.Mode().IsRegular() { + err := filepath.Walk(d.root, func(path string, info os.FileInfo, _ error) error { + if info == nil { + return nil + } + + if info.Mode().IsRegular() { if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { return nil } @@ -220,7 +233,15 @@ func (d *dirLoader) NextFile() (*Descriptor, error) { return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes) } d.files = append(d.files, path) - } else if info != nil && info.Mode().IsDir() { + } else if d.followSymlinks && info.Mode().Type()&fs.ModeSymlink == fs.ModeSymlink { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { + return nil + } + if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { + return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes) + } + d.files = append(d.files, path) + } else if info.Mode().IsDir() { if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) { return filepath.SkipDir } @@ -305,6 +326,11 @@ func (t *tarballLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader return t } +// WithFollowSymlinks is a no-op for tarballLoader +func (t *tarballLoader) WithFollowSymlinks(_ bool) DirectoryLoader { + return t +} + // NextFile iterates to the next file in the directory tree // and returns a file Descriptor for the file. 
func (t *tarballLoader) NextFile() (*Descriptor, error) { @@ -370,12 +396,13 @@ func (t *tarballLoader) NextFile() (*Descriptor, error) { f := file{name: header.Name} - var buf bytes.Buffer - if _, err := io.Copy(&buf, t.tr); err != nil { + // Note(philipc): We rely on the previous size check in this loop for safety. + buf := bytes.NewBuffer(make([]byte, 0, header.Size)) + if _, err := io.Copy(buf, t.tr); err != nil { return nil, fmt.Errorf("failed to copy file %s: %w", header.Name, err) } - f.reader = &buf + f.reader = buf t.files = append(t.files, f) } else if header.Typeflag == tar.TypeDir { diff --git a/vendor/github.com/open-policy-agent/opa/bundle/filefs.go b/vendor/github.com/open-policy-agent/opa/bundle/filefs.go index e8767e1bae..a3a0dbf204 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/filefs.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/filefs.go @@ -26,6 +26,7 @@ type dirLoaderFS struct { root string pathFormat PathFormat maxSizeLimitBytes int64 + followSymlinks bool } // NewFSLoader returns a basic DirectoryLoader implementation @@ -66,6 +67,16 @@ func (d *dirLoaderFS) walkDir(path string, dirEntry fs.DirEntry, err error) erro return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes) } + d.files = append(d.files, path) + } else if dirEntry.Type()&fs.ModeSymlink != 0 && d.followSymlinks { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { + return nil + } + + if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { + return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes) + } + d.files = append(d.files, path) } else if dirEntry.Type().IsDir() { if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) { @@ -94,6 +105,11 @@ func (d *dirLoaderFS) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { return d } +func (d *dirLoaderFS) WithFollowSymlinks(followSymlinks bool) DirectoryLoader { + d.followSymlinks = followSymlinks + return d +} + // NextFile iterates to the next file in the directory tree // and returns a file Descriptor for the file. 
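To close the loop on the symlink support, a sketch wiring loader and reader together (the directory path is hypothetical; as shown above, tarball loaders ignore the setting):

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	loader := bundle.NewDirectoryLoader("/srv/bundles/example").
		WithFollowSymlinks(true) // descend through symlinked policy files

	b, err := bundle.NewCustomReader(loader).
		WithFollowSymlinks(true).
		Read()
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println(len(b.Modules), "modules loaded")
}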
func (d *dirLoaderFS) NextFile() (*Descriptor, error) { diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v0.62.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v0.62.0.json new file mode 100644 index 0000000000..e38edc9482 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v0.62.0.json @@ -0,0 +1,4737 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": 
"number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": 
"function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": 
"any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + 
], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + 
"dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + 
"type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], 
+ "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + 
"dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + 
"name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + 
"type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, 
+ { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": 
"yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes", + "rule_head_refs", + "rego_v1_import" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v0.62.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v0.62.1.json new file mode 100644 index 0000000000..e38edc9482 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v0.62.1.json @@ -0,0 +1,4737 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": 
"string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + 
"name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + 
"name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + 
"name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + 
"type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + 
"type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], 
+ "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": 
"boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": 
[ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + 
"variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": 
"number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + 
"type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + 
"type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes", + "rule_head_refs", + "rego_v1_import" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v0.63.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v0.63.0.json new file mode 100644 index 0000000000..f3d03f5f4c --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v0.63.0.json @@ -0,0 +1,4781 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + 
}, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { 
+ "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + 
"decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + 
{ + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": 
"any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + 
"type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": 
"any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": 
"set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], 
+ "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": 
"any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { 
+ "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": 
"string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes", + "rule_head_refs", + "rego_v1_import" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v0.64.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v0.64.0.json new file mode 100644 index 0000000000..06c04773c1 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v0.64.0.json @@ -0,0 +1,4826 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": 
"function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], 
+ "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": 
{ + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": 
"any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + 
"type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + 
"of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + 
} + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + 
"type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": 
{ + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": 
"semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": 
"function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", 
+ "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes", + "rule_head_refs", + "rego_v1_import" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v0.64.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v0.64.1.json new file mode 100644 index 0000000000..06c04773c1 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v0.64.1.json @@ -0,0 +1,4826 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": 
"crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + 
"key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { 
+ "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + 
"args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + 
"decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": 
"string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { 
+ "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": 
"string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": 
"object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + 
"type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], 
+ "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": 
[ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes", + "rule_head_refs", + "rego_v1_import" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v0.65.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v0.65.0.json new file mode 100644 index 0000000000..06c04773c1 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v0.65.0.json @@ -0,0 +1,4826 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + 
{ + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + 
"type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + 
"dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { 
+ "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + 
"dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + 
"dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" 
+ } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" 
+ }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + 
}, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + 
"type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, 
+ { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + 
"type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + 
"type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + 
], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes", + "rule_head_refs", + "rego_v1_import" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v0.66.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v0.66.0.json new file mode 100644 index 0000000000..06c04773c1 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v0.66.0.json @@ -0,0 +1,4826 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": 
"function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": 
{ + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": 
"number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { 
+ "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + 
"type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + 
"type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + 
"decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + 
"name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" 
+ }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + 
}, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + 
"type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" 
+ }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" 
+ } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + 
"type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes", + "rule_head_refs", + "rego_v1_import" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v0.67.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v0.67.0.json new file mode 100644 index 0000000000..862a4555f9 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v0.67.0.json @@ -0,0 +1,4843 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": 
"any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + 
"type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + 
}, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + 
}, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + 
"type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": 
[ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + 
}, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" 
+ } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { 
+ "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, 
+ { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": 
"sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": 
"function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + 
}, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": 
"string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes", + "rule_head_refs", + "rego_v1_import" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v0.67.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v0.67.1.json new file mode 100644 index 0000000000..862a4555f9 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v0.67.1.json @@ -0,0 +1,4843 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + 
"name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + 
"type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + 
"result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + 
"key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, 
+ { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, 
+ { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + 
"of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + 
"args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } 
+ }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + 
}, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + 
"type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + 
"type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + 
"type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes", + "rule_head_refs", + "rego_v1_import" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v0.68.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v0.68.0.json new file mode 100644 index 0000000000..862a4555f9 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v0.68.0.json @@ -0,0 +1,4843 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" 
+ } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": 
"cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": 
"any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, 
+ "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": 
"any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": 
"any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + 
"type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } 
+ ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { 
+ "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + 
}, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + 
}, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" 
+ }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" 
+ }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + 
"name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + 
"type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes", + "rule_head_refs", + "rego_v1_import" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/config/config.go b/vendor/github.com/open-policy-agent/opa/config/config.go index 2e3fa10b77..87ab109113 100644 --- a/vendor/github.com/open-policy-agent/opa/config/config.go +++ b/vendor/github.com/open-policy-agent/opa/config/config.go @@ -39,6 +39,7 @@ type Config struct { DistributedTracing json.RawMessage `json:"distributed_tracing,omitempty"` Server *struct { Encoding json.RawMessage `json:"encoding,omitempty"` + Decoding json.RawMessage `json:"decoding,omitempty"` Metrics json.RawMessage `json:"metrics,omitempty"` } `json:"server,omitempty"` Storage *struct { diff --git a/vendor/github.com/open-policy-agent/opa/format/format.go b/vendor/github.com/open-policy-agent/opa/format/format.go index c007fafd08..e4c9afaeb7 100644 --- a/vendor/github.com/open-policy-agent/opa/format/format.go +++ b/vendor/github.com/open-policy-agent/opa/format/format.go @@ -28,6 +28,9 @@ type Opts struct { // RegoVersion is the version of Rego to format code for. RegoVersion ast.RegoVersion + + // ParserOptions is the parser options used when parsing the module to be formatted. + ParserOptions *ast.ParserOptions } // defaultLocationFile is the file name used in `Ast()` for terms @@ -43,11 +46,15 @@ func Source(filename string, src []byte) ([]byte, error) { } func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) { - parserOpts := ast.ParserOptions{} - if opts.RegoVersion == ast.RegoV1 { - // If the rego version is V1, wee need to parse it as such, to allow for future keywords not being imported. - // Otherwise, we'll default to RegoV0 - parserOpts.RegoVersion = ast.RegoV1 + var parserOpts ast.ParserOptions + if opts.ParserOptions != nil { + parserOpts = *opts.ParserOptions + } else { + if opts.RegoVersion == ast.RegoV1 { + // If the rego version is V1, we need to parse it as such, to allow for future keywords not being imported. + // Otherwise, we'll default to RegoV0 + parserOpts.RegoVersion = ast.RegoV1 + } } module, err := ast.ParseModuleWithOpts(filename, string(src), parserOpts) @@ -56,7 +63,12 @@ func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) { } if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 { - errors := ast.CheckRegoV1(module) + checkOpts := ast.NewRegoCheckOptions() + // The module is parsed as v0, so we need to disable checks that will be automatically amended by the AstWithOpts call anyways. 
diff --git a/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go b/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go
index 89f3e2a6f4..064649733a 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go
+++ b/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go
@@ -95,7 +95,8 @@ func LoadBundleFromDisk(path, name string, bvc *bundle.VerificationConfig) (*bun
 func LoadBundleFromDiskForRegoVersion(regoVersion ast.RegoVersion, path, name string, bvc *bundle.VerificationConfig) (*bundle.Bundle, error) {
 	bundlePath := filepath.Join(path, name, "bundle.tar.gz")
 
-	if _, err := os.Stat(bundlePath); err == nil {
+	_, err := os.Stat(bundlePath)
+	if err == nil {
 		f, err := os.Open(filepath.Join(bundlePath))
 		if err != nil {
 			return nil, err
@@ -116,9 +117,9 @@ func LoadBundleFromDiskForRegoVersion(regoVersion ast.RegoVersion, path, name st
 		return &b, nil
 	} else if os.IsNotExist(err) {
 		return nil, nil
-	} else {
-		return nil, err
 	}
+
+	return nil, err
 }
 
 // SaveBundleToDisk saves the given raw bytes representing the bundle's content to disk
diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv
index a9fa9fed21..48c809d656 100644
--- a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv
+++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv
@@ -1000,6 +1000,7 @@
opa_glob_match,std::__1::basic_string\2c\2 opa_glob_match,std::__1::__hash_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::find\28cache_key\20const&\29 opa_glob_match,std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 opa_glob_match,glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29 +opa_glob_match,std::__1::unordered_map\2c\20std::__1::allocator\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::erase\28std::__1::__hash_map_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20>\29 opa_glob_match,std::__1::pair\2c\20std::__1::allocator\20>\20>::pair\2c\20std::__1::allocator\20>&\2c\20false>\28cache_key&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>&\29 opa_glob_match,std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29 opa_glob_match,opa_string @@ -1010,6 +1011,8 @@ void\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>::__push_back_slow_path\2c\20std::__1::allocator\20>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>&&\29,abort std::__1::__hash_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::find\28cache_key\20const&\29,std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>::operator\28\29\28cache_key\20const&\29\20const std::__1::__hash_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::find\28cache_key\20const&\29,std::__1::equal_to::operator\28\29\28cache_key\20const&\2c\20cache_key\20const&\29\20const 
+std::__1::unordered_map\2c\20std::__1::allocator\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::erase\28std::__1::__hash_map_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20>\29,std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::remove\28std::__1::__hash_const_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\29 +std::__1::unordered_map\2c\20std::__1::allocator\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::erase\28std::__1::__hash_map_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20>\29,std::__1::unique_ptr\2c\20std::__1::allocator\20>\20>\2c\20void*>\2c\20std::__1::__hash_node_destructor\2c\20std::__1::allocator\20>\20>\2c\20void*>\20>\20>\20>::~unique_ptr\28\29 std::__1::pair\2c\20std::__1::allocator\20>\20>::pair\2c\20std::__1::allocator\20>&\2c\20false>\28cache_key&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>&\29,std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 std::__1::pair\2c\20std::__1::allocator\20>\20>::pair\2c\20std::__1::allocator\20>&\2c\20false>\28cache_key&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>&\29,operator\20new\28unsigned\20long\29 std::__1::pair\2c\20std::__1::allocator\20>\20>::pair\2c\20std::__1::allocator\20>&\2c\20false>\28cache_key&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>&\29,abort @@ -1018,6 +1021,8 @@ std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,operator\20new\28unsigned\20long\29 std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,std::__1::__next_prime\28unsigned\20long\29 
std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__rehash\28unsigned\20long\29 +std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,std::__1::unique_ptr\2c\20std::__1::allocator\20>\20>\2c\20void*>\2c\20std::__1::__hash_node_destructor\2c\20std::__1::allocator\20>\20>\2c\20void*>\20>\20>\20>::~unique_ptr\28\29 +std::__1::unique_ptr\2c\20std::__1::allocator\20>\20>\2c\20void*>\2c\20std::__1::__hash_node_destructor\2c\20std::__1::allocator\20>\20>\2c\20void*>\20>\20>\20>::~unique_ptr\28\29,operator\20delete\28void*\29 std::__1::equal_to::operator\28\29\28cache_key\20const&\2c\20cache_key\20const&\29\20const,memcmp std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__rehash\28unsigned\20long\29,operator\20delete\28void*\29 @@ -1053,9 +1058,11 @@ compile\28char\20const*\29,abort reuse\28re2::RE2*\29,opa_builtin_cache_get reuse\28re2::RE2*\29,operator\20new\28unsigned\20long\29 reuse\28re2::RE2*\29,opa_builtin_cache_set +reuse\28re2::RE2*\29,re2::RE2::~RE2\28\29 +reuse\28re2::RE2*\29,operator\20delete\28void*\29 
+reuse\28re2::RE2*\29,std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::remove\28std::__1::__hash_const_iterator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\29 reuse\28re2::RE2*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 reuse\28re2::RE2*\29,std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>&&\29 -reuse\28re2::RE2*\29,operator\20delete\28void*\29 std::__1::__hash_iterator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::find\2c\20std::__1::allocator\20>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,memcmp 
std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>&&\29,memcmp std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>&&\29,operator\20new\28unsigned\20long\29 diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm index 7cac13a3b8e9012c0b9da428f6e4abaf997361f4..eb3147b8a89e5ed707639abf7300f14261e1b584 100644 GIT binary patch delta 44025 zcmd442Y8g#@;{zacGJm*6hacn?$Sc%HIKeeUyr@;rHWPnk1kX3orC657wWL%P<|H?E=q_n748sW(jgpLgy%mf7pd$Qcc%o~obL)AdX}ThG-wdZ+$e@6fO4FZ8SW zi2hO^)xYWSj!BN`j+u@eN4}%LQRvv_c-yht@rvVR#}3CH$1cYkj@KPK6CLk3-gWGA zM4j1`c*wCwNSQ6h<{Yb5Drcp$gLpsZXXo6EPbZ8U7v@oJ;R|aZBizat<~lPDMJR~K za{ZOFB0Ndt<@m$viBma0hIbI3=G+ufrqo~DOSW{2Oc~QUHq7_V#7X1ATzNTzA||8V zhNx0Goxf+ORO%2`{vI{S}w0`T{<-`8YDl#Fgy|pY9M>m2|0 zgvAJ+#;Kfy^jp%3hR1w%jYqvtwE=y&epq^DNh4oAEEaUK5nP{MOZ<^@B;6;Hxesj=O&1jM_W&KlrP?upR>Wc5DiYO{@MUHs;r@b~C=@kFK6A#kQQ^YMzZ+!C+I}(v_Egw$|w=h{>l-ZpZ&h zP0!(fO0#D8->q34Kz+Jdb~GN5?e;~975N|Cxh?@!LM;{P}1b9(=}_}saF zL;Rm+KEGlSCg~^1B!Hf?KvL~dMTY4l0@D)xmkaD(- zy|uw*hUbx1j~GjkxX3MYT}N2FZ1IE2J!~~KqShe0ySSV)JKGy=m4X89u$<=EWpX~t zuE;vblf9fJ+lI?4*j67b%vgay7byl1(g1{8 z8ANTuV6>e3CZ*?OPjI#fYBg(V2#=0-3vfodUgXie;NnP&i)>riqJXhrorU4b95KF> z%Z6d&0}f@5FC$jv44se^8(6Xs0N2Kx{^Q%1GFpZyX%?Z4k#gtsm{f)l$}SGfCp?l7 zQobk~p|I-_3iCyQXoh4f|24^MDhKHRIf!s;xAe(?C_0$%H^P11ghp5k`zDmh=|9Qk zI(`i!av~;jwQpiYx3M50Je$2}5*Aa@v~s3RYK*3v6B2UfOpeJ6G?(_udTmMZ=0@Du z4nyH>oLjtkU<7{VbV|s~xjL@E6uD&Fe7y&lxf$CB-m1HDo+{dE_jgvJ@0DP3BHk8sf_JMiZFd(Zz#U`sV5Q|Cy|Db5q2Kceb)}?ydga01cr~L!j z4@{lJ5@*Q%&9qkk9#AvYf5FQSZM8V#X?Df14kBj;(8#&^G<%py&vX;#avpi6H2zO{ 
zrmP%4F~>DM85OTS!{+MDGoGTNO4DI|aym^fD$0L)`ivAT$57TQrR}oP3~J93aD|deXE`KZg<8RzE z>1Y0ybL^LI@MX{W^cXx&ULB1g{5_;-&d=wxNQ=aCYpghmw*Cci;xTwd{t~Go!zuEp zhFeSu-^Kh+ji$}DL=qizi$}yOw5+^H5e3xMBkHEUYU(jhQ9Y~?+rlv4ndOY$tF*!+ z8r!X#6mT;*6TYP{m`c+t#>pk?LP~B3r>0lbb-iMGp!rX$E$G9&qE1n%4VDVLqAAAw z*(;KPVMIA`ugIr|%89n3kk*$IX{g&%PE<>=!KvzDFlxhE$HHe~jZ{F<RVGb}bzV`CKAXLBOk3M zHbez#@qJ-+(E`vut}g0}Z4^^O)CDFjYlzm6xhXY7L)5)iL)<2|Q&de+Eznb;XUm%6 z36bmHQB!ml=o3?0RKcTTJ<%RrYc~*$>6O}I0E#x(1(-rw(MV*ZMz|d=GkQz}WGqu+ z`*drp!{>746VmX3G!)2{m|CH zcYU$eX;iDBh)S|473P{?yckEc8L3mlYx*{(s|~L~wOph%PaQ33Q*%*@PBs$DFvP^0 zL`^(G>2!TxQ2k0{!-_ght{dv>Cf5v=;ZSB~FEtf!1N7i#X6A{_4cR+17v%tMXmgPm zoy(I5i=LVwVseI6OZKm7E+UzC8(Ww`dio6S*7-yRwQ4EGqNelBqBI?8DawL@7h8&Q z$=0M4Z-%5LS%xC@eAhr=+M_cwzG^E`wmuqTcNg9|R=UG23E%=~DFD&{1l<4w zHRH3ejVNc!u|d6h8-sfHHlibtUERhIuR~jd`lPmIt!-)x^_lCJw~F_fTpzVVkHUrR zL6ri3MtiY|3*WmNcZ{pRpU^?v1S3uzI|?`@G@_&MMcb_n z7O!_SSbX16bV7CG+YJ`SZZ}w@bQ0yN*^=mV9b@eq9fWM@A6Z;4_v<8X0a?~}60Pt! z*9o?xfMPm}v@#YLV8(lEuQEo@t&9tewb81xcph^()!B?&ri-W$R55P&+1hQX{kyt| z3^a?rOEjg;UBnRa26^rf?q~zt9R?M*jXK;RN@rNoVqzw+RxVo=DPKxfrVPV)lkO19 z6SwmeQez^`ddBw7*A$#>JYq8Ubd?oROl9)DVo?p;M+e0!~{=wM;Nby;_b zdC;Yo?-J>lYic)f9^Xpb4dLHTb?z2p6D(&S#sUA`w=*mYVz`~&yjwh4(yqV;R{CmL z0@@w>9X6SUM9O#A?A(gLMjI+)#-3O+ zx2NtAbxLkCvvx=Os%4pRL9(6qh)S5rcl_8!u6xCueQmBs$A*~|SESCd34l5`%m*`N z^{`qtaZ%BzSVhaCid*dp4ESDHmSxAIm%C{1y<#5*e)<7Xjdu1BE=b_v9-B!I3z$#v|42 zC0a+%oUbi-m(rXWj z$3cNc4~g%~+C0TZwJjnn!Z$L@gDq`rSd5BhpQ>daQPH+L(Tk)A&xT;dWWj%!OHcL@ z>%=^&*carSM+5tUxXp8VB_!2DJc1+nCcfgD%~L_7aE>7zcpY%r+$tJ=wE{pu+eY&k zOg??m7mIxx#XT(QWZOb%Ryp?$i(VL^B5-sd$CLw`$3Z&}^lD3RfH}~TuDOPC4h|lH zbihP=otO+065pKzyx~4^8~Ez7jsFG=mVPzMUU?#6$0D+Pf@74CZU<{iRT*qSaN_2$ zhLQ_o8k%#Kh%B$PYNTC*X>ujz&kpu=wJD|+5Lf{LXo>;n%jB4v8_hwJ1{grH0s;sI znu72YN8P;8?HZpWxhB#yu=rF(y1cP&Xa0O9d<DlC?2}^@?ZDCF3xzA|}jU`6Xh>crzLdZt~7f*zRI`GYR3ryhOz{2#1 z;Gw9-IN{B<1~T%2nHjvCW&{Wz>{bp736QYm*v#E44XOdzMI}VG6F@?UjVxON9Kg+% zVZ#-#1I;3NduDOQoo)Ca+2V$|HOn%bdn4?^h_ffsfU$_~RXi}d5A#IwK#`V&<1y3& zYsrfqEX8_ZX*1^;kUM)Qhv9fMoGd3MX0y0~%#Jam8eugkXd^mz2mgYr`$50PepTQA<-;r5fCouD5 zhhx2kdn0(#kPP^H7RRj+u{V+rT~Mh#;btUzgm5#)Fe6(KC^G^c_BzK&jnUe?47~6# z!`$J23n81u^KU?a*-R26y_lr|7scSB7#w67g*HM?8zCdT_`*}hW2gr~fhZ4DklP#| znb|uHha#BaGBX=Afbd1Q!}+_RHIrPLT5I`+1+|FHx9lvqXVEw+($L&YL;;Jym;xWf zSoy(yuwS&k2JOPKjwu2;eD;NImw1eDV}vO(!lyD zA_N@xEZK-D)0T~xVR@oqJ}m4=H!0!H&`Zb1dZ#Za`bFv7%428|Gt z+fJB_70rvWR%_Si*3k>}&15c3<+#uD!uF5Ate1j_2;Zil1-E?On?{;l*NYJh)s;L$is{l!)#S5r#UcL||OAELa`!PIZSo9(-6t%E$WVADY z0|;z8aIz&%wZ1k^p|o8!G^&=376<_r)mn9|_r11(R6g{&1l8y=VrFckk5N0p&|oHv z!17Ru*3QL7+LoRo&|Y;t@a%trZk>!matDac9#+v3 z1B8HI9!dypoK{exE}X3vkBRis&+#}nVxNEHV`8v`IG=a|A(jGq;7QRb`@gA${|`zS zuE~E-3;!RKaQ1&w3;!RKu)}C!f9{i_C3EJ?7_kwLrDMfiSg7a6iqFIrI*=_U;?ZRs zbkP$3&~f514h)0I$M9yR8PgVrON5^}^EeBO~Gj^MFqK7tFY>BxMM zCD!=cE)XA}w?FMUQAJ=bZ5Ctu<@e8BEJiBvlHa*pysO0;I{cz|5XJRYimQ^JYs9Zo ztoBz~C)(jjBXh+r6gS8d$5lCVX5b5pc7^%2TW2u5Qmlf*Rsl8M49noBiJK9nUG6W~ zjIB`oQf`I_ldqj`Z8p@#GXL={Vl)mE{N4ScHqI&NnU}@ycud#<{hH@ry+bVG(iX3S z{CV`_>zJ{hcI*WE^62xO;!Ql3?ZR|Eq|>_)w)v3a-Vn&pp*nAfmFWM;8=@Nb*Sm2B zk>@|LTS!rFEhCZYTR347P_FEQ{>YRoUz|EF%(c#{i7F~tZ%VM|E4|ks)NC)L0+7bN zBS!G^!n4ta;}B+@ztIU$UF749^iGzjq6IRw1Erccg^enn#mj0JlS zD)Lxee-;l_^)notDY(N2`Xt;nm~59d5|4vzM94^A{|u@6TRa{)kMMN5)i(k?Bis>I zUj~kCBv7sRt9V9622rQ&zr&d2(V5?Q!T4kT5basA?!F{$j{T1Dg7W&FpVaHJNcS3{ z0u$JWb-^dL(t1+C1;G|S%=H7Eyd(xj2D?)C%kcE_=)NRbhPq!7y#t>=xq`*$r^{F1 z`sLBUt7g6aJU}MU(yItw<thr`u#kPE3jIRIO3DmVS~2by z#>Tg^7BY?$$HD3+$XenmJ)9sL0PfNR`2Zf55+t(T0y8f{A}7%xv9pY9K*tlMH{%b6 z=5a8?OxzlKc438`LF920$(tl^0wBniSDc~jBzb#-P;JO$$Xjj5z<_v9$euPtH^ytJ 
zuppIGvh;;dVm;?k)VYL2O2hT|txY!g?Meo#-db2nHbbN4rHrKYDP<&Wb}4ylVknb1 zy@;7aKa>J3^Qc6s;db>@5Po!uOpp1PF>4?bftRLQJ!GnXTMEcstSQUk;8a;rT%ZN1 z5(oTrDpht9KU32*Sx;Q@4@;9JMUk;UTqb1Z^jx}3MVG>K>4X3Ad%EoCG!spw-V@9` zuUSS8dZ|o+4;{r@RHCfBTU?}v%gRLnW3P^~8AdWO-&DFULsq}R@}k8Vk`H4}XGrfq zA=k_eF}Xs6vD?HLtK5dB``m`6zhQT3gLhbxk6jcf+Q|bH-lP#8c>znTKjLlZYOkiv z-T*t2a1t-;*!?-h55mM2;&ZBB4huZbKf0W(F64xXw5q&(5)eEUz+LE;3S3JoD#%50 z;zVjv5ky!`ITfV`4o_i4*+8WSMXfl&ji}L~R72Y`Rb(SftXmZ<`API-6`2O0MM7Y6 zHiM2qw5WPjS+C4nM&_}_a*JwVuuc{;=d7_-^Vq6JewJ1>x_;;q;Y_fg5_1KNx^hkB)o2*z8;5K# z*!1=d!0I4cfR_@ZJ7A5Uv$E!BLc?oTA9OM2V0}(U8%pHP(4~g*4?KQtBx{R5snSjI zu_AE)`7HgI<=}8Y_^(aywp%2-t8uO520UJCCEJHzWp2iX-Ds`$Zw1bUk z*HF1mn*d{yXkeS5t3djN7-$ACNU136S-k>Qn9x3dPTMl&nL3277_7=%& zc^dq*jG~Jjpcq+lyHL-Lm~&8e{&QxtNDuYD*-7qa@BF{U@s4-P78WH+Q`PP=wyNz& zVEOZby4ga4F08Rk#+~ib^Y>YF^ln)6@>H8imDYUt#v0X*oO)GZvUcXP?6Z^C!25?8U>sP99zg$u@ z^6C3=(jPj&e{tkHXe@BOX7~lY5NMi5TYJgZfW@fZV8m~hzFg|F_pevIex&?1zkD+y0PjBk~sZF$VHFaJSLd zeasTDYcl#4gINRM4)%p5`jf&Rmfc*IQW0>bu%U85vkrP#mUo*{PaGoXFYzwKEpfaT zXSR8EtIZF~u7Kq1XC_mjpV2h!`pMQ6&5HNLv62z)I9~EBBOXXyF#rYqWKA6CpX?{A zA=@Cnzf2J4sX~7^aXHkpzpR9f;MD%|A&l`ufB6j__H?isRVJP$Dguei%;oJP09 z*C@-i$A8Nspf{A%ph5B}cEj=>Gqa5xVo<0)M0U+A(r-AQ1V+BQtqOIl#0+2NZeaTeqo7tZIM zzmgaZpJ6GL84mKyq1MAC(o1Q~aD&v6;pqLb|Eu9}S*nzL5@yQAmj$25ps;iIU$^@N~0m`v#H}~gMQIaK%ZgvqPCC9cr+OQxb)Byk7AdR zhXA*kH5yBF-V^dZamD}B6IhgJ zW81+_PnqH7J!KTokCt(a9%Bk`A0yvE%apOQdFh}%!kM47If=z~D9rUI4H+wMu4Wb! zXLVZD*2-XGX!nD=hLcvi$CqPe`&4^TV&8-nXx_BIDcV5vxJtfk`7p+PDccZaPj=A6 zFB~Up*}kng#Y7LYByJujZ$XDC<4lKD<4lKl#sy7#l!Z@}Y3v;@M`Kk!J6_&Y_6?R* zUv}GARt=u$fL+G%H=5?qx$$xe;MzL|HvhC?f<*2wy*#1V+GHo2-3Hf0R!jaG6S46_ ztC~{`Lg<)4uTL^+?!8H}wYO+Rv8}V!2R5}>;+)kaT;Ec|$+Ah&&}PrSbuyrROdm}) ztg>O>K80E3pF0KGv&b5JI~5Y2N2RC9R(L!x&1l|f&q&_izJ~`TJdHh1(7nBVx>3}q zsX&{ahKbIjb5BEQ=KLab-PfKUW@?~CU zIx`cHHkrkT#)%vnPXzmvYbPC@Va#4=P^9WiL#?JWgFG}RwQz>meWnF7vCIF6CeM=9 zY}gPm95x35I~jfqU>}?X%!w|{lC^6rVvPeX0}ozTilv7g=4Y*PlyVw@3A?fXedlbW zluVW5I+6ng*jq5z?*hUzSlwMzfV#5GOOm3Kl& za2N;>lj+GN_>Az_swMKPA{Cgs)Y=*ZgQjd%dps}iq-o1!LM3ZRDEzqYo2Hxq?M3M}Z=8zbFUN?=K)6yqFSSlsS03^rCDG zluqDVk#r$tfX`G;tej+Y&CHdCJctcs&<87JdI>AM<5t_;Vw>xEy1Y^@vftX4EoDpE zv|1*XD6Y+D*DAR~yhvkK!`^;KD^_C@mPhZeMzr%PdDeg|ld16|6u0xyyfa4e;(9Y^{BE>)iFGH4A_d5db*6s#p;=k0Fz@8-k;nr_z6| zA7|CyP^g!tT*n%?)B0qKBEXL&QR2ucQjLTWe@Q= zoye8tizXLDcbbJRx$_X&e49q+8B1>Oq2UX#Z+~@zOrod_veC7nIw!1K*9}mlJ80?# zvzf9xez76Yv7_C`#fB`*?ZSb{=k&lv`8eQyx>26P6)Ju${gWkFvGw1wE6OjuX(4=$ zH|bYk>#P?oxoGK^rNvIV*Do8T8|~?h#1as!J&`#U>4v}Pz}kO_Huz<7utlI=S}l05 z_YxiR%cKr3vq(T0!$t#wPY?G-SX$YOj6>E$43Q&0#=e56n!jbaOh0Vv*stZW(XEht z+YtYMnR;)7+g?bIZNpCGWqN*_Oi!|U^DvP7z3$q zSzd=V3Y-x}_%(wm!u3H+9oJy$U`)e9FjXM{6yD|Ov|ZLI8$w+q5TyhNyv-)?^6j!p zot-?EPas8&&)^IhAPejf8BfU@Va1;~>s4cqhtn^*xE*_9KXrZ?9@|A6GSXiVx3V7eXLsrVRYLOe4<&6SMP2wN;#JK9fDA?fHSjE~b z)HYTQxE3W0DPaBMg6)A-B=gi7&B1ki@@S2P9S0P*;i)f-F6@x;wJmbtWCxgWeiUXD z5ekgux~Q{uz2{U(ugavP0MmUj@M6Vi$eDJ1 z;$Q!&JT0-b7wp91%%it<$|bIiY+g9r7U46e=rnW}wD&$*u?tT7MtXG@RL?s9FT1el z#Cl426EeP!n!IUZObjO?Fccl$DJ#>7H?iSaPdC2>1-_5Qyagw8oqyw72nPUI{@bz@ zD*t%f0CDe@RZ-Ysx9kFBmhG0wL9{^;a*|2>S@N^gfzR;AmhO=>HLL`zk|q8p+EjzIbJleoM7cnhGe-*vC2pZ_CN`~ z{A~#x*azcNNRj*DOBT|@`~P9w4f}aq!nnPu#Q}LGICfbcyEKmk1Z->r*!)ciJ$4Xo z9~9&vnNIH?lr@9E%tZb}U`-F1i2&H*wHjUza5E2QRM!tcwaQD0(-?eMSrQaV{A zHXnt%;ivOQ5uD1U>R-uTD4g|`+)~m4jIA>(3uq0@<&I6t4Ul(M!8y3EQ)Dz0NBo`WiT|Q``vn_B_rA^61O+FwuMH@_7?jNV$L*z*%a30lHuh z^}K)%d+4zXe4y>$dO_}$$au^C9UfBwE&CmwsGnZ>T~@j^kc$djGd)od7UTwJ@>E%G zMp+4yJdsSkwj+$+iLw%=*>+ntBh@#~iMM{L{$XIZ{{tetm!|w7?<{F<+_3_&YzUmL zOZ3AZG85H_e*#Nn68tH*h67p(T(WfB{-i2)cmY2ML%6qC6j() 
zVZjPPj+{HpwV%IXoLZO7;cLWIS(bjiEGL3{k6)3q@IcXBuqF>&g{D49&s;UB0H0r# z`@;8eU$3O`go=p7s}>_MN7v~E@2QID^kg|znrkx9vzAavc(f7fm4E12hT2QS zne6UErAij3D7Ty{Lo20nrCGzmh}_Ea1~G7G#qNW2eMg6+DrZAiMR-Lks{t_kmNJxT z9Rj-q%}^?`>^Dqr3mOvj@ydbep?qREI(Bar4;tlqFfjJzeK4jBEiz6 zScY(afdpo9glfj9RE*I+Mx}vj5637zSK1h(?!_Z2R<%v}jHwR_IL#X#Y*N|VK-S%W zSk(cIw#EYcJo+P6@mWyy5~?n6?Oj4G!M7_VR155?n#ZYheCrXX+K4acg*Y{(Xgk0e zq)Y^_BMV!6xKCfwMwd#W?k?30IcNBM8y**3>UKbAAFpcS@kG4hK5pF{i z=VgyMv!vr=AH3jr(#UL_$Lv3m3D<{1QY=4)Ms&C|5Zp(VQdJ|s?UkxNwXqS{u>u=W zgiX5Au9Gw{O|gHOlcsRP79C1cJH)38%VGsr^Qtm8?u)F#vgsBc<Vz9)Mo5q%p3H z%Cw**)9q!{`tVPgA}$8pz=p{QHo(VW_b3LF6TVnO_PGXT+30h^cLCc3EKh@LS=v`t zt&O#zTVv4p4AmgnumBL^%%p>e9mp_D_%TB@xA0q60tmbzd|v@7i^IRh5d7=lQJIlu zK(B=KjPd-78Ko-$3V(wh0Rm8xSJk^QRc+Ls@~Xw)2b5?nUX2pu? zj_^ZVkGI|2l~gJ{TM?@65T#a9d4RUJ5@4UCpDL+u#C!B{rg{*Mn<_&U9Hu#y0pb*` zt8AA0$;wbIc~rg%*7YfBQ$-P=9Ic{mD*1^4&n&xo^-bp6QL0=OGXDW}uc{h~PiRh6 zwWFk2+Zx|(fY4WN-?TDbm?AA};Nq3QuQ;XqTBMiU#VEb*)VY(rH-ibDV8o75PICQmk1U92V- z5Sm7 z0H(8$b+Mtns+#mCj{s*L#Lzg20*V}H=o0G~51fYHW*F4_HqDrNIw*tb@(-ciaw#Lx zk;OxQq;5gw!9+(}p2n*mtIDX1E9tlomH&B9|Ce`ylfe7eu3&6FjlB!nI-hFarJ7mu z_d+G|f{VaPv@4&zOWm4gZG2#*SsSD<0g@f1E z!^7}Y^61-l6(6$WTiIwnf||W?l5sBIdRX0V9W%f>$(GQ2CgHijO7*_Jeb;`fWBO8F zjNBU1h~s3d%kkwF#8>-4vM^vjbq5}e`>W2WhL}`Q6d5NcMu<9fyOrqX{)qUa;y*5}qE@n`pA!;}TWBm~LB?WY3 zh&m^x(1D?9C1Y@I4k$W(nCdGhPNL_hsavV>aMi|D$Wj33onPO^tu|X}4oVwZr7@f_ z5av#>LJ+BN1^Gf3-i)QVCfRd|g24)dV_U-Qa$Tm_5%8QZQ}q$56c5v8gsPCVl}BK@ zanm@q7;zY8+z9n7qm?#N&E=*WMuPv7=)g!-rjT zECE-QuOmXZ8NLYs-ZBbI+)9H+sh4=1QlsH*PovJGt*?KLR+D+y@sFxmHVsN}1i_*~ z#$zEgFj$1pz;bm$X%J~>Py$q78tj^FXmEVCMT5U)hthzXmS7r`01e_ES1;SFsZEz2 zR|6TN*>epcHp~qngh5Q(gfIuo!QnAsj`~ZR5PT-Vgy7SF2>2-gc;yL0h}-9>Z9zG3 z=K#lNDD^3|l*ioul$vK#!Nr*&$VCErJ;ns75Noa&a3>Z?g5{(I(U2|O78P8eLOiJO z=@^xfYN;4B<-irNN-)^QFjnznuO*9%$>PGew~bW;7^|&g4JAGr8>9qIq8PbIn-Y-# zS0s%1f@}sb-x3Mj8ek~F5{tw<9`m+rLy1w@YB5t{E~#|fAfdMNm(%0a6ZX?fm!@E& zID_VlR|^@D%n53GP)H1UfC0TZAxIwUI*8DPVmYm$cw}K)n#PV-ku-9W%3`#RO)~VkIw^!cd@ThpV~!ajG2|Nh;HC(;jTU|Q z)>W32a5RDhF?~i&HuPCOS*_<0J4{hCLPw4lv&b>ExHhRsr>9~+lSiedsZ2X;XHt>C zbF9uw)Mc9L$jCzD1Qy!=UfI-mCa7$x(uHSKKgQ^PqH*TX)fqvJ(~8c_G{j1q6(pAA zEfpWPNf7IxHSHsl~ydDU0SL;f#MdcnCmnbUS@e}VWVi0LB zPozkh<;gk@nRvWB-(6^y=Y@qq^|ghUXX{031V3M11UGFyom!-x0QGu3doA_&r1ug% z_pG{=sdw_(AoY+E7NB1Cb0O3-x+H{p*XuM(<;UPq_eRu1drtI%j$!IC^Du7x#n)2L zzE5u}4Oy%nXVku5Z1h^>k`TffT>b^&kkeoj4mU~!2`4SWWiK&=Te~Ev*HUTdQbkO) z$Yq9l{;|u{AW8)O0_>9(mMfziLj;uU7m9M&W*=ppLIr4+7VsT_)~-eMw!uq1*4T zvli2?Tx|atU39a^*FpgEFubUVUGm;ABz*K*hxpp_j2Mb>EW|E2M!NKu^#B*ux~xKXLyZ#J$B5)&>z{m`k-1C?@5FP&WdC|qwz9E){023@ zZ;4{-J39i{uxU`t)Z0oB}qArPnEV8!V- zn7Tz)-Hr{2zM1`h6)LmQ!~*Ofz_N|1H(vkPr1<^FhMNq_`fgJAJqXI!Vs;}tH>v*k zR(Z35d(UP#zj=hhBzkW%T!>AZRa8hplOo2(V!;NsPDQXxw=^2L#jwJ_S+_-TV&9uv z5Y>eF&sSCd35c6!7hvp_`B;s310~;VkYWl{m4B-vpIg3JpoRqAUF_voiPWLcNNvwT zg#G}sP)!gksmoS1*JJh&;5LjaA4f!)gh)3otkTG{M{sQ`;TL*3qf=MEq2hGESqu4o zRkPR#m;9nt{iUyAa3NN~PaGEIyA+uOV*)dNroYZ>Z8vq)cIdN3PdZsW>Dz z8TI=yX3Pga=#lEb?@h>YNo!MwBj7$rs(~?uB3Sj78d7={cQ+RT@&dgo<~^+6(>Rb=OMKJd+?3E z{aE#iH_2Y9II_oYv#_o8`@4UFMTumDwV$e9sb`rYzU&dhV9~)rs3s)&B)Lnv3jO6i zQ;FQJ(dRgdDfEB%xq4nywE}b4(7>8_rLhg#x@ZuoiP~C~zN`FRsh^e~fsMh1vqzwD z^XbSD&?%q9m*!i>m#Rv&V1x_o2njdw#)`L1kuW})xEc~z!;2X8CC*Z}&`V#c%b@4# zqb5?d|EMAU&qvk0>}hoTN)1Hi_ODbI{u*-(hp}6z?J*;;!;cx(Y(1uCqfMLR5XwB7 zdK_yJ2_47PuXwEe+JHOwwFR!q3A1dA2DzQCoKP+X?K){rc5gXpI`ltjb$IKffsL=X zm%yENeD75N{Y|GhhKSo&jNMKBMtL9(O~1hw71t?zqbhed#NfSlx^;yZ%w9S;60oBt z!o$_LB@Td5%QqMltHpQJXI^U*zXWs)68^0!(;*Q2gzXG~sdi&9Ev>u4%=s}(1ucDvkh7541QFrcSo1@JL}aA3O@~2opati#h`0N9o>%X& zkj?!S%4s&O{#D&0wk-TjB}H0IInZAAH*QMpe#7za>-73>%In%`C}eoaCowze=ik&= 
zEWn|^o6{;}2=$X+O`@ykw8y{qtgg%XaF>44%c=5T>LI&kCEBn2S@Y2q+NaU;7m=-C zGL87aE0|>D+-1h1og1JbY}c~S<)CXBNTmvk{!$1>c7R#FVhim?V7!BRE;BGxF-q!z!(qnz_0qTNt`f(gcW(i##JL5${ z*D7Kqa_h1n7S=U?7Jqn0@U76T9$dnxAxn9?ktDu$2`_c>TaUO-6@tsJA-V$RuOVBV z?b6)D?0VA>8SRWAf*y&nab<9rsS`7uDfRMyMCLZ7t93C*U|bduTW7eDd&V??%$lLF za^4a`fZG4o*n5@k2Gf(FbwDyX)2x}+`ynz>4&4-wwhrAAk7*9w38eefp>K(N(V)-A z=H;Eb0xH`(b#El$PkD4{tr@<$0>*-H5{cP2S-zU>K~dk1A5 zE-aRP+c^CZB)p7EcYuUvxishM&2s4)z~EJv-j2tk@j45p>+5)ZU+MufKky{VynGN) zqjkGL9yW>nLFEAh5fjl~pqEm|sEjaXBie8)eArqfXrS_#;n|R!7m|t3d zhSZapsk#&rQC6kuJMs84RnKSgrRf~ENyEk6;6M&nlJOgf;ci?JjNg$!^q!L~ed)Rh zAdE@Z-Q$Bpm|O26)UXmHSn?f3h1uUqe}^dYm_ zylBU-s$nDI7PrSrGx}5$U*(`(0Yffs(DX%x- z(W(L##QXk<6?D9;WSOFH7(4D6YVRTf^6HI*_1;lQb7E+VO#OO^ASPG}uJ`>hm9bYu zN=o%AAnP_7P(^cSWm*;e2)_MV1r+;$ZmtS8eLxGV>iaSI3srSBv6(#8bbavQ?rM5C z9`9Des+&(gSJUs5KFoAvM-JS>nILC11HBm6d-OqdeH-|lQA4LAZK!DtT@@EQ^sk}s zEy9YI{rmp+YUn84&=daFPuG3jdn!L(%i*`p}ObQ+9n_+83CBwv3q<#*VRPSWv~5Zf3QuaPO_U)hahI zU2hn@9Zgs3>y$`jewpFlq_~EoTfERX`m}x zmd}>}n`B#6+Sx#tDrMn^?8ZQG=&10|%&6NLPMyS$xUO|kK+{lWr4dH7QxV=9b2~q+OFxtR}jI zg^4c~_ZZJ=tY-xnfIOv82Gni>>)@vsn&{f)%?L=)NQCwYaRYOQKobtz+XusNkpXn(7B2{B4`*vR-RyiAYTi z88$q)8a0NU+Dv;(9OhvyD7+o78{_lmJY9`rWL*q_g z+i|B8W~MieJN@tTdLnEK=%b9z&_cNUv6J@UvA7dl0ff9eYY(M#hB%wgoE1I3GZr-N z*XpeK{oJ2A>qRven<*Lpt-s6uk}11;py+tt)g8twG`XLCyomQP3+TNrn$vyGcF~Q~ z4Z%E2FWl&0wjhW*7$*`|z11DMId9Y_-J#7PM43Aw9eLF1PCdkWkEZU^8UJ0S7Hd|y6O)69M)A=w6CD!?Tz;G#o8F$bj0n5lPJHdM!vcKyRM)L z#&UFna?YdL-3-c|%)>vZn_eS&6h%I)M3>jCV4jJ5@XT=m8#4TTJFj`|*e*U;ycmuL zUkY)`&IBLQUGs0yRJaFLu8{iNqbu5SZUXM}@6kPh)P;LA=g!o*7w#7BY`PcjWggY< zp-14cwg*pxj`q-RV>T=A)6cPmXz_r7H}(PJ8?NGSx&D(6z)MACR4-H6u9wb?u@{-S z`JBe|GBjA*ORvE7iMRFE9xTy8z4gg*KN$hQEdp8m>kY57%?LGVl548|vrjlnK2L(sea<0kd6${Ddy)9qM;Q^_XEAo`ETt9ttDD-)&BoPC}p6S(<<5Pz;2xkx%Y{x(*(9 z4g@#y>G6R;GM`==2uBvbur*L~&fO0Kbz8bJNIR*~BiP^O(<6^SO1IL}k6@wxPI-@D z?fyZLgLDgfcg`;WLBpoFMrd)dJK#B%R?|?9vvH^yP{3~ zp++|D8;ZQvtu$jOAj7>LYTThiL*WO;!-mcehS;hBf~n=$Sg~4s2R~YR?F6_$He0h1m>k~o7@^B_Dgx6g0H)QA z!F2cD05-1GEJg$Rz=C}lu)+7Vkvh%#H94L@@b{9;((W)4Qm~e?N17QwHxlf}mB^#? 
z1LPj1lgnU5aFmE~;opi1w}uHEMBOBbsipPiQ_KtM~+5YrKH-gbq0#yFve9V{~N-f6{QO z_LD~a4SCY2zhzJAXE58!Pw6w5ZK*NZlNglH8w272-yWlPqt}eFM)mC*3u!N)pT=UZ z1BE&clWm?2p9XjEW*Y*bD1pAshG{X?Wsw#IcA3vNC`zWE$6R+}R>@achRoZR{9Ap9{PCvuD*f;@ zu=t(IJ_CmQP7R;Y=P+*Obf|v(WbAY}VtF)pwk{nL84~5EUDNfQWyTqygiIpX550{K z(IK3#J;Ml9w;9kjdGz57y%=zZ&NTCRX{I6g{+Wh8?pdbgvRS$;Jv2)4=pq_*uK!v@h&oTGs)C#CKEI%-od}f@b2m&L*qN2Md0CDm`^{4(R2OV zp3}FmLAbOy#7TmPVDeYRF#KpyMD;M?jdoq6=1a`v?pcCJ>P1?=L=(E*vJ`pqxIS$u zVpMr_XsJGeNA5B`36I*#0XmOHE!T(eX#Kptf=8sIRnGI6)<_?yT9|I@CW{^@HGfWpgaz1QpH62_VI!UN<@>KdB7UiZUX z&#VVtg;XmSx)eF$x%#Hk>v$8*))-q}{L~5WJu!SHLSgH2wWsVE9?1)Dv?~M#X2=&~ z9RYZRuNK_W)46(j{7Vc1KCU~AyLNSLrpbA_QZ;*V1}%}o?<$t(>M+KlEV8f(yvZt||2SCY>5O_LWV#RDEkOyN&t6KyWHiG>Sm%jDt%y zc^S|4o3RxAGzGwJ-AiZN`04wA=Z)+MBfCmxXhIsxD+OL zE7qc)q#taYMiu>fZQuv9fgpF|w;kZlSU)-ivhZn^Iho=LOcLyI1=xd-@;2P%Vujez z-(Po|UMV36pS}#gJD;A|p}UK@^uZ3jpy=BhujmezY&MYRR!@Z&&2m-tRsAyBeD*5$ zSX1XUEg}~fhO#r&e*-fqpmu6%wsG&C}W_UOUD zUMGFxC23Gs0$8c&@1oij!OLhb?XSbQT@&;O;F4 zql&Y*FqeVA*Ks+oDMIAl93xTl{m|C=G-W?DD-^_jn3jAhe*hXRpZXnumqN7q0LIIw z-3Rmq{$-kj`b9i?9@4W=c=nL72NmBl_MrWHdIkzVdCwHa9X5q^4;z>7iNlC#uBJVQ z!HQLs@xI3O4AkU(5Uhaie;?tj0;>7}g2)B*>jyA)ero(7)&#Z!A7XL&>EMTGSxDzT z)D=Q^rWqdr;cYbVBcs@6e*_g)NQXYsZJZ{c?WLAJk*@eQgT^15x+gx?jjIKJ`5V71 z70@eqS1+hnB%i@g-ZTkv`6?az7@Mb8=(mq`ZD5@FiSC{Fx^X~Q$5>v!IUQX330CyW z^zJ8mD<0!NMe`3R|5LbjFVmS%HRtVh{!G^mT#O;H8twg=f5B%kZ#WR!^f{c1dGy2Q z`j+dM-|P!L5l;55FJR`@`j;KSNt(UL_~ME0j_S)HU(bG}FNA#k;h5fu;d75;XFQiK z9%sAhZ~isn9(E(%B#t?0Jik(>;0w&9{-^XH9P)g83g#4Ddwh$ib0N+BmiJ};P2XZe zhSI|CbnW6l#O%bh0{I1W^*fLpL4ebS$-Pg*H^oNtwBChBlkahWHJ7G;k5!RNTfc{l z6wu}G;T#rH*&p;*kn<})=o+EXwFW=x87Y2Vo@Nh(k?{IWE2bgG=05*XcWh~Dk)(hE zZf7H7wM=L8YuR39Rr9&sHg3mvm?CQgVa|SMuoWqG#EavvE9bN~aKh`y39k@V;VQJe z=5Y5WYX6JoLtoq_QS5|Q?Dda2uiJ?VM)5>Pv2L+Go`|u|galqM<+~EOf=*n3Q}ZfS z`xQrR^J(j^px}I3_a{z}&;P3LDe)>}%`-<70#|3<`Bjf#UJ4+z67en?$ljkpuYr_1a{?H`_hMx9k0E4b+|DrRM z{tE(9=x_8F&N0MFT6Ix3!sF;gJy=sDqhu*l<8k42USMLPQJy9UO_-C>_=QC0Bow4*pg8B<1)K&82pDt$4OixWkO7T{A#Tv0|*od32|C zWLjf@vd>xVxq?5TwihJ#;m1@}OR$jpzM~x#y}_2;gBgTZ*U{0ue(eFP zp~2!Ej>KqVlX1wcumEZ~9HpU8+dCYUZO|G`LqSh-I5KKjFz^;Y(2oT_0Uo<3ces(5 z5KI@-M-GQ4!=U5QffeM#nTRDwz@2Z~OL96Y;ZDnDPDi;07D|k*5j7)FMF#tCK`eAS zs#LV}6UMhJy~ezw6IkOFtDs#Q@Dk|ti6ba_Gu*Km`sfB{615CoA!iZ?n6!U;CUJ1` z|HHe+hbTVQQ8np39s#66LoK z$Uco5_v~4_apO^ghYau2cX;1EJqPt3O2X+(rg3r3+h}N_GlnijJLBmUhcko!MN!!p zXCi$O<1FEHKQv_Uh>>(W-q|y?@9^Fu`eqhE?Afz_?-BhCxcJh}G)Pk@?bb^ZtASdUHgzb z+ZpA*s&Vswnc;m04H?!Mh+kK(8wXfo8s;nOtEt|=Dtrk=OnwE^Izk}V}reiXI7r;-yiFQ>6k}f zm2lprbL4X84KbybC7q=*=gX2qhW73`yl=n0kM$fdqUY$|j|}LuK*kjn4(dJfq5cag zyQDMu|10duQvRKQ|ElO zbWe3-R_a)K4ED72l)im4hop2!-l&|a!3j+XA9x%^O*p+awP0@}3YBFyqpm9VXhMh8 z$Cc9Rt`7gn`Tq^|w37Tp*^MSNu?oU@XlrVVP4gFt_tul5@_=^KuxN`?#fWObNUpY3 zrecwZZf%pPXhier+Z9jH%<0zkNhL8Fj#x(64tRGc)x7!T)ZdiqUNdHUXT$;l-P)-n zI#f6o4DM2Dc*C}-c>|^eOFX4`yaA9;we@3Yia4X-VP++!Y*!o_@ z6NtoY-?NHGn_*h^J|$5a)=BSID5M30rtdjL4OqHVaQ+Y2h+490BzHKnekwU}*I@LpF`pJjJ{L#ZmW{6-+}sV#NL!#F??&X#jYsU|tL>5ck~cj99pDJS<` zrPgeKoRjS^-cu5#xZ?qTcZ<4?@eifC%!oV3e=9X41uWBoMR23{m8z1?3+uB#Q0h8a zvkiopD zrOi)6kX#@dlqbDWO#NQXKNEdeC|szL4O(IUBH{RYyhr%;xzj%_>|at|lNMPj9B;`p zt)gY|(s=+eAk*^7rNb-Yr3*}JWqCz2qN{`xm3bi}9E+|NPR!*+g0bkDav8tBB%VZ- zYvW0P);f{s^dJxE^`b7)z&{C@-mnX4kCkiVlBMPHz{3r~g}dl;bLu`tUxc&eD+r1 z07H2(r_?s#An854g6(nRVC50^JsBqgLDRH$#4Y91jo{ye1GiD5Xbz?eX@Q->1wRZZ zW)yF%Td!Slju9&7JQcT&l!re&4U0Hk1nA6?-oINiG)uE#b-6O}JtOMf%aGRC6E`IM zWtQH;3hTkWamSKiB_+HOR1%6l%i4off7(7#BZ}}ThwlBNj)d;Ytmi~6#B?|cxWGcp zoSkw&)QRTBG)s5b4(p((7oDv|u!am`_M07bn9uXY2T4A>zx^P!Z1#d^WMcK9VZqiK 
zK&IPzQod>UJ0!ozT{=@M>P;=g1(D2MQ>isS+nW;cR;6UJuMgGZYrUx{UXr}in;yW6mF@aa`)2Qm2F?<5 zuo;RNLEY-xH_ePBCBG|@xZeOQGw1f9I=F~`O&@BGJK^^Bp=7*)`1d}Pgtsj3^nq#K z=N1o9hWddx(VX6&nw9I?zk56_5hp&{EM67z&GJj?mLir z;1)V_AU%W|vYr`8e@ghAFASt~e2A;%AnJ{K(k2W-onP|YLG*z5N{n)Cb8ZlIZ1J^d z@`ui+n)Wzb!{YuMt~Hq2iR+>rXAQ>qj2=w2@#tvUVCpPx2%9fuW1mucEqVCjaQrzs zm{P@e_Za>rcTS~|;(L)LTWrIdQ>nlBL3DB3|1Fhjt3Qf;Y8f^+O%!fFj2hzBul~a* zCVm!c*c?GR?;B2u++jVoMQ9{Z+!7@f{7&MwD;4IEBj|p-f)W}*8}aLI8cF>c{3@`0 zz{VkU=s;{*@HZ*>H=a0>GVlQK>5Z9nKdRM%sI0IFN{nx_@VxHQS zFPYSk2aTcTO0kNs77{+Ysq&^VG)Y;Y@{ePvD{dHQmqs1&Es3#d)EW2v%uAyTek&b2 zr)z0&m!%uhDV;}WV1qRy9WMCS3=H~?bn3)!NZvms?-$8ylL22JF&5>rGf-|d%KeGA zWkB$pWL^PNl&GV*I2~pLzb1DaOZVYub>vu##=1&5#H}-lc=uRD!+Jh1vmaAORTOlx z+v0;`Loz7^A3`d~q=h*B|2-4lwvnG4M_IgR95i@brVA6Ea9M@p!P>;7StT%1WGcvmlKjNhid zG8vA)n{Q60<>DFjAwC#E`=x)O`;|Q^w<(||4SZOF+B)Vdzh01pSu1(3`mihZ+h3?H zj-=a6p?r8?j3XoHK78|+dhj<>5J3B7&UpT4D!eZ=m70p@)HxM0{rRUU)Hu=I`9F|+ zfYoWVy4gXsk)vaVW#;tlJ0hAM)-$wdG%5Lces&tkXKiWIsZ_k6cH^QfYRMvt8i*Iw z;gV{~-Lt4m^Ow}dZZH=5g8EQ1=+BIWbw}?qel&|ZibHA-xA6HaSm0&8nni8JVRf9# zZ0n_7;s^|J#S25&Ub8j&vyi-C?cp zgLQ#__ftRd7G77ox&l2Mry~O2=7l;%0O>#J^avomXMm>QP5zPq4H56ECa=lCna!;L z;{H7zoo9i;K^V=bK)VKUa2-V*}A4|eeq+gh7 ziSN`F4pnW8Zsq7n$v5#5ugtBPsr$^JX*tb*&#S|*;}3i?j2ZDqz7nQJ2|tzn97bow z&lm?MID1BDa>A{$)e#z$kZ`-~QXWtTxc#d|uK~BuMk$OJKYG|yB7TEc9-E9==dw-7 z>hEd^u5{@^Ez8=+D2i{%m2JwWw1kAaWnayvNdRllKt5cEH%5y+3lg~Hc3RZKc7;>YEL_EGasOSY(7rJ*6^kI z)E^&2ti6Dmh_#*#oVE}iJ8%KbYrf9Y&>4VCc@7oLPkr zZ1!Yx@nWdjZ82hR3l}ZMaBSsMlG?^~mVnyM z=IQTJ3sxf#kE7g)5|lf=n(B!Y=%GVhUyUW$NzVY6>bM5IKjoR`QsFgnIqd1-QWw@x zD{;m%zJjXCHA<+rIO}=9ZDwi-*4?jq(kgmTjn|c6M9%TC5-4!qGpAySHeS3|xo_fi z&&*1t4y~nO32$szhsxjd6iV1?%O&fmDz{imeR%6S%;gvO`g#iS(e>_XtR2?W8S7~? z&=>qo;-Y6O-&jvJ-+c@UT;h(U)V=vTo@%~a4M&RD*Xi$is>y#i3IppsE-WSfU*d1| AqW}N^ delta 41778 zcmd442Ygk<(>I4gm#`P=Ztu46+odf)H8~ z0$h+T3ev0C6;LdQ3W_2k@c+)9b8l_}{(Ro|d7th9FjNgdu{L9w%WGX+_rtTowi-J?Y14Ew)buOZ3k?@U#^Hc zoIBOOKRcHDyZ>xt_hVCka53YjaY#Z!V5U(TcuMY} zpz3Jg&!GR*K-=8W!S}QCMH_=#*iiUJcv8@mi3HQnIG>wh5VpIF~N5q>n_}_(^4R8$^aiO!uS{R69f4 zPe(e@_UOo`Ynk2lvvcZpsNK&l?9%EucZFseGjv^HXi6dLd@e6bbt{y8&8LOAceQX) zml_?JoLe#KF_i39E;jM9rl$t`>RlDqM^*LDBLqWomz0aky&ly#^fG;7$bjCN5 z8cg|!d8O+X%_+Bpmo6O_A07fae?>4H93ad$L20=s;_KwDjGgkoTxUt#0K`2Dt*n^+T6pHo3WxIqe@N2*5qbX{e!J7y1!aed=!nY zp3T{&+`nr69K6VEl^c0Qzo!2X4F34MVO#vI+UN@YCN^%2znvP_2Gmy@XNTYs_VD3g zwy5Zn$JdlaR!*P$@b{fQHSzb7ez*6ngZC%;-jBaC^!pzD{-=Jg)(`1LJ^RH4;xTsU z7Rd^8?;BYjWuF;Y2}$EeruxUeOE|gkuO82Ndoo_%q`$_#+?`{RK>yFiq_RI=f0<-3 z`sIco?PD)Lk!nfFWV-%b2_*5_DAPESJ9}(& zIa4O=+`$3QwfPlYF?Yw-1Kx#<(4+gm7@;HeHVsZRwh3BnXGbPil) z7Y?)ggoYDkP2!tSr{ornD_7A1YBlR(4jE!+c0U(4mJnu6P(I68lY4nwY}!&vaEM}L zT?c05U?@xC1zJ}dpTw3C^e`(6!xLz%%^fhlbpDA_x!HBgm!NJ80i-)8TH^4tmaF;K@RB-!*%8$+;INHOuw_V8ns=;|1WEzPy9vV0Lbo#Y1LCb9TRn6P&gZgU4y`Y#2Nm z6EM8yw0NHm`7&DW>R0B0dalXDWL+nh%eDp?od=UqSG?ma!3$d5m|!PcvgRSvd|!%M zz3`C3f&qr}@}@@3B#vhyCRr?p7JX1HuqKVbDbN2asVn7_CN-h&1MKMJ91<{1`b$%q z{X0P2MN&7OAuk2+%qt#%WS4=jQ>OZ&fTlh6*^Ien$+&4xVcHuqEe?ODO-qiuMGzg* z2g&88B;@X$Mia-?X^#I;rs{M|47ra_{|{w~UY$N87Bflwidy*l=!!H}STtfqG{eV? 
zl@;)@ab*w(@;g=~#k^TOqEV#(e_3cEePvZ_oT-=nOhk3=uM5mv$Lc0*QEs=@3HUo^ z^$pBaBlFr4{P)+?#NXf7yo6SUuC2s?>il)(k+gH&SNOYV{R{Y8ZbM(BkKN!vp`{xX z6{=q_3>j++exeG6bMcc+`P8CA3df8#byK(#kFrRnB2_d|BgEv@kP z#Vr-_cgdE@$Ue9wk!JaI+v*}IVQUip-q|)b*S+8cUiez3fsn=kQSh$j=||%<Ejsb#D`Z(bed!Q=IWPY~HRG<* zSME2zev6O8za@s^@%^2VXu{ogZr~*fuo-U;f*4F}2&t?z0Lw3n&R)buC&> z=$a)${V8RQnWC!#LLDcoV{FwBkon8oy0{wYWVMS^Eg)*ElQoL9%8KfKn39Gz55u%e zs;EGPmS>$7na?*s83`HnUGc7IoFmMeAydN{Q*Yk|=@ zQJHm$-bLA%%V1#`cg7Qs+5Itq8CBR5(I(H4L1rV%@bVHGkezBx6*dpmAF9gUjyDGk z;$`#jfS=uF4ydTc>SCnoSHpl6sGj&LYq4U9mey8dDX~@tfY>!7X&`C9<3csIA=s0P z?*-M_gMjvFb@2Blg=K3UVA8AxYYsM>R)gJ-ymxD`N7!anrY5WAsVPviSxq*Yt#0|Ycyy@CTBB-29oA6ot;PBx>3ltaDO6uJU@7r|cAKGF4-EucrSU`;YZV4! z82M^p?NR{VDp}VSKy_6_T^5CmigkTUzgd^9EL9pkM(M@(!A!_kyYKTY4dfcA!S&eC z5>*mXi0WhqTPOlSV*;tQP+N$`@bUXgL20NOHz?J`@*vp3`fMjaPkw;a#KV^&cNaG) z8Y;2wQ{7s#X!T%Y7N%-6gtI1J)oXZHgWokQ)nJNE=$@`?#NJ1hVU0Bp73HT%Q{SaAEv`(%as44(uVyortkyJTVIa-crYzapX29KT-mh{VV#7+*t%+(~ zo3W>?YQhyl)$Ae|JafL8Mz&gWwgh#KG}rCP7OcF*s2aCgEi`VOTd+1@!!<25f!ep! zWv8@cRjnc5zC#eIzG}%Tg1!A)u~Wp}pFGT}*b9jp;zR6#_K-|RFoZZT(?OIVOW7W3 z6u8T`W(8FG%p+*BP~CZi#jpZ*%%ki9Owy`D8wR68z0iiaLM%`k!F_EsfNV&PA+Mrv~hHkj>F6&_>u5Z#pB z56W$mYX2CE1K`ZZSP~vn9%Da)&-AS z=A-iytQWrCeS$q|Hb;4bo@B4Hor*oh5&<~*DfSz_BA*6HHmiD1v$16@jcN-4H-f$G z0mf!^;A!?kq?LgsD0iis>FqnRu7Fa|k=68M3(B^e*;hNFnO&;YGb|;pxONB)EfkfX zBTRQ;+%v3pVD~#GKTF` z9|N+a6N@b#)e|1^4EZ};9qGgl1Kz7$ST*(Dv&;ZveE2NejHKzE*)G5i?aty2(nuB>B%Zuv=5obSwJ?- zmC#H~+B4Pio@@Xu_zmOvVA|P9ivun$mH5-4B<4<#@t z8)UyH6gax78G5d9z~xpM;sp9N^}8QE5op!Pk(BKSwR2CJVWv64ij(X}La!YDkU1zQ z)U3>79c4^7u~5pAv{s;*7YK^jLkT{V_5JYC1iE4~?O{5BB)*LKmX>Mv2PmF_9;ptK zU<3&@c@lFzt$lOw#f7-I!-h-?Vs_?G;#JTIV#rO_znDkntG&nbZxd|?n= zA~mz)fsq5J1!hu$<}cG{gHF;SUiY}TSu9VM3w;?)wL_^#XEyaR|GJ$Kj!@73A(jE0N3!+_R^% z9!$+0&MqKpR2EF40`=esHV^{8egs1abz}q^LUKA%ucRi8WRqfU=YV#^Tj|Y>PswD5 zKZ_gVmE#2#>(MvNRlqV+^>sh$PXz1d6HG4FRsk2F|7f3f)kAwqd&C+IK+T0wLkEDVe_9USCPhy|24eHz^HqmbKIn*5K zJ!ubs#A&VttB`+io_pde>_xJ%{+_I9{NNN#_<>V2;g?U*i5I7^$5@f7Kb5Uxlhnnj ztU=@n>LGR3&jmQ3>oHYp8XJS5+c1qi*>Vo`!X6lJm&M%xW1<@*($K@j(;kxT-sIxp zv+3b$Jsk#sFFXE36v&>*PJ3C)h`QJCVR`>X(2m}+U^=lSrvxPKC=wtUgTb}jAaS7!X3Ag z9g^%V^-Ugo9?31&usfWd>)G#|Eq6CAU=QP|CU0bWk^FEGJ1@#>hm6ZF#PD-%GaXz+ z6*J+anV{OZA#X)$uA8N^h3*4xOr{a@saW$c?t0jjmkl+zz6js}cw70i@jHY&bmwPO=$1?i`g0ITOOdvccaROPvo^0=SW<)}6+p zH_v_RG}})Or}xfcNsi~0k611}b3S4H$kFukS1c{Y^m37axM(VHZCasWxT~LI6+j~O z;Ww;?weg_~WkU*AsoCGKvh09b`3=^Pcb8$lWhz~SBFa;)x@oK+j>U;DP+Kt zF2B}bdgdaqZ@bT2#OSfN-AUhpl~Aes_iQ{KUw_Z4u{Fy0fi*zpBR_!HY_)sPB`n3* z_v-wQ$i1k_T!x$CXSM1w%Vs~jEB%B-JfHX(lP25Xp7slS4JIU!ObgjKOaR6&?#fqL zBcAv*CEH0?3F9igljg_H5EW75e`EF8G4;-GEG_a2%7m1dlxCiq{s-0~U#d%gus+3YF|-9wr`1-U>#RrBB7y^tjy<5ii)WG*$PBY3 zYR4a|q2iW)zYb>lM?A7`u*EFVtQ&}$uu#mp1RPU0P<`Mgo5q8@0M(IO@XqATy-i}` zPPxrm6Z>Rx-ZbbNLJKN@BBeM+Bga1A{&R$s=YS;aQKJR5N*SgF=U)oasE6N$Taci-H4r*1n3&O zzb~&tBm80dbXDH7IJw-Ux<53tVp$e0EygC%hJN|bnmvwIs#G<8aTbdgb^pcvOt$lkwOP z&7Z@=f3#>Ux5vFennMY9#W?0pqj@F4W1#b$%%t+0>3i9uw@LiJcR}M62XLURGU+ z=Q4E2z!km$+ zrY7@5Z+md^MbrYO-FkEBKAg;-B#tjGO+qu?&MUGjYPFqX%SHWZ=TEVpR67T+%l>rd zI6yR{rc~5@TkquY`1-)fT`*&U%kwt=y7%#F$aGzxSR5;>?d5rlN$`3qC>8H}lp~ct z&919aseInQOG~W^S_^0bWLDs{?;*o#Lj_JQw!bT2ph{!ez9P8eFEzfR=8mluHC4Z? zsHqxMiMO=i>4g+#FzD>F>iJ4&bGLe>62AsPdnpZK=Vhh$(mVs=sY=T#@DNQZXH~1p zP(gX_sg-$k#&1thTdVMwP`PeZNMfEEUX^myrmB1%zcWF#tp-9YS8rG24wyzCRO9u? 
zx*#ei7SG;Tk6^EvW>eSamKl(q&g-L3{n8=xnk1!-L(pFJkC3)u621WWY4Z^;!;=R685AQq`ygPI2E9} ztKY|mk-4+1l`dUMvBl4Qj#s^}q;f+o-35%7;js;bF_A=919pwWUO!BO=Ne(;y;uUU zH4&9lYZ{l5K?WJbe*al@wXxQi;Z69Ts2cbnI`X?}@F0J&7?9SO|GV8)=x(n2eHZ`i zf2Pk}9_IUishUheH-olWx^ZtgOHsV9o@yDUfyiB6U zw(3hfSe1EPPch)<_e*8}cZPn5`|Oka$p6gHcYBUMXcDPnRmxveS!v_9SiqJC9f?((1OC6%h@ zky+D@S08ufvwX)CZO`haC%}JFD^t63LAESjpyB_l8qtraIW0 z?*bN6`hfPos{B5fuk%#vOf6vjGBMknQi=U|eKn@9R;(NPY6(2k7v$18Poenme!K>B z)$)FNo;%Wyw?JaqOg&-V8&p~Kd#2WQR(9?F9(XO(p8h~APo3|NIqG*6KY%}FXz79| zF&9>6Ht5%J19+-kr#iynJ6=so8as!JHe9b6ty1p|;7bpu0LtDCVyFs*&9-WtMNNBpRpAb)L0Yxrx8YwGq8D6Xrj z&rrUIol&-7{60Jy4uko$&;8UekQ*v$+;BdIjH^9kbvF}7=;ks;@F&uWwOatzX;`gT zf`C1ND7(#d&M#9hWp}!FjNpQW?W1g@f9#)W>{CZZ!nnv&sV|^U^VO6Wpccog!!Pi< zwPtHRwz;No3RxQ$K+bn9arjy-u`7k=_l)uY33>BX`ir2?4At>Pj%Xk?>qU)e{)+&4 z%Kht$Fm2GuTBCWnC=D7&VMU7S^{KW}okoNAW~&*aVdl(G8%ArZ+Uo9CV_~a8ZPJv| z`X#=MtyGs^;&El~;X6^WW4N7css>}g#M4!`F&dj5tij zsj4xKw+kblp$@`s2B_!90@RFu1l29_#KjyZxvc5#`3V|{l@n-UQg38?x}GlhqsQmqz{|t5H;+y2ux#v{ZZqldSVjq zjdr(B(#U)|NwePVNxZpb6$hG=I7LsKRZqUcA41{zujs;CU(tm>f5kiF2b;KHe;S4F zo6JW-^wv%04uMxcPzPr7>gvjCn4j{Ln8mB%kv>a97%{69gqEtx zY;A?y8_bcU*Dc!?#PKIDvOl)YN@94mM^DKnu7y-tDrpmKwbuK&yC)K<;yqeVlI1~GHUhvqw!_o#VoS%bE z<*I-;c&)fOgf3VFfJjUQkg$WOY`c*gNvRLHm2SL z4d9y%b^4d&60Z0I95l{Y!pnIUGhLVPw#|rUE@_;k+O#i)eM#C_LNZ7E#9M@_lXlT) zhX`(8ZJn&}p*Pi4KgAHW4%6*;ON9Z71;9br3u6B=7$yR3c&c%rO!&N~hAhT%WWQRtn6I#y z0gtDa@XqPJ48ReT1z_BTExFiY9sq0j!*gZwi@LA`Ce*)=;{VyfwlCx5Y%7WV)Gzbl z>v?P$Zy07y^`=Z-RSTBEwe*+zcp0zgbuN)5#Ds4F|bZmGm=jn036T!o& zyR^u~&|=56KMIdmGneBX9-tM>bD!_Ne*zRquH3z-TA6E-k@UkI#s*1-v z0FK|bLa#7CT%pa)(3P+ny>o%qxy7{~Rllv|vHzIei_KDJ&<1Rgv{k&ZQQUVu+h0{9 zR_Qh@Rd#rl*0@-vRZxMm;fHIsn%AkMh=3uHwAAygd%Q`+E9TNisl}_|XjQ6G9?!*N zdme89y#BzK@4gF#Rk*%hk)wUaOV?<^z%h`bzFor;%a|@6yVz=HTa7oBv6jyRjJ&ma zReWYG-_Dk(S?e%molu+BVNI2%F0SKkz0*0=DfJ8N3=TGjrOkR>Z0veHCCpvV8>Ltt zM?ZUz13sMUe)*iDCCEk%fZ*XEyR0s$PL!a6McAd-Q}_U%2yA4-*HtwUg;4XLo4*4!owJp{MGJzocX&9*4r=`!(B3Q6 zzbW}mJN`>kM)XeZyw8Vl5fs`ZFsw7*DmAPI1&)}UPOG&YYT!;@KT#`8XAranBZZj- zOTiBAgV)+vq4w_N(b=97o_ubWAQ*a9q`AVZ2@An)%QUtS(`f!BbHpWwIRj0-jGcF_ zmZ$;p1L^9hjcs{vJ*@S}1}1>0h8gHSOdzlq5S>6l9h8Uo9y*T`(BB#4X$$39rc-MC zId;AZ)%0DwQcDvPn8le+3N|6o(k0MGN!=rrpwSFM8lf5DMKc)Fl|)n~x(YPaja@t; z%mhZKvuudbcI@V5YnY_*^vXuP3h?Qbu+XDA0(G9>&1)z7j1^)-Vm-sP)f_IhaX0kD z4)w)u9v7>vCW^>nAQCJlyTrmU3!N*ohd*JB90l-zB)aDz_F!}5R;sakpzn*+!aY1b zUQZsFv1y>pf}Vj=$M^7lK?Xn{!UqGl z?t8JO+@hxL<%x+F5ExMiJ0pb_W>H$gNZq>^mfuQsb}vu1Xp1F=rW1RY$J8>hqR<`K z@rX_V5`iA9h#thDCK6Ol8t=FteizR9Rv!9tOmD!gj9`8YQ9xH#ov-KmjI0YmAud)Gex1KE$O_d^i#)JOaIo5ot=88SEm^#=GVHSqvXPdZ>SJcb9H z8UnZ>q+6kzf$>^(;sA8d+iu$j7$~+%RXhm#98hf!k`3npzW*SfiM%HdL2DmSvkqZ- z@wWTjLs>EuJd8!dDiv}>L#T6vS7mEe*CV_G02LnLo}-{>8%^nx3YZJl zsn3t_8jkfQ<~E?kjCGpzX}Q3%Tw{h(nBqEB^(c4Lx+@0&tipf^dr!j&Ll^*J)NC}- zzkC#E{JT2){HRuE|EBet9Q&^nb1yvxtq)dQcbr#G&}J$))Sdvo^?oGMhq`tguA)M9 z^%U>!o^%3TXs8RoEfsAK&$P#&nlsj04Jr8e#}sw$B<7z&m3Rt{fI>Cu)W0;o_Y^hm zM&n&|y!IJhu7Zi5r)jirVO%0bO+N#RPB#>%E}Y@DOJW^IIQ~Ob&59*~wfoS6b(~u9 zA)gTKkpeIZbMcXo61_5Lbe8{!M$p@ zWnARlkhtt3-xz5EhKWTQ1*!!d>-8;f8e`!SWzLWYz$H2wTh*>_F^6NQzk~g{T5bG} zzrZ%CD&O-NNOXS>m!(y)j5W0y+eunNO2OH#oIiki1?trwpg~u=^M2s*ob7SH_ai!i zXU0#w5|7?XRVmWO9_)fD(W=kbt6u&ECd_VC@C$6@-R^_G@JSL?+Fj#&Sp^*eORL{> z%&o!|I>6XSh}YxkfT9m!ON8Lv_Xl4B>_7a2Phk60$Lo0Hsn4!+oXu1F|K!t<*x?3u z#_!XTPLav3fT1pduvFKNd0K6%S8r$~xZ?&q{CVoi4Gh&jW&EXm2vz^WLjRKL@E5n6 zAjp`KuE3#s-oklWau@DX)BmCkaQDH#I5eRdyNlTc!EZSFfJI0>I04afQm( zn5@7f;QJh6zyfuG2?x6JD?uwzaa<&M4o&$P2UP|aeZi2gaamuT=b}7{bhsvCRH6`( z<$feQAw-D3v-=qz&=&wcB*fbo!UUU0R)JDX#QO{>W|~P)V)hwo6IIzcgy{&182HI1 
z4hI~dx=yZYRTgDc%|Kxwd@CbRB+U&FL8vjavWTPPKvgwB#KfE<)Uk+RS^6YB+y|I0 zjC1Ol0I}nrKvgaQW_q9~!_KSSl|_UM;R@wf>?y%jEY zAb_-+AtvGRts#=x6}K%yRAJt8`?N&ER0@nfCgSi6Z~)?811*P=jVuY{fO;-cB!HNs zA_dL%??#GFcqEk-En`2VYG8JMM-b$N=2NT6n6lzg6gpT|xbO&x614S`5hdyX&#Wl% zCcYxdi3hQu>QD|H!_Hhe(Sm)XHkT9RZ*3GU;t-GaRJ7hPR&L)WJ zSOQ*21b!V8L>J_^6F_n^$8nY#r>O2U@?#k87uj58`VQH(4GH#*4!E)$8* zCTU1k;_4)UOJdXqNuqhwM+8@IHi7uz7V#a)D1J_5B%|DCs#`J;JfJ2giy9#QmSpi+ zjILFW_oPF7eh!9FHwSukibzgAr#U~|q5Y-6v?uwFh)c}gf0H6ez#7;^eSjNo7avhlz)OF&?l-J(yCj zoobf@BAZcO1eF=rm~1AkyM=> zS3gx2@kk7;qE$=HD&lc`O{@a0Jg*9?h!O0ts#H~6EPIUT5*lT42pn&QaYCh3gY=$M z4^$J6q1cRSVksWgtBdsj@mX~MJg0aK@hv-|eyAay$K%nO;;DqA8d8~JNOl;WFoA|* zAa>T&;(Wa(R7ReCnM~hl95p&na;K& zInafbvrlQ9x}#+1kP^wJ2*ERN!qm%H=CYfgYGNI@1dgc{b;N7diB5_5)JApD&3x6R zt{B9=RY&VWOu61 z)P{N@v65%{4l<|jK;P*b+r{jFA^n6 zBHy{8Xk#LusP;D$QU9GDT>L-SgIMKmB>I4VQW}eAVL6;@DiYM(#v+qlb+aa-3}=OI zBSXB*QDkOQ(ZMWo4en~>I&o@QOo8yiez8OFmNgSG_$s!TK5d5WwR7sbW}-F3vvzZF z2=#9@7ilp+=*a};ZmMym`1Kdns0D;`mFnLD>{Xz?Y9U4&UlTP$XbnOFh&|4JEnzj_ zKxRwv93FqS6p!H1s+E`w&Nh!WDI9Iu?q2h-aI#3t+{GYbV_LiK-CwqbO@R*W-3!Ad z-+i)-ttkVD!ZzaBbZxuS@hvUHnZ~cwI_5*L<&_lkRGv@nZxhuL!`AC=ShFK-?SRht zNL$S)3ncUb*;UX1nTWF3+g?;H_d5ZH!45h|xHNzrIUQIBF&^lA_Nb(R$&R%Vl7l}H z8p-izp(jx$+LnpT?(O75$UGl!Ye_vg@wups%-v6l&i`9A-B+H$2mt2Pzt?@%4%?E192u!90s{eDa8F4E3Iq@SN)4Pa@*6~;(W|RY{ z5T+F8j@y=i53M%3fDQBA30=i%+F*aLn^=jZXJ&T*+U;K49blc7c0vaNVS<&#TGhxJ zX565b$=;ybMZSCMj6e?0XdWauD@wa=zioU5%_7n&Rb&FnN0gLkZ zBedW8ee;GWRoGh~cEbHqA2H9Tgbiy_Pl-zXFfHb*5BrIHwn0tG#9C&93hECe7r4{< ziva3DhtncfwHhEO0<7x*k%q@#C$P%WU&-po09Ziq?F|rZ&D{sgaG(cHOi~E#CVfju z$)bG*iZ+S!Ngk;n2#GCZv-Ej={YIWTF%TSvW(Hyk&r@v%iT3<&$iN`5O`dWO5;)+X zJ|84HQfj@y;yrxc9E^!0PdzY1qFVAlv(F9qtC5#kD) zpw5jHE2za?uZkw>y%)rT{LVyWdr{0Z3aJrT?Q}2`cM)t<9bOYH?l)6IX}jOgUeUdC=1H*iD|_Vf*vF-pWx3(t%a70PX-7Kn&cG@Ljv z9L?m867vb88l%N*D!O+x#&euHH(J!LZnlmtQ|mUfb%(7)%lNQ?Ed|AUyRF z#$%%z_mWs`0ghEo$3S>FHUKAnfyO%{niw0%%S#3>&r{D0Q zfyTmz2Br<KvTOmP2h#_r9M8$WzUK3^Gq5g0Rb~R9zM`s65R0gh zBNN2yR*ww|;efLqs0^Cu?QxhsS!<6f9(B_)ySv8*dK`h)f1D^%;!VwfqU4TpF-KXA zUZ{a4Q*%m_z#tMBXtzs_=to!`%+X!{K1bAUW)45LwF$W(tLs6fyN<8~Sf{#GX8>KC zgEomf5o$+mcA2CpF=diiNR;?#1}M?w6|sXp{(eP_ww_KE{;CKJo}vZ)+h7){*b-4* z9iJ>R39W`x#B?uvXkq~$b#{tqk!@lL79=cv*u%72eTk)SWQ2fd_8?(!1VbFS=>)?g zLbFehJrW~fQV}pH+yOm`M31o7fF7Zq6N@C8eOUv)M740Lh%S~bYQxJ{`=?^4%}zVj zB47ID40t|5QC^iZ(=XD5X~F=1kGz z|CwfT&GKrdsw!Z%=tF4z +C++7F#xG_{^juFdHVN!=`2 zSX#w}k%}YL`pH5uFu;;ubu0(#{&A|$n_fMK^Xi04uqAE5rKOFAJjl{>PzqYlkutG>dW zxeuLmtN(#c2w1S_#C_;QVFCd7`f^RD9m~B+Dqc-|ODLjG;tEYC_v{s7AgiqnG7g)M ztiDhzc%h!O_Qgr=l7mA7=j35bom?qWlVP4237IsO0#+Jix&#FGu(yScvDxkySHUc0 z?;-+8Jb}k==A%`fXjo1!9h{8pb-Q0>(XA=ZHvUla@VD=vH;#kMS_^M@ zo@#dr9+5ot!YP=FMeE>k$5FvkS{rpZgKdGJ(;`B}u8055cZs=6)!iU&-P6K%H_u1U z&0QVsL%`xQq}cs&fiX^(=)}V5;`+c30RzJe;GjeBFj0^;f5`YIuf&^bdjag{e08G$ z%u%3X3xO!M{R&~@;YfNRmdglkFBJ9S%rzJt2PJO{qQb|d_-d1~55Q8n0Hv*!daqxZ0YjPZ8DwpX8E-!50s8=f{RWs^sA`! 
z&3e&qt@aByi|#=r@CXIpR4BlY4IaUZ@mY|*QbncLvP{B4WgnrP(K&^ovG9xb+GlOsMJ~f>p_sGr5`B$e%wV2|1zJ8H4>fAo~MGzPVIk>l9#J&7IID3KX zh4=O5QT9<0qiXEe1A+Hg_45aCOzqgO{Rv<17ta&T4uEDG)vyC%1|D|~K!@l@v}*c+ zUN&nW6Fw*bq#;5Xs~p56lBWo+d*DIvexzvuV{5jj-l3sbpRI?);J9U4y97fVXz4_D z?2Pp$cV!$FI}sUo`>^PTk?MOy%#PPUNF&f%1nZkLK*QxpB$Q>3iVQUT#8FSf9b+}< zct9rA>kIjpqr?O5F2_&>SS~p(Qo{9CuxB?5=E2e9kcNfo*W-e&L#cN{G-8X?@Do~X zEH@t~Px!hH@Rh8ZoYbxZyq8s-Pl|^B!GCa9`O7CogUWC&z`nE7APQrZ{EcQgu6}9SZK#*owkl-x>57_0MR5 zdHIZ>@P_}J?lkq+=b~GLj`fPiMmugXrD?Lr-Tw>7CxQxgd?~u&k@S_o#mnw`Ux_G6 zedHW=VhY{gox}DB^!}6!BDt#8?XEj?_)~+!sWa_~Yh`D+1WXW7;q6L8cD_1!0XjEd z{c=I1rkHa=5VFxYoR63({p|V>PLwWN`)g69nwPd1D<8F}Y3pJRs-2eULU_9Yvti2D z*g4&xwtp>dfwtSe(e9?NzR@aIUKE|kwCHwG(5mOiMbUviQ@+L4>ju^7Tg}pwzSX00 z@LTa3$~^O(pu^=0zSCp0|2y$JzIJ@C!JYr!1lRZnkSR|UH_|pTfEK=G%x*C(4$w;Z zOL|KerJ~hPvYx}-coffJa0jK8p2Mz~<>Pi%C6|-ch1tvhx)=hpBF_kf^ zje`|IE-UbZ{_uEDLvC1xN4AR&BaHnGT5_FQ`J1Q`tv5Vev8Jwf!Qg_j!NIYMD3ZFC zHa;j)lbQP>rHZ-OFqEu$4fEq>)&81T4wLJ*YtYiUs?P7CcAV)9qd*CGWNDiXA&q3) z}zf`QKiTn4;oRhRc_z$8){4^3>?Cffj!XGs@8SUI3$lUaGZ)m zcaFL)QnO7_A*QhEP=g=_5(=pS9O&{u_jHm#Ah?k(fT5xo8Y&nM9t&}Tke>2@VG@PP zTV+sWK4TKL9LVJWXbuTX{!=6d=+@yccKs>JJz}EbZJE$FJA~1WLTs-om;%ONkYV;n zD|;8vAy0*sgkAvPvOh&`nC>OvO#M`@8=_Up27*X;*w7C#XeOr-^p3w;eFO5h*?szk zIKiUL`T@B6-`};zZ#bES2>`V1rg)|pP#^)K%K?i4kq5li-vmf?{u{f!+tsPRh11xf z38sgZf+Th*c}t9iB>n!Iq+KP1?et-vy|Gj}-Je~PbtsB0{13V0jYhJEPF{(k?(33v zmryi8rT!@&P?wopj&y|oSRl;EpWvOkExTXHc{R8t<}u4ad_hRs6(R%CyAu%d1!J~s z&-l5bJxm=fWmU*gQz_{h=7CZ^2#nTCX-_dn-o=Vd_4heIfC)^Axh!RQGrkMkzISm0 zhRbEcbtpA$a)`O@$nz|AKQWCyxCLIe`_ms{0cl1WcsVv`pEkf$uwMJ!S3YK1-%Swt?n^;y}NLfm) z!9BrYoZ0=Mx#)rYYkITccGR3GFz+mNE((k`ONEq^mCbNDy}@C_p0dNc*X-p|4$tRw z)v28P5bTg1EguDMz8ozn{B2pZtN{#8M9Xb>OpB43n4f-+k)7iY>i%JPf_0d_sW3v| z&YY$C$I6O9S~-9oi`9ZynFqkl;xypdak9K;i%n}~=ti|KP9n6_eJf6mU}P;O$Vv!^ zT$mu~mfb#yy5gopjnT)6vU-xWYxWUmL1f_JXGma1fVW9G<++_n5@*H_sz;OMlZF>h zX37YWDc2Y*8#7NKpG}oA4b9B0rsF4IOd0k4o-Z+DH{U9ET`-k;cY_4 z4A9+&H=ObyJEZoNhlCzdWm07`Q2+5%Sr1>+QbCSG>Z4Sdp5~#t%~-EFm3Ux{exviC z2pY6FUe&As0v}TSD#$E6zN{eI<59DstX|wW=tElfm>O77eo*NI4X-}chL8+Imf-)x z2}UudR$+JNN?=i6BB5EWrH-lB(&P=Csh?U|UPnmD_$o3LkGHGH0z96s3L!Y=o?BH$ z@JgPk0sFVWz<2t^wXmjnfU_y9)v@Z5;wvAkA$OPYVgmLzj=586Vo`^$g9ImDGOO#NoQZLn&or+N-c|Yd< zx~_bMnRc;vKA<6T;$|Uz0UXAxz&)iNN`cwK>&qqqI^+-G(ofWv^}HXm>&uqK!K(PJ zfcmmWj70_xgIvI^gL7_V<{VR(8 z=Dsz*X`pL9*-$om;1qQ_EH*S5d`t?*HO{Utc=MPu(J<+?q2a`AMSUUg(8mSpP(zsz zu#2kWM?A(ilCePu%+ihTR#zXAQEFBr85CaDO51A^S4A|EaaAIb2ouBsvxvM^p0*-g zWgxA_P=v{wZK4$qq}mT?7IKxvhKXeb>89bmg?$oBlSm0z2v78sQ#Tu7$TxdI1>uT+ zSuX0NiBYuKBj7`~UH5G)kFqW9dQG5+5DVGqLD?bRY}jx`K)YP zJ|?W!nsf$0hJu^Q@{YsQyE4MhpV28tYs@soLc^dFp~BlXl}|!)Ry36lMQK&gJS@V8 z7Dei2Q`rTQ(&-_Y>@;Z*We@UgHo!A-PKjn0JS3fEj#3jQ6s$eep@%^EBBJ~`{LIIx zW|;HdRq@T`7|%`j2)3t%iiuHPb6MG!{XcImzYaF7K+SOnTgW0z(0BWj?vAkeobdR^ z2%9hd-I;XH2%9fWPtsSqllS%{{lAE?QLUZ?yW*b5Cvg-NzRD+IAHe(klys=7Pw5=I zQ@rw=r(gx)N~@<}>E$W@w47H4KbwUC!@o4%7dMowep)8l^pL1KOvHJOUPy<7K{z36 zt)M2n;V-PCYyg$ksH3Eqo~Jv?CbT$L9i`q5NPh;ypQoOFMh-Sl@o7I6UD|1`T4C0^ ztTppD&&V`PfMK0vC4x||lYEq(Ih{0wwVh-feeUlBdpys5vy;THD&?uz&a!G_?P;L7 zDhl_0Lqusk2oy{HkO;Cv9f+x<+)(6VUjxP(l=8UFauq8+s%*uv>|U2Mj-0qny2xlN zfSoG2#_?8INFFoK*n0d#ut?}0h>LJwQk@B1B>hT2{jQk)3e^i;Wkrjp^EVui4xqxPk$9igbvwMOd_!$rS zTJ65x6DBJ%lX~mS&b?)tXYFoYW<9GnCM|&MEtlaI!!CWK1L8QYkNk%HsCxCq7cTnj zE8F7hY+v~`AidKMEK;bh_mdSXS)z%b3MD3SF%Xc9TsrDPSJ}H>&C@-0H_MbKXgIoijgU{EOskQaH3pAFTu6a!aV4<~ zxyC`YP-j3%&+_`wShwK1;+JF>Rp%ucjk9~LUy=z(>irU|U!9Z1R?ZzGCt*ll83X1h zP@BidN07x{)+H*wET17|HvVP&P!7D)FT>`?JxFHe)p0nC)Nm|(81TK1#YpCz8cmQtp?g&(N=KBJdF~CPvRXe8>(xB<@kF^FRhQ&wjrC;? 
z_`g8mgdUxsX*5YIz=BDdBHvAdp`WJ~PnOAQ`zw$+{Z0XizDYm7f@O7{N}Q~_Gi0*< zLIElVcy_89q^?ZXJQF`f_oCwz-HXXnbm7fY%@We7xi8?KfnK`fawXn`DM8f(hX%hpxp-h|kzI zdtGx)%Y^_wF;{nBd#>)lSGk%Xb>`@jg>yi}k#jIvV|6@769X$j1 z##P^dEvOM976ul$nQQJ`sySDoRD-#aZcuxDF7YE$lGVAnGT1y73(FhGA!^FQc*qCs zb*Yw0)=aSW1u3TBZ6XzXByk5R#s!zc@$ya6PY3e*UPfN@6c^dPAd9p(> zH3B@;Kv;yg!MPw~fG-&znlC>EyyOL%AWtpO1R1?R4?4smTJ2sS$(Mg@0T#))XLh0d z3&k^*>f-QT#HiPn;)0_BHS$eOmm_a#0{2~{3oKov;k~y=`#kPZ>&>MyPJO*tHy61? z4@2)I@Fv`|2^5C=#eH}wg06u}_;Oj%y7Lw`K4zZ)?9dE#A{MW@AH#hM*Hwq*TB!|K z4%g;&wQIRlsP@!bvMC<(--5>}PhEIRevZfP6*5N>`IUVo)+s$#0`ELEWu-ihN5{A2 zZ9JY{C7W3L#f|OYh2BNH0Gzs) z)R4_G@zGh-7_mNK>9h=D-^3?8iQf}pil;!7?^Y^HeY{!L_N;u!9UrwtRtlOxEokq2 zGkjK@==t;*FMieiGCVhv(t?p(I|XX?7R*KkYW)@&o95|xNGnnx8b<)6awcs3A&m*@ zr!6wxxBUpWjJeNj&?=)p2ne7pVnqd4HI-uMJZs z2&T-JZp^=v)ZcEofo*iJ-6}`Y@dD>NGN0ecQK#R5GM=Z{cKxWi9qRz3cH9mZ&O9|~ zJ3Q6%)Sc~c93)B`(Zo^RhS z@079T?h&0OFbB}^3rq^ECk-e(E-7DWH{`&A; z`KTwH;fnrUbmF91`{X+)b9o=tSk?SJ$%1C!8J-#gkn6oP+tWK8aD|a` z*|9;l=ZK^qm-IghrH<&EqjC^1IeJvK3bjr)VILU&0mm^Jhm3~DTGg3jVg>((+%op52(ru}jQWt4_Ql81MxZA&2ig8<&jWEJW z&Ck#f=mIOUdwrAOch}oDs>3Nv!})6dDQHqi{wYj5`Re}DP)+%2^l8{9xoX>Kw1*!b zJT0%$&%m6KZ{acQLpc+P?5v&+9ykj&1K*#OGmv=6Omu#v6I*f_HMc`t&3D zbkZ^+C4N{IV+J|GJ$kT^T@2^G^(A0!KL)W1)R2$ihAL1^K7o_6Km~q^>9I()`BaXu zHs7&=#u33R&=ramn*+E{0z$?hWamExfI=lclNEfIowYwh=QgRapJ}zW{4>2db>TDF z!e6_wooe@kEK$%+0Bt_kd9Qvh8#ML)7Ak%@&m;0s@7C@z-0BnoFsUF1th$ZF5x-xA z6g@4_pFsFGgT9cpFvB;b z27+0awDDNwM_3ZCs+WG01F^aD!;dnDmgR$flJ|Lkvl1+r>9FDS4}P-}KcHf|K%fiU z`+maW3#C8!S=Ng4T(?2CWQk%b@)!AFLsPg!Q;NpBa%Xp-=l9#NxGU-KpzSaxcnss= zq!1sB?2KRJUNqD83O2u9Rg14c^j52bSHPk8Wy-6tTMJdKtMV&IS;VihhOgu8;a}y9 z*dk(Fy|x1K2+?dt<3NtYpMI5XGITDjG|kO?tAA$6L=+U}lrGF{+VR^&@UY2FfgtdJ zj{Z%KhC1@y-2FgBTrb|;bz^rIn}Is<9(%iApmOPLUG|=P$_@E2GZkA%Fp0=a^~GN> z$!DrR{(`NvO+9!M`*E|>!JCkqS!(AU>=gU|EuSf~&6HD`Tl{eo)xf{y#oC(s)OP@) zvzz7)Jk25a#4bU9tQN;MxP1Py5?ufF|Gg+i0Zx18h&5-NPOqGj>x2rz#AK?TPM_z z8({u&V{uoM`aZt05cUE1^);l zFVt40vZ;kYC)1jcAWTSE(rcwM{xL2D%JXcPtq|Jh9{UpJIQyLKJxJvL$9;)IYO!JK zeK#ndgTE%75#|X__8j~@JV~A3C#sY?Lcl>-tT6unaq{=^MBBg*!~QzW<)H2Zd$B{O z`APTDtG43_d_PyA<@|@L`wjmO*{klb2>%;W&g8Fggd_O>sCp*)r;68bOKL=lf0A0A z=wA;3U0)^oJH?Z;dF0?B-MbF!)u-2sUHc91I<#Zj*EQ=`zZQ@PF2g#6SJPX;bU?r?PqO4R!o4$?W+&px^NB1BUeNzJT6F3>ngE z*g|?6KBRk(UT<>SfWdugr7z+^!v_r+)^AW&@2-n^aMzKZuO&RB>);VtU7sJ(yLYc) zOL>s~8Z=_SfMq!7T`-MjW4JPiF<&TU?wUGx5?3F!m;NX$HigeoWURj%T zy#c)jZ3gxOhxF|FeD~qKy7%n4g_rUD%o^GAR9HJcICI}meGk_W+UO~i!00MGoAo!EO(3$BZ19NuI zgam{nqJko)kjG_6!s!hjL`9LPE4#X)c%R^X0-h@>3j0;pgw4AD%+!1Js`}NdS6{uZ zuC8t?#T^egsER7bc+K5jW~7|sb@!Q)vdbWOHr(h9cNk5q4`EQ==TMTgz?qs z%6%mvqtyOd(NZ6O)GR6aM$vJ{kwAZ|?3S>MV93$hKB6R~sS-dw>nPwvwHJ(Ij;X4o zb}4gwcUpVXvZB`aEk5EniEQoWG)ZXwv?_w&bfyeoK)u8{)sK zzK{E(vC0#vu2JLj4|jK`twt;ss`OdWWF!EU{HAUHnest@ICj!q)y9!R`jk7ZCF!#< z@%W6uEVQ9oD0Gq+v?75T;kb9INjD4S&PF7#P^4-yVz&s#TjEjc)>Pp*U<+7uTkFD+ zMY1qJDP9q^7PnMXY~L=Nffx@5BT4%X;S95QER?jDw8#Vkcczl46R%G@HU>^D6K$Oq z#F1VuIyq?xlD>!nu@x=aC~~C~pCJ1O5V%Sx)J4*l?F*UNccm(u4bE!O-l+^|0&0y= zcr0@yz|^KR0LeUWIQ6Vro8nkzc%4vCDz5?i8~((4(aBL*bqNp#oxDLfKpAh%tJs(_ zM8Zl&{CB5_l29~i)upWBH-n)~!a-B%p23K0ZM~>c*oMx-gyBnGbZLW7oM5jMh8N0l>_bRc(A%z$7Jn zF;o(^ccgm5Vb8fZVHAhVd&HSG#!U-M-zz#wm~PFwPn?Cp9I*iurY{zqlYPJFXvdR= zWjbt!^?>MX&oN@YG9!eUZjLRP?(7G-OD@R+9f#&pdU&Vk7DcFGB%)S7K$w{@J>}70 zV4!!GNI#tcM4mR?lZ;6CGXwoX(MYNHVbSAszA6Y2RrC?q!{@Ise6gfu`pSb=B58yk z70oW*l}}T-Pae(E9us?0+5MlFrul;J8Oc9v(^Ob9O#Wm8n4fP(Mx`~p<-9&ZOzye}@~ zdkUyG|5!k2t`GQ($#gLe5*#`icdduGNd6t>`IBh~PDOfnGF^c~la5TL3>@ItaSBbu zF^@B+&}BGfYwZ+N{W&*Jp-l0G7{{-SrGDJ5kOuYnQuO#slk%g9f`nmtykGHkg)~5X zEe3EyA$8;Bg>*I^fov+I3$SVAKp~a8jwnMO<@i(@D~^d7s^&ve=_-7uB>e_-%J)qZ 
zZ=fvsJ=f|RDT(iMr58~te?N`da(El{{>2zhR`1N>y<#p3x;NSS0>6E4YE{?loJfFoAX3&1^gm}X( z%{^R_z4*jnI)|%gPzfLRQMvd7?_f5RQFmT6lX|-U6x=wIa@;2cADT&1-KPX!W>6Mh zGcPmXDmA>?pbNy!+5~R)!(G2H&}9obtr)hrg`e?L2A34mV7|+sG~QNBy>VEG!~I-x zyA*>vzz^o38+{urO{sClM?Qh8_4bT8y?nf2% z0aUR}R*@mg4C1o_;Py2!5?5&Bq-N)_-^9(zN?vSIwrdsdG3gepWnWzapIVJgKs1B@ zQG!a=sB~T_(zP;OiTj*)N~u2{wftU6Td=ctQyC4^)??E%$J@b5j^V}zsXHE-s!=dO zb_0IY4b>zoS~}3XQOn}oS(K5HlcRzwXKdW9sg83>ERaRKb(+Di7NVV>1ZgB*Q1r~A z8mwI1JBy03%-&%(EpFc^Z&uLVkmrUmGy^X^o|+9;-@-#ebO)App9#@Wu}$-Ea|jBi zg{hO+t__t`FU|{7f3ZVLaK9qz%O}D#jGM#MQ{1B^PqU_qds_(%;l5Eg<<1DC?$hR; zmU8$WJbM})rDeSy(7HKF$>!$djIk$1%t?lAd%O=~V-v~eL@Q&oM(ors;;IDPJ1 z#4ha`MU}_t!rl*S-PMq*^oPt*(NN&}WW;pr^a$^Z(;)GvHbjY^VWDRq)BdKYt1N0O zc54NS@>`VSYT`NzHh5ec!SxnU%G&@#w6}p3PjEk*E)`E|la#n+(`90hHk^;yu+9rM z4Zy+qNAM?}*2-j0WO7b|@?3j)eu9Prbe~SpK(SBDmc@Iso}})sW=>C%S3Ij_ILx-A zFcaEk9`AD;PSPd7-OWiF58VABNq6EaG7HLS4S*Ma=kn!qC=T>>(g$Q3$MIIsmEuKh zG!Bb_($njx4cAowre5Ox6%+#;UtS4CUgnBQx)E^vdL@k%uV_)#oFQ|m5MR((GMDmk z0_3Z6$p_@knnzaxdBgMI`fqUkJd}HrPt2pi;w^2UG*EZWy@|R2anbNT?M5Knbj<#a zV+cTR!%e`g_xPim=o&mlDdqWFff!@wgZe<5FCD_e!>fQUN9V(CA8HF)vBC=gt%q7k zUBOF>FgPKvOL#l1;l;%w@=C@up!J3-ENpztuT{}7iPqIX%1=438q{Z8U5#=0IX70r z8ei~B)s&w1rPisH+ezmGK5Ly@@fIlN*IL(B%!#o?FpNmZlD$M|3kWncQ8*1J`Ga^l0iZC+)<9B;%t-tV=v zR=J}>Mj~NGJl-FguDBTv2C(-nL|^~ZbZ{YD%k^_p+O5DLp!!v}(LaIeZ5C0)b)4rc zq7~v-_@*la&H61SulAd^Lh#Aal-@LJG1>T}Sks}~X@bjjqUro4G}R^k&~o|DJMm?z zKedG}o>@wlvU@3wcc0Ywx}}txaY{>f6vHwP)_Tlx)3QxvAhjG+ty`+pw@Wdx)aVMI z9O(u4^>}Y?8oG=~XbbgEG`?yXd78p2F+Bk6cie@E?^Zr%HB|uQ*RCehwTMryMu@VQ zi`QU$+^$crT?24_Vh#N84nDF5;lvV7tEFLm?$i}}TrZcfVwl8K!sA`4*J4Ku9Q2l2 zsus)it~`mUt5~j2RccOQ>fdvPuKKp6biV1g%o2}xB`;@M(reXUD$h4@+aIm;c<uD6;-}!JoU5!H<`fhN5JFo%HTFAe6O!Q(Ml6BA6X3a(t2bhz+Q$0bfR63F>TbGK?9jaqHgp|R26u(Fgcc9rS#V`)@*TJj7!* zVdCD&6`Oz{-WqniNzkLcIQAf_kJ*h=kqa6M-INAb2|{#w*__l{r~hAFX*#cHR*(u+OJ=7n$-tbJ#Q<`159Uar*Q3d6|D^r zFY45dYHaPG^kbw%aMAtr}^W-j_MDgD&oUKyN3PV{!k2>FyQ1 Qo&3Yv7+A0JQ#*+N1K>@Y82|tP diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go index 3984d8662f..9a5cebec54 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go +++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go @@ -1139,6 +1139,17 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err instrs = append(instrs, instruction.Br{Index: 0}) break } + case *ir.IsSetStmt: + if loc, ok := stmt.Source.Value.(ir.Local); ok { + instrs = append(instrs, instruction.GetLocal{Index: c.local(loc)}) + instrs = append(instrs, instruction.Call{Index: c.function(opaValueType)}) + instrs = append(instrs, instruction.I32Const{Value: opaTypeSet}) + instrs = append(instrs, instruction.I32Ne{}) + instrs = append(instrs, instruction.BrIf{Index: 0}) + } else { + instrs = append(instrs, instruction.Br{Index: 0}) + break + } case *ir.IsUndefinedStmt: instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)}) instrs = append(instrs, instruction.I32Const{Value: 0}) @@ -1321,7 +1332,7 @@ func (c *Compiler) compileWithStmt(with *ir.WithStmt, result *[]instruction.Inst return nil } -func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, loc ir.Location, instrs []instruction.Instruction) []instruction.Instruction { +func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _ ir.Location, instrs []instruction.Instruction) []instruction.Instruction { lcopy := c.genLocal() // holds copy of local instrs = 
append(instrs, instruction.GetLocal{Index: c.local(local)}) diff --git a/vendor/github.com/open-policy-agent/opa/internal/errors/join.go b/vendor/github.com/open-policy-agent/opa/internal/errors/join.go deleted file mode 100644 index 8d8e1f301d..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/errors/join.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.20 - -package errors - -// Join returns an error that wraps the given errors. -// Any nil error values are discarded. -// Join returns nil if errs contains no non-nil values. -// The error formats as the concatenation of the strings obtained -// by calling the Error method of each element of errs, with a newline -// between each string. -func Join(errs ...error) error { - n := 0 - for _, err := range errs { - if err != nil { - n++ - } - } - if n == 0 { - return nil - } - e := &joinError{ - errs: make([]error, 0, n), - } - for _, err := range errs { - if err != nil { - e.errs = append(e.errs, err) - } - } - return e -} - -type joinError struct { - errs []error -} - -func (e *joinError) Error() string { - var b []byte - for i, err := range e.errs { - if i > 0 { - b = append(b, '\n') - } - b = append(b, err.Error()...) - } - return string(b) -} - -func (e *joinError) Unwrap() []error { - return e.errs -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/errors/join_go1.20.go b/vendor/github.com/open-policy-agent/opa/internal/errors/join_go1.20.go deleted file mode 100644 index 666f3c783e..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/errors/join_go1.20.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build go1.20 - -package errors - -import "errors" - -var Join = errors.Join diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schema.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schema.go index 5bdfada5d9..8e035013c2 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schema.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schema.go @@ -788,7 +788,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) return nil } -func (d *Schema) parseReference(documentNode interface{}, currentSchema *SubSchema) error { +func (d *Schema) parseReference(_ interface{}, currentSchema *SubSchema) error { var ( refdDocumentNode interface{} dsp *schemaPoolDocument diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go index 58d10c1f76..7c86e37245 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go @@ -556,10 +556,10 @@ func (v *SubSchema) validateArray(currentSubSchema *SubSchema, value []interface if validationResult.Valid() { validatedOne = true break - } else { - if bestValidationResult == nil || validationResult.score > bestValidationResult.score { - bestValidationResult = validationResult - } + } + + if bestValidationResult == nil || validationResult.score > bestValidationResult.score { + bestValidationResult = validationResult } } if !validatedOne { diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go 
b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go index 1af9c4f966..66bd348c47 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go @@ -25,7 +25,7 @@ func init() { ) }) - observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) { + observers.OnFragment(func(_ *Walker, fragment *ast.FragmentDefinition) { if fragment.Definition == nil || fragment.TypeCondition == "" || fragment.Definition.IsCompositeType() { return } diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_argument_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_argument_names.go index d9aa8873e0..36b2d057c9 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_argument_names.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_argument_names.go @@ -10,7 +10,7 @@ import ( func init() { AddRule("KnownArgumentNames", func(observers *Events, addError AddErrFunc) { // A GraphQL field is only valid if all supplied arguments are defined by that field. - observers.OnField(func(walker *Walker, field *ast.Field) { + observers.OnField(func(_ *Walker, field *ast.Field) { if field.Definition == nil || field.ObjectDefinition == nil { return } @@ -33,7 +33,7 @@ func init() { } }) - observers.OnDirective(func(walker *Walker, directive *ast.Directive) { + observers.OnDirective(func(_ *Walker, directive *ast.Directive) { if directive.Definition == nil { return } diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_directives.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_directives.go index 016541e821..9855291e3b 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_directives.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_directives.go @@ -15,7 +15,7 @@ func init() { Column int } var seen = map[mayNotBeUsedDirective]bool{} - observers.OnDirective(func(walker *Walker, directive *ast.Directive) { + observers.OnDirective(func(_ *Walker, directive *ast.Directive) { if directive.Definition == nil { addError( Message(`Unknown directive "@%s".`, directive.Name), diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_fragment_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_fragment_names.go index 94583dac33..8ae1fc33f4 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_fragment_names.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_fragment_names.go @@ -9,7 +9,7 @@ import ( func init() { AddRule("KnownFragmentNames", func(observers *Events, addError AddErrFunc) { - observers.OnFragmentSpread(func(walker *Walker, fragmentSpread *ast.FragmentSpread) { + observers.OnFragmentSpread(func(_ *Walker, fragmentSpread *ast.FragmentSpread) { if fragmentSpread.Definition == nil { addError( Message(`Unknown fragment "%s".`, fragmentSpread.Name), diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_fragments.go 
b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_fragments.go index f82cfe9f9c..f6ba046a1c 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_fragments.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_fragments.go @@ -13,13 +13,13 @@ func init() { inFragmentDefinition := false fragmentNameUsed := make(map[string]bool) - observers.OnFragmentSpread(func(walker *Walker, fragmentSpread *ast.FragmentSpread) { + observers.OnFragmentSpread(func(_ *Walker, fragmentSpread *ast.FragmentSpread) { if !inFragmentDefinition { fragmentNameUsed[fragmentSpread.Name] = true } }) - observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) { + observers.OnFragment(func(_ *Walker, fragment *ast.FragmentDefinition) { inFragmentDefinition = true if !fragmentNameUsed[fragment.Name] { addError( diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_variables.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_variables.go index c019ae42e0..163ac895b5 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_variables.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_variables.go @@ -9,7 +9,7 @@ import ( func init() { AddRule("NoUnusedVariables", func(observers *Events, addError AddErrFunc) { - observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { + observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) { for _, varDef := range operation.VariableDefinitions { if varDef.Used { continue diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/overlapping_fields_can_be_merged.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/overlapping_fields_can_be_merged.go index bda9a90b33..1e207a43e7 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/overlapping_fields_can_be_merged.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/overlapping_fields_can_be_merged.go @@ -42,7 +42,7 @@ func init() { * * D) When comparing "between" a set of fields and a referenced fragment, first * a comparison is made between each field in the original set of fields and - * each field in the the referenced set of fields. + * each field in the referenced set of fields. 
* * E) Also, if any fragment is referenced in the referenced selection set, * then a comparison is made "between" the original set of fields and the diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/provided_required_arguments.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/provided_required_arguments.go index 20364f8ba9..d6d12c4fd2 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/provided_required_arguments.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/provided_required_arguments.go @@ -9,7 +9,7 @@ import ( func init() { AddRule("ProvidedRequiredArguments", func(observers *Events, addError AddErrFunc) { - observers.OnField(func(walker *Walker, field *ast.Field) { + observers.OnField(func(_ *Walker, field *ast.Field) { if field.Definition == nil { return } @@ -35,7 +35,7 @@ func init() { } }) - observers.OnDirective(func(walker *Walker, directive *ast.Directive) { + observers.OnDirective(func(_ *Walker, directive *ast.Directive) { if directive.Definition == nil { return } diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_argument_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_argument_names.go index 4466d56720..7458c5f6cb 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_argument_names.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_argument_names.go @@ -9,11 +9,11 @@ import ( func init() { AddRule("UniqueArgumentNames", func(observers *Events, addError AddErrFunc) { - observers.OnField(func(walker *Walker, field *ast.Field) { + observers.OnField(func(_ *Walker, field *ast.Field) { checkUniqueArgs(field.Arguments, addError) }) - observers.OnDirective(func(walker *Walker, directive *ast.Directive) { + observers.OnDirective(func(_ *Walker, directive *ast.Directive) { checkUniqueArgs(directive.Arguments, addError) }) }) diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_directives_per_location.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_directives_per_location.go index 09968a7416..ecf5a0a82e 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_directives_per_location.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_directives_per_location.go @@ -9,7 +9,7 @@ import ( func init() { AddRule("UniqueDirectivesPerLocation", func(observers *Events, addError AddErrFunc) { - observers.OnDirectiveList(func(walker *Walker, directives []*ast.Directive) { + observers.OnDirectiveList(func(_ *Walker, directives []*ast.Directive) { seen := map[string]bool{} for _, dir := range directives { diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_fragment_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_fragment_names.go index 3da46467e8..c94f3ad27c 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_fragment_names.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_fragment_names.go @@ -11,7 +11,7 @@ func init() { AddRule("UniqueFragmentNames", func(observers *Events, addError AddErrFunc) { seenFragments := map[string]bool{} - observers.OnFragment(func(walker 
*Walker, fragment *ast.FragmentDefinition) { + observers.OnFragment(func(_ *Walker, fragment *ast.FragmentDefinition) { if seenFragments[fragment.Name] { addError( Message(`There can be only one fragment named "%s".`, fragment.Name), diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_input_field_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_input_field_names.go index f9f69051bf..a93d63bd1e 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_input_field_names.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_input_field_names.go @@ -9,7 +9,7 @@ import ( func init() { AddRule("UniqueInputFieldNames", func(observers *Events, addError AddErrFunc) { - observers.OnValue(func(walker *Walker, value *ast.Value) { + observers.OnValue(func(_ *Walker, value *ast.Value) { if value.Kind != ast.ObjectValue { return } diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_operation_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_operation_names.go index dc937795bd..dcd404dadf 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_operation_names.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_operation_names.go @@ -11,7 +11,7 @@ func init() { AddRule("UniqueOperationNames", func(observers *Events, addError AddErrFunc) { seen := map[string]bool{} - observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { + observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) { if seen[operation.Name] { addError( Message(`There can be only one operation named "%s".`, operation.Name), diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_variable_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_variable_names.go index 94dab3cf13..7a214dbe4c 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_variable_names.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_variable_names.go @@ -9,7 +9,7 @@ import ( func init() { AddRule("UniqueVariableNames", func(observers *Events, addError AddErrFunc) { - observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { + observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) { seen := map[string]int{} for _, def := range operation.VariableDefinitions { // add the same error only once per a variable. 
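Aside: the recurring edit across these gqlparser validator rules is purely mechanical. Callback parameters that are never read are renamed to the blank identifier, which documents that the value is intentionally ignored and satisfies unused-parameter linters. A minimal, self-contained Go sketch of the idiom follows; the Event and process names are hypothetical and not part of OPA:

package main

import "fmt"

type Event struct{ Name string }

// process invokes fn for each event, passing the index and the event.
// Callers that do not need one of the values accept it with the blank
// identifier rather than naming it.
func process(events []Event, fn func(i int, e Event)) {
	for i, e := range events {
		fn(i, e)
	}
}

func main() {
	events := []Event{{Name: "fragment"}, {Name: "directive"}}
	// The index is unused here, so it is declared as _ instead of i.
	process(events, func(_ int, e Event) {
		fmt.Println(e.Name)
	})
}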
diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/values_of_correct_type.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/values_of_correct_type.go index 88d8f53c45..8858023d4e 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/values_of_correct_type.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/values_of_correct_type.go @@ -13,7 +13,7 @@ import ( func init() { AddRule("ValuesOfCorrectType", func(observers *Events, addError AddErrFunc) { - observers.OnValue(func(walker *Walker, value *ast.Value) { + observers.OnValue(func(_ *Walker, value *ast.Value) { if value.Definition == nil || value.ExpectedType == nil { return } diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_are_input_types.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_are_input_types.go index 51cb77ca31..ea4dfcc5ab 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_are_input_types.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_are_input_types.go @@ -9,7 +9,7 @@ import ( func init() { AddRule("VariablesAreInputTypes", func(observers *Events, addError AddErrFunc) { - observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { + observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) { for _, def := range operation.VariableDefinitions { if def.Definition == nil { continue diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go b/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go index a6405291e7..b75d26ddab 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go +++ b/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go @@ -893,6 +893,40 @@ func (p *Planner) planExprEvery(e *ast.Expr, iter planiter) error { }) err := p.planTerm(every.Domain, func() error { + // Assert that the domain is a collection type: + // block outer + // block a + // isArray + // br 1: break outer, and continue + // block b + // isObject + // br 1: break outer, and continue + // block c + // isSet + // br 1: break outer, and continue + // br 1: invalid domain, break every + + aBlock := &ir.Block{} + p.appendStmtToBlock(&ir.IsArrayStmt{Source: p.ltarget}, aBlock) + p.appendStmtToBlock(&ir.BreakStmt{Index: 1}, aBlock) + + bBlock := &ir.Block{} + p.appendStmtToBlock(&ir.IsObjectStmt{Source: p.ltarget}, bBlock) + p.appendStmtToBlock(&ir.BreakStmt{Index: 1}, bBlock) + + cBlock := &ir.Block{} + p.appendStmtToBlock(&ir.IsSetStmt{Source: p.ltarget}, cBlock) + p.appendStmtToBlock(&ir.BreakStmt{Index: 1}, cBlock) + + outerBlock := &ir.BlockStmt{Blocks: []*ir.Block{ + { + Stmts: []ir.Stmt{ + &ir.BlockStmt{Blocks: []*ir.Block{aBlock, bBlock, cBlock}}, + &ir.BreakStmt{Index: 1}}, + }, + }} + p.appendStmt(outerBlock) + return p.planScan(every.Key, func(ir.Local) error { p.appendStmt(&ir.ResetLocalStmt{ Target: cond1, @@ -1489,7 +1523,7 @@ func (p *Planner) planValue(t ast.Value, loc *ast.Location, iter planiter) error } } -func (p *Planner) planNull(null ast.Null, iter planiter) error { +func (p *Planner) planNull(_ ast.Null, iter planiter) error { target := p.newLocal() diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go 
b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go index 79662b10ff..bfb780754b 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go @@ -17,6 +17,8 @@ import ( "strings" "time" + v4 "github.com/open-policy-agent/opa/internal/providers/aws/v4" + "github.com/open-policy-agent/opa/ast" ) @@ -104,7 +106,7 @@ func SignRequest(req *http.Request, service string, creds Credentials, theTime t signedHeaders := SignV4a(req.Header, req.Method, req.URL, body, service, creds, now) req.Header = signedHeaders } else { - authHeader, awsHeaders := SignV4(req.Header, req.Method, req.URL, body, service, creds, now) + authHeader, awsHeaders := SignV4(req.Header, req.Method, req.URL, body, service, creds, now, false) req.Header.Set("Authorization", authHeader) for k, v := range awsHeaders { req.Header.Add(k, v) @@ -115,14 +117,16 @@ func SignRequest(req *http.Request, service string, creds Credentials, theTime t } // SignV4 modifies a map[string][]string of headers to generate an AWS V4 signature + headers based on the config/credentials provided. -func SignV4(headers map[string][]string, method string, theURL *url.URL, body []byte, service string, awsCreds Credentials, theTime time.Time) (string, map[string]string) { +func SignV4(headers map[string][]string, method string, theURL *url.URL, body []byte, service string, + awsCreds Credentials, theTime time.Time, disablePayloadSigning bool) (string, map[string]string) { // General ref. https://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html // S3 ref. https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html // APIGateway ref. https://docs.aws.amazon.com/apigateway/api-reference/signing-requests/ - bodyHexHash := fmt.Sprintf("%x", sha256.Sum256(body)) now := theTime.UTC() + contentSha256 := getContentHash(disablePayloadSigning, body) + // V4 signing has specific ideas of how it wants to see dates/times encoded dateNow := now.Format("20060102") iso8601Now := now.Format("20060102T150405Z") @@ -134,7 +138,7 @@ func SignV4(headers map[string][]string, method string, theURL *url.URL, body [] // s3 and glacier require the extra x-amz-content-sha256 header. other services do not. if service == "s3" || service == "glacier" { - awsHeaders["x-amz-content-sha256"] = bodyHexHash + awsHeaders[amzContentSha256Key] = contentSha256 } // the security token header is necessary for ephemeral credentials, e.g. 
from @@ -173,7 +177,7 @@ func SignV4(headers map[string][]string, method string, theURL *url.URL, body [] // include the list of the signed headers headerList := strings.Join(orderedKeys, ";") canonicalReq += headerList + "\n" - canonicalReq += bodyHexHash + canonicalReq += contentSha256 // the "string to sign" is a time-bounded, scoped request token which // is linked to the "canonical request" by inclusion of its SHA-256 hash @@ -202,3 +206,11 @@ func SignV4(headers map[string][]string, method string, theURL *url.URL, body [] return authHeader, awsHeaders } + +// getContentHash returns UNSIGNED-PAYLOAD if payload signing is disabled else will compute sha256 from body +func getContentHash(disablePayloadSigning bool, body []byte) string { + if disablePayloadSigning { + return v4.UnsignedPayload + } + return fmt.Sprintf("%x", sha256.Sum256(body)) +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go index f1235a68b6..929f2006e7 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go @@ -32,6 +32,7 @@ const ( amzSecurityTokenKey = v4Internal.AmzSecurityTokenKey amzDateKey = v4Internal.AmzDateKey authorizationHeader = "Authorization" + amzContentSha256Key = "x-amz-content-sha256" signingAlgorithm = "AWS4-ECDSA-P256-SHA256" @@ -173,7 +174,7 @@ type httpSigner struct { PayloadHash string } -func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) { +func (s *httpSigner) setRequiredSigningFields(headers http.Header, _ url.Values) { amzDate := s.Time.Format(timeFormat) headers.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ",")) @@ -192,7 +193,7 @@ func (s *httpSigner) Build() (signedRequest, error) { // seemingly required by S3/MRAP -- 403 Forbidden otherwise headers.Set("host", req.URL.Host) - headers.Set("x-amz-content-sha256", s.PayloadHash) + headers.Set(amzContentSha256Key, s.PayloadHash) s.setRequiredSigningFields(headers, query) @@ -381,8 +382,7 @@ type signedRequest struct { // SignV4a returns a map[string][]string of headers, including an added AWS V4a signature based on the config/credentials provided. 
func SignV4a(headers map[string][]string, method string, theURL *url.URL, body []byte, service string, awsCreds Credentials, theTime time.Time) map[string][]string { - bodyHexHash := fmt.Sprintf("%x", sha256.Sum256(body)) - + contentSha256 := getContentHash(false, body) key, err := retrievePrivateKey(awsCreds) if err != nil { return map[string][]string{} @@ -394,7 +394,7 @@ func SignV4a(headers map[string][]string, method string, theURL *url.URL, body [ signer := &httpSigner{ Request: req, - PayloadHash: bodyHexHash, + PayloadHash: contentSha256, ServiceName: service, RegionSet: []string{"*"}, Credentials: key, diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go index e1e8a949f5..e033da7460 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go @@ -8,7 +8,7 @@ import ( "github.com/open-policy-agent/opa/logging" ) -// DoRequestWithClient is a convenience function to get the body of an http response with +// DoRequestWithClient is a convenience function to get the body of an HTTP response with // appropriate error-handling boilerplate and logging. func DoRequestWithClient(req *http.Request, client *http.Client, desc string, logger logging.Logger) ([]byte, error) { resp, err := client.Do(req) @@ -24,22 +24,16 @@ func DoRequestWithClient(req *http.Request, client *http.Client, desc string, lo "headers": resp.Header, }).Debug("Received response from " + desc + " service.") - if resp.StatusCode != 200 { - if logger.GetLevel() == logging.Debug { - body, err := io.ReadAll(resp.Body) - if err == nil { - logger.Debug("Error response with response body: %s", body) - } - } - // could be 404 for role that's not available, but cover all the bases - return nil, errors.New(desc + " HTTP request returned unexpected status: " + resp.Status) - } - body, err := io.ReadAll(resp.Body) if err != nil { // deal with problems reading the body, whatever those might be return nil, errors.New(desc + " HTTP response body could not be read: " + err.Error()) } + if resp.StatusCode != 200 { + logger.Debug("Error response with response body: %s", body) + // could be 404 for role that's not available, but cover all the bases + return nil, errors.New(desc + " HTTP request returned unexpected status: " + resp.Status) + } return body, nil } diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/v4/util.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/v4/util.go index 0cb9cffaf5..c8e7fb1bab 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/v4/util.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/v4/util.go @@ -11,14 +11,9 @@ const doubleSpace = " " // contain multiple side-by-side spaces. func StripExcessSpaces(str string) string { var j, k, l, m, spaces int - // Trim trailing spaces - for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { - } - // Trim leading spaces - for k = 0; k < j && str[k] == ' '; k++ { - } - str = str[k : j+1] + // Trim leading and trailing spaces + str = strings.Trim(str, " ") // Strip multiple spaces. 
j = strings.Index(str, doubleSpace) diff --git a/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go b/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go index 88e8bc4e0b..b1a5b71577 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go +++ b/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go @@ -124,7 +124,7 @@ func LoadPaths(paths []string, processAnnotations bool, caps *ast.Capabilities, fsys fs.FS) (*LoadPathsResult, error) { - return LoadPathsForRegoVersion(ast.RegoV0, paths, filter, asBundle, bvc, skipVerify, processAnnotations, caps, fsys) + return LoadPathsForRegoVersion(ast.RegoV0, paths, filter, asBundle, bvc, skipVerify, processAnnotations, false, caps, fsys) } func LoadPathsForRegoVersion(regoVersion ast.RegoVersion, @@ -134,6 +134,7 @@ func LoadPathsForRegoVersion(regoVersion ast.RegoVersion, bvc *bundle.VerificationConfig, skipVerify bool, processAnnotations bool, + followSymlinks bool, caps *ast.Capabilities, fsys fs.FS) (*LoadPathsResult, error) { @@ -161,6 +162,7 @@ func LoadPathsForRegoVersion(regoVersion ast.RegoVersion, WithProcessAnnotation(processAnnotations). WithCapabilities(caps). WithRegoVersion(regoVersion). + WithFollowSymlinks(followSymlinks). AsBundle(path) if err != nil { return nil, err diff --git a/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go b/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go index 76fb9ee8c8..08f3bf9182 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go +++ b/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go @@ -63,6 +63,14 @@ func TruncateFilePaths(maxIdealWidth, maxWidth int, path ...string) (map[string] return result, longestLocation } +func Truncate(str string, maxWidth int) string { + if len(str) <= maxWidth { + return str + } + + return str[:maxWidth-3] + "..." +} + func getPathFromFirstSeparator(path string) string { s := filepath.Dir(path) s = strings.TrimPrefix(s, string(filepath.Separator)) diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go index d9c1bda3da..35e6059c72 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go +++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go @@ -375,10 +375,10 @@ func readTableSection(r io.Reader, s *module.TableSection) error { return err } else if elem != constant.ElementTypeAnyFunc { return fmt.Errorf("illegal element type") - } else { - table.Type = types.Anyfunc } + table.Type = types.Anyfunc + if err := readLimits(r, &table.Lim); err != nil { return err } diff --git a/vendor/github.com/open-policy-agent/opa/ir/ir.go b/vendor/github.com/open-policy-agent/opa/ir/ir.go index d98b96e2c8..c07670704e 100644 --- a/vendor/github.com/open-policy-agent/opa/ir/ir.go +++ b/vendor/github.com/open-policy-agent/opa/ir/ir.go @@ -364,6 +364,13 @@ type IsObjectStmt struct { Location } +// IsSetStmt represents a dynamic type check on a local variable. +type IsSetStmt struct { + Source Operand `json:"source"` + + Location +} + // IsDefinedStmt represents a check of whether a local variable is defined. 
type IsDefinedStmt struct { Source Local `json:"source"` diff --git a/vendor/github.com/open-policy-agent/opa/ir/pretty.go b/vendor/github.com/open-policy-agent/opa/ir/pretty.go index a0f2df3bc5..6102c5a911 100644 --- a/vendor/github.com/open-policy-agent/opa/ir/pretty.go +++ b/vendor/github.com/open-policy-agent/opa/ir/pretty.go @@ -25,11 +25,11 @@ type prettyPrinter struct { w io.Writer } -func (pp *prettyPrinter) Before(x interface{}) { +func (pp *prettyPrinter) Before(_ interface{}) { pp.depth++ } -func (pp *prettyPrinter) After(x interface{}) { +func (pp *prettyPrinter) After(_ interface{}) { pp.depth-- } diff --git a/vendor/github.com/open-policy-agent/opa/loader/errors.go b/vendor/github.com/open-policy-agent/opa/loader/errors.go index d6da40a062..b8aafb1421 100644 --- a/vendor/github.com/open-policy-agent/opa/loader/errors.go +++ b/vendor/github.com/open-policy-agent/opa/loader/errors.go @@ -41,7 +41,7 @@ func (e *Errors) add(err error) { type unsupportedDocumentType string func (path unsupportedDocumentType) Error() string { - return string(path) + ": bad document type" + return string(path) + ": document must be of type object" } type unrecognizedFile string diff --git a/vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/bar/bar.rego b/vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/bar/bar.rego deleted file mode 100644 index 08bbfb0ac6..0000000000 --- a/vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/bar/bar.rego +++ /dev/null @@ -1,3 +0,0 @@ -package bar - -p = true { true } diff --git a/vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/bar/bar.yaml b/vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/bar/bar.yaml deleted file mode 100644 index 8baef1b4ab..0000000000 --- a/vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/bar/bar.yaml +++ /dev/null @@ -1 +0,0 @@ -abc diff --git a/vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/baz/qux/qux.json b/vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/baz/qux/qux.json deleted file mode 100644 index 19765bd501..0000000000 --- a/vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/baz/qux/qux.json +++ /dev/null @@ -1 +0,0 @@ -null diff --git a/vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/foo.json b/vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/foo.json deleted file mode 100644 index 3cc0ecbedf..0000000000 --- a/vendor/github.com/open-policy-agent/opa/loader/internal/embedtest/foo.json +++ /dev/null @@ -1 +0,0 @@ -[1,2,3] diff --git a/vendor/github.com/open-policy-agent/opa/loader/loader.go b/vendor/github.com/open-policy-agent/opa/loader/loader.go index 1ce0e7868f..461639ed19 100644 --- a/vendor/github.com/open-policy-agent/opa/loader/loader.go +++ b/vendor/github.com/open-policy-agent/opa/loader/loader.go @@ -81,7 +81,7 @@ type Filter = filter.LoaderFilter // GlobExcludeName excludes files and directories whose names do not match the // shell style pattern at minDepth or greater. 
func GlobExcludeName(pattern string, minDepth int) Filter { - return func(abspath string, info fs.FileInfo, depth int) bool { + return func(_ string, info fs.FileInfo, depth int) bool { match, _ := filepath.Match(pattern, info.Name()) return match && depth >= minDepth } @@ -103,6 +103,7 @@ type FileLoader interface { WithCapabilities(*ast.Capabilities) FileLoader WithJSONOptions(*astJSON.Options) FileLoader WithRegoVersion(ast.RegoVersion) FileLoader + WithFollowSymlinks(bool) FileLoader } // NewFileLoader returns a new FileLoader instance. @@ -114,14 +115,15 @@ func NewFileLoader() FileLoader { } type fileLoader struct { - metrics metrics.Metrics - filter Filter - bvc *bundle.VerificationConfig - skipVerify bool - files map[string]bundle.FileInfo - opts ast.ParserOptions - fsys fs.FS - reader io.Reader + metrics metrics.Metrics + filter Filter + bvc *bundle.VerificationConfig + skipVerify bool + files map[string]bundle.FileInfo + opts ast.ParserOptions + fsys fs.FS + reader io.Reader + followSymlinks bool } // WithFS provides an fs.FS to use for loading files. You can pass nil to @@ -188,6 +190,12 @@ func (fl *fileLoader) WithRegoVersion(version ast.RegoVersion) FileLoader { return fl } +// WithFollowSymlinks enables or disables following symlinks when loading files +func (fl *fileLoader) WithFollowSymlinks(followSymlinks bool) FileLoader { + fl.followSymlinks = followSymlinks + return fl +} + // All returns a Result object loaded (recursively) from the specified paths. func (fl fileLoader) All(paths []string) (*Result, error) { return fl.Filtered(paths, nil) @@ -239,6 +247,10 @@ func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) { return nil, err } + if err := checkForUNCPath(path); err != nil { + return nil, err + } + var bundleLoader bundle.DirectoryLoader var isDir bool if fl.reader != nil { @@ -246,9 +258,11 @@ func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) { } else { bundleLoader, isDir, err = GetBundleDirectoryLoaderFS(fl.fsys, path, fl.filter) } + if err != nil { return nil, err } + bundleLoader = bundleLoader.WithFollowSymlinks(fl.followSymlinks) br := bundle.NewCustomReader(bundleLoader). WithMetrics(fl.metrics). @@ -257,6 +271,7 @@ func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) { WithProcessAnnotations(fl.opts.ProcessAnnotation). WithCapabilities(fl.opts.Capabilities). WithJSONOptions(fl.opts.JSONOptions). + WithFollowSymlinks(fl.followSymlinks). WithRegoVersion(fl.opts.RegoVersion) // For bundle directories add the full path in front of module file names @@ -293,6 +308,10 @@ func GetBundleDirectoryLoaderFS(fsys fs.FS, path string, filter Filter) (bundle. return nil, false, err } + if err := checkForUNCPath(path); err != nil { + return nil, false, err + } + var fi fs.FileInfo if fsys != nil { fi, err = fs.Stat(fsys, path) @@ -486,7 +505,7 @@ func AsBundle(path string) (*bundle.Bundle, error) { // AllRegos returns a Result object loaded (recursively) with all Rego source // files from the specified paths. 
func AllRegos(paths []string) (*Result, error) { - return NewFileLoader().Filtered(paths, func(_ string, info os.FileInfo, depth int) bool { + return NewFileLoader().Filtered(paths, func(_ string, info os.FileInfo, _ int) bool { return !info.IsDir() && !strings.HasSuffix(info.Name(), bundle.RegoExt) }) } @@ -522,7 +541,7 @@ func Paths(path string, recurse bool) (paths []string, err error) { if err != nil { return nil, err } - err = filepath.Walk(path, func(f string, info os.FileInfo, err error) error { + err = filepath.Walk(path, func(f string, _ os.FileInfo, _ error) error { if !recurse { if path != f && path != filepath.Dir(f) { return filepath.SkipDir @@ -653,12 +672,18 @@ func allRec(fsys fs.FS, path string, filter Filter, errors *Errors, loaded *Resu return } + if err := checkForUNCPath(path); err != nil { + errors.add(err) + return + } + var info fs.FileInfo if fsys != nil { info, err = fs.Stat(fsys, path) } else { info, err = os.Stat(path) } + if err != nil { errors.add(err) return @@ -794,3 +819,19 @@ func makeDir(path []string, x interface{}) (map[string]interface{}, bool) { } return makeDir(path[:len(path)-1], map[string]interface{}{path[len(path)-1]: x}) } + +// isUNC reports whether path is a UNC path. +func isUNC(path string) bool { + return len(path) > 1 && isSlash(path[0]) && isSlash(path[1]) +} + +func isSlash(c uint8) bool { + return c == '\\' || c == '/' +} + +func checkForUNCPath(path string) error { + if isUNC(path) { + return fmt.Errorf("UNC path read is not allowed: %s", path) + } + return nil +} diff --git a/vendor/github.com/open-policy-agent/opa/logging/logging.go b/vendor/github.com/open-policy-agent/opa/logging/logging.go index 3ce76da468..7a1edfb563 100644 --- a/vendor/github.com/open-policy-agent/opa/logging/logging.go +++ b/vendor/github.com/open-policy-agent/opa/logging/logging.go @@ -3,6 +3,7 @@ package logging import ( "context" "io" + "net/http" "github.com/sirupsen/logrus" ) @@ -126,21 +127,37 @@ func (l *StandardLogger) GetLevel() Level { // Debug logs at debug level func (l *StandardLogger) Debug(fmt string, a ...interface{}) { + if len(a) == 0 { + l.logger.WithFields(l.getFields()).Debug(fmt) + return + } l.logger.WithFields(l.getFields()).Debugf(fmt, a...) } // Info logs at info level func (l *StandardLogger) Info(fmt string, a ...interface{}) { + if len(a) == 0 { + l.logger.WithFields(l.getFields()).Info(fmt) + return + } l.logger.WithFields(l.getFields()).Infof(fmt, a...) } // Error logs at error level func (l *StandardLogger) Error(fmt string, a ...interface{}) { + if len(a) == 0 { + l.logger.WithFields(l.getFields()).Error(fmt) + return + } l.logger.WithFields(l.getFields()).Errorf(fmt, a...) } // Warn logs at warn level func (l *StandardLogger) Warn(fmt string, a ...interface{}) { + if len(a) == 0 { + l.logger.WithFields(l.getFields()).Warn(fmt) + return + } l.logger.WithFields(l.getFields()).Warnf(fmt, a...) } @@ -194,10 +211,15 @@ const reqCtxKey = requestContextKey("request-context-key") // RequestContext represents the request context used to store data // related to the request that could be used on logs. type RequestContext struct { - ClientAddr string - ReqID uint64 - ReqMethod string - ReqPath string + ClientAddr string + ReqID uint64 + ReqMethod string + ReqPath string + HTTPRequestContext HTTPRequestContext +} + +type HTTPRequestContext struct { + Header http.Header } // Fields adapts the RequestContext fields to logrus.Fields. 
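Aside: the len(a) == 0 guards added to StandardLogger above bypass logrus's Printf-style formatting whenever a message is logged without arguments, so messages containing literal percent signs are no longer mangled by missing-operand verbs. A minimal standard-library sketch of the failure mode, with logf as a hypothetical stand-in for the logger method:

package main

import "fmt"

// logf mirrors the guard: with no arguments the message is returned verbatim
// instead of being run through Sprintf, so a literal '%' survives intact.
func logf(format string, a ...interface{}) string {
	if len(a) == 0 {
		return format
	}
	return fmt.Sprintf(format, a...)
}

func main() {
	msg := "progress: 100% done"
	fmt.Println(fmt.Sprintf(msg)) // mangled: "progress: 100%!d(MISSING)one"
	fmt.Println(logf(msg))        // intact:  "progress: 100% done"
}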
@@ -221,6 +243,17 @@ func FromContext(ctx context.Context) (*RequestContext, bool) { return requestContext, ok } +const httpReqCtxKey = requestContextKey("http-request-context-key") + +func WithHTTPRequestContext(parent context.Context, val *HTTPRequestContext) context.Context { + return context.WithValue(parent, httpReqCtxKey, val) +} + +func HTTPRequestContextFromContext(ctx context.Context) (*HTTPRequestContext, bool) { + requestContext, ok := ctx.Value(httpReqCtxKey).(*HTTPRequestContext) + return requestContext, ok +} + const decisionCtxKey = requestContextKey("decision_id") func WithDecisionID(parent context.Context, id string) context.Context { diff --git a/vendor/github.com/open-policy-agent/opa/plugins/plugins.go b/vendor/github.com/open-policy-agent/opa/plugins/plugins.go index 283fcc4591..bacdd15076 100644 --- a/vendor/github.com/open-policy-agent/opa/plugins/plugins.go +++ b/vendor/github.com/open-policy-agent/opa/plugins/plugins.go @@ -7,6 +7,7 @@ package plugins import ( "context" + "errors" "fmt" mr "math/rand" "sync" @@ -24,7 +25,6 @@ import ( "github.com/open-policy-agent/opa/hooks" bundleUtils "github.com/open-policy-agent/opa/internal/bundle" cfg "github.com/open-policy-agent/opa/internal/config" - "github.com/open-policy-agent/opa/internal/errors" initload "github.com/open-policy-agent/opa/internal/runtime/init" "github.com/open-policy-agent/opa/keys" "github.com/open-policy-agent/opa/loader" @@ -286,11 +286,10 @@ func ValidateAndInjectDefaultsForTriggerMode(a, b *TriggerMode) (*TriggerMode, e return nil, err } return a, nil - - } else { - t := DefaultTriggerMode - return &t, nil } + + t := DefaultTriggerMode + return &t, nil } type namedplugin struct { diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go b/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go index f69496f08b..11e72001a2 100644 --- a/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go +++ b/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go @@ -40,6 +40,8 @@ import ( const ( // Default to s3 when the service for sigv4 signing is not specified for backwards compatibility awsSigv4SigningDefaultService = "s3" + // Default to urn:ietf:params:oauth:client-assertion-type:jwt-bearer for ClientAssertionType when not specified + defaultClientAssertionType = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" ) // DefaultTLSConfig defines standard TLS configurations based on the Config @@ -281,6 +283,9 @@ type oauth2ClientCredentialsAuthPlugin struct { AdditionalParameters map[string]string `json:"additional_parameters,omitempty"` AWSKmsKey *awsKmsKeyConfig `json:"aws_kms,omitempty"` AWSSigningPlugin *awsSigningAuthPlugin `json:"aws_signing,omitempty"` + ClientAssertionType string `json:"client_assertion_type"` + ClientAssertion string `json:"client_assertion"` + ClientAssertionPath string `json:"client_assertion_path"` signingKey *keys.Config signingKeyParsed interface{} @@ -294,16 +299,13 @@ type oauth2Token struct { ExpiresAt time.Time } -func (ap *oauth2ClientCredentialsAuthPlugin) createAuthJWT(ctx context.Context, claims map[string]interface{}, signingKey interface{}) (*string, error) { +func (ap *oauth2ClientCredentialsAuthPlugin) createAuthJWT(ctx context.Context, extClaims map[string]interface{}, signingKey interface{}) (*string, error) { now := time.Now() - baseClaims := map[string]interface{}{ + claims := map[string]interface{}{ "iat": now.Unix(), "exp": now.Add(10 * time.Minute).Unix(), } - if claims == nil { - claims = make(map[string]interface{}) 
- } - for k, v := range baseClaims { + for k, v := range extClaims { claims[k] = v } @@ -462,14 +464,30 @@ func (ap *oauth2ClientCredentialsAuthPlugin) NewClient(c Config) (*http.Client, return nil, errors.New("token_url required to use https scheme") } if ap.GrantType == grantTypeClientCredentials { - if ap.AWSKmsKey != nil && (ap.ClientSecret != "" || ap.SigningKeyID != "") || - (ap.ClientSecret != "" && ap.SigningKeyID != "") { - return nil, errors.New("can only use one of client_secret, signing_key or signing_kms_key for client_credentials") + clientCredentialExists := make(map[string]bool) + clientCredentialExists["client_secret"] = ap.ClientSecret != "" + clientCredentialExists["signing_key"] = ap.SigningKeyID != "" + clientCredentialExists["aws_kms"] = ap.AWSKmsKey != nil + clientCredentialExists["client_assertion"] = ap.ClientAssertion != "" + clientCredentialExists["client_assertion_path"] = ap.ClientAssertionPath != "" + + var notEmptyVarCount int + + for _, credentialSet := range clientCredentialExists { + if credentialSet { + notEmptyVarCount++ + } } - if ap.SigningKeyID == "" && ap.AWSKmsKey == nil && (ap.ClientID == "" || ap.ClientSecret == "") { - return nil, errors.New("client_id and client_secret required") + + if notEmptyVarCount == 0 { + return nil, errors.New("please provide one of client_secret, signing_key, aws_kms, client_assertion, or client_assertion_path required") + } + + if notEmptyVarCount > 1 { + return nil, errors.New("can only use one of client_secret, signing_key, aws_kms, client_assertion, or client_assertion_path") } - if ap.AWSKmsKey != nil { + + if clientCredentialExists["aws_kms"] { if ap.AWSSigningPlugin == nil { return nil, errors.New("aws_kms and aws_signing required") } @@ -478,6 +496,24 @@ func (ap *oauth2ClientCredentialsAuthPlugin) NewClient(c Config) (*http.Client, if err != nil { return nil, err } + } else if clientCredentialExists["client_assertion"] { + if ap.ClientAssertionType == "" { + ap.ClientAssertionType = defaultClientAssertionType + } + if ap.ClientID == "" { + return nil, errors.New("client_id and client_assertion required") + } + } else if clientCredentialExists["client_assertion_path"] { + if ap.ClientAssertionType == "" { + ap.ClientAssertionType = defaultClientAssertionType + } + if ap.ClientID == "" { + return nil, errors.New("client_id and client_assertion_path required") + } + } else if clientCredentialExists["client_secret"] { + if ap.ClientID == "" { + return nil, errors.New("client_id and client_secret required") + } } } @@ -505,12 +541,34 @@ func (ap *oauth2ClientCredentialsAuthPlugin) requestToken(ctx context.Context) ( if err != nil { return nil, err } - body.Add("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + body.Add("client_assertion_type", defaultClientAssertionType) body.Add("client_assertion", *authJwt) if ap.ClientID != "" { body.Add("client_id", ap.ClientID) } + } else if ap.ClientAssertion != "" { + if ap.ClientAssertionType == "" { + ap.ClientAssertionType = defaultClientAssertionType + } + if ap.ClientID != "" { + body.Add("client_id", ap.ClientID) + } + body.Add("client_assertion_type", ap.ClientAssertionType) + body.Add("client_assertion", ap.ClientAssertion) + } else if ap.ClientAssertionPath != "" { + if ap.ClientAssertionType == "" { + ap.ClientAssertionType = defaultClientAssertionType + } + bytes, err := os.ReadFile(ap.ClientAssertionPath) + if err != nil { + return nil, err + } + if ap.ClientID != "" { + body.Add("client_id", ap.ClientID) + } + 
body.Add("client_assertion_type", ap.ClientAssertionType) + body.Add("client_assertion", strings.TrimSpace(string(bytes))) } } @@ -541,6 +599,7 @@ func (ap *oauth2ClientCredentialsAuthPlugin) requestToken(ctx context.Context) ( if err != nil { return nil, err } + defer response.Body.Close() bodyRaw, err := io.ReadAll(response.Body) if err != nil { @@ -692,7 +751,7 @@ func (ap *clientTLSAuthPlugin) NewClient(c Config) (*http.Client, error) { return client, nil } -func (ap *clientTLSAuthPlugin) Prepare(req *http.Request) error { +func (ap *clientTLSAuthPlugin) Prepare(_ *http.Request) error { return nil } @@ -700,12 +759,14 @@ func (ap *clientTLSAuthPlugin) Prepare(req *http.Request) error { type awsSigningAuthPlugin struct { AWSEnvironmentCredentials *awsEnvironmentCredentialService `json:"environment_credentials,omitempty"` AWSMetadataCredentials *awsMetadataCredentialService `json:"metadata_credentials,omitempty"` + AWSAssumeRoleCredentials *awsAssumeRoleCredentialService `json:"assume_role_credentials,omitempty"` AWSWebIdentityCredentials *awsWebIdentityCredentialService `json:"web_identity_credentials,omitempty"` AWSProfileCredentials *awsProfileCredentialService `json:"profile_credentials,omitempty"` AWSService string `json:"service,omitempty"` AWSSignatureVersion string `json:"signature_version,omitempty"` + host string ecrAuthPlugin *ecrAuthPlugin kmsSignPlugin *awsKMSSignPlugin @@ -796,6 +857,11 @@ func (ap *awsSigningAuthPlugin) awsCredentialService() awsCredentialService { chain.addService(ap.AWSEnvironmentCredentials) } + if ap.AWSAssumeRoleCredentials != nil { + ap.AWSAssumeRoleCredentials.logger = ap.logger + chain.addService(ap.AWSAssumeRoleCredentials) + } + if ap.AWSWebIdentityCredentials != nil { ap.AWSWebIdentityCredentials.logger = ap.logger chain.addService(ap.AWSWebIdentityCredentials) @@ -820,6 +886,13 @@ func (ap *awsSigningAuthPlugin) NewClient(c Config) (*http.Client, error) { return nil, err } + url, err := url.Parse(c.URL) + if err != nil { + return nil, err + } + + ap.host = url.Host + if ap.logger == nil { ap.logger = c.logger } @@ -832,6 +905,13 @@ func (ap *awsSigningAuthPlugin) NewClient(c Config) (*http.Client, error) { } func (ap *awsSigningAuthPlugin) Prepare(req *http.Request) error { + if ap.host != req.URL.Host { + // Return early if the host does not match. + // This can happen when the OCI registry responded with a redirect to another host. + // For instance, ECR redirects to S3 and the ECR auth header should not be included in the S3 request. 
+ return nil + } + switch ap.AWSService { case "ecr": return ap.ecrAuthPlugin.Prepare(req) @@ -851,6 +931,7 @@ func (ap *awsSigningAuthPlugin) validateAndSetDefaults(serviceType string) error cfgs := map[bool]int{} cfgs[ap.AWSEnvironmentCredentials != nil]++ cfgs[ap.AWSMetadataCredentials != nil]++ + cfgs[ap.AWSAssumeRoleCredentials != nil]++ cfgs[ap.AWSWebIdentityCredentials != nil]++ cfgs[ap.AWSProfileCredentials != nil]++ @@ -864,6 +945,12 @@ func (ap *awsSigningAuthPlugin) validateAndSetDefaults(serviceType string) error } } + if ap.AWSAssumeRoleCredentials != nil { + if err := ap.AWSAssumeRoleCredentials.populateFromEnv(); err != nil { + return err + } + } + if ap.AWSWebIdentityCredentials != nil { if err := ap.AWSWebIdentityCredentials.populateFromEnv(); err != nil { return err diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go b/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go index 3e78b1fad2..349441c838 100644 --- a/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go +++ b/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go @@ -30,8 +30,10 @@ const ( ec2DefaultTokenPath = "http://169.254.169.254/latest/api/token" // ref. https://docs.aws.amazon.com/AmazonECS/latest/userguide/task-iam-roles.html - ecsDefaultCredServicePath = "http://169.254.170.2" - ecsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" + ecsDefaultCredServicePath = "http://169.254.170.2" + ecsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" + ecsFullPathEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" + ecsAuthorizationTokenEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" // ref. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html stsDefaultDomain = "amazonaws.com" @@ -211,7 +213,12 @@ func (cs *awsMetadataCredentialService) urlForMetadataService() (string, error) // otherwise, check environment to see if it looks like we're in an ECS // container (with implied role association) if isECS() { - return ecsDefaultCredServicePath + os.Getenv(ecsRelativePathEnvVar), nil + // first check if the relative env var exists; if so we use that otherwise we + // use the "full" var + if _, relativeExists := os.LookupEnv(ecsRelativePathEnvVar); relativeExists { + return ecsDefaultCredServicePath + os.Getenv(ecsRelativePathEnvVar), nil + } + return os.Getenv(ecsFullPathEnvVar), nil } // if there's no role name and we don't appear to have a path to the // ECS container service, then the configuration is invalid @@ -267,6 +274,16 @@ func (cs *awsMetadataCredentialService) refreshFromService(ctx context.Context) return errors.New("unable to construct metadata HTTP request: " + err.Error()) } + // if using the AWS_CONTAINER_CREDENTIALS_FULL_URI variable, we need to associate the token + // to the request + if _, useFullPath := os.LookupEnv(ecsFullPathEnvVar); useFullPath { + token, tokenExists := os.LookupEnv(ecsAuthorizationTokenEnvVar) + if !tokenExists { + return errors.New("unable to get ECS metadata authorization token") + } + req.Header.Set("Authorization", token) + } + // if in the EC2 environment, we will use IMDSv2, which requires a session cookie from a // PUT request on the token endpoint before it will give the credentials, this provides // protection from SSRF attacks @@ -318,6 +335,169 @@ func (cs *awsMetadataCredentialService) credentials(ctx context.Context) (aws.Cr return cs.creds, nil } +// awsAssumeRoleCredentialService represents a STS credential service that uses active IAM credentials +// to obtain temporary 
security credentials generated by AWS STS via the AssumeRole API operation +type awsAssumeRoleCredentialService struct { + RegionName string `json:"aws_region"` + RoleArn string `json:"iam_role_arn"` + SessionName string `json:"session_name"` + Domain string `json:"aws_domain"` + AWSSigningPlugin *awsSigningAuthPlugin `json:"aws_signing,omitempty"` + stsURL string + creds aws.Credentials + expiration time.Time + logger logging.Logger +} + +func (cs *awsAssumeRoleCredentialService) populateFromEnv() error { + if cs.AWSSigningPlugin == nil { + return errors.New("an AWS signing plugin must be specified when AssumeRole credential provider is enabled") + } + + switch { + case cs.AWSSigningPlugin.AWSEnvironmentCredentials != nil: + case cs.AWSSigningPlugin.AWSProfileCredentials != nil: + case cs.AWSSigningPlugin.AWSMetadataCredentials != nil: + default: + return errors.New("unsupported AWS signing plugin with AssumeRole credential provider") + } + + if cs.AWSSigningPlugin.AWSMetadataCredentials != nil { + if cs.AWSSigningPlugin.AWSMetadataCredentials.RegionName == "" { + if cs.AWSSigningPlugin.AWSMetadataCredentials.RegionName = os.Getenv(awsRegionEnvVar); cs.AWSSigningPlugin.AWSMetadataCredentials.RegionName == "" { + return errors.New("no " + awsRegionEnvVar + " set in environment or configuration") + } + } + } + + if cs.AWSSigningPlugin.AWSSignatureVersion == "" { + cs.AWSSigningPlugin.AWSSignatureVersion = "4" + } + + if cs.Domain == "" { + cs.Domain = os.Getenv(awsDomainEnvVar) + } + + if cs.RegionName == "" { + if cs.RegionName = os.Getenv(awsRegionEnvVar); cs.RegionName == "" { + return errors.New("no " + awsRegionEnvVar + " set in environment or configuration") + } + } + + if cs.RoleArn == "" { + if cs.RoleArn = os.Getenv(awsRoleArnEnvVar); cs.RoleArn == "" { + return errors.New("no " + awsRoleArnEnvVar + " set in environment or configuration") + } + } + + return nil +} + +func (cs *awsAssumeRoleCredentialService) signingCredentials(ctx context.Context) (aws.Credentials, error) { + if cs.AWSSigningPlugin.AWSEnvironmentCredentials != nil { + cs.AWSSigningPlugin.AWSEnvironmentCredentials.logger = cs.logger + return cs.AWSSigningPlugin.AWSEnvironmentCredentials.credentials(ctx) + } + + if cs.AWSSigningPlugin.AWSProfileCredentials != nil { + cs.AWSSigningPlugin.AWSProfileCredentials.logger = cs.logger + return cs.AWSSigningPlugin.AWSProfileCredentials.credentials(ctx) + } + + cs.AWSSigningPlugin.AWSMetadataCredentials.logger = cs.logger + return cs.AWSSigningPlugin.AWSMetadataCredentials.credentials(ctx) +} + +func (cs *awsAssumeRoleCredentialService) stsPath() string { + return getSTSPath(cs.Domain, cs.stsURL, cs.RegionName) +} + +func (cs *awsAssumeRoleCredentialService) refreshFromService(ctx context.Context) error { + // define the expected XML payload from the STS AssumeRole response + // ref.
https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html + type responsePayload struct { + Result struct { + Credentials struct { + SessionToken string + SecretAccessKey string + Expiration time.Time + AccessKeyID string `xml:"AccessKeyId"` + } + } `xml:"AssumeRoleResult"` + } + + // short circuit if a reasonable amount of time until credential expiration remains + if time.Now().Add(time.Minute * 5).Before(cs.expiration) { + cs.logger.Debug("Credentials previously obtained from sts service still valid.") + return nil + } + + cs.logger.Debug("Obtaining credentials from sts for role %s.", cs.RoleArn) + + var sessionName string + if cs.SessionName == "" { + sessionName = "open-policy-agent" + } else { + sessionName = cs.SessionName + } + + queryVals := url.Values{ + "Action": []string{"AssumeRole"}, + "RoleSessionName": []string{sessionName}, + "RoleArn": []string{cs.RoleArn}, + "Version": []string{"2011-06-15"}, + } + stsRequestURL, _ := url.Parse(cs.stsPath()) + + // construct an HTTP client with a reasonably short timeout + client := &http.Client{Timeout: time.Second * 10} + req, err := http.NewRequestWithContext(ctx, http.MethodPost, stsRequestURL.String(), strings.NewReader(queryVals.Encode())) + if err != nil { + return errors.New("unable to construct STS HTTP request: " + err.Error()) + } + + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + + // Note: Calls to AWS STS AssumeRole must be signed using the access key ID + // and secret access key + signingCreds, err := cs.signingCredentials(ctx) + if err != nil { + return err + } + + err = aws.SignRequest(req, "sts", signingCreds, time.Now(), cs.AWSSigningPlugin.AWSSignatureVersion) + if err != nil { + return err + } + + body, err := aws.DoRequestWithClient(req, client, "STS", cs.logger) + if err != nil { + return err + } + + var payload responsePayload + err = xml.Unmarshal(body, &payload) + if err != nil { + return errors.New("failed to parse credential response from STS service: " + err.Error()) + } + + cs.expiration = payload.Result.Credentials.Expiration + cs.creds.AccessKey = payload.Result.Credentials.AccessKeyID + cs.creds.SecretKey = payload.Result.Credentials.SecretAccessKey + cs.creds.SessionToken = payload.Result.Credentials.SessionToken + cs.creds.RegionName = cs.RegionName + + return nil +} + +func (cs *awsAssumeRoleCredentialService) credentials(ctx context.Context) (aws.Credentials, error) { + err := cs.refreshFromService(ctx) + if err != nil { + return cs.creds, err + } + return cs.creds, nil +} + // awsWebIdentityCredentialService represents an STS WebIdentity credential services type awsWebIdentityCredentialService struct { RoleArn string @@ -354,23 +534,7 @@ func (cs *awsWebIdentityCredentialService) populateFromEnv() error { } func (cs *awsWebIdentityCredentialService) stsPath() string { - var domain string - if cs.Domain != "" { - domain = strings.ToLower(cs.Domain) - } else { - domain = stsDefaultDomain - } - - var stsPath string - switch { - case cs.stsURL != "": - stsPath = cs.stsURL - case cs.RegionName != "": - stsPath = fmt.Sprintf(stsRegionPath, strings.ToLower(cs.RegionName), domain) - default: - stsPath = fmt.Sprintf(stsDefaultPath, domain) - } - return stsPath + return getSTSPath(cs.Domain, cs.stsURL, cs.RegionName) } func (cs *awsWebIdentityCredentialService) refreshFromService(ctx context.Context) error { @@ -457,8 +621,9 @@ func (cs *awsWebIdentityCredentialService) credentials(ctx context.Context) (aws func isECS() bool { // the special relative path URI is set by the 
container agent in the ECS environment only - _, isECS := os.LookupEnv(ecsRelativePathEnvVar) - return isECS + _, isECSRelative := os.LookupEnv(ecsRelativePathEnvVar) + _, isECSFull := os.LookupEnv(ecsFullPathEnvVar) + return isECSRelative || isECSFull } // ecrAuthPlugin authorizes requests to AWS ECR. @@ -555,3 +720,23 @@ func (ap *awsKMSSignPlugin) SignDigest(ctx context.Context, digest []byte, keyID return signature, nil } + +func getSTSPath(stsDomain, stsURL, regionName string) string { + var domain string + if stsDomain != "" { + domain = strings.ToLower(stsDomain) + } else { + domain = stsDefaultDomain + } + + var stsPath string + switch { + case stsURL != "": + stsPath = stsURL + case regionName != "": + stsPath = fmt.Sprintf(stsRegionPath, strings.ToLower(regionName), domain) + default: + stsPath = fmt.Sprintf(stsDefaultPath, domain) + } + return stsPath +} diff --git a/vendor/github.com/open-policy-agent/opa/rego/rego.go b/vendor/github.com/open-policy-agent/opa/rego/rego.go index 17873e380a..5a5ea0d123 100644 --- a/vendor/github.com/open-policy-agent/opa/rego/rego.go +++ b/vendor/github.com/open-policy-agent/opa/rego/rego.go @@ -124,6 +124,7 @@ type EvalContext struct { printHook print.Hook capabilities *ast.Capabilities strictBuiltinErrors bool + virtualCache topdown.VirtualCache } func (e *EvalContext) RawInput() *interface{} { @@ -342,6 +343,14 @@ func EvalPrintHook(ph print.Hook) EvalOption { } } +// EvalVirtualCache sets the topdown.VirtualCache to use for evaluation. This is +// optional, and if not set, the default cache is used. +func EvalVirtualCache(vc topdown.VirtualCache) EvalOption { + return func(e *EvalContext) { + e.virtualCache = vc + } +} + func (pq preparedQuery) Modules() map[string]*ast.Module { mods := make(map[string]*ast.Module) @@ -1496,7 +1505,7 @@ func (r *Rego) Compile(ctx context.Context, opts ...CompileOption) (*CompileResu return r.compileWasm(modules, queries, compileQueryType) // TODO(sr) control flow is funky here } -func (r *Rego) compileWasm(modules []*ast.Module, queries []ast.Body, qType queryType) (*CompileResult, error) { +func (r *Rego) compileWasm(_ []*ast.Module, queries []ast.Body, qType queryType) (*CompileResult, error) { policy, err := r.planQuery(queries, qType) if err != nil { return nil, err @@ -1761,14 +1770,15 @@ func (r *Rego) prepare(ctx context.Context, qType queryType, extras []extraStage return err } - futureImports := []*ast.Import{} + queryImports := []*ast.Import{} for _, imp := range imports { - if imp.Path.Value.(ast.Ref).HasPrefix(ast.Ref([]*ast.Term{ast.FutureRootDocument})) { - futureImports = append(futureImports, imp) + path := imp.Path.Value.(ast.Ref) + if path.HasPrefix([]*ast.Term{ast.FutureRootDocument}) || path.HasPrefix([]*ast.Term{ast.RegoRootDocument}) { + queryImports = append(queryImports, imp) } } - r.parsedQuery, err = r.parseQuery(futureImports, r.metrics) + r.parsedQuery, err = r.parseQuery(queryImports, r.metrics) if err != nil { return err } @@ -1870,7 +1880,7 @@ func (r *Rego) loadFiles(ctx context.Context, txn storage.Transaction, m metrics return nil } -func (r *Rego) loadBundles(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error { +func (r *Rego) loadBundles(_ context.Context, _ storage.Transaction, m metrics.Metrics) error { if len(r.bundlePaths) == 0 { return nil } @@ -1921,7 +1931,7 @@ func (r *Rego) parseRawInput(rawInput *interface{}, m metrics.Metrics) (ast.Valu return ast.InterfaceToValue(*rawPtr) } -func (r *Rego) parseQuery(futureImports []*ast.Import, m 
metrics.Metrics) (ast.Body, error) { +func (r *Rego) parseQuery(queryImports []*ast.Import, m metrics.Metrics) (ast.Body, error) { if r.parsedQuery != nil { return r.parsedQuery, nil } @@ -1929,7 +1939,11 @@ func (r *Rego) parseQuery(futureImports []*ast.Import, m metrics.Metrics) (ast.B m.Timer(metrics.RegoQueryParse).Start() defer m.Timer(metrics.RegoQueryParse).Stop() - popts, err := future.ParserOptionsFromFutureImports(futureImports) + popts, err := future.ParserOptionsFromFutureImports(queryImports) + if err != nil { + return nil, err + } + popts, err = parserOptionsFromRegoVersionImport(queryImports, popts) if err != nil { return nil, err } @@ -1937,6 +1951,17 @@ func (r *Rego) parseQuery(futureImports []*ast.Import, m metrics.Metrics) (ast.B return ast.ParseBodyWithOpts(r.query, popts) } +func parserOptionsFromRegoVersionImport(imports []*ast.Import, popts ast.ParserOptions) (ast.ParserOptions, error) { + for _, imp := range imports { + path := imp.Path.Value.(ast.Ref) + if ast.Compare(path, ast.RegoV1CompatibleRef) == 0 { + popts.RegoVersion = ast.RegoV1 + return popts, nil + } + } + return popts, nil +} + func (r *Rego) compileModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error { // Only compile again if there are new modules. @@ -2019,7 +2044,7 @@ func (r *Rego) prepareImports() ([]*ast.Import, error) { return imports, nil } -func (r *Rego) compileQuery(query ast.Body, imports []*ast.Import, m metrics.Metrics, extras []extraStage) (ast.QueryCompiler, ast.Body, error) { +func (r *Rego) compileQuery(query ast.Body, imports []*ast.Import, _ metrics.Metrics, extras []extraStage) (ast.QueryCompiler, ast.Body, error) { var pkg *ast.Package if r.pkg != "" { @@ -2085,7 +2110,8 @@ func (r *Rego) eval(ctx context.Context, ectx *EvalContext) (ResultSet, error) { WithBuiltinErrorList(r.builtinErrorList). WithSeed(ectx.seed). WithPrintHook(ectx.printHook). - WithDistributedTracingOpts(r.distributedTacingOpts) + WithDistributedTracingOpts(r.distributedTacingOpts). + WithVirtualCache(ectx.virtualCache) if !ectx.time.IsZero() { q = q.WithTime(ectx.time) } @@ -2405,6 +2431,53 @@ func (r *Rego) partial(ctx context.Context, ectx *EvalContext) (*PartialQueries, return nil, err } + if r.regoVersion == ast.RegoV0 && (r.capabilities == nil || r.capabilities.ContainsFeature(ast.FeatureRegoV1Import)) { + // If the target rego-version is v0, and the rego.v1 import is available, then we attempt to apply it to support modules. + + for i, mod := range support { + if mod.RegoVersion() != ast.RegoV0 { + continue + } + + // We can't apply the RegoV0CompatV1 version to the support module if it contains rules or vars that + // conflict with future keywords. + applyRegoVersion := true + + ast.WalkRules(mod, func(r *ast.Rule) bool { + name := r.Head.Name + if name == "" && len(r.Head.Reference) > 0 { + name = r.Head.Reference[0].Value.(ast.Var) + } + if ast.IsFutureKeyword(name.String()) { + applyRegoVersion = false + return true + } + return false + }) + + if applyRegoVersion { + ast.WalkVars(mod, func(v ast.Var) bool { + if ast.IsFutureKeyword(v.String()) { + applyRegoVersion = false + return true + } + return false + }) + } + + if applyRegoVersion { + support[i].SetRegoVersion(ast.RegoV0CompatV1) + } else { + support[i].SetRegoVersion(r.regoVersion) + } + } + } else { + // If the target rego-version is not v0, then we apply the target rego-version to the support modules.
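Together with the exported NewVirtualCache further down (topdown/cache.go), the new EvalVirtualCache option allows one cache of virtual-document results to be reused across evaluations of a prepared query. A minimal sketch, assuming the caller is responsible for discarding the shared cache when the underlying data changes:

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/rego"
	"github.com/open-policy-agent/opa/topdown"
)

func main() {
	ctx := context.Background()

	pq, err := rego.New(
		rego.Query("data.example.allow"),
		rego.Module("example.rego", "package example\n\nallow := true\n"),
	).PrepareForEval(ctx)
	if err != nil {
		panic(err)
	}

	// One cache, reused across evaluations; without this option each Eval
	// gets a fresh default cache, so sharing only pays off for repeated queries.
	vc := topdown.NewVirtualCache()

	for i := 0; i < 2; i++ {
		rs, err := pq.Eval(ctx, rego.EvalVirtualCache(vc))
		if err != nil {
			panic(err)
		}
		fmt.Println(rs[0].Expressions[0].Value) // true, twice
	}
}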
+ for i := range support { + support[i].SetRegoVersion(r.regoVersion) + } + } + pq := &PartialQueries{ Queries: queries, Support: support, @@ -2413,7 +2486,7 @@ func (r *Rego) partial(ctx context.Context, ectx *EvalContext) (*PartialQueries, return pq, nil } -func (r *Rego) rewriteQueryToCaptureValue(qc ast.QueryCompiler, query ast.Body) (ast.Body, error) { +func (r *Rego) rewriteQueryToCaptureValue(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { checkCapture := iteration(query) || len(query) > 1 @@ -2530,7 +2603,7 @@ type transactionCloser func(ctx context.Context, err error) error // regardless of status. func (r *Rego) getTxn(ctx context.Context) (storage.Transaction, transactionCloser, error) { - noopCloser := func(ctx context.Context, err error) error { + noopCloser := func(_ context.Context, _ error) error { return nil // no-op default } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/bindings.go b/vendor/github.com/open-policy-agent/opa/topdown/bindings.go index 7621dac529..30a8ac5ec4 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/bindings.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/bindings.go @@ -43,7 +43,7 @@ func (u *bindings) Iter(caller *bindings, iter func(*ast.Term, *ast.Term) error) var err error - u.values.Iter(func(k *ast.Term, v value) bool { + u.values.Iter(func(k *ast.Term, _ value) bool { if err != nil { return true } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cache.go b/vendor/github.com/open-policy-agent/opa/topdown/cache.go index 710efee475..265457e02f 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/cache.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/cache.go @@ -9,6 +9,29 @@ import ( "github.com/open-policy-agent/opa/util" ) +// VirtualCache defines the interface for a cache that stores the results of +// evaluated virtual documents (rules). +// The cache is a stack of frames, where each frame is a mapping from references +// to values. +type VirtualCache interface { + // Push pushes a new, empty frame of value mappings onto the stack. + Push() + + // Pop pops the top frame of value mappings from the stack, removing all associated entries. + Pop() + + // Get returns the value associated with the given reference. The second return value + // indicates whether the reference has a recorded 'undefined' result. + Get(ref ast.Ref) (*ast.Term, bool) + + // Put associates the given reference with the given value. If the value is nil, the reference + // is marked as having an 'undefined' result. + Put(ref ast.Ref, value *ast.Term) + + // Keys returns the set of keys that have been cached for the active frame. + Keys() []ast.Ref +} + type virtualCache struct { stack []*virtualCacheElem } @@ -19,7 +42,7 @@ type virtualCacheElem struct { undefined bool } -func newVirtualCache() *virtualCache { +func NewVirtualCache() VirtualCache { cache := &virtualCache{} cache.Push() return cache @@ -77,6 +100,26 @@ func (c *virtualCache) Put(ref ast.Ref, value *ast.Term) { } } +func (c *virtualCache) Keys() []ast.Ref { + node := c.stack[len(c.stack)-1] + return keysRecursive(nil, node) +} + +func keysRecursive(root ast.Ref, node *virtualCacheElem) []ast.Ref { + var keys []ast.Ref + node.children.Iter(func(k, v util.T) bool { + ref := root.Append(k.(*ast.Term)) + if v.(*virtualCacheElem).value != nil { + keys = append(keys, ref) + } + if v.(*virtualCacheElem).children.Len() > 0 { + keys = append(keys, keysRecursive(ref, v.(*virtualCacheElem))...) 
+ } + return false + }) + return keys +} + func newVirtualCacheElem() *virtualCacheElem { return &virtualCacheElem{children: newVirtualCacheHashMap()} } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go b/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go index 4c6b8c42c7..8824d19bd2 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go @@ -222,11 +222,11 @@ func (p *CopyPropagator) plugBindings(pctx *plugContext, expr *ast.Expr) *ast.Ex // errors unreachable. x, err := ast.Transform(xform, expr.Copy()) - if expr, ok := x.(*ast.Expr); !ok || err != nil { + expr, ok := x.(*ast.Expr) + if !ok || err != nil { panic("unreachable") - } else { - return expr } + return expr } type bindingPlugTransform struct { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/crypto.go b/vendor/github.com/open-policy-agent/opa/topdown/crypto.go index 520c051860..f24432a264 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/crypto.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/crypto.go @@ -21,6 +21,7 @@ import ( "hash" "os" "strings" + "time" "github.com/open-policy-agent/opa/internal/jwx/jwk" @@ -104,7 +105,7 @@ func builtinCryptoX509ParseAndVerifyCertificates(_ BuiltinContext, operands []*a return iter(invalid) } - verified, err := verifyX509CertificateChain(certs) + verified, err := verifyX509CertificateChain(certs, x509.VerifyOptions{}) if err != nil { return iter(invalid) } @@ -122,6 +123,153 @@ func builtinCryptoX509ParseAndVerifyCertificates(_ BuiltinContext, operands []*a return iter(valid) } +var allowedKeyUsages = map[string]x509.ExtKeyUsage{ + "KeyUsageAny": x509.ExtKeyUsageAny, + "KeyUsageServerAuth": x509.ExtKeyUsageServerAuth, + "KeyUsageClientAuth": x509.ExtKeyUsageClientAuth, + "KeyUsageCodeSigning": x509.ExtKeyUsageCodeSigning, + "KeyUsageEmailProtection": x509.ExtKeyUsageEmailProtection, + "KeyUsageIPSECEndSystem": x509.ExtKeyUsageIPSECEndSystem, + "KeyUsageIPSECTunnel": x509.ExtKeyUsageIPSECTunnel, + "KeyUsageIPSECUser": x509.ExtKeyUsageIPSECUser, + "KeyUsageTimeStamping": x509.ExtKeyUsageTimeStamping, + "KeyUsageOCSPSigning": x509.ExtKeyUsageOCSPSigning, + "KeyUsageMicrosoftServerGatedCrypto": x509.ExtKeyUsageMicrosoftServerGatedCrypto, + "KeyUsageNetscapeServerGatedCrypto": x509.ExtKeyUsageNetscapeServerGatedCrypto, + "KeyUsageMicrosoftCommercialCodeSigning": x509.ExtKeyUsageMicrosoftCommercialCodeSigning, + "KeyUsageMicrosoftKernelCodeSigning": x509.ExtKeyUsageMicrosoftKernelCodeSigning, +} + +func builtinCryptoX509ParseAndVerifyCertificatesWithOptions(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + input, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + options, err := builtins.ObjectOperand(operands[1].Value, 2) + if err != nil { + return err + } + + invalid := ast.ArrayTerm( + ast.BooleanTerm(false), + ast.NewTerm(ast.NewArray()), + ) + + certs, err := getX509CertsFromString(string(input)) + if err != nil { + return iter(invalid) + } + + // Collect the cert verification options + verifyOpt, err := extractVerifyOpts(options) + if err != nil { + return err + } + + verified, err := verifyX509CertificateChain(certs, verifyOpt) + if err != nil { + return iter(invalid) + } + + value, err := ast.InterfaceToValue(verified) + if err != nil { + return err + } + + valid := ast.ArrayTerm( + 
ast.BooleanTerm(true), + ast.NewTerm(value), + ) + + return iter(valid) +} + +func extractVerifyOpts(options ast.Object) (verifyOpt x509.VerifyOptions, err error) { + + for _, key := range options.Keys() { + k, err := ast.JSON(key.Value) + if err != nil { + return verifyOpt, err + } + k, ok := k.(string) + if !ok { + continue + } + + switch k { + case "DNSName": + dns, ok := options.Get(key).Value.(ast.String) + if ok { + verifyOpt.DNSName = strings.Trim(string(dns), "\"") + } else { + return verifyOpt, fmt.Errorf("'DNSName' should be a string") + } + case "CurrentTime": + c, ok := options.Get(key).Value.(ast.Number) + if ok { + nanosecs, ok := c.Int64() + if ok { + verifyOpt.CurrentTime = time.Unix(0, nanosecs) + } else { + return verifyOpt, fmt.Errorf("'CurrentTime' should be a valid int64 number") + } + } else { + return verifyOpt, fmt.Errorf("'CurrentTime' should be a number") + } + case "MaxConstraintComparisons": + c, ok := options.Get(key).Value.(ast.Number) + if ok { + maxComparisons, ok := c.Int() + if ok { + verifyOpt.MaxConstraintComparisions = maxComparisons + } else { + return verifyOpt, fmt.Errorf("'MaxConstraintComparisons' should be a valid number") + } + } else { + return verifyOpt, fmt.Errorf("'MaxConstraintComparisons' should be a number") + } + case "KeyUsages": + type forEach interface { + Foreach(func(*ast.Term)) + } + var ks forEach + switch options.Get(key).Value.(type) { + case *ast.Array: + ks = options.Get(key).Value.(*ast.Array) + case ast.Set: + ks = options.Get(key).Value.(ast.Set) + default: + return verifyOpt, fmt.Errorf("'KeyUsages' should be an Array or Set") + } + + // Collect the x509.ExtKeyUsage values by looking up the + // mapping of key usage strings to x509.ExtKeyUsage + var invalidKUsgs []string + ks.Foreach(func(t *ast.Term) { + u, ok := t.Value.(ast.String) + if ok { + v := strings.Trim(string(u), "\"") + if k, ok := allowedKeyUsages[v]; ok { + verifyOpt.KeyUsages = append(verifyOpt.KeyUsages, k) + } else { + invalidKUsgs = append(invalidKUsgs, v) + } + } + }) + if len(invalidKUsgs) > 0 { + return x509.VerifyOptions{}, fmt.Errorf("invalid entries for 'KeyUsages' found: %s", invalidKUsgs) + } + default: + return verifyOpt, fmt.Errorf("invalid key option") + } + + } + + return verifyOpt, nil +} + func builtinCryptoX509ParseKeyPair(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { certificate, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { @@ -380,6 +528,7 @@ func builtinCryptoHmacEqual(_ BuiltinContext, operands []*ast.Term, iter func(*a func init() { RegisterBuiltinFunc(ast.CryptoX509ParseCertificates.Name, builtinCryptoX509ParseCertificates) RegisterBuiltinFunc(ast.CryptoX509ParseAndVerifyCertificates.Name, builtinCryptoX509ParseAndVerifyCertificates) + RegisterBuiltinFunc(ast.CryptoX509ParseAndVerifyCertificatesWithOptions.Name, builtinCryptoX509ParseAndVerifyCertificatesWithOptions) RegisterBuiltinFunc(ast.CryptoMd5.Name, builtinCryptoMd5) RegisterBuiltinFunc(ast.CryptoSha1.Name, builtinCryptoSha1) RegisterBuiltinFunc(ast.CryptoSha256.Name, builtinCryptoSha256) @@ -394,7 +543,7 @@ func init() { RegisterBuiltinFunc(ast.CryptoHmacEqual.Name, builtinCryptoHmacEqual) } -func verifyX509CertificateChain(certs []*x509.Certificate) ([]*x509.Certificate, error) { +func verifyX509CertificateChain(certs []*x509.Certificate, vo x509.VerifyOptions) ([]*x509.Certificate, error) { if len(certs) < 2 { return nil, builtins.NewOperandErr(1, "must supply at least two certificates to be able to verify") } @@ -414,8 
+563,12 @@ func verifyX509CertificateChain(certs []*x509.Certificate) ([]*x509.Certificate, // verify the cert chain back to the root verifyOpts := x509.VerifyOptions{ - Roots: roots, - Intermediates: intermediates, + Roots: roots, + Intermediates: intermediates, + DNSName: vo.DNSName, + CurrentTime: vo.CurrentTime, + KeyUsages: vo.KeyUsages, + MaxConstraintComparisions: vo.MaxConstraintComparisions, } chains, err := leaf.Verify(verifyOpts) if err != nil { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/encoding.go b/vendor/github.com/open-policy-agent/opa/topdown/encoding.go index 19ba323d19..f3475a60d0 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/encoding.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/encoding.go @@ -35,6 +35,92 @@ func builtinJSONMarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T return iter(ast.StringTerm(string(bs))) } +func builtinJSONMarshalWithOpts(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + asJSON, err := ast.JSON(operands[0].Value) + if err != nil { + return err + } + + indentWith := "\t" + prefixWith := "" + implicitPrettyPrint := false + userDeclaredExplicitPrettyPrint := false + shouldPrettyPrint := false + + marshalOpts, err := builtins.ObjectOperand(operands[1].Value, 2) + if err != nil { + return err + } + + for idx, k := range marshalOpts.Keys() { + + val := marshalOpts.Get(k) + + key, err := builtins.StringOperand(k.Value, idx) + if err != nil { + return builtins.NewOperandErr(2, "failed to stringify key %v at index %d: %v", k, idx, err) + } + + switch key { + + case "prefix": + prefixOpt, err := builtins.StringOperand(val.Value, idx) + if err != nil { + return builtins.NewOperandErr(2, "key %s failed cast to string: %v", key, err) + } + prefixWith = string(prefixOpt) + implicitPrettyPrint = true + + case "indent": + indentOpt, err := builtins.StringOperand(val.Value, idx) + if err != nil { + return builtins.NewOperandErr(2, "key %s failed cast to string: %v", key, err) + + } + indentWith = string(indentOpt) + implicitPrettyPrint = true + + case "pretty": + userDeclaredExplicitPrettyPrint = true + explicitPrettyPrint, ok := val.Value.(ast.Boolean) + if !ok { + return builtins.NewOperandErr(2, "key %s failed cast to bool", key) + } + + shouldPrettyPrint = bool(explicitPrettyPrint) + + default: + return builtins.NewOperandErr(2, "object contained unknown key %s", key) + } + + } + + if !userDeclaredExplicitPrettyPrint { + shouldPrettyPrint = implicitPrettyPrint + } + + var bs []byte + + if shouldPrettyPrint { + bs, err = json.MarshalIndent(asJSON, prefixWith, indentWith) + } else { + bs, err = json.Marshal(asJSON) + } + + if err != nil { + return err + } + + if shouldPrettyPrint { + // json.MarshalIndent() function will not prefix the first line of emitted JSON + return iter(ast.StringTerm(prefixWith + string(bs))) + } + + return iter(ast.StringTerm(string(bs))) + +} + func builtinJSONUnmarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { str, err := builtins.StringOperand(operands[0].Value, 1) @@ -299,6 +385,7 @@ func builtinHexDecode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter func init() { RegisterBuiltinFunc(ast.JSONMarshal.Name, builtinJSONMarshal) + RegisterBuiltinFunc(ast.JSONMarshalWithOptions.Name, builtinJSONMarshalWithOpts) RegisterBuiltinFunc(ast.JSONUnmarshal.Name, builtinJSONUnmarshal) RegisterBuiltinFunc(ast.JSONIsValid.Name, builtinJSONIsValid) RegisterBuiltinFunc(ast.Base64Encode.Name, builtinBase64Encode) 
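The two builtins registered in these hunks are easiest to see from the calling side. A small sketch driving json.marshal_with_options (assuming that is the Rego-level name behind ast.JSONMarshalWithOptions) through the rego package; per the implementation above, passing "indent" or "prefix" implies pretty-printing unless "pretty": false is set explicitly, and crypto.x509.parse_and_verify_certificates_with_options takes its DNSName, CurrentTime, MaxConstraintComparisons and KeyUsages options through the same kind of object:

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/rego"
)

func main() {
	// "indent" alone turns pretty-printing on implicitly.
	rs, err := rego.New(
		rego.Query(`json.marshal_with_options({"b": 2, "a": 1}, {"indent": "  "})`),
	).Eval(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(rs[0].Expressions[0].Value)
	// {
	//   "a": 1,
	//   "b": 2
	// }
}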
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/eval.go b/vendor/github.com/open-policy-agent/opa/topdown/eval.go index d6a10a43ea..2fcc431c80 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/eval.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/eval.go @@ -2,6 +2,7 @@ package topdown import ( "context" + "errors" "fmt" "io" "sort" @@ -50,6 +51,12 @@ func (ee *earlyExitError) Error() string { return fmt.Sprintf("%v: early exit", ee.e.query) } +type deferredEarlyExitError earlyExitError + +func (ee deferredEarlyExitError) Error() string { + return fmt.Sprintf("%v: deferred early exit", ee.e.query) +} + type eval struct { ctx context.Context metrics metrics.Metrics @@ -83,7 +90,7 @@ type eval struct { builtinCache builtins.Cache ndBuiltinCache builtins.NDBCache functionMocks *functionMocksStack - virtualCache *virtualCache + virtualCache VirtualCache comprehensionCache *comprehensionCache interQueryBuiltinCache cache.InterQueryCache saveSet *saveSet @@ -230,6 +237,10 @@ func (e *eval) traceWasm(x ast.Node, target *ast.Ref) { e.traceEvent(WasmOp, x, "", target) } +func (e *eval) traceUnify(a, b *ast.Term) { + e.traceEvent(UnifyOp, ast.Equality.Expr(a, b), "", nil) +} + func (e *eval) traceEvent(op Op, x ast.Node, msg string, target *ast.Ref) { if !e.traceEnabled { @@ -268,6 +279,7 @@ func (e *eval) traceEvent(op Op, x ast.Node, msg string, target *ast.Ref) { evt.Locals = ast.NewValueMap() evt.LocalMetadata = map[ast.Var]VarMetadata{} + evt.localVirtualCacheSnapshot = ast.NewValueMap() _ = e.bindings.Iter(nil, func(k, v *ast.Term) error { original := k.Value.(ast.Var) @@ -283,15 +295,21 @@ func (e *eval) traceEvent(op Op, x ast.Node, msg string, target *ast.Ref) { }) // cannot return error ast.WalkTerms(x, func(term *ast.Term) bool { - if v, ok := term.Value.(ast.Var); ok { - if _, ok := evt.LocalMetadata[v]; !ok { - if rewritten, ok := e.rewrittenVar(v); ok { - evt.LocalMetadata[v] = VarMetadata{ + switch x := term.Value.(type) { + case ast.Var: + if _, ok := evt.LocalMetadata[x]; !ok { + if rewritten, ok := e.rewrittenVar(x); ok { + evt.LocalMetadata[x] = VarMetadata{ Name: rewritten, Location: term.Loc(), } } } + case ast.Ref: + groundRef := x.GroundPrefix() + if v, _ := e.virtualCache.Get(groundRef); v != nil { + evt.localVirtualCacheSnapshot.Put(groundRef, v.Value) + } } return false }) @@ -307,6 +325,13 @@ func (e *eval) eval(iter evalIterator) error { } func (e *eval) evalExpr(iter evalIterator) error { + wrapErr := func(err error) error { + if !e.findOne { + // The current rule/function doesn't support EE, but a caller (somewhere down the call stack) does. + return &deferredEarlyExitError{prev: err, e: e} + } + return &earlyExitError{prev: err, e: e} + } if e.cancel != nil && e.cancel.Cancelled() { return &Error{ @@ -317,16 +342,18 @@ func (e *eval) evalExpr(iter evalIterator) error { if e.index >= len(e.query) { err := iter(e) + if err != nil { - ee, ok := err.(*earlyExitError) - if !ok { + switch err := err.(type) { + case *deferredEarlyExitError: + return wrapErr(err) + case *earlyExitError: + return wrapErr(err) + default: return err } - if !e.findOne { - return nil - } - return &earlyExitError{prev: ee, e: e} } + if e.findOne && !e.partial() { // we've found one! 
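The deferred variant introduced here lets an early-exit signal survive constructs that must finish enumerating (findOne == false) before the exit may take effect: the first deferral is remembered, iteration continues, and the signal is re-raised afterwards. A toy model of that control flow; the types below are simplified stand-ins, not OPA's internals:

package main

import (
	"errors"
	"fmt"
)

var errEarlyExit = errors.New("early exit")

// deferredEarlyExit carries an early-exit signal that may not unwind yet.
type deferredEarlyExit struct{ prev error }

func (d *deferredEarlyExit) Error() string { return "deferred: " + d.prev.Error() }

// enumerate visits every element; a deferred early exit from the callback
// is remembered so the remaining elements are still visited.
func enumerate(xs []int, visit func(int) error) error {
	var deferred *deferredEarlyExit
	for _, x := range xs {
		if err := visit(x); err != nil {
			var d *deferredEarlyExit
			if errors.As(err, &d) {
				if deferred == nil {
					deferred = d
				}
				continue
			}
			return err // a hard error still aborts immediately
		}
	}
	if deferred != nil {
		return deferred
	}
	return nil
}

func main() {
	err := enumerate([]int{1, 2, 3}, func(x int) error {
		fmt.Println("visited", x)
		if x == 1 {
			return &deferredEarlyExit{prev: errEarlyExit}
		}
		return nil
	})
	fmt.Println(err) // "deferred: early exit", raised only after the full walk
}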
return &earlyExitError{e: e} } @@ -391,15 +418,9 @@ func (e *eval) evalStep(iter evalIterator) error { }) case *ast.Every: eval := evalEvery{ - e: e, - expr: expr, - generator: ast.NewBody( - ast.Equality.Expr( - ast.RefTerm(terms.Domain, terms.Key).SetLocation(terms.Domain.Location), - terms.Value, - ).SetLocation(terms.Domain.Location), - ), - body: terms.Body, + Every: terms, + e: e, + expr: expr, } err = eval.eval(func() error { defined = true @@ -848,7 +869,7 @@ func (e *eval) biunify(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) err a, b1 = b1.apply(a) b, b2 = b2.apply(b) if e.traceEnabled { - e.traceEvent(UnifyOp, ast.Equality.Expr(a, b), "", nil) + e.traceUnify(a, b) } switch vA := a.Value.(type) { case ast.Var, ast.Ref, *ast.ArrayComprehension, *ast.SetComprehension, *ast.ObjectComprehension: @@ -1086,10 +1107,10 @@ func (e *eval) biunifyComprehension(a, b *ast.Term, b1, b2 *bindings, swap bool, return err } else if value != nil { return e.biunify(value, b, b1, b2, iter) - } else { - e.instr.counterIncr(evalOpComprehensionCacheMiss) } + e.instr.counterIncr(evalOpComprehensionCacheMiss) + switch a := a.Value.(type) { case *ast.ArrayComprehension: return e.biunifyComprehensionArray(a, b, b1, b2, iter) @@ -1826,7 +1847,7 @@ func (e evalFunc) eval(iter unifyIterator) error { } } - return suppressEarlyExit(e.evalValue(iter, argCount, e.ir.EarlyExit)) + return e.evalValue(iter, argCount, e.ir.EarlyExit) } func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) error { @@ -1844,33 +1865,52 @@ func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) erro var prev *ast.Term - for _, rule := range e.ir.Rules { - next, err := e.evalOneRule(iter, rule, cacheKey, prev, findOne) - if err != nil { - return err - } - if next == nil { - for _, erule := range e.ir.Else[rule] { - next, err = e.evalOneRule(iter, erule, cacheKey, prev, findOne) - if err != nil { + return withSuppressEarlyExit(func() error { + var outerEe *deferredEarlyExitError + for _, rule := range e.ir.Rules { + next, err := e.evalOneRule(iter, rule, cacheKey, prev, findOne) + if err != nil { + if oee, ok := err.(*deferredEarlyExitError); ok { + if outerEe == nil { + outerEe = oee + } + } else { return err } - if next != nil { - break + } + if next == nil { + for _, erule := range e.ir.Else[rule] { + next, err = e.evalOneRule(iter, erule, cacheKey, prev, findOne) + if err != nil { + if oee, ok := err.(*deferredEarlyExitError); ok { + if outerEe == nil { + outerEe = oee + } + } else { + return err + } + } + if next != nil { + break + } } } + if next != nil { + prev = next + } } - if next != nil { - prev = next + + if e.ir.Default != nil && prev == nil { + _, err := e.evalOneRule(iter, e.ir.Default, cacheKey, prev, findOne) + return err } - } - if e.ir.Default != nil && prev == nil { - _, err := e.evalOneRule(iter, e.ir.Default, cacheKey, prev, findOne) - return err - } + if outerEe != nil { + return outerEe + } - return nil + return nil + }) } func (e evalFunc) evalCache(argCount int, iter unifyIterator) (ast.Ref, bool, error) { @@ -2129,6 +2169,18 @@ func (e evalTree) enumerate(iter unifyIterator) error { return err } + var deferredEe *deferredEarlyExitError + handleErr := func(err error) error { + var dee *deferredEarlyExitError + if errors.As(err, &dee) { + if deferredEe == nil { + deferredEe = dee + } + return nil + } + return err + } + if doc != nil { switch doc := doc.(type) { case *ast.Array: @@ -2137,31 +2189,37 @@ func (e evalTree) enumerate(iter unifyIterator) error { err := 
e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error { return e.next(iter, k) }) - if err != nil { + + if err := handleErr(err); err != nil { return err } } case ast.Object: ki := doc.KeysIterator() for k, more := ki.Next(); more; k, more = ki.Next() { - if err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error { + err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error { return e.next(iter, k) - }); err != nil { + }) + if err := handleErr(err); err != nil { return err } } case ast.Set: - err := doc.Iter(func(elem *ast.Term) error { - return e.e.biunify(elem, e.ref[e.pos], e.bindings, e.bindings, func() error { + if err := doc.Iter(func(elem *ast.Term) error { + err := e.e.biunify(elem, e.ref[e.pos], e.bindings, e.bindings, func() error { return e.next(iter, elem) }) - }) - if err != nil { + return handleErr(err) + }); err != nil { return err } } } + if deferredEe != nil { + return deferredEe + } + if e.node == nil { return nil } @@ -2349,6 +2407,15 @@ type evalVirtualPartialCacheHint struct { full bool } +func (h *evalVirtualPartialCacheHint) keyWithoutScope() ast.Ref { + if h.key != nil { + if _, ok := h.key[len(h.key)-1].Value.(vcKeyScope); ok { + return h.key[:len(h.key)-1] + } + } + return h.key +} + func (e evalVirtualPartial) eval(iter unifyIterator) error { unknown := e.e.unknown(e.ref[:e.pos+1], e.bindings) @@ -2427,7 +2494,7 @@ func (e evalVirtualPartial) evalEachRule(iter unifyIterator, unknown bool) error } if hint.key != nil { - if v, err := result.Value.Find(hint.key[e.pos+1:]); err == nil && v != nil { + if v, err := result.Value.Find(hint.keyWithoutScope()[e.pos+1:]); err == nil && v != nil { e.e.virtualCache.Put(hint.key, ast.NewTerm(v)) } } @@ -2513,7 +2580,7 @@ func (e evalVirtualPartial) evalOneRulePreUnify(iter unifyIterator, rule *ast.Ru } // Walk the dynamic portion of rule ref and key to unify vars - err := child.biunifyRuleHead(e.pos+1, e.ref, rule, e.bindings, child.bindings, func(pos int) error { + err := child.biunifyRuleHead(e.pos+1, e.ref, rule, e.bindings, child.bindings, func(_ int) error { defined = true return child.eval(func(child *eval) error { @@ -2601,7 +2668,7 @@ func (e evalVirtualPartial) evalOneRulePostUnify(iter unifyIterator, rule *ast.R err := child.eval(func(child *eval) error { defined = true - return e.e.biunifyRuleHead(e.pos+1, e.ref, rule, e.bindings, child.bindings, func(pos int) error { + return e.e.biunifyRuleHead(e.pos+1, e.ref, rule, e.bindings, child.bindings, func(_ int) error { return e.evalOneRuleContinue(iter, rule, child) }) }) @@ -2677,7 +2744,7 @@ func (e evalVirtualPartial) partialEvalSupport(iter unifyIterator) error { return e.e.saveUnify(term, e.rterm, e.bindings, e.rbindings, iter) } -func (e evalVirtualPartial) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) (bool, error) { +func (e evalVirtualPartial) partialEvalSupportRule(rule *ast.Rule, _ ast.Ref) (bool, error) { child := e.e.child(rule.Body) child.traceEnter(rule) @@ -2774,6 +2841,8 @@ func (e evalVirtualPartial) evalCache(iter unifyIterator) (evalVirtualPartialCac plugged := e.bindings.Plug(e.ref[e.pos+1]) if _, ok := plugged.Value.(ast.Var); ok { + // Note: we might have additional opportunity to optimize here, if we consider that ground values + // right of e.pos could create a smaller eval "scope" through ref bi-unification before evaluating rules. 
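Because NewVirtualCache and the VirtualCache interface are exported above, the cache backing these hints can also be exercised directly. A short sketch of the frame semantics: Push/Pop scope entries to a frame, and Get's second result reports a recorded 'undefined' rather than a plain miss:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/topdown"
)

func main() {
	vc := topdown.NewVirtualCache()

	allow := ast.MustParseRef("data.example.allow")
	deny := ast.MustParseRef("data.example.deny")

	vc.Push() // open a fresh frame; the entries below live only in this frame
	vc.Put(allow, ast.BooleanTerm(true))
	vc.Put(deny, nil) // nil records an 'undefined' result for the ref

	if v, _ := vc.Get(allow); v != nil {
		fmt.Println("cached:", v) // cached: true
	}
	_, undefined := vc.Get(deny)
	fmt.Println("undefined recorded:", undefined) // undefined recorded: true
	fmt.Println("keys:", vc.Keys())               // keys: [data.example.allow]

	vc.Pop() // drop the frame and everything stored in it
}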
hint.full = true hint.key = e.plugged[:e.pos+1] e.e.instr.counterIncr(evalOpVirtualCacheMiss) @@ -2782,19 +2851,76 @@ func (e evalVirtualPartial) evalCache(iter unifyIterator) (evalVirtualPartialCac m := maxRefLength(e.ir.Rules, len(e.ref)) + // Creating the hint key by walking the ref and plugging vars until we hit a non-ground term. + // Any ground term right of this point will affect the scope of evaluation by ref unification, + // so we create a virtual-cache scope key to qualify the result stored in the cache. + // + // E.g. given the following rule: + // + // package example + // + // a[x][y][z] := x + y + z if { + // some x in [1, 2] + // some y in [3, 4] + // some z in [5, 6] + // } + // + // and the following ref (1): + // + // data.example.a[1][_][5] + // + // then the hint key will be: + // + // data.example.a[1][<_,5>] + // + // where <_,5> is the scope of the pre-eval unification. + // This part does not contribute to the "location" of the cached data. + // + // The following ref (2): + // + // data.example.a[1][_][6] + // + // will produce the same hint key "location" 'data.example.a[1]', but a different scope component + // '<_,6>', which will create a different entry in the cache. + scoping := false + hintKeyEnd := 0 for i := e.pos + 1; i < m; i++ { plugged = e.bindings.Plug(e.ref[i]) - if !plugged.IsGround() { - break + if plugged.IsGround() && !scoping { + hintKeyEnd = i + hint.key = append(e.plugged[:i], plugged) + } else { + scoping = true + hl := len(hint.key) + if hl == 0 { + break + } + if scope, ok := hint.key[hl-1].Value.(vcKeyScope); ok { + scope.Ref = append(scope.Ref, plugged) + hint.key[len(hint.key)-1] = ast.NewTerm(scope) + } else { + scope = vcKeyScope{} + scope.Ref = append(scope.Ref, plugged) + hint.key = append(hint.key, ast.NewTerm(scope)) + } } - hint.key = append(e.plugged[:i], plugged) - if cached, _ := e.e.virtualCache.Get(hint.key); cached != nil { e.e.instr.counterIncr(evalOpVirtualCacheHit) hint.hit = true - return hint, e.evalTerm(iter, i+1, cached, e.bindings) + return hint, e.evalTerm(iter, hintKeyEnd+1, cached, e.bindings) + } + } + + if hl := len(hint.key); hl > 0 { + if scope, ok := hint.key[hl-1].Value.(vcKeyScope); ok { + scope = scope.reduce() + if scope.empty() { + hint.key = hint.key[:hl-1] + } else { + hint.key[hl-1].Value = scope + } } } @@ -2803,6 +2929,85 @@ func (e evalVirtualPartial) evalCache(iter unifyIterator) (evalVirtualPartialCac return hint, nil } +// vcKeyScope represents the scoping that pre-rule-eval ref unification imposes on a virtual cache entry. 
+type vcKeyScope struct { + ast.Ref +} + +func (q vcKeyScope) Compare(other ast.Value) int { + if q2, ok := other.(vcKeyScope); ok { + r1 := q.Ref + r2 := q2.Ref + if len(r1) != len(r2) { + return -1 + } + + for i := range r1 { + _, v1IsVar := r1[i].Value.(ast.Var) + _, v2IsVar := r2[i].Value.(ast.Var) + if v1IsVar && v2IsVar { + continue + } + if r1[i].Value.Compare(r2[i].Value) != 0 { + return -1 + } + } + + return 0 + } + return 1 +} + +func (vcKeyScope) Find(ast.Ref) (ast.Value, error) { + return nil, nil +} + +func (q vcKeyScope) Hash() int { + var hash int + for _, v := range q.Ref { + if _, ok := v.Value.(ast.Var); ok { + // all vars are equal + hash++ + } else { + hash += v.Value.Hash() + } + } + return hash +} + +func (q vcKeyScope) IsGround() bool { + return false +} + +func (q vcKeyScope) String() string { + buf := make([]string, 0, len(q.Ref)) + for _, t := range q.Ref { + if _, ok := t.Value.(ast.Var); ok { + buf = append(buf, "_") + } else { + buf = append(buf, t.String()) + } + } + return fmt.Sprintf("<%s>", strings.Join(buf, ",")) +} + +// reduce removes vars from the tail of the ref. +func (q vcKeyScope) reduce() vcKeyScope { + ref := q.Ref.Copy() + var i int + for i = len(q.Ref) - 1; i >= 0; i-- { + if _, ok := q.Ref[i].Value.(ast.Var); !ok { + break + } + } + ref = ref[:i+1] + return vcKeyScope{ref} +} + +func (q vcKeyScope) empty() bool { + return len(q.Ref) == 0 +} + func getNestedObject(ref ast.Ref, rootObj *ast.Object, b *bindings, l *ast.Location) (*ast.Object, error) { current := rootObj for _, term := range ref { @@ -2926,7 +3131,7 @@ func (e evalVirtualComplete) eval(iter unifyIterator) error { } if !e.e.unknown(e.ref, e.bindings) { - return suppressEarlyExit(e.evalValue(iter, e.ir.EarlyExit)) + return e.evalValue(iter, e.ir.EarlyExit) } var generateSupport bool @@ -2955,46 +3160,67 @@ func (e evalVirtualComplete) evalValue(iter unifyIterator, findOne bool) error { return nil } + // a cached result won't generate any EE from evaluating the rule, so we exempt it from EE suppression to not + // drop EE generated by the caller (through `iter` invocation). 
if cached != nil { e.e.instr.counterIncr(evalOpVirtualCacheHit) return e.evalTerm(iter, cached, e.bindings) } - e.e.instr.counterIncr(evalOpVirtualCacheMiss) + return withSuppressEarlyExit(func() error { + e.e.instr.counterIncr(evalOpVirtualCacheMiss) - var prev *ast.Term + var prev *ast.Term + var deferredEe *deferredEarlyExitError - for _, rule := range e.ir.Rules { - next, err := e.evalValueRule(iter, rule, prev, findOne) - if err != nil { - return err - } - if next == nil { - for _, erule := range e.ir.Else[rule] { - next, err = e.evalValueRule(iter, erule, prev, findOne) - if err != nil { + for _, rule := range e.ir.Rules { + next, err := e.evalValueRule(iter, rule, prev, findOne) + if err != nil { + if dee, ok := err.(*deferredEarlyExitError); ok { + if deferredEe == nil { + deferredEe = dee + } + } else { return err } - if next != nil { - break + } + if next == nil { + for _, erule := range e.ir.Else[rule] { + next, err = e.evalValueRule(iter, erule, prev, findOne) + if err != nil { + if dee, ok := err.(*deferredEarlyExitError); ok { + if deferredEe == nil { + deferredEe = dee + } + } else { + return err + } + } + if next != nil { + break + } } } + if next != nil { + prev = next + } } - if next != nil { - prev = next + + if e.ir.Default != nil && prev == nil { + _, err := e.evalValueRule(iter, e.ir.Default, prev, findOne) + return err } - } - if e.ir.Default != nil && prev == nil { - _, err := e.evalValueRule(iter, e.ir.Default, prev, findOne) - return err - } + if prev == nil { + e.e.virtualCache.Put(e.plugged[:e.pos+1], nil) + } - if prev == nil { - e.e.virtualCache.Put(e.plugged[:e.pos+1], nil) - } + if deferredEe != nil { + return deferredEe + } - return nil + return nil + }) } func (e evalVirtualComplete) evalValueRule(iter unifyIterator, rule *ast.Rule, prev *ast.Term, findOne bool) (*ast.Term, error) { @@ -3025,6 +3251,7 @@ func (e evalVirtualComplete) evalValueRule(iter unifyIterator, rule *ast.Rule, p return err } + // TODO: trace redo if EE-err && !findOne(?) 
child.traceRedo(rule) return nil }) @@ -3197,6 +3424,17 @@ func (e evalTerm) next(iter unifyIterator, plugged *ast.Term) error { } func (e evalTerm) enumerate(iter unifyIterator) error { + var deferredEe *deferredEarlyExitError + handleErr := func(err error) error { + var dee *deferredEarlyExitError + if errors.As(err, &dee) { + if deferredEe == nil { + deferredEe = dee + } + return nil + } + return err + } switch v := e.term.Value.(type) { case *ast.Array: @@ -3205,24 +3443,34 @@ func (e evalTerm) enumerate(iter unifyIterator) error { err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error { return e.next(iter, k) }) - if err != nil { + + if err := handleErr(err); err != nil { return err } } case ast.Object: - return v.Iter(func(k, _ *ast.Term) error { - return e.e.biunify(k, e.ref[e.pos], e.termbindings, e.bindings, func() error { + if err := v.Iter(func(k, _ *ast.Term) error { + err := e.e.biunify(k, e.ref[e.pos], e.termbindings, e.bindings, func() error { return e.next(iter, e.termbindings.Plug(k)) }) - }) + return handleErr(err) + }); err != nil { + return err + } case ast.Set: - return v.Iter(func(elem *ast.Term) error { - return e.e.biunify(elem, e.ref[e.pos], e.termbindings, e.bindings, func() error { + if err := v.Iter(func(elem *ast.Term) error { + err := e.e.biunify(elem, e.ref[e.pos], e.termbindings, e.bindings, func() error { return e.next(iter, e.termbindings.Plug(elem)) }) - }) + return handleErr(err) + }); err != nil { + return err + } } + if deferredEe != nil { + return deferredEe + } return nil } @@ -3294,19 +3542,32 @@ func (e evalTerm) save(iter unifyIterator) error { } type evalEvery struct { - e *eval - expr *ast.Expr - generator ast.Body - body ast.Body + *ast.Every + e *eval + expr *ast.Expr } func (e evalEvery) eval(iter unifyIterator) error { // unknowns in domain or body: save the expression, PE its body - if e.e.unknown(e.generator, e.e.bindings) || e.e.unknown(e.body, e.e.bindings) { + if e.e.unknown(e.Domain, e.e.bindings) || e.e.unknown(e.Body, e.e.bindings) { return e.save(iter) } - domain := e.e.closure(e.generator) + if pd := e.e.bindings.Plug(e.Domain); pd != nil { + if !isIterableValue(pd.Value) { + e.e.traceFail(e.expr) + return nil + } + } + + generator := ast.NewBody( + ast.Equality.Expr( + ast.RefTerm(e.Domain, e.Key).SetLocation(e.Domain.Location), + e.Value, + ).SetLocation(e.Domain.Location), + ) + + domain := e.e.closure(generator) all := true // all generator evaluations yield one successful body evaluation domain.traceEnter(e.expr) @@ -3317,14 +3578,14 @@ func (e evalEvery) eval(iter unifyIterator) error { // This would do extra work, like iterating needlessly if domain was a large array. return nil } - body := child.closure(e.body) + body := child.closure(e.Body) body.findOne = true - body.traceEnter(e.body) + body.traceEnter(e.Body) done := false err := body.eval(func(*eval) error { - body.traceExit(e.body) + body.traceExit(e.Body) done = true - body.traceRedo(e.body) + body.traceRedo(e.Body) return nil }) if !done { @@ -3332,11 +3593,15 @@ func (e evalEvery) eval(iter unifyIterator) error { } child.traceRedo(e.expr) - return err + + // We don't want to abort the generator domain enumeration with EE. + return suppressEarlyExit(err) }) + if err != nil { return err } + if all { err := iter() domain.traceExit(e.expr) @@ -3346,6 +3611,15 @@ func (e evalEvery) eval(iter unifyIterator) error { return nil } +// isIterableValue returns true if the AST value is an iterable type. 
+func isIterableValue(x ast.Value) bool { + switch x.(type) { + case *ast.Array, ast.Object, ast.Set: + return true + } + return false +} + func (e *evalEvery) save(iter unifyIterator) error { return e.e.saveExpr(e.plug(e.expr), e.e.bindings, iter) } @@ -3662,11 +3936,19 @@ func refContainsNonScalar(ref ast.Ref) bool { } func suppressEarlyExit(err error) error { - ee, ok := err.(*earlyExitError) - if !ok { - return err + if ee, ok := err.(*earlyExitError); ok { + return ee.prev + } else if oee, ok := err.(*deferredEarlyExitError); ok { + return oee.prev } - return ee.prev // nil if we're done + return err +} + +func withSuppressEarlyExit(f func() error) error { + if err := f(); err != nil { + return suppressEarlyExit(err) + } + return nil } func (e *eval) updateSavedMocks(withs []*ast.With) []*ast.With { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/glob.go b/vendor/github.com/open-policy-agent/opa/topdown/glob.go index 2afead73a2..116602db74 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/glob.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/glob.go @@ -10,6 +10,8 @@ import ( "github.com/open-policy-agent/opa/topdown/builtins" ) +const globCacheMaxSize = 100 + var globCacheLock = sync.Mutex{} var globCache map[string]glob.Glob @@ -64,6 +66,13 @@ func globCompileAndMatch(id, pattern, match string, delimiters []rune) (bool, er if p, err = glob.Compile(pattern, delimiters...); err != nil { return false, err } + if len(globCache) >= globCacheMaxSize { + // Delete a (semi-)random key to make room for the new one. + for k := range globCache { + delete(globCache, k) + break + } + } globCache[id] = p } out := p.Match(match) diff --git a/vendor/github.com/open-policy-agent/opa/topdown/http.go b/vendor/github.com/open-policy-agent/opa/topdown/http.go index d4d67d85ec..9d01bc14b2 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/http.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/http.go @@ -68,6 +68,7 @@ var allowedKeyNames = [...]string{ "raise_error", "caching_mode", "max_retry_attempts", + "cache_ignored_headers", } // ref: https://www.rfc-editor.org/rfc/rfc7231#section-6.1 @@ -114,52 +115,71 @@ const ( ) func builtinHTTPSend(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - req, err := validateHTTPRequestOperand(operands[0], 1) + + obj, err := builtins.ObjectOperand(operands[0].Value, 1) if err != nil { return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err) } - raiseError, err := getRaiseErrorValue(req) + raiseError, err := getRaiseErrorValue(obj) if err != nil { return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err) } - result, err := getHTTPResponse(bctx, req) + req, err := validateHTTPRequestOperand(operands[0], 1) if err != nil { if raiseError { return handleHTTPSendErr(bctx, err) } - obj := ast.NewObject() - obj.Insert(ast.StringTerm("status_code"), ast.IntNumberTerm(0)) - - errObj := ast.NewObject() + return iter(generateRaiseErrorResult(handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err))) + } - switch err.(type) { - case *url.Error: - errObj.Insert(ast.StringTerm("code"), ast.StringTerm(HTTPSendNetworkErr)) - default: - errObj.Insert(ast.StringTerm("code"), ast.StringTerm(HTTPSendInternalErr)) + result, err := getHTTPResponse(bctx, req) + if err != nil { + if raiseError { + return handleHTTPSendErr(bctx, err) } - errObj.Insert(ast.StringTerm("message"), ast.StringTerm(err.Error())) - obj.Insert(ast.StringTerm("error"), ast.NewTerm(errObj)) - - result = ast.NewTerm(obj) + result = 
generateRaiseErrorResult(err) } return iter(result) } +func generateRaiseErrorResult(err error) *ast.Term { + obj := ast.NewObject() + obj.Insert(ast.StringTerm("status_code"), ast.IntNumberTerm(0)) + + errObj := ast.NewObject() + + switch err.(type) { + case *url.Error: + errObj.Insert(ast.StringTerm("code"), ast.StringTerm(HTTPSendNetworkErr)) + default: + errObj.Insert(ast.StringTerm("code"), ast.StringTerm(HTTPSendInternalErr)) + } + + errObj.Insert(ast.StringTerm("message"), ast.StringTerm(err.Error())) + obj.Insert(ast.StringTerm("error"), ast.NewTerm(errObj)) + + return ast.NewTerm(obj) +} + func getHTTPResponse(bctx BuiltinContext, req ast.Object) (*ast.Term, error) { bctx.Metrics.Timer(httpSendLatencyMetricKey).Start() - reqExecutor, err := newHTTPRequestExecutor(bctx, req) + key, err := getKeyFromRequest(req) if err != nil { return nil, err } + reqExecutor, err := newHTTPRequestExecutor(bctx, req, key) + if err != nil { + return nil, err + } // Check if cache already has a response for this query + // set headers to exclude cache_ignored_headers resp, err := reqExecutor.CheckCache() if err != nil { return nil, err @@ -184,6 +204,43 @@ func getHTTPResponse(bctx BuiltinContext, req ast.Object) (*ast.Term, error) { return ast.NewTerm(resp), nil } +// getKeyFromRequest returns a key to be used for caching HTTP responses +// deletes headers from request object mentioned in cache_ignored_headers +func getKeyFromRequest(req ast.Object) (ast.Object, error) { + // deep copy so changes to key do not reflect in the request object + key := req.Copy() + cacheIgnoredHeadersTerm := req.Get(ast.StringTerm("cache_ignored_headers")) + allHeadersTerm := req.Get(ast.StringTerm("headers")) + // skip because no headers to delete + if cacheIgnoredHeadersTerm == nil || allHeadersTerm == nil { + // need to explicitly set cache_ignored_headers to null + // equivalent requests might have different sets of exclusion lists + key.Insert(ast.StringTerm("cache_ignored_headers"), ast.NullTerm()) + return key, nil + } + var cacheIgnoredHeaders []string + var allHeaders map[string]interface{} + err := ast.As(cacheIgnoredHeadersTerm.Value, &cacheIgnoredHeaders) + if err != nil { + return nil, err + } + err = ast.As(allHeadersTerm.Value, &allHeaders) + if err != nil { + return nil, err + } + for _, header := range cacheIgnoredHeaders { + delete(allHeaders, header) + } + val, err := ast.InterfaceToValue(allHeaders) + if err != nil { + return nil, err + } + key.Insert(ast.StringTerm("headers"), ast.NewTerm(val)) + // remove cache_ignored_headers key + key.Insert(ast.StringTerm("cache_ignored_headers"), ast.NullTerm()) + return key, nil +} + func init() { createAllowedKeys() createCacheableHTTPStatusCodes() @@ -289,7 +346,7 @@ func useSocket(rawURL string, tlsConfig *tls.Config) (bool, string, *http.Transp u.RawQuery = v.Encode() tr := http.DefaultTransport.(*http.Transport).Clone() - tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { + tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { return http.DefaultTransport.(*http.Transport).DialContext(ctx, "unix", socket) } tr.TLSClientConfig = tlsConfig @@ -468,7 +525,7 @@ func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *htt case "cache", "caching_mode", "force_cache", "force_cache_duration_seconds", "force_json_decode", "force_yaml_decode", - "raise_error", "max_retry_attempts": // no-op + "raise_error", "max_retry_attempts", "cache_ignored_headers": // no-op default: return nil, nil, 
fmt.Errorf("invalid parameter %q", key) } @@ -715,13 +772,13 @@ func newHTTPSendCache() *httpSendCache { } func valueHash(v util.T) int { - return v.(ast.Value).Hash() + return ast.StringTerm(v.(ast.Value).String()).Hash() } func valueEq(a, b util.T) bool { av := a.(ast.Value) bv := b.(ast.Value) - return av.Compare(bv) == 0 + return av.String() == bv.String() } func (cache *httpSendCache) get(k ast.Value) *httpSendCacheEntry { @@ -1368,20 +1425,21 @@ type httpRequestExecutor interface { // newHTTPRequestExecutor returns a new HTTP request executor that wraps either an inter-query or // intra-query cache implementation -func newHTTPRequestExecutor(bctx BuiltinContext, key ast.Object) (httpRequestExecutor, error) { - useInterQueryCache, forceCacheParams, err := useInterQueryCache(key) +func newHTTPRequestExecutor(bctx BuiltinContext, req ast.Object, key ast.Object) (httpRequestExecutor, error) { + useInterQueryCache, forceCacheParams, err := useInterQueryCache(req) if err != nil { return nil, handleHTTPSendErr(bctx, err) } if useInterQueryCache && bctx.InterQueryBuiltinCache != nil { - return newInterQueryCache(bctx, key, forceCacheParams) + return newInterQueryCache(bctx, req, key, forceCacheParams) } - return newIntraQueryCache(bctx, key) + return newIntraQueryCache(bctx, req, key) } type interQueryCache struct { bctx BuiltinContext + req ast.Object key ast.Object httpReq *http.Request httpClient *http.Client @@ -1390,8 +1448,8 @@ type interQueryCache struct { forceCacheParams *forceCacheParams } -func newInterQueryCache(bctx BuiltinContext, key ast.Object, forceCacheParams *forceCacheParams) (*interQueryCache, error) { - return &interQueryCache{bctx: bctx, key: key, forceCacheParams: forceCacheParams}, nil +func newInterQueryCache(bctx BuiltinContext, req ast.Object, key ast.Object, forceCacheParams *forceCacheParams) (*interQueryCache, error) { + return &interQueryCache{bctx: bctx, req: req, key: key, forceCacheParams: forceCacheParams}, nil } // CheckCache checks the cache for the value of the key set on this object @@ -1450,21 +1508,22 @@ func (c *interQueryCache) InsertErrorIntoCache(err error) { // ExecuteHTTPRequest executes a HTTP request func (c *interQueryCache) ExecuteHTTPRequest() (*http.Response, error) { var err error - c.httpReq, c.httpClient, err = createHTTPRequest(c.bctx, c.key) + c.httpReq, c.httpClient, err = createHTTPRequest(c.bctx, c.req) if err != nil { return nil, handleHTTPSendErr(c.bctx, err) } - return executeHTTPRequest(c.httpReq, c.httpClient, c.key) + return executeHTTPRequest(c.httpReq, c.httpClient, c.req) } type intraQueryCache struct { bctx BuiltinContext + req ast.Object key ast.Object } -func newIntraQueryCache(bctx BuiltinContext, key ast.Object) (*intraQueryCache, error) { - return &intraQueryCache{bctx: bctx, key: key}, nil +func newIntraQueryCache(bctx BuiltinContext, req ast.Object, key ast.Object) (*intraQueryCache, error) { + return &intraQueryCache{bctx: bctx, req: req, key: key}, nil } // CheckCache checks the cache for the value of the key set on this object @@ -1501,11 +1560,11 @@ func (c *intraQueryCache) InsertErrorIntoCache(err error) { // ExecuteHTTPRequest executes a HTTP request func (c *intraQueryCache) ExecuteHTTPRequest() (*http.Response, error) { - httpReq, httpClient, err := createHTTPRequest(c.bctx, c.key) + httpReq, httpClient, err := createHTTPRequest(c.bctx, c.req) if err != nil { return nil, handleHTTPSendErr(c.bctx, err) } - return executeHTTPRequest(httpReq, httpClient, c.key) + return executeHTTPRequest(httpReq, 
httpClient, c.req) } func useInterQueryCache(req ast.Object) (bool, *forceCacheParams, error) { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go b/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go index 9d8fe50681..0cd4bc193a 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go @@ -45,7 +45,7 @@ var ( errBytesValueIncludesSpaces = parseNumBytesError("spaces not allowed in resource strings") ) -func builtinNumBytes(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinNumBytes(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { var m big.Float raw, err := builtins.StringOperand(operands[0].Value, 1) diff --git a/vendor/github.com/open-policy-agent/opa/topdown/providers.go b/vendor/github.com/open-policy-agent/opa/topdown/providers.go index 113281b0f9..77db917982 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/providers.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/providers.go @@ -86,7 +86,7 @@ func validateAWSAuthParameters(o ast.Object) error { return nil } -func builtinAWSSigV4SignReq(ctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { // Request object. reqObj, err := builtins.ObjectOperand(operands[0].Value, 1) if err != nil { @@ -173,7 +173,19 @@ func builtinAWSSigV4SignReq(ctx BuiltinContext, operands []*ast.Term, iter func( // Sign the request object's headers, and reconstruct the headers map. headersMap := objectToMap(headers) - authHeader, awsHeadersMap := aws.SignV4(headersMap, method, theURL, body, service, awsCreds, signingTimestamp) + + // if payload signing config is set, pass it down to the signing method + disablePayloadSigning := false + t := awsConfigObj.Get(ast.StringTerm("disable_payload_signing")) + if t != nil { + if v, ok := t.Value.(ast.Boolean); ok { + disablePayloadSigning = bool(v) + } else { + return builtins.NewOperandErr(2, "invalid value for 'disable_payload_signing' in AWS config") + } + } + + authHeader, awsHeadersMap := aws.SignV4(headersMap, method, theURL, body, service, awsCreds, signingTimestamp, disablePayloadSigning) signedHeadersObj := ast.NewObject() // Restore original headers for k, v := range headersMap { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/query.go b/vendor/github.com/open-policy-agent/opa/topdown/query.go index 2b540c58a5..bbb4ba58f3 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/query.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/query.go @@ -58,6 +58,7 @@ type Query struct { strictObjects bool printHook print.Hook tracingOpts tracing.Options + virtualCache VirtualCache } // Builtin represents a built-in function that queries can call. @@ -291,6 +292,13 @@ func (q *Query) WithStrictObjects(yes bool) *Query { return q } +// WithVirtualCache sets the VirtualCache to use during evaluation. This is +// optional, and if not set, the default cache is used. +func (q *Query) WithVirtualCache(vc VirtualCache) *Query { + q.virtualCache = vc + return q +} + // PartialRun executes partial evaluation on the query with respect to unknown // values. Partial evaluation attempts to evaluate as much of the query as // possible without requiring values for the unknowns set on the query. 
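A quick usage sketch for the option added above, sharing one cache across several evaluations (illustrative only; assumes the exported topdown.NewVirtualCache constructor used in this hunk):

	vc := topdown.NewVirtualCache()
	for _, body := range bodies {
		q := topdown.NewQuery(body).WithVirtualCache(vc) // reuse virtual-document results across queries
		if _, err := q.Run(ctx); err != nil {
			// handle evaluation error
		}
	}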
The @@ -311,8 +319,17 @@ func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support [] if q.metrics == nil { q.metrics = metrics.New() } + f := &queryIDFactory{} b := newBindings(0, q.instr) + + var vc VirtualCache + if q.virtualCache != nil { + vc = q.virtualCache + } else { + vc = NewVirtualCache() + } + e := &eval{ ctx: ctx, metrics: q.metrics, @@ -340,7 +357,7 @@ func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support [] functionMocks: newFunctionMocksStack(), interQueryBuiltinCache: q.interQueryBuiltinCache, ndBuiltinCache: q.ndBuiltinCache, - virtualCache: newVirtualCache(), + virtualCache: vc, comprehensionCache: newComprehensionCache(), saveSet: newSaveSet(q.unknowns, b, q.instr), saveStack: newSaveStack(), @@ -488,7 +505,16 @@ func (q *Query) Iter(ctx context.Context, iter func(QueryResult) error) error { if q.metrics == nil { q.metrics = metrics.New() } + f := &queryIDFactory{} + + var vc VirtualCache + if q.virtualCache != nil { + vc = q.virtualCache + } else { + vc = NewVirtualCache() + } + e := &eval{ ctx: ctx, metrics: q.metrics, @@ -516,7 +542,7 @@ func (q *Query) Iter(ctx context.Context, iter func(QueryResult) error) error { functionMocks: newFunctionMocksStack(), interQueryBuiltinCache: q.interQueryBuiltinCache, ndBuiltinCache: q.ndBuiltinCache, - virtualCache: newVirtualCache(), + virtualCache: vc, comprehensionCache: newComprehensionCache(), genvarprefix: q.genvarprefix, runtime: q.runtime, diff --git a/vendor/github.com/open-policy-agent/opa/topdown/reachable.go b/vendor/github.com/open-policy-agent/opa/topdown/reachable.go index 9cb15d51e3..8d61018e76 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/reachable.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/reachable.go @@ -31,7 +31,7 @@ func numberOfEdges(collection *ast.Term) int { return 0 } -func builtinReachable(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinReachable(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { // Error on wrong types for args. graph, err := builtins.ObjectOperand(operands[0].Value, 1) if err != nil { @@ -109,7 +109,7 @@ func pathBuilder(graph ast.Object, root *ast.Term, path []*ast.Term, edgeRslt as } -func builtinReachablePaths(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinReachablePaths(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { var traceResult = ast.NewSet() // Error on wrong types for args. graph, err := builtins.ObjectOperand(operands[0].Value, 1) diff --git a/vendor/github.com/open-policy-agent/opa/topdown/regex.go b/vendor/github.com/open-policy-agent/opa/topdown/regex.go index aa818f2dc9..877f19e233 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/regex.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/regex.go @@ -15,6 +15,8 @@ import ( "github.com/open-policy-agent/opa/topdown/builtins" ) +const regexCacheMaxSize = 100 + var regexpCacheLock = sync.Mutex{} var regexpCache map[string]*regexp.Regexp @@ -111,6 +113,13 @@ func getRegexp(pat string) (*regexp.Regexp, error) { if err != nil { return nil, err } + if len(regexpCache) >= regexCacheMaxSize { + // Delete a (semi-)random key to make room for the new one. 
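The comment above describes the new bounded regexp cache: Go's map iteration order is randomized, so ranging over the map and breaking after one delete evicts a pseudo-random entry with no extra bookkeeping. A standalone sketch of the same pattern (generic cache, not OPA's actual code):

	func putBounded(cache map[string]*regexp.Regexp, maxSize int, pat string, re *regexp.Regexp) {
		if len(cache) >= maxSize {
			for k := range cache { // iteration starts at a random key
				delete(cache, k)
				break
			}
		}
		cache[pat] = re
	}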
+ for k := range regexpCache { + delete(regexpCache, k) + break + } + } regexpCache[pat] = re } return re, nil diff --git a/vendor/github.com/open-policy-agent/opa/topdown/semver.go b/vendor/github.com/open-policy-agent/opa/topdown/semver.go index ab5aed3eec..7bb7b9c183 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/semver.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/semver.go @@ -12,7 +12,7 @@ import ( "github.com/open-policy-agent/opa/topdown/builtins" ) -func builtinSemVerCompare(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinSemVerCompare(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { versionStringA, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { return err @@ -37,7 +37,7 @@ func builtinSemVerCompare(bctx BuiltinContext, operands []*ast.Term, iter func(* return iter(ast.IntNumberTerm(result)) } -func builtinSemVerIsValid(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinSemVerIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { versionString, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { return iter(ast.BooleanTerm(false)) diff --git a/vendor/github.com/open-policy-agent/opa/topdown/strings.go b/vendor/github.com/open-policy-agent/opa/topdown/strings.go index d5baa31972..d9e4a55e58 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/strings.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/strings.go @@ -16,7 +16,7 @@ import ( "github.com/open-policy-agent/opa/topdown/builtins" ) -func builtinAnyPrefixMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinAnyPrefixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { a, b := operands[0].Value, operands[1].Value var strs []string @@ -50,7 +50,7 @@ func builtinAnyPrefixMatch(bctx BuiltinContext, operands []*ast.Term, iter func( return iter(ast.BooleanTerm(anyStartsWithAny(strs, prefixes))) } -func builtinAnySuffixMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinAnySuffixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { a, b := operands[0].Value, operands[1].Value var strsReversed []string @@ -310,6 +310,25 @@ func builtinContains(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term return iter(ast.BooleanTerm(strings.Contains(string(s), string(substr)))) } +func builtinStringCount(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + substr, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + baseTerm := string(s) + searchTerm := string(substr) + + count := strings.Count(baseTerm, searchTerm) + + return iter(ast.IntNumberTerm(count)) +} + func builtinStartsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { s, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { @@ -384,12 +403,12 @@ func builtinReplace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) return err } - new, err := builtins.StringOperand(operands[2].Value, 3) + n, err := builtins.StringOperand(operands[2].Value, 3) if err != nil { return err } - return iter(ast.StringTerm(strings.Replace(string(s), string(old), string(new), -1))) + return 
iter(ast.StringTerm(strings.Replace(string(s), string(old), string(n), -1))) } func builtinReplaceN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -570,6 +589,7 @@ func init() { RegisterBuiltinFunc(ast.IndexOfN.Name, builtinIndexOfN) RegisterBuiltinFunc(ast.Substring.Name, builtinSubstring) RegisterBuiltinFunc(ast.Contains.Name, builtinContains) + RegisterBuiltinFunc(ast.StringCount.Name, builtinStringCount) RegisterBuiltinFunc(ast.StartsWith.Name, builtinStartsWith) RegisterBuiltinFunc(ast.EndsWith.Name, builtinEndsWith) RegisterBuiltinFunc(ast.Upper.Name, builtinUpper) diff --git a/vendor/github.com/open-policy-agent/opa/topdown/tokens.go b/vendor/github.com/open-policy-agent/opa/topdown/tokens.go index b69f3f2d24..7457f1f15d 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/tokens.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/tokens.go @@ -233,7 +233,7 @@ func builtinJWTVerifyRSA(a ast.Value, b ast.Value, hasher func() hash.Hash, veri } // Implements ES256 JWT signature verification. -func builtinJWTVerifyES256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinJWTVerifyES256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { result, err := builtinJWTVerify(operands[0].Value, operands[1].Value, sha256.New, verifyES) if err == nil { return iter(ast.NewTerm(result)) @@ -242,7 +242,7 @@ func builtinJWTVerifyES256(bctx BuiltinContext, operands []*ast.Term, iter func( } // Implements ES384 JWT signature verification -func builtinJWTVerifyES384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinJWTVerifyES384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { result, err := builtinJWTVerify(operands[0].Value, operands[1].Value, sha512.New384, verifyES) if err == nil { return iter(ast.NewTerm(result)) @@ -251,7 +251,7 @@ func builtinJWTVerifyES384(bctx BuiltinContext, operands []*ast.Term, iter func( } // Implements ES512 JWT signature verification -func builtinJWTVerifyES512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinJWTVerifyES512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { result, err := builtinJWTVerify(operands[0].Value, operands[1].Value, sha512.New, verifyES) if err == nil { return iter(ast.NewTerm(result)) @@ -413,7 +413,7 @@ func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify } // Implements HS256 (secret) JWT signature verification -func builtinJWTVerifyHS256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinJWTVerifyHS256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { // Decode the JSON Web Token token, err := decodeJWT(operands[0].Value) if err != nil { @@ -442,7 +442,7 @@ func builtinJWTVerifyHS256(bctx BuiltinContext, operands []*ast.Term, iter func( } // Implements HS384 JWT signature verification -func builtinJWTVerifyHS384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinJWTVerifyHS384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { // Decode the JSON Web Token token, err := decodeJWT(operands[0].Value) if err != nil { @@ -471,7 +471,7 @@ func builtinJWTVerifyHS384(bctx BuiltinContext, operands []*ast.Term, iter func( } // Implements HS512 JWT signature verification -func builtinJWTVerifyHS512(bctx BuiltinContext, operands []*ast.Term, iter 
func(*ast.Term) error) error { +func builtinJWTVerifyHS512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { // Decode the JSON Web Token token, err := decodeJWT(operands[0].Value) if err != nil { @@ -720,8 +720,10 @@ func (constraints *tokenConstraints) validAudience(aud ast.Value) bool { // JWT algorithms -type tokenVerifyFunction func(key interface{}, hash crypto.Hash, payload []byte, signature []byte) error -type tokenVerifyAsymmetricFunction func(key interface{}, hash crypto.Hash, digest []byte, signature []byte) error +type ( + tokenVerifyFunction func(key interface{}, hash crypto.Hash, payload []byte, signature []byte) error + tokenVerifyAsymmetricFunction func(key interface{}, hash crypto.Hash, digest []byte, signature []byte) error +) // jwtAlgorithm describes a JWS 'alg' value type tokenAlgorithm struct { @@ -793,7 +795,7 @@ func verifyRSAPSS(key interface{}, hash crypto.Hash, digest []byte, signature [] return nil } -func verifyECDSA(key interface{}, hash crypto.Hash, digest []byte, signature []byte) (err error) { +func verifyECDSA(key interface{}, _ crypto.Hash, digest []byte, signature []byte) (err error) { defer func() { if r := recover(); r != nil { err = fmt.Errorf("ECDSA signature verification error: %v", r) @@ -912,7 +914,6 @@ func (header *tokenHeader) valid() bool { } func commonBuiltinJWTEncodeSign(bctx BuiltinContext, inputHeaders, jwsPayload, jwkSrc string, iter func(*ast.Term) error) error { - keys, err := jwk.ParseString(jwkSrc) if err != nil { return err @@ -946,21 +947,51 @@ func commonBuiltinJWTEncodeSign(bctx BuiltinContext, inputHeaders, jwsPayload, j if err != nil { return err } - return iter(ast.StringTerm(string(jwsCompact))) + return iter(ast.StringTerm(string(jwsCompact))) } func builtinJWTEncodeSign(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + inputHeadersAsJSON, err := ast.JSON(operands[0].Value) + if err != nil { + return fmt.Errorf("failed to prepare JWT headers for marshalling: %v", err) + } - inputHeaders := operands[0].String() - jwsPayload := operands[1].String() - jwkSrc := operands[2].String() - return commonBuiltinJWTEncodeSign(bctx, inputHeaders, jwsPayload, jwkSrc, iter) + inputHeadersBs, err := json.Marshal(inputHeadersAsJSON) + if err != nil { + return fmt.Errorf("failed to marshal JWT headers: %v", err) + } + + payloadAsJSON, err := ast.JSON(operands[1].Value) + if err != nil { + return fmt.Errorf("failed to prepare JWT payload for marshalling: %v", err) + } + payloadBs, err := json.Marshal(payloadAsJSON) + if err != nil { + return fmt.Errorf("failed to marshal JWT payload: %v", err) + } + + signatureAsJSON, err := ast.JSON(operands[2].Value) + if err != nil { + return fmt.Errorf("failed to prepare JWT signature for marshalling: %v", err) + } + + signatureBs, err := json.Marshal(signatureAsJSON) + if err != nil { + return fmt.Errorf("failed to marshal JWT signature: %v", err) + } + + return commonBuiltinJWTEncodeSign( + bctx, + string(inputHeadersBs), + string(payloadBs), + string(signatureBs), + iter, + ) } func builtinJWTEncodeSignRaw(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - jwkSrc, err := builtins.StringOperand(operands[2].Value, 3) if err != nil { return err @@ -1048,10 +1079,9 @@ func builtinJWTDecodeVerify(bctx BuiltinContext, operands []*ast.Term, iter func // Nested JWT, go round again with payload as first argument a = p.Value continue - } else { - // Non-nested JWT (or we've reached the bottom of the nesting). 
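The builtinJWTEncodeSign rewrite in this hunk stops passing operands[i].String() (Rego syntax) to the JWS library and instead round-trips each term through ast.JSON and encoding/json, producing canonical JSON. A minimal sketch of that conversion (the helper name is ours):

	func termToJSONString(t *ast.Term) (string, error) {
		v, err := ast.JSON(t.Value) // ast.Value -> plain Go values
		if err != nil {
			return "", err
		}
		bs, err := json.Marshal(v) // canonical JSON text, proper string quoting
		return string(bs), err
	}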
- break } + // Non-nested JWT (or we've reached the bottom of the nesting). + break } payload, err := extractJSONObject(string(p.Value.(ast.String))) if err != nil { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/trace.go b/vendor/github.com/open-policy-agent/opa/topdown/trace.go index 727938d9c6..277c94b626 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/trace.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/trace.go @@ -5,8 +5,10 @@ package topdown import ( + "bytes" "fmt" "io" + "slices" "strings" iStrs "github.com/open-policy-agent/opa/internal/strings" @@ -18,7 +20,9 @@ import ( const ( minLocationWidth = 5 // len("query") maxIdealLocationWidth = 64 - locationPadding = 4 + columnPadding = 4 + maxExprVarWidth = 32 + maxPrettyExprVarWidth = 64 ) // Op defines the types of tracing events. @@ -62,7 +66,8 @@ const ( // UnifyOp is emitted when two terms are unified. Node will be set to an // equality expression with the two terms. This Node will not have location // info. - UnifyOp Op = "Unify" + UnifyOp Op = "Unify" + FailedAssertionOp Op = "FailedAssertion" ) // VarMetadata provides some user facing information about @@ -84,8 +89,14 @@ type Event struct { Message string // Contains message for Note events. Ref *ast.Ref // Identifies the subject ref for the event. Only applies to Index and Wasm operations. - input *ast.Term - bindings *bindings + input *ast.Term + bindings *bindings + localVirtualCacheSnapshot *ast.ValueMap +} + +func (evt *Event) WithInput(input *ast.Term) *Event { + evt.input = input + return evt } // HasRule returns true if the Event contains an ast.Rule. @@ -236,31 +247,162 @@ func (b *BufferTracer) Config() TraceConfig { // PrettyTrace pretty prints the trace to the writer. func PrettyTrace(w io.Writer, trace []*Event) { - prettyTraceWith(w, trace, false) + PrettyTraceWithOpts(w, trace, PrettyTraceOptions{}) } // PrettyTraceWithLocation prints the trace to the writer and includes location information func PrettyTraceWithLocation(w io.Writer, trace []*Event) { - prettyTraceWith(w, trace, true) + PrettyTraceWithOpts(w, trace, PrettyTraceOptions{Locations: true}) +} + +type PrettyTraceOptions struct { + Locations bool // Include location information + ExprVariables bool // Include variables found in the expression + LocalVariables bool // Include all local variables +} + +type traceRow []string + +func (r *traceRow) add(s string) { + *r = append(*r, s) +} + +type traceTable struct { + rows []traceRow + maxWidths []int +} + +func (t *traceTable) add(row traceRow) { + t.rows = append(t.rows, row) + for i := range row { + if i >= len(t.maxWidths) { + t.maxWidths = append(t.maxWidths, len(row[i])) + } else if len(row[i]) > t.maxWidths[i] { + t.maxWidths[i] = len(row[i]) + } + } +} + +func (t *traceTable) write(w io.Writer, padding int) { + for _, row := range t.rows { + for i, cell := range row { + width := t.maxWidths[i] + padding + if i < len(row)-1 { + _, _ = fmt.Fprintf(w, "%-*s ", width, cell) + } else { + _, _ = fmt.Fprintf(w, "%s", cell) + } + } + _, _ = fmt.Fprintln(w) + } } -func prettyTraceWith(w io.Writer, trace []*Event, locations bool) { +func PrettyTraceWithOpts(w io.Writer, trace []*Event, opts PrettyTraceOptions) { depths := depths{} - filePathAliases, longest := getShortenedFileNames(trace) + // FIXME: Can we shorten each location as we process each trace event instead of beforehand? 
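A usage sketch for PrettyTraceWithOpts as defined above (assumes a prepared ast.Body and context; error handling abbreviated):

	buf := topdown.NewBufferTracer()
	q := topdown.NewQuery(body).WithTracer(buf)
	if _, err := q.Run(ctx); err != nil {
		// handle evaluation error
	}
	topdown.PrettyTraceWithOpts(os.Stderr, *buf, topdown.PrettyTraceOptions{
		Locations:     true,
		ExprVariables: true,
	})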
+ filePathAliases, _ := getShortenedFileNames(trace) - // Always include some padding between the trace and location - locationWidth := longest + locationPadding + table := traceTable{} for _, event := range trace { depth := depths.GetOrSet(event.QueryID, event.ParentID) - if locations { + row := traceRow{} + + if opts.Locations { location := formatLocation(event, filePathAliases) - fmt.Fprintf(w, "%-*s %s\n", locationWidth, location, formatEvent(event, depth)) - } else { - fmt.Fprintln(w, formatEvent(event, depth)) + row.add(location) + } + + row.add(formatEvent(event, depth)) + + if opts.ExprVariables { + vars := exprLocalVars(event) + keys := sortedKeys(vars) + + buf := new(bytes.Buffer) + buf.WriteString("{") + for i, k := range keys { + if i > 0 { + buf.WriteString(", ") + } + _, _ = fmt.Fprintf(buf, "%v: %s", k, iStrs.Truncate(vars.Get(k).String(), maxExprVarWidth)) + } + buf.WriteString("}") + row.add(buf.String()) } + + if opts.LocalVariables { + if locals := event.Locals; locals != nil { + keys := sortedKeys(locals) + + buf := new(bytes.Buffer) + buf.WriteString("{") + for i, k := range keys { + if i > 0 { + buf.WriteString(", ") + } + _, _ = fmt.Fprintf(buf, "%v: %s", k, iStrs.Truncate(locals.Get(k).String(), maxExprVarWidth)) + } + buf.WriteString("}") + row.add(buf.String()) + } else { + row.add("{}") + } + } + + table.add(row) } + + table.write(w, columnPadding) +} + +func sortedKeys(vm *ast.ValueMap) []ast.Value { + keys := make([]ast.Value, 0, vm.Len()) + vm.Iter(func(k, _ ast.Value) bool { + keys = append(keys, k) + return false + }) + slices.SortFunc(keys, func(a, b ast.Value) int { + return strings.Compare(a.String(), b.String()) + }) + return keys +} + +func exprLocalVars(e *Event) *ast.ValueMap { + vars := ast.NewValueMap() + + findVars := func(term *ast.Term) bool { + //if r, ok := term.Value.(ast.Ref); ok { + // fmt.Printf("ref: %v\n", r) + // //return true + //} + if name, ok := term.Value.(ast.Var); ok { + if meta, ok := e.LocalMetadata[name]; ok { + if val := e.Locals.Get(name); val != nil { + vars.Put(meta.Name, val) + } + } + } + return false + } + + if r, ok := e.Node.(*ast.Rule); ok { + // We're only interested in vars in the head, not the body + ast.WalkTerms(r.Head, findVars) + return vars + } + + // The local cache snapshot only contains a snapshot for those refs present in the event node, + // so they can all be added to the vars map. 
+ e.localVirtualCacheSnapshot.Iter(func(k, v ast.Value) bool { + vars.Put(k, v) + return false + }) + + ast.WalkTerms(e.Node, findVars) + + return vars } func formatEvent(event *Event, depth int) string { @@ -451,6 +593,310 @@ func rewrite(event *Event) *Event { return &cpy } +type varInfo struct { + VarMetadata + val ast.Value + exprLoc *ast.Location + col int // 0-indexed column +} + +func (v varInfo) Value() string { + if v.val != nil { + return v.val.String() + } + return "undefined" +} + +func (v varInfo) Title() string { + if v.exprLoc != nil && v.exprLoc.Text != nil { + return string(v.exprLoc.Text) + } + return string(v.Name) +} + +func padLocationText(loc *ast.Location) string { + if loc == nil { + return "" + } + + text := string(loc.Text) + + if loc.Col == 0 { + return text + } + + buf := new(bytes.Buffer) + j := 0 + for i := 1; i < loc.Col; i++ { + if len(loc.Tabs) > 0 && j < len(loc.Tabs) && loc.Tabs[j] == i { + buf.WriteString("\t") + j++ + } else { + buf.WriteString(" ") + } + } + + buf.WriteString(text) + return buf.String() +} + +type PrettyEventOpts struct { + PrettyVars bool +} + +func walkTestTerms(x interface{}, f func(*ast.Term) bool) { + var vis *ast.GenericVisitor + vis = ast.NewGenericVisitor(func(x interface{}) bool { + switch x := x.(type) { + case ast.Call: + for _, t := range x[1:] { + vis.Walk(t) + } + return true + case *ast.Expr: + if x.IsCall() { + for _, o := range x.Operands() { + vis.Walk(o) + } + for i := range x.With { + vis.Walk(x.With[i]) + } + return true + } + case *ast.Term: + return f(x) + case *ast.With: + vis.Walk(x.Value) + return true + } + return false + }) + vis.Walk(x) +} + +func PrettyEvent(w io.Writer, e *Event, opts PrettyEventOpts) error { + if !opts.PrettyVars { + _, _ = fmt.Fprintln(w, padLocationText(e.Location)) + return nil + } + + buf := new(bytes.Buffer) + exprVars := map[string]varInfo{} + + findVars := func(unknownAreUndefined bool) func(term *ast.Term) bool { + return func(term *ast.Term) bool { + if term.Location == nil { + return false + } + + switch v := term.Value.(type) { + case *ast.ArrayComprehension, *ast.SetComprehension, *ast.ObjectComprehension: + // we don't report on the internals of a comprehension, as it's already evaluated, and we won't have the local vars. 
+ return true + case ast.Var: + var info *varInfo + if meta, ok := e.LocalMetadata[v]; ok { + info = &varInfo{ + VarMetadata: meta, + val: e.Locals.Get(v), + exprLoc: term.Location, + } + } else if unknownAreUndefined { + info = &varInfo{ + VarMetadata: VarMetadata{Name: v}, + exprLoc: term.Location, + col: term.Location.Col, + } + } + + if info != nil { + if v, exists := exprVars[info.Title()]; !exists || v.val == nil { + if term.Location != nil { + info.col = term.Location.Col + } + exprVars[info.Title()] = *info + } + } + } + return false + } + } + + expr, ok := e.Node.(*ast.Expr) + if !ok || expr == nil { + return nil + } + + base := expr.BaseCogeneratedExpr() + exprText := padLocationText(base.Location) + buf.WriteString(exprText) + + e.localVirtualCacheSnapshot.Iter(func(k, v ast.Value) bool { + var info *varInfo + switch k := k.(type) { + case ast.Ref: + info = &varInfo{ + VarMetadata: VarMetadata{Name: ast.Var(k.String())}, + val: v, + exprLoc: k[0].Location, + col: k[0].Location.Col, + } + case *ast.ArrayComprehension: + info = &varInfo{ + VarMetadata: VarMetadata{Name: ast.Var(k.String())}, + val: v, + exprLoc: k.Term.Location, + col: k.Term.Location.Col, + } + case *ast.SetComprehension: + info = &varInfo{ + VarMetadata: VarMetadata{Name: ast.Var(k.String())}, + val: v, + exprLoc: k.Term.Location, + col: k.Term.Location.Col, + } + case *ast.ObjectComprehension: + info = &varInfo{ + VarMetadata: VarMetadata{Name: ast.Var(k.String())}, + val: v, + exprLoc: k.Key.Location, + col: k.Key.Location.Col, + } + } + + if info != nil { + exprVars[info.Title()] = *info + } + + return false + }) + + // If the expression is negated, we can't confidently assert that vars with unknown values are 'undefined', + // since the compiler might have opted out of the necessary rewrite. + walkTestTerms(expr, findVars(!expr.Negated)) + coExprs := expr.CogeneratedExprs() + for _, coExpr := range coExprs { + // Only the current "co-expr" can have undefined vars, if we don't know the value for a var in any other co-expr, + // it's unknown, not undefined. A var can be unknown if it hasn't been assigned a value yet, because the co-expr + // hasn't been evaluated yet (the fail happened before it). + walkTestTerms(coExpr, findVars(false)) + } + + printPrettyVars(buf, exprVars) + _, _ = fmt.Fprint(w, buf.String()) + return nil +} + +func printPrettyVars(w *bytes.Buffer, exprVars map[string]varInfo) { + containsTabs := false + varRows := make(map[int]interface{}) + for _, info := range exprVars { + if len(info.exprLoc.Tabs) > 0 { + containsTabs = true + } + varRows[info.exprLoc.Row] = nil + } + + if containsTabs && len(varRows) > 1 { + // We can't (currently) reliably point to var locations when they are on different rows that contain tabs. + // So we'll just print them in alphabetical order instead. 
+ byName := make([]varInfo, 0, len(exprVars)) + for _, info := range exprVars { + byName = append(byName, info) + } + slices.SortStableFunc(byName, func(a, b varInfo) int { + return strings.Compare(a.Title(), b.Title()) + }) + + w.WriteString("\n\nWhere:\n") + for _, info := range byName { + w.WriteString(fmt.Sprintf("\n%s: %s", info.Title(), iStrs.Truncate(info.Value(), maxPrettyExprVarWidth))) + } + + return + } + + byCol := make([]varInfo, 0, len(exprVars)) + for _, info := range exprVars { + byCol = append(byCol, info) + } + slices.SortFunc(byCol, func(a, b varInfo) int { + // sort first by column, then by reverse row (to present vars in the same order they appear in the expr) + if a.col == b.col { + if a.exprLoc.Row == b.exprLoc.Row { + return strings.Compare(a.Title(), b.Title()) + } + return b.exprLoc.Row - a.exprLoc.Row + } + return a.col - b.col + }) + + if len(byCol) == 0 { + return + } + + w.WriteString("\n") + printArrows(w, byCol, -1) + for i := len(byCol) - 1; i >= 0; i-- { + w.WriteString("\n") + printArrows(w, byCol, i) + } +} + +func printArrows(w *bytes.Buffer, l []varInfo, printValueAt int) { + prevCol := 0 + var slice []varInfo + if printValueAt >= 0 { + slice = l[:printValueAt+1] + } else { + slice = l + } + isFirst := true + for i, info := range slice { + + isLast := i >= len(slice)-1 + col := info.col + + if !isLast && col == l[i+1].col { + // We're sharing the same column with another, subsequent var + continue + } + + spaces := col - 1 + if i > 0 && !isFirst { + spaces = (col - prevCol) - 1 + } + + for j := 0; j < spaces; j++ { + tab := false + for _, t := range info.exprLoc.Tabs { + if t == j+prevCol+1 { + w.WriteString("\t") + tab = true + break + } + } + if !tab { + w.WriteString(" ") + } + } + + if isLast && printValueAt >= 0 { + valueStr := iStrs.Truncate(info.Value(), maxPrettyExprVarWidth) + if (i > 0 && col == l[i-1].col) || (i < len(l)-1 && col == l[i+1].col) { + // There is another var on this column, so we need to include the name to differentiate them. + w.WriteString(fmt.Sprintf("%s: %s", info.Title(), valueStr)) + } else { + w.WriteString(valueStr) + } + } else { + w.WriteString("|") + } + prevCol = col + isFirst = false + } +} + func init() { RegisterBuiltinFunc(ast.Trace.Name, builtinTrace) } diff --git a/vendor/github.com/open-policy-agent/opa/types/decode.go b/vendor/github.com/open-policy-agent/opa/types/decode.go index 4e123384a0..a6bd9ea030 100644 --- a/vendor/github.com/open-policy-agent/opa/types/decode.go +++ b/vendor/github.com/open-policy-agent/opa/types/decode.go @@ -77,10 +77,10 @@ func Unmarshal(bs []byte) (result Type, err error) { } } case typeAny: - var any rawunion - if err = util.UnmarshalJSON(bs, &any); err == nil { + var union rawunion + if err = util.UnmarshalJSON(bs, &union); err == nil { var of []Type - if of, err = unmarshalSlice(any.Of); err == nil { + if of, err = unmarshalSlice(union.Of); err == nil { result = NewAny(of...) } } diff --git a/vendor/github.com/open-policy-agent/opa/types/types.go b/vendor/github.com/open-policy-agent/opa/types/types.go index a4b87cd554..2a050927dd 100644 --- a/vendor/github.com/open-policy-agent/opa/types/types.go +++ b/vendor/github.com/open-policy-agent/opa/types/types.go @@ -938,8 +938,8 @@ func Compare(a, b Type) int { // Contains returns true if a is a superset or equal to b. 
func Contains(a, b Type) bool { - if any, ok := unwrap(a).(Any); ok { - return any.Contains(b) + if x, ok := unwrap(a).(Any); ok { + return x.Contains(b) } return Compare(a, b) == 0 } @@ -994,8 +994,8 @@ func Select(a Type, x interface{}) Type { if Compare(a.of, tpe) == 0 { return a.of } - if any, ok := a.of.(Any); ok { - if any.Contains(tpe) { + if x, ok := a.of.(Any); ok { + if x.Contains(tpe) { return tpe } } diff --git a/vendor/github.com/open-policy-agent/opa/util/decoding/context.go b/vendor/github.com/open-policy-agent/opa/util/decoding/context.go new file mode 100644 index 0000000000..a817680f17 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/util/decoding/context.go @@ -0,0 +1,31 @@ +package decoding + +import "context" + +type requestContextKey string + +// Note(philipc): We can add functions later to add the max request body length +// to contexts, if we ever need to. +const ( + reqCtxKeyMaxLen = requestContextKey("server-decoding-plugin-context-max-length") + reqCtxKeyGzipMaxLen = requestContextKey("server-decoding-plugin-context-gzip-max-length") +) + +func AddServerDecodingMaxLen(ctx context.Context, maxLen int64) context.Context { + return context.WithValue(ctx, reqCtxKeyMaxLen, maxLen) +} + +func AddServerDecodingGzipMaxLen(ctx context.Context, maxLen int64) context.Context { + return context.WithValue(ctx, reqCtxKeyGzipMaxLen, maxLen) +} + +// Used for enforcing max body content limits when dealing with chunked requests. +func GetServerDecodingMaxLen(ctx context.Context) (int64, bool) { + maxLength, ok := ctx.Value(reqCtxKeyMaxLen).(int64) + return maxLength, ok +} + +func GetServerDecodingGzipMaxLen(ctx context.Context) (int64, bool) { + gzipMaxLength, ok := ctx.Value(reqCtxKeyGzipMaxLen).(int64) + return gzipMaxLength, ok +} diff --git a/vendor/github.com/open-policy-agent/opa/util/hashmap.go b/vendor/github.com/open-policy-agent/opa/util/hashmap.go index 11e7dca40b..8875a6323e 100644 --- a/vendor/github.com/open-policy-agent/opa/util/hashmap.go +++ b/vendor/github.com/open-policy-agent/opa/util/hashmap.go @@ -72,7 +72,7 @@ func (h *HashMap) Get(k T) (T, bool) { return nil, false } -// Delete removes the the key k. +// Delete removes the key k. func (h *HashMap) Delete(k T) { hash := h.hash(k) var prev *hashEntry diff --git a/vendor/github.com/open-policy-agent/opa/util/maps.go b/vendor/github.com/open-policy-agent/opa/util/maps.go new file mode 100644 index 0000000000..d943b4d0a8 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/util/maps.go @@ -0,0 +1,10 @@ +package util + +// Values returns a slice of values from any map. Copied from golang.org/x/exp/maps. +func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} diff --git a/vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go b/vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go new file mode 100644 index 0000000000..217638b363 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go @@ -0,0 +1,81 @@ +package util + +import ( + "bytes" + "compress/gzip" + "encoding/binary" + "fmt" + "io" + "net/http" + "strings" + "sync" + + "github.com/open-policy-agent/opa/util/decoding" +) + +var gzipReaderPool = sync.Pool{ + New: func() interface{} { + reader := new(gzip.Reader) + return reader + }, +} + +// Note(philipc): Originally taken from server/server.go +// The DecodingLimitHandler handles validating that the gzip payload is within the +// allowed max size limit. 
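The Add/Get pairs above are plain context plumbing; a sketch of how a server might install the limits before body parsing (the middleware name and the specific limits are ours, illustrative only):

	func withDecodingLimits(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			ctx := decoding.AddServerDecodingMaxLen(r.Context(), 1<<20) // cap raw body reads at 1 MiB
			ctx = decoding.AddServerDecodingGzipMaxLen(ctx, 4<<20)      // cap decompressed size at 4 MiB
			next.ServeHTTP(w, r.WithContext(ctx))
		})
	}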
Thus, in the event of a forged payload size trailer, +// the worst that can happen is that we waste memory up to the allowed max gzip +// payload size, but not an unbounded amount of memory, as was potentially +// possible before. +func ReadMaybeCompressedBody(r *http.Request) ([]byte, error) { + var content *bytes.Buffer + // Note(philipc): If the request body is of unknown length (such as what + // happens when 'Transfer-Encoding: chunked' is set), we have to do an + // incremental read of the body. In this case, we can't be too clever, we + // just do the best we can with whatever is streamed over to us. + // Fetch gzip payload size limit from request context. + if maxLength, ok := decoding.GetServerDecodingMaxLen(r.Context()); ok { + bs, err := io.ReadAll(io.LimitReader(r.Body, maxLength)) + if err != nil { + return bs, err + } + content = bytes.NewBuffer(bs) + } else { + // Read content from the request body into a buffer of known size. + content = bytes.NewBuffer(make([]byte, 0, r.ContentLength)) + if _, err := io.CopyN(content, r.Body, r.ContentLength); err != nil { + return content.Bytes(), err + } + } + + // Decompress gzip content by reading from the buffer. + if strings.Contains(r.Header.Get("Content-Encoding"), "gzip") { + // Fetch gzip payload size limit from request context. + gzipMaxLength, _ := decoding.GetServerDecodingGzipMaxLen(r.Context()) + + // Note(philipc): The last 4 bytes of a well-formed gzip blob will + // always be a little-endian uint32, representing the decompressed + // content size, modulo 2^32. We validate that the size is safe, + // earlier in DecodingLimitHandler. + sizeTrailerField := binary.LittleEndian.Uint32(content.Bytes()[content.Len()-4:]) + if sizeTrailerField > uint32(gzipMaxLength) { + return content.Bytes(), fmt.Errorf("gzip payload too large") + } + // Pull a gzip decompressor from the pool, and assign it to the current + // buffer, using Reset(). Later, return it back to the pool for another + // request to use. + gzReader := gzipReaderPool.Get().(*gzip.Reader) + if err := gzReader.Reset(content); err != nil { + return nil, err + } + defer gzReader.Close() + defer gzipReaderPool.Put(gzReader) + decompressedContent := bytes.NewBuffer(make([]byte, 0, sizeTrailerField)) + if _, err := io.CopyN(decompressedContent, gzReader, int64(sizeTrailerField)); err != nil { + return decompressedContent.Bytes(), err + } + return decompressedContent.Bytes(), nil + } + + // Request was not compressed; return the content bytes. + return content.Bytes(), nil +} diff --git a/vendor/github.com/open-policy-agent/opa/version/version.go b/vendor/github.com/open-policy-agent/opa/version/version.go index 83188ccaeb..7dece01579 100644 --- a/vendor/github.com/open-policy-agent/opa/version/version.go +++ b/vendor/github.com/open-policy-agent/opa/version/version.go @@ -11,7 +11,7 @@ import ( ) // Version is the canonical version of OPA. -var Version = "0.61.0" +var Version = "0.68.0" // GoVersion is the version of Go this was built with var GoVersion = runtime.Version() diff --git a/vendor/github.com/open-policy-agent/opa/version/version_go1.18.go b/vendor/github.com/open-policy-agent/opa/version/version_go1.18.go deleted file mode 100644 index aa6eec8556..0000000000 --- a/vendor/github.com/open-policy-agent/opa/version/version_go1.18.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2022 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
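A handler-side sketch of the helper above (hypothetical handler; assumes the decoding limits were attached to the request context upstream):

	func dataHandler(w http.ResponseWriter, r *http.Request) {
		body, err := util.ReadMaybeCompressedBody(r)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		_ = body // decode the JSON payload, etc.
	}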
- -//go:build go1.18 -// +build go1.18 - -package version diff --git a/vendor/github.com/pelletier/go-toml/v2/.gitignore b/vendor/github.com/pelletier/go-toml/v2/.gitignore index a69e2b0ebd..4b7c4eda3a 100644 --- a/vendor/github.com/pelletier/go-toml/v2/.gitignore +++ b/vendor/github.com/pelletier/go-toml/v2/.gitignore @@ -3,4 +3,5 @@ fuzz/ cmd/tomll/tomll cmd/tomljson/tomljson cmd/tomltestgen/tomltestgen -dist \ No newline at end of file +dist +tests/ diff --git a/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md index 04dd12bcbc..96ecf9e2b3 100644 --- a/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md +++ b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md @@ -165,25 +165,22 @@ Checklist: ### New release -1. Decide on the next version number. Use semver. -2. Generate release notes using [`gh`][gh]. Example: +1. Decide on the next version number. Use semver. Review commits since last + version to assess. +2. Tag release. For example: ``` -$ gh api -X POST \ - -F tag_name='v2.0.0-beta.5' \ - -F target_commitish='v2' \ - -F previous_tag_name='v2.0.0-beta.4' \ - --jq '.body' \ - repos/pelletier/go-toml/releases/generate-notes +git checkout v2 +git pull +git tag v2.2.0 +git push --tags ``` -3. Look for "Other changes". That would indicate a pull request not labeled - properly. Tweak labels and pull request titles until changelog looks good for - users. -4. [Draft new release][new-release]. -5. Fill tag and target with the same value used to generate the changelog. -6. Set title to the new tag value. -7. Paste the generated changelog. -8. Check "create discussion", in the "Releases" category. -9. Check pre-release if new version is an alpha or beta. +3. CI automatically builds a draft Github release. Review it and edit as + necessary. Look for "Other changes". That would indicate a pull request not + labeled properly. Tweak labels and pull request titles until changelog looks + good for users. +4. Check "create discussion" box, in the "Releases" category. +5. If new version is an alpha or beta only, check pre-release box. + [issues-tracker]: https://github.com/pelletier/go-toml/issues [bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md index 63b92f3b0b..d964b25fe1 100644 --- a/vendor/github.com/pelletier/go-toml/v2/README.md +++ b/vendor/github.com/pelletier/go-toml/v2/README.md @@ -98,9 +98,9 @@ Given the following struct, let's see how to read it and write it as TOML: ```go type MyConfig struct { - Version int - Name string - Tags []string + Version int + Name string + Tags []string } ``` @@ -119,7 +119,7 @@ tags = ["go", "toml"] var cfg MyConfig err := toml.Unmarshal([]byte(doc), &cfg) if err != nil { - panic(err) + panic(err) } fmt.Println("version:", cfg.Version) fmt.Println("name:", cfg.Name) @@ -140,14 +140,14 @@ as a TOML document: ```go cfg := MyConfig{ - Version: 2, - Name: "go-toml", - Tags: []string{"go", "toml"}, + Version: 2, + Name: "go-toml", + Tags: []string{"go", "toml"}, } b, err := toml.Marshal(cfg) if err != nil { - panic(err) + panic(err) } fmt.Println(string(b)) @@ -175,17 +175,17 @@ the AST level. See https://pkg.go.dev/github.com/pelletier/go-toml/v2/unstable. Execution time speedup compared to other Go TOML libraries: - - - - - - - - - - - + + + + + + + + + + +
-  <tr><td>Benchmark</td><td>go-toml v1</td><td>BurntSushi/toml</td></tr>
-  <tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>1.9x</td></tr>
-  <tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>1.8x</td></tr>
-  <tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>2.5x</td></tr>
-  <tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.9x</td></tr>
-  <tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.9x</td></tr>
-  <tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.4x</td><td>5.3x</td></tr>
+  <tr><td>Benchmark</td><td>go-toml v1</td><td>BurntSushi/toml</td></tr>
+  <tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>2.2x</td></tr>
+  <tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>2.1x</td></tr>
+  <tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>3.0x</td></tr>
+  <tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.7x</td></tr>
+  <tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.7x</td></tr>
+  <tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.6x</td><td>5.1x</td></tr>
See more

The table above has the results of the most common use-cases. The table below @@ -193,22 +193,22 @@ contains the results of all benchmarks, including unrealistic ones. It is provided for completeness.

-  <tr><td>Benchmark</td><td>go-toml v1</td><td>BurntSushi/toml</td></tr>
-  <tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.9x</td></tr>
-  <tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>4.2x</td></tr>
-  <tr><td>Unmarshal/SimpleDocument/map-2</td><td>4.5x</td><td>3.1x</td></tr>
-  <tr><td>Unmarshal/SimpleDocument/struct-2</td><td>6.2x</td><td>3.9x</td></tr>
-  <tr><td>UnmarshalDataset/example-2</td><td>3.1x</td><td>3.5x</td></tr>
-  <tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>3.1x</td></tr>
-  <tr><td>UnmarshalDataset/twitter-2</td><td>2.5x</td><td>2.6x</td></tr>
-  <tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.1x</td><td>2.2x</td></tr>
-  <tr><td>UnmarshalDataset/canada-2</td><td>1.6x</td><td>1.3x</td></tr>
-  <tr><td>UnmarshalDataset/config-2</td><td>4.3x</td><td>3.2x</td></tr>
-  <tr><td>[Geo mean]</td><td>2.7x</td><td>2.8x</td></tr>
+  <tr><td>Benchmark</td><td>go-toml v1</td><td>BurntSushi/toml</td></tr>
+  <tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.7x</td></tr>
+  <tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>3.8x</td></tr>
+  <tr><td>Unmarshal/SimpleDocument/map-2</td><td>3.8x</td><td>3.0x</td></tr>
+  <tr><td>Unmarshal/SimpleDocument/struct-2</td><td>5.6x</td><td>4.1x</td></tr>
+  <tr><td>UnmarshalDataset/example-2</td><td>3.0x</td><td>3.2x</td></tr>
+  <tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>2.9x</td></tr>
+  <tr><td>UnmarshalDataset/twitter-2</td><td>2.6x</td><td>2.7x</td></tr>
+  <tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.2x</td><td>2.3x</td></tr>
+  <tr><td>UnmarshalDataset/canada-2</td><td>1.8x</td><td>1.5x</td></tr>
+  <tr><td>UnmarshalDataset/config-2</td><td>4.1x</td><td>2.9x</td></tr>
+  <tr><td>geomean</td><td>2.7x</td><td>2.8x</td></tr>

This table can be generated with `./ci.sh benchmark -a -html`.

@@ -233,24 +233,24 @@ Go-toml provides three handy command line tools: * `tomljson`: Reads a TOML file and outputs its JSON representation. - ``` - $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest - $ tomljson --help - ``` + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest + $ tomljson --help + ``` * `jsontoml`: Reads a JSON file and outputs a TOML representation. - ``` - $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest - $ jsontoml --help - ``` + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest + $ jsontoml --help + ``` * `tomll`: Lints and reformats a TOML file. - ``` - $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest - $ tomll --help - ``` + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest + $ tomll --help + ``` ### Docker image @@ -261,7 +261,7 @@ Those tools are also available as a [Docker image][docker]. For example, to use docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml ``` -Multiple versions are availble on [ghcr.io][docker]. +Multiple versions are available on [ghcr.io][docker]. [docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml @@ -293,16 +293,16 @@ element in the interface to decode the object. For example: ```go type inner struct { - B interface{} + B interface{} } type doc struct { - A interface{} + A interface{} } d := doc{ - A: inner{ - B: "Before", - }, + A: inner{ + B: "Before", + }, } data := ` @@ -341,7 +341,7 @@ contained in the doc is superior to the capacity of the array. For example: ```go type doc struct { - A [2]string + A [2]string } d := doc{} err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d) @@ -565,10 +565,11 @@ complete solutions exist out there. ## Versioning -Go-toml follows [Semantic Versioning](https://semver.org). The supported version -of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of -this document. The last two major versions of Go are supported -(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). +Expect for parts explicitely marked otherwise, go-toml follows [Semantic +Versioning](https://semver.org). The supported version of +[TOML](https://github.com/toml-lang/toml) is indicated at the beginning of this +document. The last two major versions of Go are supported (see [Go Release +Policy](https://golang.org/doc/devel/release.html#policy)). ## License diff --git a/vendor/github.com/pelletier/go-toml/v2/SECURITY.md b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md index b2f21cfc92..d4d554fda9 100644 --- a/vendor/github.com/pelletier/go-toml/v2/SECURITY.md +++ b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md @@ -2,9 +2,6 @@ ## Supported Versions -Use this section to tell people about which versions of your project are -currently being supported with security updates. - | Version | Supported | | ---------- | ------------------ | | Latest 2.x | :white_check_mark: | diff --git a/vendor/github.com/pelletier/go-toml/v2/ci.sh b/vendor/github.com/pelletier/go-toml/v2/ci.sh index 9ae8b75375..86217a9b09 100644 --- a/vendor/github.com/pelletier/go-toml/v2/ci.sh +++ b/vendor/github.com/pelletier/go-toml/v2/ci.sh @@ -77,7 +77,7 @@ cover() { pushd "$dir" go test -covermode=atomic -coverpkg=./... -coverprofile=coverage.out.tmp ./... 
- cat coverage.out.tmp | grep -v fuzz | grep -v testsuite | grep -v tomltestgen | grep -v gotoml-test-decoder > coverage.out + grep -Ev '(fuzz|testsuite|tomltestgen|gotoml-test-decoder|gotoml-test-encoder)' coverage.out.tmp > coverage.out go tool cover -func=coverage.out echo "Coverage profile for ${branch}: ${dir}/coverage.out" >&2 popd @@ -152,7 +152,7 @@ bench() { fi export GOMAXPROCS=2 - nice -n -19 taskset --cpu-list 0,1 go test '-bench=^Benchmark(Un)?[mM]arshal' -count=5 -run=Nothing ./... | tee "${out}" + go test '-bench=^Benchmark(Un)?[mM]arshal' -count=10 -run=Nothing ./... | tee "${out}" popd if [ "${branch}" != "HEAD" ]; then @@ -161,10 +161,12 @@ bench() { } fmktemp() { - if mktemp --version|grep GNU >/dev/null; then - mktemp --suffix=-$1; + if mktemp --version &> /dev/null; then + # GNU + mktemp --suffix=-$1 else - mktemp -t $1; + # BSD + mktemp -t $1 fi } @@ -184,12 +186,14 @@ with open(sys.argv[1]) as f: lines.append(line.split(',')) results = [] -for line in reversed(lines[1:]): +for line in reversed(lines[2:]): + if len(line) < 8 or line[0] == "": + continue v2 = float(line[1]) results.append([ line[0].replace("-32", ""), "%.1fx" % (float(line[3])/v2), # v1 - "%.1fx" % (float(line[5])/v2), # bs + "%.1fx" % (float(line[7])/v2), # bs ]) # move geomean to the end results.append(results[0]) @@ -260,10 +264,10 @@ benchmark() { if [ "$1" = "-html" ]; then tmpcsv=`fmktemp csv` - benchstat -csv -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv + benchstat -format csv go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv benchstathtml $tmpcsv else - benchstat -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt + benchstat go-toml-v2.txt go-toml-v1.txt bs-toml.txt fi rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go index 40e23f8304..76df2d5b6a 100644 --- a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go @@ -57,7 +57,11 @@ type SeenTracker struct { currentIdx int } -var pool sync.Pool +var pool = sync.Pool{ + New: func() interface{} { + return &SeenTracker{} + }, +} func (s *SeenTracker) reset() { // Always contains a root element at index 0. @@ -149,8 +153,9 @@ func (s *SeenTracker) setExplicitFlag(parentIdx int) { // CheckExpression takes a top-level node and checks that it does not contain // keys that have been seen in previous calls, and validates that types are -// consistent. -func (s *SeenTracker) CheckExpression(node *unstable.Node) error { +// consistent. It returns true if it is the first time this node's key is seen. +// Useful to clear array tables on first use. 
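End to end, the boolean introduced here lets the decoder clear a pre-populated slice the first time its array table appears, so defaults are replaced rather than appended to. A behavioral sketch against go-toml v2's public API (our example, not an upstream test):

	type config struct {
		Server []struct{ Host string }
	}
	cfg := config{Server: []struct{ Host string }{{Host: "default"}}}
	_ = toml.Unmarshal([]byte("[[Server]]\nHost = 'a'"), &cfg)
	// cfg.Server now holds only {Host: "a"}; the stale default entry is cleared.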
+func (s *SeenTracker) CheckExpression(node *unstable.Node) (bool, error) { if s.entries == nil { s.reset() } @@ -166,7 +171,7 @@ func (s *SeenTracker) CheckExpression(node *unstable.Node) error { } } -func (s *SeenTracker) checkTable(node *unstable.Node) error { +func (s *SeenTracker) checkTable(node *unstable.Node) (bool, error) { if s.currentIdx >= 0 { s.setExplicitFlag(s.currentIdx) } @@ -192,7 +197,7 @@ func (s *SeenTracker) checkTable(node *unstable.Node) error { } else { entry := s.entries[idx] if entry.kind == valueKind { - return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) } } parentIdx = idx @@ -201,25 +206,27 @@ func (s *SeenTracker) checkTable(node *unstable.Node) error { k := it.Node().Data idx := s.find(parentIdx, k) + first := false if idx >= 0 { kind := s.entries[idx].kind if kind != tableKind { - return fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind) + return false, fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind) } if s.entries[idx].explicit { - return fmt.Errorf("toml: table %s already exists", string(k)) + return false, fmt.Errorf("toml: table %s already exists", string(k)) } s.entries[idx].explicit = true } else { idx = s.create(parentIdx, k, tableKind, true, false) + first = true } s.currentIdx = idx - return nil + return first, nil } -func (s *SeenTracker) checkArrayTable(node *unstable.Node) error { +func (s *SeenTracker) checkArrayTable(node *unstable.Node) (bool, error) { if s.currentIdx >= 0 { s.setExplicitFlag(s.currentIdx) } @@ -242,7 +249,7 @@ func (s *SeenTracker) checkArrayTable(node *unstable.Node) error { } else { entry := s.entries[idx] if entry.kind == valueKind { - return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) } } @@ -252,22 +259,23 @@ func (s *SeenTracker) checkArrayTable(node *unstable.Node) error { k := it.Node().Data idx := s.find(parentIdx, k) - if idx >= 0 { + firstTime := idx < 0 + if firstTime { + idx = s.create(parentIdx, k, arrayTableKind, true, false) + } else { kind := s.entries[idx].kind if kind != arrayTableKind { - return fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k)) + return false, fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k)) } s.clear(idx) - } else { - idx = s.create(parentIdx, k, arrayTableKind, true, false) } s.currentIdx = idx - return nil + return firstTime, nil } -func (s *SeenTracker) checkKeyValue(node *unstable.Node) error { +func (s *SeenTracker) checkKeyValue(node *unstable.Node) (bool, error) { parentIdx := s.currentIdx it := node.Key() @@ -281,11 +289,11 @@ func (s *SeenTracker) checkKeyValue(node *unstable.Node) error { } else { entry := s.entries[idx] if it.IsLast() { - return fmt.Errorf("toml: key %s is already defined", string(k)) + return false, fmt.Errorf("toml: key %s is already defined", string(k)) } else if entry.kind != tableKind { - return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) } else if entry.explicit { - return fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k)) + return false, fmt.Errorf("toml: cannot redefine table %s that has 
already been explicitly defined", string(k)) } } @@ -303,45 +311,39 @@ func (s *SeenTracker) checkKeyValue(node *unstable.Node) error { return s.checkArray(value) } - return nil + return false, nil } -func (s *SeenTracker) checkArray(node *unstable.Node) error { +func (s *SeenTracker) checkArray(node *unstable.Node) (first bool, err error) { it := node.Children() for it.Next() { n := it.Node() switch n.Kind { case unstable.InlineTable: - err := s.checkInlineTable(n) + first, err = s.checkInlineTable(n) if err != nil { - return err + return false, err } case unstable.Array: - err := s.checkArray(n) + first, err = s.checkArray(n) if err != nil { - return err + return false, err } } } - return nil + return first, nil } -func (s *SeenTracker) checkInlineTable(node *unstable.Node) error { - if pool.New == nil { - pool.New = func() interface{} { - return &SeenTracker{} - } - } - +func (s *SeenTracker) checkInlineTable(node *unstable.Node) (first bool, err error) { s = pool.Get().(*SeenTracker) s.reset() it := node.Children() for it.Next() { n := it.Node() - err := s.checkKeyValue(n) + first, err = s.checkKeyValue(n) if err != nil { - return err + return false, err } } @@ -352,5 +354,5 @@ func (s *SeenTracker) checkInlineTable(node *unstable.Node) error { // redefinition of its keys: check* functions cannot walk into // a value. pool.Put(s) - return nil + return first, nil } diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go index 6fe78533c1..7f4e20c128 100644 --- a/vendor/github.com/pelletier/go-toml/v2/marshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -3,6 +3,7 @@ package toml import ( "bytes" "encoding" + "encoding/json" "fmt" "io" "math" @@ -37,10 +38,11 @@ type Encoder struct { w io.Writer // global settings - tablesInline bool - arraysMultiline bool - indentSymbol string - indentTables bool + tablesInline bool + arraysMultiline bool + indentSymbol string + indentTables bool + marshalJsonNumbers bool } // NewEncoder returns a new Encoder that writes to w. @@ -87,6 +89,17 @@ func (enc *Encoder) SetIndentTables(indent bool) *Encoder { return enc } +// SetMarshalJsonNumbers forces the encoder to serialize `json.Number` as a +// float or integer instead of relying on TextMarshaler to emit a string. +// +// *Unstable:* This method does not follow the compatibility guarantees of +// semver. It can be changed or removed without a new major version being +// issued. +func (enc *Encoder) SetMarshalJsonNumbers(indent bool) *Encoder { + enc.marshalJsonNumbers = indent + return enc +} + // Encode writes a TOML representation of v to the stream. // // If v cannot be represented to TOML it returns an error. @@ -252,6 +265,18 @@ func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, e return append(b, x.String()...), nil case LocalDateTime: return append(b, x.String()...), nil + case json.Number: + if enc.marshalJsonNumbers { + if x == "" { /// Useful zero value. 
+ return append(b, "0"...), nil + } else if v, err := x.Int64(); err == nil { + return enc.encode(b, ctx, reflect.ValueOf(v)) + } else if f, err := x.Float64(); err == nil { + return enc.encode(b, ctx, reflect.ValueOf(f)) + } else { + return nil, fmt.Errorf("toml: unable to convert %q to int64 or float64", x) + } + } } hasTextMarshaler := v.Type().Implements(textMarshalerType) @@ -707,6 +732,8 @@ func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { if fieldType.Anonymous { if fieldType.Type.Kind() == reflect.Struct { walkStruct(ctx, t, f) + } else if fieldType.Type.Kind() == reflect.Pointer && !f.IsNil() && f.Elem().Kind() == reflect.Struct { + walkStruct(ctx, t, f.Elem()) } continue } else { @@ -998,6 +1025,10 @@ func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect. scratch = enc.commented(ctx.commented, scratch) + if enc.indentTables { + scratch = enc.indent(ctx.indent, scratch) + } + scratch = append(scratch, "[["...) for i, k := range ctx.parentKey { diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index 868c74c157..98231bae65 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -35,6 +35,9 @@ type Decoder struct { // global settings strict bool + + // toggles unmarshaler interface + unmarshalerInterface bool } // NewDecoder creates a new Decoder that will read from r. @@ -54,6 +57,24 @@ func (d *Decoder) DisallowUnknownFields() *Decoder { return d } +// EnableUnmarshalerInterface allows to enable unmarshaler interface. +// +// With this feature enabled, types implementing the unstable/Unmarshaler +// interface can be decoded from any structure of the document. It allows types +// that don't have a straightfoward TOML representation to provide their own +// decoding logic. +// +// Currently, types can only decode from a single value. Tables and array tables +// are not supported. +// +// *Unstable:* This method does not follow the compatibility guarantees of +// semver. It can be changed or removed without a new major version being +// issued. +func (d *Decoder) EnableUnmarshalerInterface() *Decoder { + d.unmarshalerInterface = true + return d +} + // Decode the whole content of r into v. // // By default, values in the document that don't exist in the target Go value @@ -108,6 +129,7 @@ func (d *Decoder) Decode(v interface{}) error { strict: strict{ Enabled: d.strict, }, + unmarshalerInterface: d.unmarshalerInterface, } return dec.FromParser(v) @@ -127,6 +149,10 @@ type decoder struct { // need to be skipped. skipUntilTable bool + // Flag indicating that the current array/slice table should be cleared because + // it is the first encounter of an array table. + clearArrayTable bool + // Tracks position in Go arrays. // This is used when decoding [[array tables]] into Go arrays. Given array // tables are separate TOML expression, we need to keep track of where we @@ -139,6 +165,9 @@ type decoder struct { // Strict mode strict strict + // Flag that enables/disables unmarshaler interface. + unmarshalerInterface bool + // Current context for the error. 
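A sketch of opting in to the unstable interface enabled above (the duration type and its parsing are our illustration; we assume node.Data carries the scalar's raw contents, and the upstream API is explicitly marked unstable):

	type duration time.Duration

	func (d *duration) UnmarshalTOML(node *unstable.Node) error {
		v, err := time.ParseDuration(string(node.Data))
		if err != nil {
			return err
		}
		*d = duration(v)
		return nil
	}

	var cfg struct{ Timeout duration }
	dec := toml.NewDecoder(strings.NewReader(`Timeout = "1m30s"`))
	err := dec.EnableUnmarshalerInterface().Decode(&cfg)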
@@ -108,6 +129,7 @@ func (d *Decoder) Decode(v interface{}) error {
 		strict: strict{
 			Enabled: d.strict,
 		},
+		unmarshalerInterface: d.unmarshalerInterface,
 	}
 
 	return dec.FromParser(v)
@@ -127,6 +149,10 @@ type decoder struct {
 	// need to be skipped.
 	skipUntilTable bool
 
+	// Flag indicating that the current array/slice table should be cleared because
+	// it is the first encounter of an array table.
+	clearArrayTable bool
+
 	// Tracks position in Go arrays.
 	// This is used when decoding [[array tables]] into Go arrays. Given array
 	// tables are separate TOML expression, we need to keep track of where we
@@ -139,6 +165,9 @@ type decoder struct {
 	// Strict mode
 	strict strict
 
+	// Flag that enables/disables unmarshaler interface.
+	unmarshalerInterface bool
+
 	// Current context for the error.
 	errorContext *errorContext
 }
@@ -246,9 +275,10 @@ Rules for the unmarshal code:
 func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) error {
 	var x reflect.Value
 	var err error
+	var first bool // used to clear array tables on first use
 
 	if !(d.skipUntilTable && expr.Kind == unstable.KeyValue) {
-		err = d.seen.CheckExpression(expr)
+		first, err = d.seen.CheckExpression(expr)
 		if err != nil {
 			return err
 		}
@@ -267,6 +297,7 @@ func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) err
 	case unstable.ArrayTable:
 		d.skipUntilTable = false
 		d.strict.EnterArrayTable(expr)
+		d.clearArrayTable = first
 		x, err = d.handleArrayTable(expr.Key(), v)
 	default:
 		panic(fmt.Errorf("parser should not permit expression of kind %s at document root", expr.Kind))
@@ -307,6 +338,10 @@ func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflec
 			reflect.Copy(nelem, elem)
 			elem = nelem
 		}
+		if d.clearArrayTable && elem.Len() > 0 {
+			elem.SetLen(0)
+			d.clearArrayTable = false
+		}
 	}
 	return d.handleArrayTableCollectionLast(key, elem)
 case reflect.Ptr:
@@ -325,6 +360,10 @@ func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflec
 		return v, nil
 	case reflect.Slice:
+		if d.clearArrayTable && v.Len() > 0 {
+			v.SetLen(0)
+			d.clearArrayTable = false
+		}
 		elemType := v.Type().Elem()
 		var elem reflect.Value
 		if elemType.Kind() == reflect.Interface {
@@ -576,7 +615,7 @@ func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) {
 			break
 		}
 
-		err := d.seen.CheckExpression(expr)
+		_, err := d.seen.CheckExpression(expr)
 		if err != nil {
 			return reflect.Value{}, err
 		}
@@ -634,6 +673,14 @@ func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error {
 		v = initAndDereferencePointer(v)
 	}
 
+	if d.unmarshalerInterface {
+		if v.CanAddr() && v.Addr().CanInterface() {
+			if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok {
+				return outi.UnmarshalTOML(value)
+			}
+		}
+	}
+
 	ok, err := d.tryTextUnmarshaler(value, v)
 	if ok || err != nil {
 		return err
@@ -1097,9 +1144,9 @@ func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node
 
 	f := fieldByIndex(v, path)
 
-	if !f.CanSet() {
-		// If the field is not settable, need to take a slower path and make a copy of
-		// the struct itself to a new location.
+	if !f.CanAddr() {
+		// If the field is not addressable, we need to take a slower path and
+		// make a copy of the struct itself to a new location.
 		nvp := reflect.New(v.Type())
 		nvp.Elem().Set(v)
 		v = nvp.Elem()
diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go
new file mode 100644
index 0000000000..00cfd6de45
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go
@@ -0,0 +1,7 @@
+package unstable
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a TOML document.
+type Unmarshaler interface {
+	UnmarshalTOML(value *Node) error
+}
diff --git a/vendor/github.com/rogpeppe/go-internal/LICENSE b/vendor/github.com/rogpeppe/go-internal/LICENSE
new file mode 100644
index 0000000000..49ea0f9288
--- /dev/null
+++ b/vendor/github.com/rogpeppe/go-internal/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2018 The Go Authors. All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/mksyscall.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/mksyscall.go new file mode 100644 index 0000000000..a8edafb3c3 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/mksyscall.go @@ -0,0 +1,7 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go security_windows.go psapi_windows.go symlink_windows.go diff --git a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/psapi_windows.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/psapi_windows.go new file mode 100644 index 0000000000..b138e658a9 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/psapi_windows.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +type PROCESS_MEMORY_COUNTERS struct { + CB uint32 + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr +} + +//sys GetProcessMemoryInfo(handle syscall.Handle, memCounters *PROCESS_MEMORY_COUNTERS, cb uint32) (err error) = psapi.GetProcessMemoryInfo diff --git a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/reparse_windows.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/reparse_windows.go new file mode 100644 index 0000000000..7c6ad8fb7e --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/reparse_windows.go @@ -0,0 +1,64 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +const ( + FSCTL_SET_REPARSE_POINT = 0x000900A4 + IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 + + SYMLINK_FLAG_RELATIVE = 1 +) + +// These structures are described +// in https://msdn.microsoft.com/en-us/library/cc232007.aspx +// and https://msdn.microsoft.com/en-us/library/cc232006.aspx. + +// REPARSE_DATA_BUFFER_HEADER is a common part of REPARSE_DATA_BUFFER structure. +type REPARSE_DATA_BUFFER_HEADER struct { + ReparseTag uint32 + // The size, in bytes, of the reparse data that follows + // the common portion of the REPARSE_DATA_BUFFER element. + // This value is the length of the data starting at the + // SubstituteNameOffset field. + ReparseDataLength uint16 + Reserved uint16 +} + +type SymbolicLinkReparseBuffer struct { + // The integer that contains the offset, in bytes, + // of the substitute name string in the PathBuffer array, + // computed as an offset from byte 0 of PathBuffer. Note that + // this offset must be divided by 2 to get the array index. + SubstituteNameOffset uint16 + // The integer that contains the length, in bytes, of the + // substitute name string. If this string is null-terminated, + // SubstituteNameLength does not include the Unicode null character. + SubstituteNameLength uint16 + // PrintNameOffset is similar to SubstituteNameOffset. + PrintNameOffset uint16 + // PrintNameLength is similar to SubstituteNameLength. + PrintNameLength uint16 + // Flags specifies whether the substitute name is a full path name or + // a path name relative to the directory containing the symbolic link. + Flags uint32 + PathBuffer [1]uint16 +} + +type MountPointReparseBuffer struct { + // The integer that contains the offset, in bytes, + // of the substitute name string in the PathBuffer array, + // computed as an offset from byte 0 of PathBuffer. Note that + // this offset must be divided by 2 to get the array index. + SubstituteNameOffset uint16 + // The integer that contains the length, in bytes, of the + // substitute name string. If this string is null-terminated, + // SubstituteNameLength does not include the Unicode null character. + SubstituteNameLength uint16 + // PrintNameOffset is similar to SubstituteNameOffset. + PrintNameOffset uint16 + // PrintNameLength is similar to SubstituteNameLength. + PrintNameLength uint16 + PathBuffer [1]uint16 +} diff --git a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/security_windows.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/security_windows.go new file mode 100644 index 0000000000..4a2dfc0c73 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/security_windows.go @@ -0,0 +1,128 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package windows + +import ( + "syscall" + "unsafe" +) + +const ( + SecurityAnonymous = 0 + SecurityIdentification = 1 + SecurityImpersonation = 2 + SecurityDelegation = 3 +) + +//sys ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf +//sys RevertToSelf() (err error) = advapi32.RevertToSelf + +const ( + TOKEN_ADJUST_PRIVILEGES = 0x0020 + SE_PRIVILEGE_ENABLED = 0x00000002 +) + +type LUID struct { + LowPart uint32 + HighPart int32 +} + +type LUID_AND_ATTRIBUTES struct { + Luid LUID + Attributes uint32 +} + +type TOKEN_PRIVILEGES struct { + PrivilegeCount uint32 + Privileges [1]LUID_AND_ATTRIBUTES +} + +//sys OpenThreadToken(h syscall.Handle, access uint32, openasself bool, token *syscall.Token) (err error) = advapi32.OpenThreadToken +//sys LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) = advapi32.LookupPrivilegeValueW +//sys adjustTokenPrivileges(token syscall.Token, disableAllPrivileges bool, newstate *TOKEN_PRIVILEGES, buflen uint32, prevstate *TOKEN_PRIVILEGES, returnlen *uint32) (ret uint32, err error) [true] = advapi32.AdjustTokenPrivileges + +func AdjustTokenPrivileges(token syscall.Token, disableAllPrivileges bool, newstate *TOKEN_PRIVILEGES, buflen uint32, prevstate *TOKEN_PRIVILEGES, returnlen *uint32) error { + ret, err := adjustTokenPrivileges(token, disableAllPrivileges, newstate, buflen, prevstate, returnlen) + if ret == 0 { + // AdjustTokenPrivileges call failed + return err + } + // AdjustTokenPrivileges call succeeded + if err == syscall.EINVAL { + // GetLastError returned ERROR_SUCCESS + return nil + } + return err +} + +//sys DuplicateTokenEx(hExistingToken syscall.Token, dwDesiredAccess uint32, lpTokenAttributes *syscall.SecurityAttributes, impersonationLevel uint32, tokenType TokenType, phNewToken *syscall.Token) (err error) = advapi32.DuplicateTokenEx +//sys SetTokenInformation(tokenHandle syscall.Token, tokenInformationClass uint32, tokenInformation uintptr, tokenInformationLength uint32) (err error) = advapi32.SetTokenInformation + +type SID_AND_ATTRIBUTES struct { + Sid *syscall.SID + Attributes uint32 +} + +type TOKEN_MANDATORY_LABEL struct { + Label SID_AND_ATTRIBUTES +} + +func (tml *TOKEN_MANDATORY_LABEL) Size() uint32 { + return uint32(unsafe.Sizeof(TOKEN_MANDATORY_LABEL{})) + syscall.GetLengthSid(tml.Label.Sid) +} + +const SE_GROUP_INTEGRITY = 0x00000020 + +type TokenType uint32 + +const ( + TokenPrimary TokenType = 1 + TokenImpersonation TokenType = 2 +) + +//sys GetProfilesDirectory(dir *uint16, dirLen *uint32) (err error) = userenv.GetProfilesDirectoryW + +const ( + LG_INCLUDE_INDIRECT = 0x1 + MAX_PREFERRED_LENGTH = 0xFFFFFFFF +) + +type LocalGroupUserInfo0 struct { + Name *uint16 +} + +type UserInfo4 struct { + Name *uint16 + Password *uint16 + PasswordAge uint32 + Priv uint32 + HomeDir *uint16 + Comment *uint16 + Flags uint32 + ScriptPath *uint16 + AuthFlags uint32 + FullName *uint16 + UsrComment *uint16 + Parms *uint16 + Workstations *uint16 + LastLogon uint32 + LastLogoff uint32 + AcctExpires uint32 + MaxStorage uint32 + UnitsPerWeek uint32 + LogonHours *byte + BadPwCount uint32 + NumLogons uint32 + LogonServer *uint16 + CountryCode uint32 + CodePage uint32 + UserSid *syscall.SID + PrimaryGroupID uint32 + Profile *uint16 + HomeDirDrive *uint16 + PasswordExpired uint32 +} + +//sys NetUserGetLocalGroups(serverName *uint16, userName *uint16, level uint32, flags uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32) (neterr error) = netapi32.NetUserGetLocalGroups diff 
--git a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/symlink_windows.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/symlink_windows.go new file mode 100644 index 0000000000..b64d058d13 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/symlink_windows.go @@ -0,0 +1,39 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import "syscall" + +const ( + ERROR_INVALID_PARAMETER syscall.Errno = 87 + + // symlink support for CreateSymbolicLink() starting with Windows 10 (1703, v10.0.14972) + SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE = 0x2 + + // FileInformationClass values + FileBasicInfo = 0 // FILE_BASIC_INFO + FileStandardInfo = 1 // FILE_STANDARD_INFO + FileNameInfo = 2 // FILE_NAME_INFO + FileStreamInfo = 7 // FILE_STREAM_INFO + FileCompressionInfo = 8 // FILE_COMPRESSION_INFO + FileAttributeTagInfo = 9 // FILE_ATTRIBUTE_TAG_INFO + FileIdBothDirectoryInfo = 0xa // FILE_ID_BOTH_DIR_INFO + FileIdBothDirectoryRestartInfo = 0xb // FILE_ID_BOTH_DIR_INFO + FileRemoteProtocolInfo = 0xd // FILE_REMOTE_PROTOCOL_INFO + FileFullDirectoryInfo = 0xe // FILE_FULL_DIR_INFO + FileFullDirectoryRestartInfo = 0xf // FILE_FULL_DIR_INFO + FileStorageInfo = 0x10 // FILE_STORAGE_INFO + FileAlignmentInfo = 0x11 // FILE_ALIGNMENT_INFO + FileIdInfo = 0x12 // FILE_ID_INFO + FileIdExtdDirectoryInfo = 0x13 // FILE_ID_EXTD_DIR_INFO + FileIdExtdDirectoryRestartInfo = 0x14 // FILE_ID_EXTD_DIR_INFO +) + +type FILE_ATTRIBUTE_TAG_INFO struct { + FileAttributes uint32 + ReparseTag uint32 +} + +//sys GetFileInformationByHandleEx(handle syscall.Handle, class uint32, info *byte, bufsize uint32) (err error) diff --git a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/syscall_windows.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/syscall_windows.go new file mode 100644 index 0000000000..121132f6f7 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/syscall_windows.go @@ -0,0 +1,307 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package windows + +import ( + "sync" + "syscall" + "unsafe" +) + +const ( + ERROR_SHARING_VIOLATION syscall.Errno = 32 + ERROR_LOCK_VIOLATION syscall.Errno = 33 + ERROR_NOT_SUPPORTED syscall.Errno = 50 + ERROR_CALL_NOT_IMPLEMENTED syscall.Errno = 120 + ERROR_INVALID_NAME syscall.Errno = 123 + ERROR_LOCK_FAILED syscall.Errno = 167 + ERROR_NO_UNICODE_TRANSLATION syscall.Errno = 1113 +) + +const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 + +const ( + IF_TYPE_OTHER = 1 + IF_TYPE_ETHERNET_CSMACD = 6 + IF_TYPE_ISO88025_TOKENRING = 9 + IF_TYPE_PPP = 23 + IF_TYPE_SOFTWARE_LOOPBACK = 24 + IF_TYPE_ATM = 37 + IF_TYPE_IEEE80211 = 71 + IF_TYPE_TUNNEL = 131 + IF_TYPE_IEEE1394 = 144 +) + +type SocketAddress struct { + Sockaddr *syscall.RawSockaddrAny + SockaddrLength int32 +} + +type IpAdapterUnicastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterUnicastAddress + Address SocketAddress + PrefixOrigin int32 + SuffixOrigin int32 + DadState int32 + ValidLifetime uint32 + PreferredLifetime uint32 + LeaseLifetime uint32 + OnLinkPrefixLength uint8 +} + +type IpAdapterAnycastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterAnycastAddress + Address SocketAddress +} + +type IpAdapterMulticastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterMulticastAddress + Address SocketAddress +} + +type IpAdapterDnsServerAdapter struct { + Length uint32 + Reserved uint32 + Next *IpAdapterDnsServerAdapter + Address SocketAddress +} + +type IpAdapterPrefix struct { + Length uint32 + Flags uint32 + Next *IpAdapterPrefix + Address SocketAddress + PrefixLength uint32 +} + +type IpAdapterAddresses struct { + Length uint32 + IfIndex uint32 + Next *IpAdapterAddresses + AdapterName *byte + FirstUnicastAddress *IpAdapterUnicastAddress + FirstAnycastAddress *IpAdapterAnycastAddress + FirstMulticastAddress *IpAdapterMulticastAddress + FirstDnsServerAddress *IpAdapterDnsServerAdapter + DnsSuffix *uint16 + Description *uint16 + FriendlyName *uint16 + PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte + PhysicalAddressLength uint32 + Flags uint32 + Mtu uint32 + IfType uint32 + OperStatus uint32 + Ipv6IfIndex uint32 + ZoneIndices [16]uint32 + FirstPrefix *IpAdapterPrefix + /* more fields might be present here. 
*/ +} + +const ( + IfOperStatusUp = 1 + IfOperStatusDown = 2 + IfOperStatusTesting = 3 + IfOperStatusUnknown = 4 + IfOperStatusDormant = 5 + IfOperStatusNotPresent = 6 + IfOperStatusLowerLayerDown = 7 +) + +//sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses +//sys GetComputerNameEx(nameformat uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW +//sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW +//sys GetModuleFileName(module syscall.Handle, fn *uint16, len uint32) (n uint32, err error) = kernel32.GetModuleFileNameW + +const ( + WSA_FLAG_OVERLAPPED = 0x01 + WSA_FLAG_NO_HANDLE_INHERIT = 0x80 + + WSAEMSGSIZE syscall.Errno = 10040 + + MSG_PEEK = 0x2 + MSG_TRUNC = 0x0100 + MSG_CTRUNC = 0x0200 + + socket_error = uintptr(^uint32(0)) +) + +var WSAID_WSASENDMSG = syscall.GUID{ + Data1: 0xa441e712, + Data2: 0x754f, + Data3: 0x43ca, + Data4: [8]byte{0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d}, +} + +var WSAID_WSARECVMSG = syscall.GUID{ + Data1: 0xf689d7c8, + Data2: 0x6f1f, + Data3: 0x436b, + Data4: [8]byte{0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22}, +} + +var sendRecvMsgFunc struct { + once sync.Once + sendAddr uintptr + recvAddr uintptr + err error +} + +type WSAMsg struct { + Name *syscall.RawSockaddrAny + Namelen int32 + Buffers *syscall.WSABuf + BufferCount uint32 + Control syscall.WSABuf + Flags uint32 +} + +//sys WSASocket(af int32, typ int32, protocol int32, protinfo *syscall.WSAProtocolInfo, group uint32, flags uint32) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = ws2_32.WSASocketW + +func loadWSASendRecvMsg() error { + sendRecvMsgFunc.once.Do(func() { + var s syscall.Handle + s, sendRecvMsgFunc.err = syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_UDP) + if sendRecvMsgFunc.err != nil { + return + } + defer syscall.CloseHandle(s) + var n uint32 + sendRecvMsgFunc.err = syscall.WSAIoctl(s, + syscall.SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSARECVMSG)), + uint32(unsafe.Sizeof(WSAID_WSARECVMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.recvAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.recvAddr)), + &n, nil, 0) + if sendRecvMsgFunc.err != nil { + return + } + sendRecvMsgFunc.err = syscall.WSAIoctl(s, + syscall.SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSASENDMSG)), + uint32(unsafe.Sizeof(WSAID_WSASENDMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.sendAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.sendAddr)), + &n, nil, 0) + }) + return sendRecvMsgFunc.err +} + +func WSASendMsg(fd syscall.Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlapped *syscall.Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return err +} + +func WSARecvMsg(fd syscall.Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *syscall.Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), 
uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return err +} + +const ( + ComputerNameNetBIOS = 0 + ComputerNameDnsHostname = 1 + ComputerNameDnsDomain = 2 + ComputerNameDnsFullyQualified = 3 + ComputerNamePhysicalNetBIOS = 4 + ComputerNamePhysicalDnsHostname = 5 + ComputerNamePhysicalDnsDomain = 6 + ComputerNamePhysicalDnsFullyQualified = 7 + ComputerNameMax = 8 + + MOVEFILE_REPLACE_EXISTING = 0x1 + MOVEFILE_COPY_ALLOWED = 0x2 + MOVEFILE_DELAY_UNTIL_REBOOT = 0x4 + MOVEFILE_WRITE_THROUGH = 0x8 + MOVEFILE_CREATE_HARDLINK = 0x10 + MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 +) + +func Rename(oldpath, newpath string) error { + from, err := syscall.UTF16PtrFromString(oldpath) + if err != nil { + return err + } + to, err := syscall.UTF16PtrFromString(newpath) + if err != nil { + return err + } + return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING) +} + +//sys LockFileEx(file syscall.Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) = kernel32.LockFileEx +//sys UnlockFileEx(file syscall.Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) = kernel32.UnlockFileEx + +const ( + LOCKFILE_FAIL_IMMEDIATELY = 0x00000001 + LOCKFILE_EXCLUSIVE_LOCK = 0x00000002 +) + +const MB_ERR_INVALID_CHARS = 8 + +//sys GetACP() (acp uint32) = kernel32.GetACP +//sys GetConsoleCP() (ccp uint32) = kernel32.GetConsoleCP +//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar +//sys GetCurrentThread() (pseudoHandle syscall.Handle, err error) = kernel32.GetCurrentThread + +const STYPE_DISKTREE = 0x00 + +type SHARE_INFO_2 struct { + Netname *uint16 + Type uint32 + Remark *uint16 + Permissions uint32 + MaxUses uint32 + CurrentUses uint32 + Path *uint16 + Passwd *uint16 +} + +//sys NetShareAdd(serverName *uint16, level uint32, buf *byte, parmErr *uint16) (neterr error) = netapi32.NetShareAdd +//sys NetShareDel(serverName *uint16, netName *uint16, reserved uint32) (neterr error) = netapi32.NetShareDel + +const ( + FILE_NAME_NORMALIZED = 0x0 + FILE_NAME_OPENED = 0x8 + + VOLUME_NAME_DOS = 0x0 + VOLUME_NAME_GUID = 0x1 + VOLUME_NAME_NONE = 0x4 + VOLUME_NAME_NT = 0x2 +) + +//sys GetFinalPathNameByHandle(file syscall.Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW + +func LoadGetFinalPathNameByHandle() error { + return procGetFinalPathNameByHandleW.Find() +} diff --git a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll/sysdll.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll/sysdll.go new file mode 100644 index 0000000000..4e0018f387 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll/sysdll.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sysdll is an internal leaf package that records and reports +// which Windows DLL names are used by Go itself. These DLLs are then +// only loaded from the System32 directory. See Issue 14959. 
+package sysdll + +// IsSystemDLL reports whether the named dll key (a base name, like +// "foo.dll") is a system DLL which should only be loaded from the +// Windows SYSTEM32 directory. +// +// Filenames are case sensitive, but that doesn't matter because +// the case registered with Add is also the same case used with +// LoadDLL later. +// +// It has no associated mutex and should only be mutated serially +// (currently: during init), and not concurrent with DLL loading. +var IsSystemDLL = map[string]bool{} + +// Add notes that dll is a system32 DLL which should only be loaded +// from the Windows SYSTEM32 directory. It returns its argument back, +// for ease of use in generated code. +func Add(dll string) string { + IsSystemDLL[dll] = true + return dll +} diff --git a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/zsyscall_windows.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/zsyscall_windows.go new file mode 100644 index 0000000000..3ed2d9fe01 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/zsyscall_windows.go @@ -0,0 +1,363 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package windows + +import ( + "syscall" + "unsafe" + + "github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modiphlpapi = syscall.NewLazyDLL(sysdll.Add("iphlpapi.dll")) + modkernel32 = syscall.NewLazyDLL(sysdll.Add("kernel32.dll")) + modws2_32 = syscall.NewLazyDLL(sysdll.Add("ws2_32.dll")) + modnetapi32 = syscall.NewLazyDLL(sysdll.Add("netapi32.dll")) + modadvapi32 = syscall.NewLazyDLL(sysdll.Add("advapi32.dll")) + moduserenv = syscall.NewLazyDLL(sysdll.Add("userenv.dll")) + modpsapi = syscall.NewLazyDLL(sysdll.Add("psapi.dll")) + + procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") + procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") + procMoveFileExW = modkernel32.NewProc("MoveFileExW") + procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") + procWSASocketW = modws2_32.NewProc("WSASocketW") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") + procGetACP = modkernel32.NewProc("GetACP") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") + procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procNetShareAdd = modnetapi32.NewProc("NetShareAdd") + procNetShareDel = modnetapi32.NewProc("NetShareDel") + procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") + procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") + procGetProfilesDirectoryW = moduserenv.NewProc("GetProfilesDirectoryW") + procNetUserGetLocalGroups = modnetapi32.NewProc("NetUserGetLocalGroups") + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") +) + +func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { + r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetComputerNameEx(nameformat uint32, buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nameformat), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetModuleFileName(module syscall.Handle, fn *uint16, len uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(fn)), uintptr(len)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSASocket(af int32, typ int32, protocol int32, 
protinfo *syscall.WSAProtocolInfo, group uint32, flags uint32) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protinfo)), uintptr(group), uintptr(flags)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LockFileEx(file syscall.Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func UnlockFileEx(file syscall.Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetACP() (acp uint32) { + r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + acp = uint32(r0) + return +} + +func GetConsoleCP() (ccp uint32) { + r0, _, _ := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + ccp = uint32(r0) + return +} + +func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { + r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + nwrite = int32(r0) + if nwrite == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCurrentThread() (pseudoHandle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + pseudoHandle = syscall.Handle(r0) + if pseudoHandle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func NetShareAdd(serverName *uint16, level uint32, buf *byte, parmErr *uint16) (neterr error) { + r0, _, _ := syscall.Syscall6(procNetShareAdd.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parmErr)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetShareDel(serverName *uint16, netName *uint16, reserved uint32) (neterr error) { + r0, _, _ := syscall.Syscall(procNetShareDel.Addr(), 3, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(netName)), uintptr(reserved)) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func GetFinalPathNameByHandle(file syscall.Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ImpersonateSelf(impersonationlevel uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + if r1 == 0 { + if e1 
!= 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func RevertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenThreadToken(h syscall.Handle, access uint32, openasself bool, token *syscall.Token) (err error) { + var _p0 uint32 + if openasself { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(h), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func adjustTokenPrivileges(token syscall.Token, disableAllPrivileges bool, newstate *TOKEN_PRIVILEGES, buflen uint32, prevstate *TOKEN_PRIVILEGES, returnlen *uint32) (ret uint32, err error) { + var _p0 uint32 + if disableAllPrivileges { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + ret = uint32(r0) + if true { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DuplicateTokenEx(hExistingToken syscall.Token, dwDesiredAccess uint32, lpTokenAttributes *syscall.SecurityAttributes, impersonationLevel uint32, tokenType TokenType, phNewToken *syscall.Token) (err error) { + r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(hExistingToken), uintptr(dwDesiredAccess), uintptr(unsafe.Pointer(lpTokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(phNewToken))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetTokenInformation(tokenHandle syscall.Token, tokenInformationClass uint32, tokenInformation uintptr, tokenInformationLength uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(tokenHandle), uintptr(tokenInformationClass), uintptr(tokenInformation), uintptr(tokenInformationLength), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProfilesDirectory(dir *uint16, dirLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetProfilesDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func NetUserGetLocalGroups(serverName *uint16, userName *uint16, level uint32, flags uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32) (neterr error) { + r0, _, _ := syscall.Syscall9(procNetUserGetLocalGroups.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(flags), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), 0) + if r0 != 0 { + neterr = 
syscall.Errno(r0) + } + return +} + +func GetProcessMemoryInfo(handle syscall.Handle, memCounters *PROCESS_MEMORY_COUNTERS, cb uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessMemoryInfo.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(memCounters)), uintptr(cb)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileInformationByHandleEx(handle syscall.Handle, class uint32, info *byte, bufsize uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(info)), uintptr(bufsize), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock.go new file mode 100644 index 0000000000..05f27c321a --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock.go @@ -0,0 +1,99 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filelock provides a platform-independent API for advisory file +// locking. Calls to functions in this package on platforms that do not support +// advisory locks will return errors for which IsNotSupported returns true. +package filelock + +import ( + "errors" + "io/fs" + "os" +) + +// A File provides the minimal set of methods required to lock an open file. +// File implementations must be usable as map keys. +// The usual implementation is *os.File. +type File interface { + // Name returns the name of the file. + Name() string + + // Fd returns a valid file descriptor. + // (If the File is an *os.File, it must not be closed.) + Fd() uintptr + + // Stat returns the FileInfo structure describing file. + Stat() (fs.FileInfo, error) +} + +// Lock places an advisory write lock on the file, blocking until it can be +// locked. +// +// If Lock returns nil, no other process will be able to place a read or write +// lock on the file until this process exits, closes f, or calls Unlock on it. +// +// If f's descriptor is already read- or write-locked, the behavior of Lock is +// unspecified. +// +// Closing the file may or may not release the lock promptly. Callers should +// ensure that Unlock is always called when Lock succeeds. +func Lock(f File) error { + return lock(f, writeLock) +} + +// RLock places an advisory read lock on the file, blocking until it can be locked. +// +// If RLock returns nil, no other process will be able to place a write lock on +// the file until this process exits, closes f, or calls Unlock on it. +// +// If f is already read- or write-locked, the behavior of RLock is unspecified. +// +// Closing the file may or may not release the lock promptly. Callers should +// ensure that Unlock is always called if RLock succeeds. +func RLock(f File) error { + return lock(f, readLock) +} + +// Unlock removes an advisory lock placed on f by this process. +// +// The caller must not attempt to unlock a file that is not locked. +func Unlock(f File) error { + return unlock(f) +} + +// String returns the name of the function corresponding to lt +// (Lock, RLock, or Unlock). 
+func (lt lockType) String() string { + switch lt { + case readLock: + return "RLock" + case writeLock: + return "Lock" + default: + return "Unlock" + } +} + +// IsNotSupported returns a boolean indicating whether the error is known to +// report that a function is not supported (possibly for a specific input). +// It is satisfied by ErrNotSupported as well as some syscall errors. +func IsNotSupported(err error) bool { + return isNotSupported(underlyingError(err)) +} + +var ErrNotSupported = errors.New("operation not supported") + +// underlyingError returns the underlying error for known os error types. +func underlyingError(err error) error { + switch err := err.(type) { + case *fs.PathError: + return err.Err + case *os.LinkError: + return err.Err + case *os.SyscallError: + return err.Err + } + return err +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_fcntl.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_fcntl.go new file mode 100644 index 0000000000..8568048507 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_fcntl.go @@ -0,0 +1,214 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || (solaris && !illumos) + +// This code implements the filelock API using POSIX 'fcntl' locks, which attach +// to an (inode, process) pair rather than a file descriptor. To avoid unlocking +// files prematurely when the same file is opened through different descriptors, +// we allow only one read-lock at a time. +// +// Most platforms provide some alternative API, such as an 'flock' system call +// or an F_OFD_SETLK command for 'fcntl', that allows for better concurrency and +// does not require per-inode bookkeeping in the application. + +package filelock + +import ( + "errors" + "io" + "io/fs" + "math/rand" + "sync" + "syscall" + "time" +) + +type lockType int16 + +const ( + readLock lockType = syscall.F_RDLCK + writeLock lockType = syscall.F_WRLCK +) + +type inode = uint64 // type of syscall.Stat_t.Ino + +type inodeLock struct { + owner File + queue []<-chan File +} + +var ( + mu sync.Mutex + inodes = map[File]inode{} + locks = map[inode]inodeLock{} +) + +func lock(f File, lt lockType) (err error) { + // POSIX locks apply per inode and process, and the lock for an inode is + // released when *any* descriptor for that inode is closed. So we need to + // synchronize access to each inode internally, and must serialize lock and + // unlock calls that refer to the same inode through different descriptors. + fi, err := f.Stat() + if err != nil { + return err + } + ino := fi.Sys().(*syscall.Stat_t).Ino + + mu.Lock() + if i, dup := inodes[f]; dup && i != ino { + mu.Unlock() + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: errors.New("inode for file changed since last Lock or RLock"), + } + } + inodes[f] = ino + + var wait chan File + l := locks[ino] + if l.owner == f { + // This file already owns the lock, but the call may change its lock type. + } else if l.owner == nil { + // No owner: it's ours now. + l.owner = f + } else { + // Already owned: add a channel to wait on. + wait = make(chan File) + l.queue = append(l.queue, wait) + } + locks[ino] = l + mu.Unlock() + + if wait != nil { + wait <- f + } + + // Spurious EDEADLK errors arise on platforms that compute deadlock graphs at + // the process, rather than thread, level. 
Consider processes P and Q, with + // threads P.1, P.2, and Q.3. The following trace is NOT a deadlock, but will be + // reported as a deadlock on systems that consider only process granularity: + // + // P.1 locks file A. + // Q.3 locks file B. + // Q.3 blocks on file A. + // P.2 blocks on file B. (This is erroneously reported as a deadlock.) + // P.1 unlocks file A. + // Q.3 unblocks and locks file A. + // Q.3 unlocks files A and B. + // P.2 unblocks and locks file B. + // P.2 unlocks file B. + // + // These spurious errors were observed in practice on AIX and Solaris in + // cmd/go: see https://golang.org/issue/32817. + // + // We work around this bug by treating EDEADLK as always spurious. If there + // really is a lock-ordering bug between the interacting processes, it will + // become a livelock instead, but that's not appreciably worse than if we had + // a proper flock implementation (which generally does not even attempt to + // diagnose deadlocks). + // + // In the above example, that changes the trace to: + // + // P.1 locks file A. + // Q.3 locks file B. + // Q.3 blocks on file A. + // P.2 spuriously fails to lock file B and goes to sleep. + // P.1 unlocks file A. + // Q.3 unblocks and locks file A. + // Q.3 unlocks files A and B. + // P.2 wakes up and locks file B. + // P.2 unlocks file B. + // + // We know that the retry loop will not introduce a *spurious* livelock + // because, according to the POSIX specification, EDEADLK is only to be + // returned when “the lock is blocked by a lock from another process”. + // If that process is blocked on some lock that we are holding, then the + // resulting livelock is due to a real deadlock (and would manifest as such + // when using, for example, the flock implementation of this package). + // If the other process is *not* blocked on some other lock that we are + // holding, then it will eventually release the requested lock. + + nextSleep := 1 * time.Millisecond + const maxSleep = 500 * time.Millisecond + for { + err = setlkw(f.Fd(), lt) + if err != syscall.EDEADLK { + break + } + time.Sleep(nextSleep) + + nextSleep += nextSleep + if nextSleep > maxSleep { + nextSleep = maxSleep + } + // Apply 10% jitter to avoid synchronizing collisions when we finally unblock. + nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep)) + } + + if err != nil { + unlock(f) + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + + return nil +} + +func unlock(f File) error { + var owner File + + mu.Lock() + ino, ok := inodes[f] + if ok { + owner = locks[ino].owner + } + mu.Unlock() + + if owner != f { + panic("unlock called on a file that is not locked") + } + + err := setlkw(f.Fd(), syscall.F_UNLCK) + + mu.Lock() + l := locks[ino] + if len(l.queue) == 0 { + // No waiters: remove the map entry. + delete(locks, ino) + } else { + // The first waiter is sending us their file now. + // Receive it and update the queue. + l.owner = <-l.queue[0] + l.queue = l.queue[1:] + locks[ino] = l + } + delete(inodes, f) + mu.Unlock() + + return err +} + +// setlkw calls FcntlFlock with F_SETLKW for the entire file indicated by fd. +func setlkw(fd uintptr, lt lockType) error { + for { + err := syscall.FcntlFlock(fd, syscall.F_SETLKW, &syscall.Flock_t{ + Type: int16(lt), + Whence: io.SeekStart, + Start: 0, + Len: 0, // All bytes. 
+ }) + if err != syscall.EINTR { + return err + } + } +} + +func isNotSupported(err error) bool { + return err == syscall.ENOSYS || err == syscall.ENOTSUP || err == syscall.EOPNOTSUPP || err == ErrNotSupported +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_other.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_other.go new file mode 100644 index 0000000000..7bdd62bd9b --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_other.go @@ -0,0 +1,36 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !unix && !windows + +package filelock + +import "io/fs" + +type lockType int8 + +const ( + readLock = iota + 1 + writeLock +) + +func lock(f File, lt lockType) error { + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: ErrNotSupported, + } +} + +func unlock(f File) error { + return &fs.PathError{ + Op: "Unlock", + Path: f.Name(), + Err: ErrNotSupported, + } +} + +func isNotSupported(err error) bool { + return err == ErrNotSupported +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_unix.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_unix.go new file mode 100644 index 0000000000..d7778d05de --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_unix.go @@ -0,0 +1,44 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || illumos || linux || netbsd || openbsd + +package filelock + +import ( + "io/fs" + "syscall" +) + +type lockType int16 + +const ( + readLock lockType = syscall.LOCK_SH + writeLock lockType = syscall.LOCK_EX +) + +func lock(f File, lt lockType) (err error) { + for { + err = syscall.Flock(int(f.Fd()), int(lt)) + if err != syscall.EINTR { + break + } + } + if err != nil { + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + return nil +} + +func unlock(f File) error { + return lock(f, syscall.LOCK_UN) +} + +func isNotSupported(err error) bool { + return err == syscall.ENOSYS || err == syscall.ENOTSUP || err == syscall.EOPNOTSUPP || err == ErrNotSupported +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_windows.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_windows.go new file mode 100644 index 0000000000..ceab65b02a --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_windows.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
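The fcntl, flock, and stub implementations above, plus the Windows one that follows, all satisfy the same small API. A usage sketch, with a hypothetical lock-file name; note that this package is internal to the module, so the sketch is illustrative only and assumes an "os" import alongside the filelock package:

    // withAppLock runs fn while holding an advisory lock on a hypothetical
    // lock file, falling back to lockless execution where unsupported.
    func withAppLock(fn func() error) error {
    	f, err := os.OpenFile("app.lock", os.O_RDWR|os.O_CREATE, 0o666)
    	if err != nil {
    		return err
    	}
    	defer f.Close()

    	if err := filelock.Lock(f); err != nil {
    		if !filelock.IsNotSupported(err) {
    			return err
    		}
    		// Advisory locks unavailable on this platform or filesystem: run unlocked.
    	} else {
    		defer filelock.Unlock(f)
    	}
    	return fn()
    }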
+ +//go:build windows + +package filelock + +import ( + "io/fs" + "syscall" + + "github.com/rogpeppe/go-internal/internal/syscall/windows" +) + +type lockType uint32 + +const ( + readLock lockType = 0 + writeLock lockType = windows.LOCKFILE_EXCLUSIVE_LOCK +) + +const ( + reserved = 0 + allBytes = ^uint32(0) +) + +func lock(f File, lt lockType) error { + // Per https://golang.org/issue/19098, “Programs currently expect the Fd + // method to return a handle that uses ordinary synchronous I/O.” + // However, LockFileEx still requires an OVERLAPPED structure, + // which contains the file offset of the beginning of the lock range. + // We want to lock the entire file, so we leave the offset as zero. + ol := new(syscall.Overlapped) + + err := windows.LockFileEx(syscall.Handle(f.Fd()), uint32(lt), reserved, allBytes, allBytes, ol) + if err != nil { + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + return nil +} + +func unlock(f File) error { + ol := new(syscall.Overlapped) + err := windows.UnlockFileEx(syscall.Handle(f.Fd()), reserved, allBytes, allBytes, ol) + if err != nil { + return &fs.PathError{ + Op: "Unlock", + Path: f.Name(), + Err: err, + } + } + return nil +} + +func isNotSupported(err error) bool { + switch err { + case windows.ERROR_NOT_SUPPORTED, windows.ERROR_CALL_NOT_IMPLEMENTED, ErrNotSupported: + return true + default: + return false + } +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile.go new file mode 100644 index 0000000000..82e1a89675 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile.go @@ -0,0 +1,187 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lockedfile creates and manipulates files whose contents should only +// change atomically. +package lockedfile + +import ( + "fmt" + "io" + "io/fs" + "os" + "runtime" +) + +// A File is a locked *os.File. +// +// Closing the file releases the lock. +// +// If the program exits while a file is locked, the operating system releases +// the lock but may not do so promptly: callers must ensure that all locked +// files are closed before exiting. +type File struct { + osFile + closed bool +} + +// osFile embeds a *os.File while keeping the pointer itself unexported. +// (When we close a File, it must be the same file descriptor that we opened!) +type osFile struct { + *os.File +} + +// OpenFile is like os.OpenFile, but returns a locked file. +// If flag includes os.O_WRONLY or os.O_RDWR, the file is write-locked; +// otherwise, it is read-locked. +func OpenFile(name string, flag int, perm fs.FileMode) (*File, error) { + var ( + f = new(File) + err error + ) + f.osFile.File, err = openFile(name, flag, perm) + if err != nil { + return nil, err + } + + // Although the operating system will drop locks for open files when the go + // command exits, we want to hold locks for as little time as possible, and we + // especially don't want to leave a file locked after we're done with it. Our + // Close method is what releases the locks, so use a finalizer to report + // missing Close calls on a best-effort basis. + runtime.SetFinalizer(f, func(f *File) { + panic(fmt.Sprintf("lockedfile.File %s became unreachable without a call to Close", f.Name())) + }) + + return f, nil +} + +// Open is like os.Open, but returns a read-locked file. 
+func Open(name string) (*File, error) { + return OpenFile(name, os.O_RDONLY, 0) +} + +// Create is like os.Create, but returns a write-locked file. +func Create(name string) (*File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) +} + +// Edit creates the named file with mode 0666 (before umask), +// but does not truncate existing contents. +// +// If Edit succeeds, methods on the returned File can be used for I/O. +// The associated file descriptor has mode O_RDWR and the file is write-locked. +func Edit(name string) (*File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE, 0666) +} + +// Close unlocks and closes the underlying file. +// +// Close may be called multiple times; all calls after the first will return a +// non-nil error. +func (f *File) Close() error { + if f.closed { + return &fs.PathError{ + Op: "close", + Path: f.Name(), + Err: fs.ErrClosed, + } + } + f.closed = true + + err := closeFile(f.osFile.File) + runtime.SetFinalizer(f, nil) + return err +} + +// Read opens the named file with a read-lock and returns its contents. +func Read(name string) ([]byte, error) { + f, err := Open(name) + if err != nil { + return nil, err + } + defer f.Close() + + return io.ReadAll(f) +} + +// Write opens the named file (creating it with the given permissions if needed), +// then write-locks it and overwrites it with the given content. +func Write(name string, content io.Reader, perm fs.FileMode) (err error) { + f, err := OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + + _, err = io.Copy(f, content) + if closeErr := f.Close(); err == nil { + err = closeErr + } + return err +} + +// Transform invokes t with the result of reading the named file, with its lock +// still held. +// +// If t returns a nil error, Transform then writes the returned contents back to +// the file, making a best effort to preserve existing contents on error. +// +// t must not modify the slice passed to it. +func Transform(name string, t func([]byte) ([]byte, error)) (err error) { + f, err := Edit(name) + if err != nil { + return err + } + defer f.Close() + + old, err := io.ReadAll(f) + if err != nil { + return err + } + + new, err := t(old) + if err != nil { + return err + } + + if len(new) > len(old) { + // The overall file size is increasing, so write the tail first: if we're + // about to run out of space on the disk, we would rather detect that + // failure before we have overwritten the original contents. + if _, err := f.WriteAt(new[len(old):], int64(len(old))); err != nil { + // Make a best effort to remove the incomplete tail. + f.Truncate(int64(len(old))) + return err + } + } + + // We're about to overwrite the old contents. In case of failure, make a best + // effort to roll back before we close the file. + defer func() { + if err != nil { + if _, err := f.WriteAt(old, 0); err == nil { + f.Truncate(int64(len(old))) + } + } + }() + + if len(new) >= len(old) { + if _, err := f.WriteAt(new[:len(old)], 0); err != nil { + return err + } + } else { + if _, err := f.WriteAt(new, 0); err != nil { + return err + } + // The overall file size is decreasing, so shrink the file to its final size + // after writing. We do this after writing (instead of before) so that if + // the write fails, enough filesystem space will likely still be reserved + // to contain the previous contents. 
+ if err := f.Truncate(int64(len(new))); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_filelock.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_filelock.go new file mode 100644 index 0000000000..454c3a42c4 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_filelock.go @@ -0,0 +1,65 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !plan9 + +package lockedfile + +import ( + "io/fs" + "os" + + "github.com/rogpeppe/go-internal/lockedfile/internal/filelock" +) + +func openFile(name string, flag int, perm fs.FileMode) (*os.File, error) { + // On BSD systems, we could add the O_SHLOCK or O_EXLOCK flag to the OpenFile + // call instead of locking separately, but we have to support separate locking + // calls for Linux and Windows anyway, so it's simpler to use that approach + // consistently. + + f, err := os.OpenFile(name, flag&^os.O_TRUNC, perm) + if err != nil { + return nil, err + } + + switch flag & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) { + case os.O_WRONLY, os.O_RDWR: + err = filelock.Lock(f) + default: + err = filelock.RLock(f) + } + if err != nil { + f.Close() + return nil, err + } + + if flag&os.O_TRUNC == os.O_TRUNC { + if err := f.Truncate(0); err != nil { + // The documentation for os.O_TRUNC says “if possible, truncate file when + // opened”, but doesn't define “possible” (golang.org/issue/28699). + // We'll treat regular files (and symlinks to regular files) as “possible” + // and ignore errors for the rest. + if fi, statErr := f.Stat(); statErr != nil || fi.Mode().IsRegular() { + filelock.Unlock(f) + f.Close() + return nil, err + } + } + } + + return f, nil +} + +func closeFile(f *os.File) error { + // Since locking syscalls operate on file descriptors, we must unlock the file + // while the descriptor is still valid — that is, before the file is closed — + // and avoid unlocking files that are already closed. + err := filelock.Unlock(f) + + if closeErr := f.Close(); err == nil { + err = closeErr + } + return err +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_plan9.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_plan9.go new file mode 100644 index 0000000000..a2ce794b96 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_plan9.go @@ -0,0 +1,94 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build plan9 + +package lockedfile + +import ( + "io/fs" + "math/rand" + "os" + "strings" + "time" +) + +// Opening an exclusive-use file returns an error. +// The expected error strings are: +// +// - "open/create -- file is locked" (cwfs, kfs) +// - "exclusive lock" (fossil) +// - "exclusive use file already open" (ramfs) +var lockedErrStrings = [...]string{ + "file is locked", + "exclusive lock", + "exclusive use file already open", +} + +// Even though plan9 doesn't support the Lock/RLock/Unlock functions to +// manipulate already-open files, IsLocked is still meaningful: os.OpenFile +// itself may return errors that indicate that a file with the ModeExclusive bit +// set is already open. 
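+//
+// The comparison below is necessarily a best-effort substring match against
+// the error text, since these file servers report lock conflicts only through
+// their error strings.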
+func isLocked(err error) bool {
+	s := err.Error()
+
+	for _, frag := range lockedErrStrings {
+		if strings.Contains(s, frag) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func openFile(name string, flag int, perm fs.FileMode) (*os.File, error) {
+	// Plan 9 uses a mode bit instead of explicit lock/unlock syscalls.
+	//
+	// Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open
+	// for I/O by only one fid at a time across all clients of the server. If a
+	// second open is attempted, it draws an error.”
+	//
+	// So we can try to open a locked file, but if it fails we're on our own to
+	// figure out when it becomes available. We'll use exponential backoff with
+	// some jitter and an arbitrary limit of 500ms.
+
+	// If the file was unpacked or created by some other program, it might not
+	// have the ModeExclusive bit set. Set it before we call OpenFile, so that we
+	// can be confident that a successful OpenFile implies exclusive use.
+	if fi, err := os.Stat(name); err == nil {
+		if fi.Mode()&fs.ModeExclusive == 0 {
+			if err := os.Chmod(name, fi.Mode()|fs.ModeExclusive); err != nil {
+				return nil, err
+			}
+		}
+	} else if !os.IsNotExist(err) {
+		return nil, err
+	}
+
+	nextSleep := 1 * time.Millisecond
+	const maxSleep = 500 * time.Millisecond
+	for {
+		f, err := os.OpenFile(name, flag, perm|fs.ModeExclusive)
+		if err == nil {
+			return f, nil
+		}
+
+		if !isLocked(err) {
+			return nil, err
+		}
+
+		time.Sleep(nextSleep)
+
+		nextSleep += nextSleep
+		if nextSleep > maxSleep {
+			nextSleep = maxSleep
+		}
+		// Apply 10% jitter to avoid synchronizing collisions.
+		nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep))
+	}
+}
+
+func closeFile(f *os.File) error {
+	return f.Close()
+}
diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/mutex.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/mutex.go
new file mode 100644
index 0000000000..180a36c620
--- /dev/null
+++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/mutex.go
@@ -0,0 +1,67 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lockedfile
+
+import (
+	"fmt"
+	"os"
+	"sync"
+)
+
+// A Mutex provides mutual exclusion within and across processes by locking a
+// well-known file. Such a file generally guards some other part of the
+// filesystem: for example, a Mutex file in a directory might guard access to
+// the entire tree rooted in that directory.
+//
+// Mutex does not implement sync.Locker: unlike a sync.Mutex, a lockedfile.Mutex
+// can fail to lock (e.g. if there is a permission error in the filesystem).
+//
+// Like a sync.Mutex, a Mutex may be included as a field of a larger struct but
+// must not be copied after first use. The Path field must be set before first
+// use and must not be changed thereafter.
+type Mutex struct {
+	Path string     // The path to the well-known lock file. Must be non-empty.
+	mu   sync.Mutex // A redundant mutex. The race detector doesn't know about file locking, so in tests we may need to lock something that it understands.
+}
+
+// MutexAt returns a new Mutex with Path set to the given non-empty path.
+func MutexAt(path string) *Mutex {
+	if path == "" {
+		panic("lockedfile.MutexAt: path must be non-empty")
+	}
+	return &Mutex{Path: path}
+}
+
+func (mu *Mutex) String() string {
+	return fmt.Sprintf("lockedfile.Mutex(%s)", mu.Path)
+}
+
+// Lock attempts to lock the Mutex.
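+// A typical call site (hypothetical sketch, not part of the upstream file)
+// checks the error and defers the returned unlock function:
+//
+//	unlock, err := mu.Lock()
+//	if err != nil {
+//		return err
+//	}
+//	defer unlock()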
+//
+// If successful, Lock returns a non-nil unlock function: it is provided as a
+// return-value instead of a separate method to remind the caller to check the
+// accompanying error. (See https://golang.org/issue/20803.)
+func (mu *Mutex) Lock() (unlock func(), err error) {
+	if mu.Path == "" {
+		panic("lockedfile.Mutex: missing Path during Lock")
+	}
+
+	// We could use either O_RDWR or O_WRONLY here. If we choose O_RDWR and the
+	// file at mu.Path is write-only, the call to OpenFile will fail with a
+	// permission error. That's actually what we want: if we add an RLock method
+	// in the future, it should call OpenFile with O_RDONLY and will require the
+	// files must be readable, so we should not let the caller make any
+	// assumptions about Mutex working with write-only files.
+	f, err := OpenFile(mu.Path, os.O_RDWR|os.O_CREATE, 0666)
+	if err != nil {
+		return nil, err
+	}
+	mu.mu.Lock()
+
+	return func() {
+		mu.mu.Unlock()
+		f.Close()
+	}, nil
+}
diff --git a/vendor/github.com/rogpeppe/go-internal/robustio/robustio.go b/vendor/github.com/rogpeppe/go-internal/robustio/robustio.go
new file mode 100644
index 0000000000..15b33773cf
--- /dev/null
+++ b/vendor/github.com/rogpeppe/go-internal/robustio/robustio.go
@@ -0,0 +1,53 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package robustio wraps I/O functions that are prone to failure on Windows,
+// transparently retrying errors up to an arbitrary timeout.
+//
+// Errors are classified heuristically and retries are bounded, so the functions
+// in this package do not completely eliminate spurious errors. However, they do
+// significantly reduce the rate of failure in practice.
+package robustio
+
+// Rename is like os.Rename, but on Windows retries errors that may occur if the
+// file is concurrently read or overwritten.
+//
+// (See golang.org/issue/31247 and golang.org/issue/32188.)
+func Rename(oldpath, newpath string) error {
+	return rename(oldpath, newpath)
+}
+
+// ReadFile is like os.ReadFile, but on Windows retries errors that may
+// occur if the file is concurrently replaced.
+//
+// (See golang.org/issue/31247 and golang.org/issue/32188.)
+func ReadFile(filename string) ([]byte, error) {
+	return readFile(filename)
+}
+
+// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur
+// if an executable file in the directory has recently been executed.
+//
+// (See golang.org/issue/19491.)
+func RemoveAll(path string) error {
+	return removeAll(path)
+}
+
+// IsEphemeralError reports whether err is one of the errors that the functions
+// in this package attempt to mitigate.
+//
+// Errors considered ephemeral include:
+// - syscall.ERROR_ACCESS_DENIED
+// - syscall.ERROR_FILE_NOT_FOUND
+// - internal/syscall/windows.ERROR_SHARING_VIOLATION
+//
+// This set may be expanded in the future; programs must not rely on the
+// non-ephemerality of any given error.
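+//
+// A minimal caller-side sketch (hypothetical, not part of the upstream file)
+// retries an operation only while the reported error stays ephemeral:
+//
+//	for tries := 0; tries < 5; tries++ {
+//		if err := os.Remove(path); err == nil || !robustio.IsEphemeralError(err) {
+//			break
+//		}
+//		time.Sleep(10 * time.Millisecond)
+//	}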
+func IsEphemeralError(err error) bool { + return isEphemeralError(err) +} diff --git a/vendor/github.com/rogpeppe/go-internal/robustio/robustio_darwin.go b/vendor/github.com/rogpeppe/go-internal/robustio/robustio_darwin.go new file mode 100644 index 0000000000..99fd8ebc2f --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/robustio/robustio_darwin.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "errors" + "syscall" +) + +const errFileNotFound = syscall.ENOENT + +// isEphemeralError returns true if err may be resolved by waiting. +func isEphemeralError(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + return errno == errFileNotFound + } + return false +} diff --git a/vendor/github.com/rogpeppe/go-internal/robustio/robustio_flaky.go b/vendor/github.com/rogpeppe/go-internal/robustio/robustio_flaky.go new file mode 100644 index 0000000000..c56e36ca62 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/robustio/robustio_flaky.go @@ -0,0 +1,91 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows || darwin + +package robustio + +import ( + "errors" + "math/rand" + "os" + "syscall" + "time" +) + +const arbitraryTimeout = 2000 * time.Millisecond + +// retry retries ephemeral errors from f up to an arbitrary timeout +// to work around filesystem flakiness on Windows and Darwin. +func retry(f func() (err error, mayRetry bool)) error { + var ( + bestErr error + lowestErrno syscall.Errno + start time.Time + nextSleep time.Duration = 1 * time.Millisecond + ) + for { + err, mayRetry := f() + if err == nil || !mayRetry { + return err + } + + var errno syscall.Errno + if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) { + bestErr = err + lowestErrno = errno + } else if bestErr == nil { + bestErr = err + } + + if start.IsZero() { + start = time.Now() + } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout { + break + } + time.Sleep(nextSleep) + nextSleep += time.Duration(rand.Int63n(int64(nextSleep))) + } + + return bestErr +} + +// rename is like os.Rename, but retries ephemeral errors. +// +// On Windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with +// MOVEFILE_REPLACE_EXISTING. +// +// Windows also provides a different system call, ReplaceFile, +// that provides similar semantics, but perhaps preserves more metadata. (The +// documentation on the differences between the two is very sparse.) +// +// Empirical error rates with MoveFileEx are lower under modest concurrency, so +// for now we're sticking with what the os package already provides. +func rename(oldpath, newpath string) (err error) { + return retry(func() (err error, mayRetry bool) { + err = os.Rename(oldpath, newpath) + return err, isEphemeralError(err) + }) +} + +// readFile is like os.ReadFile, but retries ephemeral errors. +func readFile(filename string) ([]byte, error) { + var b []byte + err := retry(func() (err error, mayRetry bool) { + b, err = os.ReadFile(filename) + + // Unlike in rename, we do not retry errFileNotFound here: it can occur + // as a spurious error, but the file may also genuinely not exist, so the + // increase in robustness is probably not worth the extra latency. 
+ return err, isEphemeralError(err) && !errors.Is(err, errFileNotFound) + }) + return b, err +} + +func removeAll(path string) error { + return retry(func() (err error, mayRetry bool) { + err = os.RemoveAll(path) + return err, isEphemeralError(err) + }) +} diff --git a/vendor/github.com/rogpeppe/go-internal/robustio/robustio_other.go b/vendor/github.com/rogpeppe/go-internal/robustio/robustio_other.go new file mode 100644 index 0000000000..da9a46e4fa --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/robustio/robustio_other.go @@ -0,0 +1,27 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows && !darwin + +package robustio + +import ( + "os" +) + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func readFile(filename string) ([]byte, error) { + return os.ReadFile(filename) +} + +func removeAll(path string) error { + return os.RemoveAll(path) +} + +func isEphemeralError(err error) bool { + return false +} diff --git a/vendor/github.com/rogpeppe/go-internal/robustio/robustio_windows.go b/vendor/github.com/rogpeppe/go-internal/robustio/robustio_windows.go new file mode 100644 index 0000000000..961de4ef07 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/robustio/robustio_windows.go @@ -0,0 +1,28 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "errors" + "syscall" + + "github.com/rogpeppe/go-internal/internal/syscall/windows" +) + +const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND + +// isEphemeralError returns true if err may be resolved by waiting. 
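+// It matches via errors.As, so wrapped errors such as the *fs.PathError
+// values returned by the os package are classified by their underlying
+// syscall.Errno.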
+func isEphemeralError(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + switch errno { + case syscall.ERROR_ACCESS_DENIED, + syscall.ERROR_FILE_NOT_FOUND, + windows.ERROR_SHARING_VIOLATION: + return true + } + } + return false +} diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio/fulcio.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio/fulcio.go index de555d47b2..cc3e12ca9b 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio/fulcio.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio/fulcio.go @@ -38,9 +38,10 @@ import ( ) const ( - flowNormal = "normal" - flowDevice = "device" - flowToken = "token" + flowNormal = "normal" + flowDevice = "device" + flowToken = "token" + flowClientCredentials = "client_credentials" ) type oidcConnector interface { @@ -89,6 +90,8 @@ func getCertForOauthID(sv signature.SignerVerifier, fc api.LegacyClient, connect func GetCert(_ context.Context, sv signature.SignerVerifier, idToken, flow, oidcIssuer, oidcClientID, oidcClientSecret, oidcRedirectURL string, fClient api.LegacyClient) (*api.CertificateResponse, error) { c := &realConnector{} switch flow { + case flowClientCredentials: + c.flow = oauthflow.NewClientCredentialsFlow(oidcIssuer) case flowDevice: c.flow = oauthflow.NewDeviceFlowTokenGetterForIssuer(oidcIssuer) case flowNormal: diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/generate/generate.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/generate/generate.go index ef5cac0cea..ee2e6b459a 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/generate/generate.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/generate/generate.go @@ -17,7 +17,6 @@ package generate import ( "context" - "fmt" "io" "github.com/google/go-containerregistry/pkg/name" @@ -49,6 +48,6 @@ func GenerateCmd(ctx context.Context, regOpts options.RegistryOptions, imageRef if err != nil { return err } - fmt.Fprint(w, string(json)) + w.Write(json) return nil } diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/generate/generate_key_pair.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/generate/generate_key_pair.go index 6311fe0761..2329f51b81 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/generate/generate_key_pair.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/generate/generate_key_pair.go @@ -114,7 +114,7 @@ func writeKeyFiles(privateKeyFileName string, publicKeyFileName string, keys *co } fmt.Fprintln(os.Stderr, "Private key written to", privateKeyFileName) - if err := os.WriteFile(publicKeyFileName, keys.PublicBytes, 0644); err != nil { + if err := os.WriteFile(publicKeyFileName, keys.PublicBytes, 0644); err != nil { //nolint: gosec return err } // #nosec G306 fmt.Fprintln(os.Stderr, "Public key written to", publicKeyFileName) diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/attest.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/attest.go index 9090ce1d9c..8139cddaef 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/attest.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/attest.go @@ -21,16 +21,17 @@ import ( // AttestOptions is the top level wrapper for the attest command. 
type AttestOptions struct { - Key string - Cert string - CertChain string - NoUpload bool - Recursive bool - Replace bool - SkipConfirmation bool - TlogUpload bool - TSAServerURL string - RekorEntryType string + Key string + Cert string + CertChain string + NoUpload bool + Recursive bool + Replace bool + SkipConfirmation bool + TlogUpload bool + TSAServerURL string + RekorEntryType string + RecordCreationTimestamp bool Rekor RekorOptions Fulcio FulcioOptions @@ -86,4 +87,7 @@ func (o *AttestOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.TSAServerURL, "timestamp-server-url", "", "url to the Timestamp RFC3161 server, default none. Must be the path to the API to request timestamp responses, e.g. https://freetsa.org/tsr") + + cmd.Flags().BoolVar(&o.RecordCreationTimestamp, "record-creation-timestamp", false, + "set the createdAt timestamp in the attestation artifact to the time it was created; by default, cosign sets this to the zero value") } diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/attest_blob.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/attest_blob.go index 593c787eee..b6f48b4967 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/attest_blob.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/attest_blob.go @@ -36,6 +36,7 @@ type AttestBlobOptions struct { OutputAttestation string OutputCertificate string BundlePath string + NewBundleFormat bool RekorEntryType string @@ -85,6 +86,10 @@ func (o *AttestBlobOptions) AddFlags(cmd *cobra.Command) { "write everything required to verify the blob to a FILE") _ = cmd.Flags().SetAnnotation("bundle", cobra.BashCompFilenameExt, []string{}) + // TODO: have this default to true as a breaking change + cmd.Flags().BoolVar(&o.NewBundleFormat, "new-bundle-format", false, + "output bundle in new format that contains all verification material") + cmd.Flags().StringVar(&o.Hash, "hash", "", "hash of blob in hexadecimal (base16). Used if you want to sign an artifact stored elsewhere and have the hash") diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/certificate.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/certificate.go index e89f257f9e..3df7b4b962 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/certificate.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/certificate.go @@ -33,6 +33,8 @@ type CertVerifyOptions struct { CertGithubWorkflowName string CertGithubWorkflowRepository string CertGithubWorkflowRef string + CAIntermediates string + CARoots string CertChain string SCT string IgnoreSCT bool @@ -75,12 +77,25 @@ func (o *CertVerifyOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.CertGithubWorkflowRef, "certificate-github-workflow-ref", "", "contains the ref claim from the GitHub OIDC Identity token that contains the git ref that the workflow run was based upon.") // -- Cert extensions end -- + cmd.Flags().StringVar(&o.CAIntermediates, "ca-intermediates", "", + "path to a file of intermediate CA certificates in PEM format which will be needed "+ + "when building the certificate chains for the signing certificate. 
"+ + "The flag is optional and must be used together with --ca-roots, conflicts with "+ + "--certificate-chain.") + _ = cmd.Flags().SetAnnotation("ca-intermediates", cobra.BashCompFilenameExt, []string{"cert"}) + cmd.Flags().StringVar(&o.CARoots, "ca-roots", "", + "path to a bundle file of CA certificates in PEM format which will be needed "+ + "when building the certificate chains for the signing certificate. Conflicts with --certificate-chain.") + _ = cmd.Flags().SetAnnotation("ca-roots", cobra.BashCompFilenameExt, []string{"cert"}) + cmd.Flags().StringVar(&o.CertChain, "certificate-chain", "", "path to a list of CA certificates in PEM format which will be needed "+ "when building the certificate chain for the signing certificate. "+ "Must start with the parent intermediate CA certificate of the "+ - "signing certificate and end with the root certificate") + "signing certificate and end with the root certificate. Conflicts with --ca-roots and --ca-intermediates.") _ = cmd.Flags().SetAnnotation("certificate-chain", cobra.BashCompFilenameExt, []string{"cert"}) + cmd.MarkFlagsMutuallyExclusive("ca-roots", "certificate-chain") + cmd.MarkFlagsMutuallyExclusive("ca-intermediates", "certificate-chain") cmd.Flags().StringVar(&o.SCT, "sct", "", "path to a detached Signed Certificate Timestamp, formatted as a RFC6962 AddChainResponse struct. "+ diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/fulcio.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/fulcio.go index 291710c077..139731a77c 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/fulcio.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/fulcio.go @@ -24,6 +24,7 @@ const DefaultFulcioURL = "https://fulcio.sigstore.dev" // FulcioOptions is the wrapper for Fulcio related options. type FulcioOptions struct { URL string + AuthFlow string IdentityToken string InsecureSkipFulcioVerify bool } @@ -39,6 +40,9 @@ func (o *FulcioOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.IdentityToken, "identity-token", "", "identity token to use for certificate from fulcio. the token or a path to a file containing the token is accepted.") + cmd.Flags().StringVar(&o.AuthFlow, "fulcio-auth-flow", "", + "fulcio interactive oauth2 flow to use for certificate from fulcio. Defaults to determining the flow based on the runtime environment. 
(options) normal|device|token|client_credentials") + cmd.Flags().BoolVar(&o.InsecureSkipFulcioVerify, "insecure-skip-verify", false, "skip verifying fulcio published to the SCT (this should only be used for testing).") } diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/key.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/key.go index 6af69afda3..634911fdb5 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/key.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/key.go @@ -32,6 +32,7 @@ type KeyOpts struct { OIDCDisableProviders bool // Disable OIDC credential providers in keyless signer OIDCProvider string // Specify which OIDC credential provider to use for keyless signer BundlePath string + NewBundleFormat bool SkipConfirmation bool TSAClientCACert string TSAClientCert string diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/save.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/save.go index 58d449172b..7c4f623de6 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/save.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/save.go @@ -22,12 +22,14 @@ import ( // SaveOptions is the top level wrapper for the load command. type SaveOptions struct { Directory string + Registry RegistryOptions } var _ Interface = (*SaveOptions)(nil) // AddFlags implements Interface func (o *SaveOptions) AddFlags(cmd *cobra.Command) { + o.Registry.AddFlags(cmd) cmd.Flags().StringVar(&o.Directory, "dir", "", "path to dir where the signed image should be stored on disk") _ = cmd.Flags().SetAnnotation("dir", cobra.BashCompSubdirsInDir, []string{}) diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/sign.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/sign.go index c7cef86072..70ec9acab6 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/sign.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/sign.go @@ -21,26 +21,27 @@ import ( // SignOptions is the top level wrapper for the sign command. type SignOptions struct { - Key string - Cert string - CertChain string - Upload bool - Output string // deprecated: TODO remove when the output flag is fully deprecated - OutputSignature string // TODO: this should be the root output file arg. - OutputPayload string - OutputCertificate string - PayloadPath string - Recursive bool - Attachment string - SkipConfirmation bool - TlogUpload bool - TSAClientCACert string - TSAClientCert string - TSAClientKey string - TSAServerName string - TSAServerURL string - IssueCertificate bool - SignContainerIdentity string + Key string + Cert string + CertChain string + Upload bool + Output string // deprecated: TODO remove when the output flag is fully deprecated + OutputSignature string // TODO: this should be the root output file arg. 
+ OutputPayload string + OutputCertificate string + PayloadPath string + Recursive bool + Attachment string + SkipConfirmation bool + TlogUpload bool + TSAClientCACert string + TSAClientCert string + TSAClientKey string + TSAServerName string + TSAServerURL string + IssueCertificate bool + SignContainerIdentity string + RecordCreationTimestamp bool Rekor RekorOptions Fulcio FulcioOptions @@ -130,4 +131,6 @@ func (o *SignOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.SignContainerIdentity, "sign-container-identity", "", "manually set the .critical.docker-reference field for the signed identity, which is useful when image proxies are being used where the pull reference should match the signature") + + cmd.Flags().BoolVar(&o.RecordCreationTimestamp, "record-creation-timestamp", false, "set the createdAt timestamp in the signature artifact to the time it was created; by default, cosign sets this to the zero value") } diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/signblob.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/signblob.go index 7cddde63df..d632669068 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/signblob.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/signblob.go @@ -33,6 +33,7 @@ type SignBlobOptions struct { OIDC OIDCOptions Registry RegistryOptions BundlePath string + NewBundleFormat bool SkipConfirmation bool TlogUpload bool TSAClientCACert string @@ -75,6 +76,10 @@ func (o *SignBlobOptions) AddFlags(cmd *cobra.Command) { "write everything required to verify the blob to a FILE") _ = cmd.Flags().SetAnnotation("bundle", cobra.BashCompFilenameExt, []string{}) + // TODO: have this default to true as a breaking change + cmd.Flags().BoolVar(&o.NewBundleFormat, "new-bundle-format", false, + "output bundle in new format that contains all verification material") + cmd.Flags().BoolVarP(&o.SkipConfirmation, "yes", "y", false, "skip confirmation prompts for non-destructive operations") diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/verify.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/verify.go index f89d227bfa..3cdbb0e8a6 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/verify.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/verify.go @@ -30,6 +30,7 @@ type CommonVerifyOptions struct { // it for other verify options. ExperimentalOCI11 bool PrivateInfrastructure bool + UseSignedTimestamps bool } func (o *CommonVerifyOptions) AddFlags(cmd *cobra.Command) { @@ -40,6 +41,9 @@ func (o *CommonVerifyOptions) AddFlags(cmd *cobra.Command) { "path to PEM-encoded certificate chain file for the RFC3161 timestamp authority. Must contain the root CA certificate. "+ "Optionally may contain intermediate CA certificates, and may contain the leaf TSA certificate if not present in the timestamp") + cmd.Flags().BoolVar(&o.UseSignedTimestamps, "use-signed-timestamps", false, + "use signed timestamps if available") + cmd.Flags().BoolVar(&o.IgnoreTlog, "insecure-ignore-tlog", false, "ignore transparency log verification, to be used when an artifact signature has not been uploaded to the transparency log. 
Artifacts "+ "cannot be publicly verified when not included in a log") @@ -143,7 +147,7 @@ func (o *VerifyAttestationOptions) AddFlags(cmd *cobra.Command) { "whether to check the claims found") cmd.Flags().StringSliceVar(&o.Policies, "policy", nil, - "specify CUE or Rego files will be using for validation") + "specify CUE or Rego files with policies to be used for validation") cmd.Flags().StringVarP(&o.Output, "output", "o", "json", "output format for the signing image information (json|text)") @@ -154,9 +158,11 @@ func (o *VerifyAttestationOptions) AddFlags(cmd *cobra.Command) { // VerifyBlobOptions is the top level wrapper for the `verify blob` command. type VerifyBlobOptions struct { - Key string - Signature string - BundlePath string + Key string + Signature string + BundlePath string + NewBundleFormat bool + TrustedRootPath string SecurityKey SecurityKeyOptions CertVerify CertVerifyOptions @@ -184,6 +190,13 @@ func (o *VerifyBlobOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.BundlePath, "bundle", "", "path to bundle FILE") + // TODO: have this default to true as a breaking change + cmd.Flags().BoolVar(&o.NewBundleFormat, "new-bundle-format", false, + "output bundle in new format that contains all verification material") + + cmd.Flags().StringVar(&o.TrustedRootPath, "trusted-root", "", + "path to trusted root FILE") + cmd.Flags().StringVar(&o.RFC3161TimestampPath, "rfc3161-timestamp", "", "path to RFC3161 timestamp FILE") } @@ -206,9 +219,11 @@ func (o *VerifyDockerfileOptions) AddFlags(cmd *cobra.Command) { // VerifyBlobAttestationOptions is the top level wrapper for the `verify-blob-attestation` command. type VerifyBlobAttestationOptions struct { - Key string - SignaturePath string - BundlePath string + Key string + SignaturePath string + BundlePath string + NewBundleFormat bool + TrustedRootPath string PredicateOptions CheckClaims bool @@ -240,6 +255,13 @@ func (o *VerifyBlobAttestationOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.BundlePath, "bundle", "", "path to bundle FILE") + // TODO: have this default to true as a breaking change + cmd.Flags().BoolVar(&o.NewBundleFormat, "new-bundle-format", false, + "output bundle in new format that contains all verification material") + + cmd.Flags().StringVar(&o.TrustedRootPath, "trusted-root", "", + "path to trusted root FILE") + cmd.Flags().BoolVar(&o.CheckClaims, "check-claims", true, "if true, verifies the provided blob's sha256 digest exists as an in-toto subject within the attestation. If false, only the DSSE envelope is verified.") diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/sign/sign.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/sign/sign.go index 122aaa3339..1289e7b1bb 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/sign/sign.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/sign/sign.go @@ -329,7 +329,7 @@ func signDigest(ctx context.Context, digest name.Digest, payload []byte, ko opti } // Attach the signature to the entity. 
- newSE, err := mutate.AttachSignatureToEntity(se, ociSig, mutate.WithDupeDetector(dd)) + newSE, err := mutate.AttachSignatureToEntity(se, ociSig, mutate.WithDupeDetector(dd), mutate.WithRecordCreationTimestamp(signOpts.RecordCreationTimestamp)) if err != nil { return err } diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/sign/sign_blob.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/sign/sign_blob.go index b60db5b7a2..5a7b960b92 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/sign/sign_blob.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/sign/sign_blob.go @@ -18,12 +18,15 @@ package sign import ( "context" "crypto/sha256" + "crypto/x509" "encoding/base64" "encoding/json" "fmt" "os" "path/filepath" + "google.golang.org/protobuf/encoding/protojson" + "github.com/sigstore/cosign/v2/cmd/cosign/cli/options" "github.com/sigstore/cosign/v2/cmd/cosign/cli/rekor" internal "github.com/sigstore/cosign/v2/internal/pkg/cosign" @@ -32,6 +35,9 @@ import ( "github.com/sigstore/cosign/v2/internal/ui" "github.com/sigstore/cosign/v2/pkg/cosign" cbundle "github.com/sigstore/cosign/v2/pkg/cosign/bundle" + protobundle "github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1" + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + "github.com/sigstore/rekor/pkg/generated/models" "github.com/sigstore/sigstore/pkg/cryptoutils" signatureoptions "github.com/sigstore/sigstore/pkg/signature/options" ) @@ -49,6 +55,7 @@ func SignBlobCmd(ro *options.RootOptions, ko options.KeyOpts, payloadPath string } else { ui.Infof(ctx, "Using payload from: %s", payloadPath) f, err := os.Open(filepath.Clean(payloadPath)) + defer f.Close() if err != nil { return nil, err } @@ -68,23 +75,25 @@ func SignBlobCmd(ro *options.RootOptions, ko options.KeyOpts, payloadPath string if err != nil { return nil, fmt.Errorf("signing blob: %w", err) } + digest := payload.Sum(nil) signedPayload := cosign.LocalSignedPayload{} - + var rekorEntry *models.LogEntryAnon var rfc3161Timestamp *cbundle.RFC3161Timestamp + var timestampBytes []byte + if ko.TSAServerURL != "" { - if ko.RFC3161TimestampPath == "" { - return nil, fmt.Errorf("timestamp output path must be set") + if ko.RFC3161TimestampPath == "" && !ko.NewBundleFormat { + return nil, fmt.Errorf("must use protobuf bundle or set timestamp output path") } - var respBytes []byte var err error if ko.TSAClientCACert == "" && ko.TSAClientCert == "" { // no mTLS params or custom CA - respBytes, err = tsa.GetTimestampedSignature(sig, client.NewTSAClient(ko.TSAServerURL)) + timestampBytes, err = tsa.GetTimestampedSignature(sig, client.NewTSAClient(ko.TSAServerURL)) if err != nil { return nil, err } } else { - respBytes, err = tsa.GetTimestampedSignature(sig, client.NewTSAClientMTLS(ko.TSAServerURL, + timestampBytes, err = tsa.GetTimestampedSignature(sig, client.NewTSAClientMTLS(ko.TSAServerURL, ko.TSAClientCACert, ko.TSAClientCert, ko.TSAClientKey, @@ -95,20 +104,23 @@ func SignBlobCmd(ro *options.RootOptions, ko options.KeyOpts, payloadPath string } } - rfc3161Timestamp = cbundle.TimestampToRFC3161Timestamp(respBytes) + rfc3161Timestamp = cbundle.TimestampToRFC3161Timestamp(timestampBytes) // TODO: Consider uploading RFC3161 TS to Rekor if rfc3161Timestamp == nil { return nil, fmt.Errorf("rfc3161 timestamp is nil") } - ts, err := json.Marshal(rfc3161Timestamp) - if err != nil { - return nil, err - } - if err := os.WriteFile(ko.RFC3161TimestampPath, ts, 0600); err != nil { - return nil, fmt.Errorf("create RFC3161 timestamp file: %w", 
err) + + if ko.RFC3161TimestampPath != "" { + ts, err := json.Marshal(rfc3161Timestamp) + if err != nil { + return nil, err + } + if err := os.WriteFile(ko.RFC3161TimestampPath, ts, 0600); err != nil { + return nil, fmt.Errorf("create RFC3161 timestamp file: %w", err) + } + ui.Infof(ctx, "RFC3161 timestamp written to file %s\n", ko.RFC3161TimestampPath) } - ui.Infof(ctx, "RFC3161 timestamp written to file %s\n", ko.RFC3161TimestampPath) } shouldUpload, err := ShouldUploadToTlog(ctx, ko, nil, tlogUpload) if err != nil { @@ -123,28 +135,76 @@ func SignBlobCmd(ro *options.RootOptions, ko options.KeyOpts, payloadPath string if err != nil { return nil, err } - entry, err := cosign.TLogUpload(ctx, rekorClient, sig, &payload, rekorBytes) + rekorEntry, err = cosign.TLogUpload(ctx, rekorClient, sig, &payload, rekorBytes) if err != nil { return nil, err } - ui.Infof(ctx, "tlog entry created with index: %d", *entry.LogIndex) - signedPayload.Bundle = cbundle.EntryToBundle(entry) + ui.Infof(ctx, "tlog entry created with index: %d", *rekorEntry.LogIndex) + signedPayload.Bundle = cbundle.EntryToBundle(rekorEntry) } // if bundle is specified, just do that and ignore the rest if ko.BundlePath != "" { - signedPayload.Base64Signature = base64.StdEncoding.EncodeToString(sig) + var contents []byte + if ko.NewBundleFormat { + // Determine if signature is certificate or not + var hint string + var rawCert []byte - certBytes, err := extractCertificate(ctx, sv) - if err != nil { - return nil, err - } - signedPayload.Cert = base64.StdEncoding.EncodeToString(certBytes) + signer, err := sv.Bytes(ctx) + if err != nil { + return nil, fmt.Errorf("error getting signer: %w", err) + } + cert, err := cryptoutils.UnmarshalCertificatesFromPEM(signer) + if err != nil || len(cert) == 0 { + pubKey, err := sv.PublicKey() + if err != nil { + return nil, err + } + pkixPubKey, err := x509.MarshalPKIXPublicKey(pubKey) + if err != nil { + return nil, err + } + hashedBytes := sha256.Sum256(pkixPubKey) + hint = base64.StdEncoding.EncodeToString(hashedBytes[:]) + } else { + rawCert = cert[0].Raw + } - contents, err := json.Marshal(signedPayload) - if err != nil { - return nil, err + bundle, err := cbundle.MakeProtobufBundle(hint, rawCert, rekorEntry, timestampBytes) + if err != nil { + return nil, err + } + + bundle.Content = &protobundle.Bundle_MessageSignature{ + MessageSignature: &protocommon.MessageSignature{ + MessageDigest: &protocommon.HashOutput{ + Algorithm: protocommon.HashAlgorithm_SHA2_256, + Digest: digest, + }, + Signature: sig, + }, + } + + contents, err = protojson.Marshal(bundle) + if err != nil { + return nil, err + } + } else { + signedPayload.Base64Signature = base64.StdEncoding.EncodeToString(sig) + + certBytes, err := extractCertificate(ctx, sv) + if err != nil { + return nil, err + } + signedPayload.Cert = base64.StdEncoding.EncodeToString(certBytes) + + contents, err = json.Marshal(signedPayload) + if err != nil { + return nil, err + } } + if err := os.WriteFile(ko.BundlePath, contents, 0600); err != nil { return nil, fmt.Errorf("create bundle file: %w", err) } diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify.go index de91d9229a..17fd63e833 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify.go @@ -34,7 +34,6 @@ import ( "github.com/sigstore/cosign/v2/cmd/cosign/cli/rekor" 
"github.com/sigstore/cosign/v2/cmd/cosign/cli/sign" cosignError "github.com/sigstore/cosign/v2/cmd/cosign/errors" - "github.com/sigstore/cosign/v2/internal/pkg/cosign/tsa" "github.com/sigstore/cosign/v2/internal/ui" "github.com/sigstore/cosign/v2/pkg/blob" "github.com/sigstore/cosign/v2/pkg/cosign" @@ -60,6 +59,8 @@ type VerifyCommand struct { CertGithubWorkflowName string CertGithubWorkflowRepository string CertGithubWorkflowRef string + CAIntermediates string + CARoots string CertChain string CertOidcProvider string IgnoreSCT bool @@ -77,11 +78,23 @@ type VerifyCommand struct { NameOptions []name.Option Offline bool TSACertChainPath string + UseSignedTimestamps bool IgnoreTlog bool MaxWorkers int ExperimentalOCI11 bool } +func (c *VerifyCommand) loadTSACertificates(ctx context.Context) (*cosign.TSACertificates, error) { + if c.TSACertChainPath == "" && !c.UseSignedTimestamps { + return nil, fmt.Errorf("TSA certificate chain path not provided and use-signed-timestamps not set") + } + tsaCertificates, err := cosign.GetTSACerts(ctx, c.TSACertChainPath, cosign.GetTufTargets) + if err != nil { + return nil, fmt.Errorf("unable to load TSA certificates: %w", err) + } + return tsaCertificates, nil +} + // Exec runs the verification command func (c *VerifyCommand) Exec(ctx context.Context, images []string) (err error) { if len(images) == 0 { @@ -136,29 +149,14 @@ func (c *VerifyCommand) Exec(ctx context.Context, images []string) (err error) { co.ClaimVerifier = cosign.SimpleClaimVerifier } - if c.TSACertChainPath != "" { - _, err := os.Stat(c.TSACertChainPath) + if c.TSACertChainPath != "" || c.UseSignedTimestamps { + tsaCertificates, err := c.loadTSACertificates(ctx) if err != nil { - return fmt.Errorf("unable to open timestamp certificate chain file: %w", err) + return fmt.Errorf("unable to load TSA certificates: %w", err) } - // TODO: Add support for TUF certificates. - pemBytes, err := os.ReadFile(filepath.Clean(c.TSACertChainPath)) - if err != nil { - return fmt.Errorf("error reading certification chain path file: %w", err) - } - - leaves, intermediates, roots, err := tsa.SplitPEMCertificateChain(pemBytes) - if err != nil { - return fmt.Errorf("error splitting certificates: %w", err) - } - if len(leaves) > 1 { - return fmt.Errorf("certificate chain must contain at most one TSA certificate") - } - if len(leaves) == 1 { - co.TSACertificate = leaves[0] - } - co.TSAIntermediateCertificates = intermediates - co.TSARootCertificates = roots + co.TSACertificate = tsaCertificates.LeafCert + co.TSARootCertificates = tsaCertificates.RootCert + co.TSAIntermediateCertificates = tsaCertificates.IntermediateCerts } if !c.IgnoreTlog { @@ -177,32 +175,11 @@ func (c *VerifyCommand) Exec(ctx context.Context, images []string) (err error) { } } if keylessVerification(c.KeyRef, c.Sk) { - if c.CertChain != "" { - chain, err := loadCertChainFromFileOrURL(c.CertChain) - if err != nil { - return err - } - co.RootCerts = x509.NewCertPool() - co.RootCerts.AddCert(chain[len(chain)-1]) - if len(chain) > 1 { - co.IntermediateCerts = x509.NewCertPool() - for _, cert := range chain[:len(chain)-1] { - co.IntermediateCerts.AddCert(cert) - } - } - } else { - // This performs an online fetch of the Fulcio roots. This is needed - // for verifying keyless certificates (both online and offline). 
- co.RootCerts, err = fulcio.GetRoots() - if err != nil { - return fmt.Errorf("getting Fulcio roots: %w", err) - } - co.IntermediateCerts, err = fulcio.GetIntermediates() - if err != nil { - return fmt.Errorf("getting Fulcio intermediates: %w", err) - } + if err := loadCertsKeylessVerification(c.CertChain, c.CARoots, c.CAIntermediates, co); err != nil { + return err } } + keyRef := c.KeyRef certRef := c.CertRef @@ -241,8 +218,9 @@ func (c *VerifyCommand) Exec(ctx context.Context, images []string) (err error) { if err != nil { return err } - if c.CertChain == "" { - // If no certChain is passed, the Fulcio root certificate will be used + switch { + case c.CertChain == "" && co.RootCerts == nil: + // If no certChain and no CARoots are passed, the Fulcio root certificate will be used co.RootCerts, err = fulcio.GetRoots() if err != nil { return fmt.Errorf("getting Fulcio roots: %w", err) @@ -255,7 +233,7 @@ func (c *VerifyCommand) Exec(ctx context.Context, images []string) (err error) { if err != nil { return err } - } else { + case c.CertChain != "": // Verify certificate with chain chain, err := loadCertChainFromFileOrURL(c.CertChain) if err != nil { @@ -265,7 +243,16 @@ func (c *VerifyCommand) Exec(ctx context.Context, images []string) (err error) { if err != nil { return err } + case co.RootCerts != nil: + // Verify certificate with root (and if given, intermediate) certificate + pubKey, err = cosign.ValidateAndUnpackCert(cert, co) + if err != nil { + return err + } + default: + return errors.New("no certificate chain provided to verify certificate") } + if c.SCTRef != "" { sct, err := os.ReadFile(filepath.Clean(c.SCTRef)) if err != nil { @@ -273,6 +260,9 @@ func (c *VerifyCommand) Exec(ctx context.Context, images []string) (err error) { } co.SCT = sct } + default: + // Do nothing. Neither keyRef, c.Sk, nor certRef were set - can happen for example when using Fulcio and TSA. + // For an example see the TestAttachWithRFC3161Timestamp test in test/e2e_test.go. } co.SigVerifier = pubKey @@ -521,3 +511,66 @@ func shouldVerifySCT(ignoreSCT bool, keyRef string, sk bool) bool { } return true } + +// loadCertsKeylessVerification loads certificates provided as a certificate chain or CA roots + CA intermediate +// certificate files. If both certChain and caRootsFile are empty strings, the Fulcio roots are loaded. +// +// The co *cosign.CheckOpts is both input and output parameter - it gets updated +// with the root and intermediate certificates needed for verification. 
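+//
+// For example (hypothetical invocation, not part of this change), a caller
+// wired to the new flags might do:
+//
+//	err := loadCertsKeylessVerification("", "roots.pem", "intermediates.pem", co)
+//
+// which fills co.RootCerts from roots.pem and co.IntermediateCerts from
+// intermediates.pem instead of fetching the Fulcio roots via TUF.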
+func loadCertsKeylessVerification(certChainFile string, + caRootsFile string, + caIntermediatesFile string, + co *cosign.CheckOpts) error { + var err error + switch { + case certChainFile != "": + chain, err := loadCertChainFromFileOrURL(certChainFile) + if err != nil { + return err + } + co.RootCerts = x509.NewCertPool() + co.RootCerts.AddCert(chain[len(chain)-1]) + if len(chain) > 1 { + co.IntermediateCerts = x509.NewCertPool() + for _, cert := range chain[:len(chain)-1] { + co.IntermediateCerts.AddCert(cert) + } + } + case caRootsFile != "": + caRoots, err := loadCertChainFromFileOrURL(caRootsFile) + if err != nil { + return err + } + co.RootCerts = x509.NewCertPool() + if len(caRoots) > 0 { + for _, cert := range caRoots { + co.RootCerts.AddCert(cert) + } + } + if caIntermediatesFile != "" { + caIntermediates, err := loadCertChainFromFileOrURL(caIntermediatesFile) + if err != nil { + return err + } + if len(caIntermediates) > 0 { + co.IntermediateCerts = x509.NewCertPool() + for _, cert := range caIntermediates { + co.IntermediateCerts.AddCert(cert) + } + } + } + default: + // This performs an online fetch of the Fulcio roots from a TUF repository. + // This is needed for verifying keyless certificates (both online and offline). + co.RootCerts, err = fulcio.GetRoots() + if err != nil { + return fmt.Errorf("getting Fulcio roots: %w", err) + } + co.IntermediateCerts, err = fulcio.GetIntermediates() + if err != nil { + return fmt.Errorf("getting Fulcio intermediates: %w", err) + } + } + + return nil +} diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_attestation.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_attestation.go index a9ff6dd998..93c2769045 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_attestation.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_attestation.go @@ -28,7 +28,6 @@ import ( "github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio" "github.com/sigstore/cosign/v2/cmd/cosign/cli/options" "github.com/sigstore/cosign/v2/cmd/cosign/cli/rekor" - "github.com/sigstore/cosign/v2/internal/pkg/cosign/tsa" "github.com/sigstore/cosign/v2/internal/ui" "github.com/sigstore/cosign/v2/pkg/cosign" "github.com/sigstore/cosign/v2/pkg/cosign/cue" @@ -53,6 +52,8 @@ type VerifyAttestationCommand struct { CertGithubWorkflowName string CertGithubWorkflowRepository string CertGithubWorkflowRef string + CAIntermediates string + CARoots string CertChain string IgnoreSCT bool SCTRef string @@ -68,6 +69,18 @@ type VerifyAttestationCommand struct { TSACertChainPath string IgnoreTlog bool MaxWorkers int + UseSignedTimestamps bool +} + +func (c *VerifyAttestationCommand) loadTSACertificates(ctx context.Context) (*cosign.TSACertificates, error) { + if c.TSACertChainPath == "" && !c.UseSignedTimestamps { + return nil, fmt.Errorf("TSA certificate chain path not provided and use-signed-timestamps not set") + } + tsaCertificates, err := cosign.GetTSACerts(ctx, c.TSACertChainPath, cosign.GetTufTargets) + if err != nil { + return nil, fmt.Errorf("unable to load TSA certificates: %w", err) + } + return tsaCertificates, nil } // Exec runs the verification command @@ -118,30 +131,16 @@ func (c *VerifyAttestationCommand) Exec(ctx context.Context, images []string) (e } } - if c.TSACertChainPath != "" { - _, err := os.Stat(c.TSACertChainPath) - if err != nil { - return fmt.Errorf("unable to open timestamp certificate chain file '%s: %w", c.TSACertChainPath, err) - } - // TODO: Add support for TUF 
certificates. - pemBytes, err := os.ReadFile(filepath.Clean(c.TSACertChainPath)) - if err != nil { - return fmt.Errorf("error reading certification chain path file: %w", err) - } - - leaves, intermediates, roots, err := tsa.SplitPEMCertificateChain(pemBytes) + if c.TSACertChainPath != "" || c.UseSignedTimestamps { + tsaCertificates, err := c.loadTSACertificates(ctx) if err != nil { - return fmt.Errorf("error splitting certificates: %w", err) + return fmt.Errorf("unable to load TSA certificates: %w", err) } - if len(leaves) > 1 { - return fmt.Errorf("certificate chain must contain at most one TSA certificate") - } - if len(leaves) == 1 { - co.TSACertificate = leaves[0] - } - co.TSAIntermediateCertificates = intermediates - co.TSARootCertificates = roots + co.TSACertificate = tsaCertificates.LeafCert + co.TSARootCertificates = tsaCertificates.RootCert + co.TSAIntermediateCertificates = tsaCertificates.IntermediateCerts } + if !c.IgnoreTlog { if c.RekorURL != "" { rekorClient, err := rekor.NewClient(c.RekorURL) @@ -157,18 +156,13 @@ func (c *VerifyAttestationCommand) Exec(ctx context.Context, images []string) (e return fmt.Errorf("getting Rekor public keys: %w", err) } } + if keylessVerification(c.KeyRef, c.Sk) { - // This performs an online fetch of the Fulcio roots. This is needed - // for verifying keyless certificates (both online and offline). - co.RootCerts, err = fulcio.GetRoots() - if err != nil { - return fmt.Errorf("getting Fulcio roots: %w", err) - } - co.IntermediateCerts, err = fulcio.GetIntermediates() - if err != nil { - return fmt.Errorf("getting Fulcio intermediates: %w", err) + if err := loadCertsKeylessVerification(c.CertChain, c.CARoots, c.CAIntermediates, co); err != nil { + return err } } + keyRef := c.KeyRef // Keys are optional! @@ -229,6 +223,9 @@ func (c *VerifyAttestationCommand) Exec(ctx context.Context, images []string) (e } co.SCT = sct } + case c.CARoots != "": + // CA roots + possible intermediates are already loaded into co.RootCerts with the call to + // loadCertsKeylessVerification above. 
} // NB: There are only 2 kinds of verification right now: diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_blob.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_blob.go index 9648ef3613..79475c90d8 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_blob.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_blob.go @@ -24,13 +24,12 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" - "github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio" "github.com/sigstore/cosign/v2/cmd/cosign/cli/options" "github.com/sigstore/cosign/v2/cmd/cosign/cli/rekor" - "github.com/sigstore/cosign/v2/internal/pkg/cosign/tsa" "github.com/sigstore/cosign/v2/internal/ui" "github.com/sigstore/cosign/v2/pkg/blob" "github.com/sigstore/cosign/v2/pkg/cosign" @@ -53,8 +52,11 @@ type VerifyBlobCmd struct { options.KeyOpts options.CertVerifyOptions CertRef string + CAIntermediates string + CARoots string CertChain string SigRef string + TrustedRootPath string CertGithubWorkflowTrigger string CertGithubWorkflowSHA string CertGithubWorkflowName string @@ -63,14 +65,23 @@ type VerifyBlobCmd struct { IgnoreSCT bool SCTRef string Offline bool + UseSignedTimestamps bool IgnoreTlog bool } +func (c *VerifyBlobCmd) loadTSACertificates(ctx context.Context) (*cosign.TSACertificates, error) { + if c.TSACertChainPath == "" && !c.UseSignedTimestamps { + return nil, fmt.Errorf("either TSA certificate chain path must be provided or use-signed-timestamps must be set") + } + tsaCertificates, err := cosign.GetTSACerts(ctx, c.TSACertChainPath, cosign.GetTufTargets) + if err != nil { + return nil, fmt.Errorf("unable to load TSA certificates: %w", err) + } + return tsaCertificates, nil +} + // nolint func (c *VerifyBlobCmd) Exec(ctx context.Context, blobRef string) error { - var cert *x509.Certificate - opts := make([]static.Option, 0) - // Require a certificate/key OR a local bundle file that has the cert. 
if options.NOf(c.KeyRef, c.CertRef, c.Sk, c.BundlePath) == 0 { return fmt.Errorf("provide a key with --key or --sk, a certificate to verify against with --certificate, or a bundle with --bundle") @@ -81,6 +92,22 @@ func (c *VerifyBlobCmd) Exec(ctx context.Context, blobRef string) error { return &options.PubKeyParseError{} } + if c.KeyOpts.NewBundleFormat { + if options.NOf(c.RFC3161TimestampPath, c.TSACertChainPath, c.RekorURL, c.CertChain, c.CARoots, c.CAIntermediates, c.CertRef, c.SigRef, c.SCTRef) > 1 { + return fmt.Errorf("when using --new-bundle-format, please supply signed content with --bundle and verification content with --trusted-root") + } + err := verifyNewBundle(ctx, c.BundlePath, c.TrustedRootPath, c.KeyRef, c.Slot, c.CertVerifyOptions.CertOidcIssuer, c.CertVerifyOptions.CertOidcIssuerRegexp, c.CertVerifyOptions.CertIdentity, c.CertVerifyOptions.CertIdentityRegexp, c.CertGithubWorkflowTrigger, c.CertGithubWorkflowSHA, c.CertGithubWorkflowName, c.CertGithubWorkflowRepository, c.CertGithubWorkflowRef, blobRef, c.Sk, c.IgnoreTlog, c.UseSignedTimestamps, c.IgnoreSCT) + if err == nil { + ui.Infof(ctx, "Verified OK") + } + return err + } else if c.TrustedRootPath != "" { + return fmt.Errorf("--trusted-root only supported with --new-bundle-format") + } + + var cert *x509.Certificate + opts := make([]static.Option, 0) + var identities []cosign.Identity var err error if c.KeyRef == "" { @@ -111,32 +138,17 @@ func (c *VerifyBlobCmd) Exec(ctx context.Context, blobRef string) error { Offline: c.Offline, IgnoreTlog: c.IgnoreTlog, } - if c.RFC3161TimestampPath != "" && c.KeyOpts.TSACertChainPath == "" { - return fmt.Errorf("timestamp-certificate-chain is required to validate a RFC3161 timestamp") + if c.RFC3161TimestampPath != "" && !(c.TSACertChainPath != "" || c.UseSignedTimestamps) { + return fmt.Errorf("either TSA certificate chain path must be provided or use-signed-timestamps must be set when using RFC3161 timestamp path") } - if c.KeyOpts.TSACertChainPath != "" { - _, err := os.Stat(c.KeyOpts.TSACertChainPath) + if c.TSACertChainPath != "" || c.UseSignedTimestamps { + tsaCertificates, err := c.loadTSACertificates(ctx) if err != nil { - return fmt.Errorf("unable to open timestamp certificate chain file '%s: %w", c.KeyOpts.TSACertChainPath, err) - } - // TODO: Add support for TUF certificates. - pemBytes, err := os.ReadFile(filepath.Clean(c.KeyOpts.TSACertChainPath)) - if err != nil { - return fmt.Errorf("error reading certification chain path file: %w", err) - } - - leaves, intermediates, roots, err := tsa.SplitPEMCertificateChain(pemBytes) - if err != nil { - return fmt.Errorf("error splitting certificates: %w", err) - } - if len(leaves) > 1 { - return fmt.Errorf("certificate chain must contain at most one TSA certificate") - } - if len(leaves) == 1 { - co.TSACertificate = leaves[0] + return err } - co.TSAIntermediateCertificates = intermediates - co.TSARootCertificates = roots + co.TSACertificate = tsaCertificates.LeafCert + co.TSARootCertificates = tsaCertificates.RootCert + co.TSAIntermediateCertificates = tsaCertificates.IntermediateCerts } if !c.IgnoreTlog { @@ -154,19 +166,10 @@ func (c *VerifyBlobCmd) Exec(ctx context.Context, blobRef string) error { return fmt.Errorf("getting Rekor public keys: %w", err) } } + if keylessVerification(c.KeyRef, c.Sk) { - // Use default TUF roots if a cert chain is not provided. - // This performs an online fetch of the Fulcio roots. This is needed - // for verifying keyless certificates (both online and offline). 
- if c.CertChain == "" { - co.RootCerts, err = fulcio.GetRoots() - if err != nil { - return fmt.Errorf("getting Fulcio roots: %w", err) - } - co.IntermediateCerts, err = fulcio.GetIntermediates() - if err != nil { - return fmt.Errorf("getting Fulcio intermediates: %w", err) - } + if err := loadCertsKeylessVerification(c.CertChain, c.CARoots, c.CAIntermediates, co); err != nil { + return err } } @@ -252,7 +255,8 @@ func (c *VerifyBlobCmd) Exec(ctx context.Context, blobRef string) error { } // Set a cert chain if provided. var chainPEM []byte - if c.CertChain != "" { + switch { + case c.CertChain != "": chain, err := loadCertChainFromFileOrURL(c.CertChain) if err != nil { return err @@ -272,6 +276,9 @@ func (c *VerifyBlobCmd) Exec(ctx context.Context, blobRef string) error { if err != nil { return err } + case c.CARoots != "": + // CA roots + possible intermediates are already loaded into co.RootCerts with the call to + // loadCertsKeylessVerification above. } // Gather the cert for the signature and add the cert along with the @@ -313,7 +320,7 @@ func base64signature(sigRef, bundlePath string) (string, error) { case sigRef != "": targetSig, err = blob.LoadFileOrURL(sigRef) if err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, fs.ErrNotExist) { // ignore if file does not exist, it can be a base64 encoded string as well return "", err } diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_blob_attestation.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_blob_attestation.go index 0d9c9b77a6..3f2c33cc63 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_blob_attestation.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_blob_attestation.go @@ -30,11 +30,10 @@ import ( "path/filepath" v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio" "github.com/sigstore/cosign/v2/cmd/cosign/cli/options" "github.com/sigstore/cosign/v2/cmd/cosign/cli/rekor" internal "github.com/sigstore/cosign/v2/internal/pkg/cosign" - "github.com/sigstore/cosign/v2/internal/pkg/cosign/tsa" + payloadsize "github.com/sigstore/cosign/v2/internal/pkg/cosign/payload/size" "github.com/sigstore/cosign/v2/pkg/blob" "github.com/sigstore/cosign/v2/pkg/cosign" "github.com/sigstore/cosign/v2/pkg/cosign/bundle" @@ -52,8 +51,11 @@ type VerifyBlobAttestationCommand struct { options.KeyOpts options.CertVerifyOptions - CertRef string - CertChain string + CertRef string + CertChain string + CAIntermediates string + CARoots string + TrustedRootPath string CertGithubWorkflowTrigger string CertGithubWorkflowSHA string @@ -70,7 +72,8 @@ type VerifyBlobAttestationCommand struct { PredicateType string // TODO: Add policies - SignaturePath string // Path to the signature + SignaturePath string // Path to the signature + UseSignedTimestamps bool } // Exec runs the verification command @@ -89,6 +92,19 @@ func (c *VerifyBlobAttestationCommand) Exec(ctx context.Context, artifactPath st return &options.KeyParseError{} } + if c.KeyOpts.NewBundleFormat { + if options.NOf(c.RFC3161TimestampPath, c.TSACertChainPath, c.RekorURL, c.CertChain, c.CARoots, c.CAIntermediates, c.CertRef, c.SCTRef) > 1 { + return fmt.Errorf("when using --new-bundle-format, please supply signed content with --bundle and verification content with --trusted-root") + } + err = verifyNewBundle(ctx, c.BundlePath, c.TrustedRootPath, c.KeyRef, c.Slot, c.CertVerifyOptions.CertOidcIssuer, c.CertVerifyOptions.CertOidcIssuerRegexp, 
c.CertVerifyOptions.CertIdentity, c.CertVerifyOptions.CertIdentityRegexp, c.CertGithubWorkflowTrigger, c.CertGithubWorkflowSHA, c.CertGithubWorkflowName, c.CertGithubWorkflowRepository, c.CertGithubWorkflowRef, artifactPath, c.Sk, c.IgnoreTlog, c.UseSignedTimestamps, c.IgnoreSCT) + if err == nil { + fmt.Fprintln(os.Stderr, "Verified OK") + } + return err + } else if c.TrustedRootPath != "" { + return fmt.Errorf("--trusted-root only supported with --new-bundle-format") + } + var identities []cosign.Identity if c.KeyRef == "" { identities, err = c.Identities() @@ -117,6 +133,14 @@ func (c *VerifyBlobAttestationCommand) Exec(ctx context.Context, artifactPath st return err } defer f.Close() + fileInfo, err := f.Stat() + if err != nil { + return err + } + err = payloadsize.CheckSize(uint64(fileInfo.Size())) + if err != nil { + return err + } payload = internal.NewHashReader(f, sha256.New()) if _, err := io.ReadAll(&payload); err != nil { @@ -131,32 +155,18 @@ func (c *VerifyBlobAttestationCommand) Exec(ctx context.Context, artifactPath st } // Set up TSA, Fulcio roots and tlog public keys and clients. - if c.RFC3161TimestampPath != "" && c.KeyOpts.TSACertChainPath == "" { - return fmt.Errorf("timestamp-cert-chain is required to validate a rfc3161 timestamp bundle") + if c.RFC3161TimestampPath != "" && !(c.TSACertChainPath != "" || c.UseSignedTimestamps) { + return fmt.Errorf("either TSA certificate chain path must be provided or use-signed-timestamps must be set when using RFC3161 timestamp path") } - if c.KeyOpts.TSACertChainPath != "" { - _, err := os.Stat(c.TSACertChainPath) - if err != nil { - return fmt.Errorf("unable to open timestamp certificate chain file: %w", err) - } - // TODO: Add support for TUF certificates. - pemBytes, err := os.ReadFile(filepath.Clean(c.TSACertChainPath)) - if err != nil { - return fmt.Errorf("error reading certification chain path file: %w", err) - } - leaves, intermediates, roots, err := tsa.SplitPEMCertificateChain(pemBytes) + if c.TSACertChainPath != "" || c.UseSignedTimestamps { + tsaCertificates, err := cosign.GetTSACerts(ctx, c.TSACertChainPath, cosign.GetTufTargets) if err != nil { - return fmt.Errorf("error splitting certificates: %w", err) + return fmt.Errorf("unable to load or get TSA certificates: %w", err) } - if len(leaves) > 1 { - return fmt.Errorf("certificate chain must contain at most one TSA certificate") - } - if len(leaves) == 1 { - co.TSACertificate = leaves[0] - } - co.TSAIntermediateCertificates = intermediates - co.TSARootCertificates = roots + co.TSACertificate = tsaCertificates.LeafCert + co.TSARootCertificates = tsaCertificates.RootCert + co.TSAIntermediateCertificates = tsaCertificates.IntermediateCerts } if !c.IgnoreTlog { @@ -175,20 +185,11 @@ func (c *VerifyBlobAttestationCommand) Exec(ctx context.Context, artifactPath st } } if keylessVerification(c.KeyRef, c.Sk) { - // Use default TUF roots if a cert chain is not provided. - // This performs an online fetch of the Fulcio roots. This is needed - // for verifying keyless certificates (both online and offline). 
- if c.CertChain == "" { - co.RootCerts, err = fulcio.GetRoots() - if err != nil { - return fmt.Errorf("getting Fulcio roots: %w", err) - } - co.IntermediateCerts, err = fulcio.GetIntermediates() - if err != nil { - return fmt.Errorf("getting Fulcio intermediates: %w", err) - } + if err := loadCertsKeylessVerification(c.CertChain, c.CARoots, c.CAIntermediates, co); err != nil { + return err } } + // Ignore Signed Certificate Timestamp if the flag is set or a key is provided if shouldVerifySCT(c.IgnoreSCT, c.KeyRef, c.Sk) { co.CTLogPubKeys, err = cosign.GetCTLogPubs(ctx) @@ -233,6 +234,9 @@ func (c *VerifyBlobAttestationCommand) Exec(ctx context.Context, artifactPath st if err != nil { return err } + case c.CARoots != "": + // CA roots + possible intermediates are already loaded into co.RootCerts with the call to + // loadCertsKeylessVerification above. } if c.BundlePath != "" { b, err := cosign.FetchLocalSignedPayloadFromPath(c.BundlePath) diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_bundle.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_bundle.go new file mode 100644 index 0000000000..01921be7ff --- /dev/null +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/verify/verify_bundle.go @@ -0,0 +1,163 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package verify + +import ( + "bytes" + "context" + "fmt" + "time" + + sgbundle "github.com/sigstore/sigstore-go/pkg/bundle" + "github.com/sigstore/sigstore-go/pkg/fulcio/certificate" + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/verify" + + "github.com/sigstore/cosign/v2/pkg/cosign/pivkey" + sigs "github.com/sigstore/cosign/v2/pkg/signature" +) + +type verifyTrustedMaterial struct { + root.TrustedMaterial + keyTrustedMaterial root.TrustedMaterial +} + +func (v *verifyTrustedMaterial) PublicKeyVerifier(hint string) (root.TimeConstrainedVerifier, error) { + return v.keyTrustedMaterial.PublicKeyVerifier(hint) +} + +func verifyNewBundle(ctx context.Context, bundlePath, trustedRootPath, keyRef, slot, certOIDCIssuer, certOIDCIssuerRegex, certIdentity, certIdentityRegexp, githubWorkflowTrigger, githubWorkflowSHA, githubWorkflowName, githubWorkflowRepository, githubWorkflowRef, artifactRef string, sk, ignoreTlog, useSignedTimestamps, ignoreSCT bool) error { + bundle, err := sgbundle.LoadJSONFromPath(bundlePath) + if err != nil { + return err + } + + var trustedroot *root.TrustedRoot + + if trustedRootPath == "" { + // Assume we're using public good instance; fetch via TUF + trustedroot, err = root.FetchTrustedRoot() + if err != nil { + return err + } + } else { + trustedroot, err = root.NewTrustedRootFromPath(trustedRootPath) + if err != nil { + return err + } + } + + trustedmaterial := &verifyTrustedMaterial{TrustedMaterial: trustedroot} + + // See if we need to wrap trusted root with provided key + if keyRef != "" { + signatureVerifier, err := sigs.PublicKeyFromKeyRef(ctx, keyRef) + if err != nil { + return err + } + + newExpiringKey := root.NewExpiringKey(signatureVerifier, time.Time{}, time.Time{}) + trustedmaterial.keyTrustedMaterial = root.NewTrustedPublicKeyMaterial(func(_ string) (root.TimeConstrainedVerifier, error) { + return newExpiringKey, nil + }) + } else if sk { + s, err := pivkey.GetKeyWithSlot(slot) + if err != nil { + return fmt.Errorf("opening piv token: %w", err) + } + defer s.Close() + signatureVerifier, err := s.Verifier() + if err != nil { + return fmt.Errorf("loading public key from token: %w", err) + } + + newExpiringKey := root.NewExpiringKey(signatureVerifier, time.Time{}, time.Time{}) + trustedmaterial.keyTrustedMaterial = root.NewTrustedPublicKeyMaterial(func(_ string) (root.TimeConstrainedVerifier, error) { + return newExpiringKey, nil + }) + } + + identityPolicies := []verify.PolicyOption{} + + verificationMaterial := bundle.GetVerificationMaterial() + + if verificationMaterial == nil { + return fmt.Errorf("no verification material in bundle") + } + + if verificationMaterial.GetPublicKey() != nil { + identityPolicies = append(identityPolicies, verify.WithKey()) + } else { + sanMatcher, err := verify.NewSANMatcher(certIdentity, certIdentityRegexp) + if err != nil { + return err + } + + issuerMatcher, err := verify.NewIssuerMatcher(certOIDCIssuer, certOIDCIssuerRegex) + if err != nil { + return err + } + + extensions := certificate.Extensions{ + GithubWorkflowTrigger: githubWorkflowTrigger, + GithubWorkflowSHA: githubWorkflowSHA, + GithubWorkflowName: githubWorkflowName, + GithubWorkflowRepository: githubWorkflowRepository, + GithubWorkflowRef: githubWorkflowRef, + } + + certIdentity, err := verify.NewCertificateIdentity(sanMatcher, issuerMatcher, extensions) + if err != nil { + return err + } + + identityPolicies = append(identityPolicies, verify.WithCertificateIdentity(certIdentity)) + } + + // Make some educated guesses about 
verification policy + verifierConfig := []verify.VerifierOption{} + + if len(trustedroot.RekorLogs()) > 0 && !ignoreTlog { + verifierConfig = append(verifierConfig, verify.WithTransparencyLog(1), verify.WithIntegratedTimestamps(1)) + } + + if len(trustedroot.TimestampingAuthorities()) > 0 && useSignedTimestamps { + verifierConfig = append(verifierConfig, verify.WithSignedTimestamps(1)) + } + + if !ignoreSCT { + verifierConfig = append(verifierConfig, verify.WithSignedCertificateTimestamps(1)) + } + + if ignoreTlog && !useSignedTimestamps { + verifierConfig = append(verifierConfig, verify.WithoutAnyObserverTimestampsUnsafe()) + } + + // Perform verification + payload, err := payloadBytes(artifactRef) + if err != nil { + return err + } + buf := bytes.NewBuffer(payload) + + sev, err := verify.NewSignedEntityVerifier(trustedmaterial, verifierConfig...) + if err != nil { + return err + } + + _, err = sev.Verify(bundle, verify.NewPolicy(verify.WithArtifact(buf), identityPolicies...)) + return err +} diff --git a/vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/common.go b/vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/common.go index a3ad5d7efb..a1aa8ebc35 100644 --- a/vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/common.go +++ b/vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/common.go @@ -18,6 +18,7 @@ import ( "errors" "hash" "io" + "io/fs" "os" ) @@ -27,7 +28,7 @@ const ( func FileExists(filename string) (bool, error) { info, err := os.Stat(filename) - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return false, nil } if err != nil { diff --git a/vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/fulcio/fulcioroots/fulcioroots.go b/vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/fulcio/fulcioroots/fulcioroots.go index 3d86353c35..3b44da8842 100644 --- a/vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/fulcio/fulcioroots/fulcioroots.go +++ b/vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/fulcio/fulcioroots/fulcioroots.go @@ -56,6 +56,13 @@ func GetIntermediates() (*x509.CertPool, error) { return intermediates, singletonRootErr } +// ReInit reinitializes the global roots and intermediates, overriding the sync.Once lock. +// This is only to be used for tests, where the trust root environment variables may change after the roots are initialized in the module. +func ReInit() error { + roots, intermediates, singletonRootErr = initRoots() + return singletonRootErr +} + func initRoots() (*x509.CertPool, *x509.CertPool, error) { rootPool := x509.NewCertPool() // intermediatePool should be nil if no intermediates are found diff --git a/vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/payload/size/errors.go b/vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/payload/size/errors.go new file mode 100644 index 0000000000..5a7e055989 --- /dev/null +++ b/vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/payload/size/errors.go @@ -0,0 +1,31 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package payload + +import "fmt" + +// MaxLayerSizeExceeded is an error indicating that the layer is too big to read into memory and cosign should abort processing it. +type MaxLayerSizeExceeded struct { + value uint64 + maximum uint64 +} + +func NewMaxLayerSizeExceeded(value, maximum uint64) *MaxLayerSizeExceeded { + return &MaxLayerSizeExceeded{value, maximum} +} + +func (e *MaxLayerSizeExceeded) Error() string { + return fmt.Sprintf("size of layer (%d) exceeded the limit (%d)", e.value, e.maximum) +} diff --git a/vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/payload/size/size.go b/vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/payload/size/size.go new file mode 100644 index 0000000000..f867477c73 --- /dev/null +++ b/vendor/github.com/sigstore/cosign/v2/internal/pkg/cosign/payload/size/size.go @@ -0,0 +1,38 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package payload + +import ( + "github.com/dustin/go-humanize" + "github.com/sigstore/cosign/v2/pkg/cosign/env" +) + +const defaultMaxSize = uint64(134217728) // 128MiB + +func CheckSize(size uint64) error { + maxSize := defaultMaxSize + maxSizeOverride, exists := env.LookupEnv(env.VariableMaxAttachmentSize) + if exists { + var err error + maxSize, err = humanize.ParseBytes(maxSizeOverride) + if err != nil { + maxSize = defaultMaxSize + } + } + if size > maxSize { + return NewMaxLayerSizeExceeded(size, maxSize) + } + return nil +} diff --git a/vendor/github.com/sigstore/cosign/v2/internal/pkg/now/now.go b/vendor/github.com/sigstore/cosign/v2/internal/pkg/now/now.go new file mode 100644 index 0000000000..253a41f3f2 --- /dev/null +++ b/vendor/github.com/sigstore/cosign/v2/internal/pkg/now/now.go @@ -0,0 +1,38 @@ +// +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package now + +import ( + "fmt" + "os" + "strconv" + "time" +) + +// Now returns SOURCE_DATE_EPOCH or time.Now(). 
+func Now() (time.Time, error) { + // nolint + epoch := os.Getenv("SOURCE_DATE_EPOCH") + if epoch == "" { + return time.Now(), nil + } + + seconds, err := strconv.ParseInt(epoch, 10, 64) + if err != nil { + return time.Now(), fmt.Errorf("SOURCE_DATE_EPOCH should be the number of seconds since January 1st 1970, 00:00 UTC, got: %w", err) + } + return time.Unix(seconds, 0), nil +} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/bundle/protobundle.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/bundle/protobundle.go new file mode 100644 index 0000000000..a26e2fb2bb --- /dev/null +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/bundle/protobundle.go @@ -0,0 +1,65 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bundle + +import ( + protobundle "github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1" + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + protorekor "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1" + "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/tle" +) + +const bundleV03MediaType = "application/vnd.dev.sigstore.bundle.v0.3+json" + +func MakeProtobufBundle(hint string, rawCert []byte, rekorEntry *models.LogEntryAnon, timestampBytes []byte) (*protobundle.Bundle, error) { + bundle := &protobundle.Bundle{MediaType: bundleV03MediaType} + + if hint != "" { + bundle.VerificationMaterial = &protobundle.VerificationMaterial{ + Content: &protobundle.VerificationMaterial_PublicKey{ + PublicKey: &protocommon.PublicKeyIdentifier{ + Hint: hint, + }, + }, + } + } else if len(rawCert) > 0 { + bundle.VerificationMaterial = &protobundle.VerificationMaterial{ + Content: &protobundle.VerificationMaterial_Certificate{ + Certificate: &protocommon.X509Certificate{ + RawBytes: rawCert, + }, + }, + } + } + + if len(timestampBytes) > 0 { + bundle.VerificationMaterial.TimestampVerificationData = &protobundle.TimestampVerificationData{ + Rfc3161Timestamps: []*protocommon.RFC3161SignedTimestamp{ + {SignedTimestamp: timestampBytes}, + }, + } + } + + if rekorEntry != nil { + tlogEntry, err := tle.GenerateTransparencyLogEntry(*rekorEntry) + if err != nil { + return nil, err + } + bundle.VerificationMaterial.TlogEntries = []*protorekor.TransparencyLogEntry{tlogEntry} + } + + return bundle, nil +} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/env/env.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/env/env.go index 5c26d4f516..016687acb8 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/env/env.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/env/env.go @@ -51,12 +51,14 @@ const ( VariablePKCS11ModulePath Variable = "COSIGN_PKCS11_MODULE_PATH" VariablePKCS11IgnoreCertificate Variable = "COSIGN_PKCS11_IGNORE_CERTIFICATE" VariableRepository Variable = "COSIGN_REPOSITORY" + VariableMaxAttachmentSize Variable = "COSIGN_MAX_ATTACHMENT_SIZE" // Sigstore environment variables VariableSigstoreCTLogPublicKeyFile Variable = 
"SIGSTORE_CT_LOG_PUBLIC_KEY_FILE" VariableSigstoreRootFile Variable = "SIGSTORE_ROOT_FILE" VariableSigstoreRekorPublicKey Variable = "SIGSTORE_REKOR_PUBLIC_KEY" VariableSigstoreIDToken Variable = "SIGSTORE_ID_TOKEN" //nolint:gosec + VariableSigstoreTSACertificateFile Variable = "SIGSTORE_TSA_CERTIFICATE_FILE" // Other external environment variables VariableGitHubHost Variable = "GITHUB_HOST" @@ -113,6 +115,11 @@ var ( Expects: "string with a repository", Sensitive: false, }, + VariableMaxAttachmentSize: { + Description: "maximum attachment size to download (default 128MiB)", + Expects: "human-readable unit of memory, e.g. 5120, 20K, 3M, 45MiB, 1GB", + Sensitive: false, + }, VariableSigstoreCTLogPublicKeyFile: { Description: "overrides what is used to validate the SCT coming back from Fulcio", @@ -132,7 +139,12 @@ var ( Sensitive: false, External: true, }, - + VariableSigstoreTSACertificateFile: { + Description: "path to the concatenated PEM-encoded TSA certificate file (leaf, intermediate(s), root) used by Sigstore", + Expects: "path to the TSA certificate file", + Sensitive: false, + External: true, + }, VariableGitHubHost: { Description: "is URL of the GitHub Enterprise instance", Expects: "string with the URL of GitHub Enterprise instance", diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/github/github.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/github/github.go index 6aecb0b64f..d58e68d414 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/github/github.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/github/github.go @@ -70,19 +70,25 @@ func (g *Gh) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) erro return fmt.Errorf("generating key pair: %w", err) } + var owner, repo string split := strings.Split(ref, "/") - if len(split) < 2 { - return errors.New("could not parse scheme, use github:/// format") + + switch len(split) { + case 2: + owner, repo = split[0], split[1] + case 1: + owner = split[0] + default: + return errors.New("could not parse scheme, use github:// or github:/// format") } - owner, repo := split[0], split[1] - key, getRepoPubKeyResp, err := client.Actions.GetRepoPublicKey(ctx, owner, repo) + key, getPubKeyResp, err := getPublicKey(ctx, client, owner, repo) if err != nil { return fmt.Errorf("could not get repository public key: %w", err) } - if getRepoPubKeyResp.StatusCode < 200 && getRepoPubKeyResp.StatusCode >= 300 { - bodyBytes, _ := io.ReadAll(getRepoPubKeyResp.Body) + if getPubKeyResp.StatusCode < 200 && getPubKeyResp.StatusCode >= 300 { + bodyBytes, _ := io.ReadAll(getPubKeyResp.Body) return fmt.Errorf("%s", bodyBytes) } @@ -91,7 +97,7 @@ func (g *Gh) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) erro return fmt.Errorf("could not encrypt the secret: %w", err) } - passwordSecretEnvResp, err := client.Actions.CreateOrUpdateRepoSecret(ctx, owner, repo, encryptedCosignPasswd) + passwordSecretEnvResp, err := createOrUpdateOrgSecret(ctx, client, owner, repo, encryptedCosignPasswd) if err != nil { return fmt.Errorf("could not create \"COSIGN_PASSWORD\" github actions secret: %w", err) } @@ -108,7 +114,7 @@ func (g *Gh) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) erro return fmt.Errorf("could not encrypt the secret: %w", err) } - privateKeySecretEnvResp, err := client.Actions.CreateOrUpdateRepoSecret(ctx, owner, repo, encryptedCosignPrivKey) + privateKeySecretEnvResp, err := createOrUpdateOrgSecret(ctx, client, owner, repo, encryptedCosignPrivKey) if err != nil { 
return fmt.Errorf("could not create \"COSIGN_PRIVATE_KEY\" github actions secret: %w", err) } @@ -125,7 +131,7 @@ func (g *Gh) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) erro return fmt.Errorf("could not encrypt the secret: %w", err) } - publicKeySecretEnvResp, err := client.Actions.CreateOrUpdateRepoSecret(ctx, owner, repo, encryptedCosignPubKey) + publicKeySecretEnvResp, err := createOrUpdateOrgSecret(ctx, client, owner, repo, encryptedCosignPubKey) if err != nil { return fmt.Errorf("could not create \"COSIGN_PUBLIC_KEY\" github actions secret: %w", err) } @@ -150,6 +156,20 @@ func (g *Gh) GetSecret(ctx context.Context, ref string, key string) (string, err return "", nil } +func createOrUpdateOrgSecret(ctx context.Context, client *github.Client, owner string, repo string, encryptedCosignPasswd *github.EncryptedSecret) (*github.Response, error) { + if len(repo) > 0 { + return client.Actions.CreateOrUpdateRepoSecret(ctx, owner, repo, encryptedCosignPasswd) + } + return client.Actions.CreateOrUpdateOrgSecret(ctx, owner, encryptedCosignPasswd) +} + +func getPublicKey(ctx context.Context, client *github.Client, owner string, repo string) (*github.PublicKey, *github.Response, error) { + if len(repo) > 0 { + return client.Actions.GetRepoPublicKey(ctx, owner, repo) + } + return client.Actions.GetOrgPublicKey(ctx, owner) +} + func encryptSecretWithPublicKey(publicKey *github.PublicKey, secretName string, secretValue []byte) (*github.EncryptedSecret, error) { decodedPubKey, err := base64.StdEncoding.DecodeString(publicKey.GetKey()) if err != nil { diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/keys.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/keys.go index 9adc22525f..ed5bbc16d4 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/keys.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/keys.go @@ -58,7 +58,6 @@ type Keys struct { public crypto.PublicKey } -// TODO(jason): Move this to an internal package. type KeysBytes struct { PrivateBytes []byte PublicBytes []byte @@ -69,12 +68,17 @@ func (k *KeysBytes) Password() []byte { return k.password } -// TODO(jason): Move this to an internal package. +// GeneratePrivateKey generates an ECDSA private key with the P-256 curve. func GeneratePrivateKey() (*ecdsa.PrivateKey, error) { return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) } -// TODO(jason): Move this to the only place it's used in cmd/cosign/cli/importkeypair, and unexport it. +// ImportKeyPair imports a key pair from a file containing a PEM-encoded +// private key encoded with a password provided by the 'pf' function. +// The private key can be in one of the following formats: +// - RSA private key (PKCS #1) +// - ECDSA private key +// - PKCS #8 private key (RSA, ECDSA or ED25519). func ImportKeyPair(keyPath string, pf PassFunc) (*KeysBytes, error) { kb, err := os.ReadFile(filepath.Clean(keyPath)) if err != nil { @@ -180,7 +184,6 @@ func marshalKeyPair(ptype string, keypair Keys, pf PassFunc) (key *KeysBytes, er }, nil } -// TODO(jason): Move this to an internal package. func GenerateKeyPair(pf PassFunc) (*KeysBytes, error) { priv, err := GeneratePrivateKey() if err != nil { @@ -191,7 +194,7 @@ func GenerateKeyPair(pf PassFunc) (*KeysBytes, error) { return marshalKeyPair(SigstorePrivateKeyPemType, Keys{priv, priv.Public()}, pf) } -// TODO(jason): Move this to an internal package. +// PemToECDSAKey marshals and returns the PEM-encoded ECDSA public key. 
func PemToECDSAKey(pemBytes []byte) (*ecdsa.PublicKey, error) { pub, err := cryptoutils.UnmarshalPEMToPublicKey(pemBytes) if err != nil { @@ -204,7 +207,8 @@ func PemToECDSAKey(pemBytes []byte) (*ecdsa.PublicKey, error) { return ecdsaPub, nil } -// TODO(jason): Move this to pkg/signature, the only place it's used, and unimport it. +// LoadPrivateKey loads a cosign PEM private key encrypted with the given passphrase, +// and returns a SignerVerifier instance. The private key must be in the PKCS #8 format. func LoadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) { // Decrypt first p, _ := pem.Decode(key) @@ -219,7 +223,6 @@ func LoadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) { if err != nil { return nil, fmt.Errorf("decrypt: %w", err) } - pk, err := x509.ParsePKCS8PrivateKey(x509Encoded) if err != nil { return nil, fmt.Errorf("parsing private key: %w", err) diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/tsa.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/tsa.go new file mode 100644 index 0000000000..9d1c17a333 --- /dev/null +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/tsa.go @@ -0,0 +1,154 @@ +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cosign + +import ( + "bytes" + "context" + "crypto/x509" + "fmt" + "os" + + "github.com/sigstore/cosign/v2/pkg/cosign/env" + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/tuf" +) + +const ( + tsaLeafCertStr = `tsa_leaf.crt.pem` + tsaRootCertStr = `tsa_root.crt.pem` + tsaIntermediateCertStrPattern = `tsa_intermediate_%d.crt.pem` +) + +type TSACertificates struct { + LeafCert *x509.Certificate + IntermediateCerts []*x509.Certificate + RootCert []*x509.Certificate +} + +type GetTargetStub func(ctx context.Context, usage tuf.UsageKind, names []string) ([]byte, error) + +func GetTufTargets(ctx context.Context, usage tuf.UsageKind, names []string) ([]byte, error) { + tufClient, err := tuf.NewFromEnv(ctx) + if err != nil { + return nil, fmt.Errorf("error creating TUF client: %w", err) + } + targets, err := tufClient.GetTargetsByMeta(usage, names) + if err != nil { + return nil, fmt.Errorf("error fetching targets by metadata with usage %v: %w", usage, err) + } + + var buffer bytes.Buffer + for _, target := range targets { + buffer.Write(target.Target) + buffer.WriteByte('\n') + } + return buffer.Bytes(), nil +} + +func isTufTargetExist(ctx context.Context, name string) (bool, error) { + tufClient, err := tuf.NewFromEnv(ctx) + if err != nil { + return false, fmt.Errorf("error creating TUF client: %w", err) + } + _, err = tufClient.GetTarget(name) + if err != nil { + return false, nil + } + return true, nil +} + +// GetTSACerts retrieves trusted TSA certificates from the embedded or cached +// TUF root. If expired, makes a network call to retrieve the updated targets. 
+// By default, the certificates come from TUF, but you can override this for test +// purposes by using an env variable `SIGSTORE_TSA_CERTIFICATE_FILE` or a file path +// specified in `TSACertChainPath`. If using an alternate, the file should be in PEM format. +func GetTSACerts(ctx context.Context, certChainPath string, fn GetTargetStub) (*TSACertificates, error) { + altTSACert := env.Getenv(env.VariableSigstoreTSACertificateFile) + + var raw []byte + var err error + var exists bool + switch { + case altTSACert != "": + raw, err = os.ReadFile(altTSACert) + case certChainPath != "": + raw, err = os.ReadFile(certChainPath) + default: + certNames := []string{tsaLeafCertStr, tsaRootCertStr} + for i := 0; ; i++ { + intermediateCertStr := fmt.Sprintf(tsaIntermediateCertStrPattern, i) + exists, err = isTufTargetExist(ctx, intermediateCertStr) + if err != nil { + return nil, fmt.Errorf("error fetching TSA certificates: %w", err) + } + if !exists { + break + } + certNames = append(certNames, intermediateCertStr) + } + raw, err = fn(ctx, tuf.TSA, certNames) + if err != nil { + return nil, fmt.Errorf("error fetching TSA certificates: %w", err) + } + } + + if err != nil { + return nil, fmt.Errorf("error reading TSA certificate file: %w", err) + } + + leaves, intermediates, roots, err := splitPEMCertificateChain(raw) + if err != nil { + return nil, fmt.Errorf("error splitting TSA certificates: %w", err) + } + + if len(leaves) != 1 { + return nil, fmt.Errorf("TSA certificate chain must contain exactly one leaf certificate") + } + + if len(roots) == 0 { + return nil, fmt.Errorf("TSA certificate chain must contain at least one root certificate") + } + + return &TSACertificates{ + LeafCert: leaves[0], + IntermediateCerts: intermediates, + RootCert: roots, + }, nil +} + +// splitPEMCertificateChain returns a list of leaf (non-CA) certificates, a certificate pool for +// intermediate CA certificates, and a certificate pool for root CA certificates +func splitPEMCertificateChain(pem []byte) (leaves, intermediates, roots []*x509.Certificate, err error) { + certs, err := cryptoutils.UnmarshalCertificatesFromPEM(pem) + if err != nil { + return nil, nil, nil, err + } + + for _, cert := range certs { + if !cert.IsCA { + leaves = append(leaves, cert) + } else { + // root certificates are self-signed + if bytes.Equal(cert.RawSubject, cert.RawIssuer) { + roots = append(roots, cert) + } else { + intermediates = append(intermediates, cert) + } + } + } + + return leaves, intermediates, roots, nil +} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go index 141a07eea5..3ab5d76026 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go @@ -27,6 +27,7 @@ import ( "encoding/json" "encoding/pem" "fmt" + "io/fs" "net/http" "os" "regexp" @@ -592,8 +593,8 @@ func verifySignatures(ctx context.Context, sigs oci.Signatures, h v1.Hash, co *C } if len(sl) == 0 { - return nil, false, &ErrNoMatchingSignatures{ - errors.New("no matching signatures"), + return nil, false, &ErrNoSignaturesFound{ + errors.New("no signatures found"), } } @@ -709,6 +710,7 @@ func verifyInternal(ctx context.Context, sig oci.Signature, h v1.Hash, } t := time.Unix(*e.IntegratedTime, 0) acceptableRekorBundleTime = &t + bundleVerified = true } } @@ -834,7 +836,7 @@ func loadSignatureFromFile(ctx context.Context, sigRef string, signedImgRef name var b64sig string targetSig, err := blob.LoadFileOrURL(sigRef) 
if err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, fs.ErrNotExist) { return nil, err } targetSig = []byte(sigRef) diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify_sct.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify_sct.go index 934b2d9794..1b904c2c4f 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify_sct.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify_sct.go @@ -108,7 +108,7 @@ func VerifySCT(_ context.Context, certPEM, chainPEM, rawSCT []byte, pubKeys *Tru } err = ctutil.VerifySCT(pubKeyMetadata.PubKey, []*ctx509.Certificate{cert, certChain[0]}, sct, true) if err != nil { - return fmt.Errorf("error verifying embedded SCT") + return fmt.Errorf("error verifying embedded SCT: %w", err) } if pubKeyMetadata.Status != tuf.Active { fmt.Fprintf(os.Stderr, "**Info** Successfully verified embedded SCT using an expired verification key\n") diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/errors.go similarity index 52% rename from vendor/go.opentelemetry.io/otel/sdk/internal/internal.go rename to vendor/github.com/sigstore/cosign/v2/pkg/oci/errors.go index dfeaaa8ca0..aa0c2985b0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/errors.go @@ -1,4 +1,4 @@ -// Copyright The OpenTelemetry Authors +// Copyright 2024 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,17 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -package internal // import "go.opentelemetry.io/otel/sdk/internal" +package oci -import "time" +import "fmt" -// MonotonicEndTime returns the end time at present -// but offset from start, monotonically. -// -// The monotonic clock is used in subtractions hence -// the duration since start added back to start gives -// end as a monotonic time. -// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks -func MonotonicEndTime(start time.Time) time.Time { - return start.Add(time.Since(start)) +// MaxLayersExceeded is an error indicating that the artifact has too many layers and cosign should abort processing it. 
+type MaxLayersExceeded struct { + value int64 + maximum int64 +} + +func NewMaxLayersExceeded(value, maximum int64) *MaxLayersExceeded { + return &MaxLayersExceeded{value, maximum} +} + +func (e *MaxLayersExceeded) Error() string { + return fmt.Sprintf("number of layers (%d) exceeded the limit (%d)", e.value, e.maximum) } diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/internal/signature/layer.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/internal/signature/layer.go index 251d3edc09..d92af61c30 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/internal/signature/layer.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/internal/signature/layer.go @@ -24,6 +24,7 @@ import ( "strings" v1 "github.com/google/go-containerregistry/pkg/v1" + payloadsize "github.com/sigstore/cosign/v2/internal/pkg/cosign/payload/size" "github.com/sigstore/cosign/v2/pkg/cosign/bundle" "github.com/sigstore/cosign/v2/pkg/oci" "github.com/sigstore/sigstore/pkg/cryptoutils" @@ -58,11 +59,20 @@ func (s *sigLayer) Annotations() (map[string]string, error) { // Payload implements oci.Signature func (s *sigLayer) Payload() ([]byte, error) { + size, err := s.Layer.Size() + if err != nil { + return nil, err + } + err = payloadsize.CheckSize(uint64(size)) + if err != nil { + return nil, err + } // Compressed is a misnomer here, we just want the raw bytes from the registry. r, err := s.Layer.Compressed() if err != nil { return nil, err } + defer r.Close() payload, err := io.ReadAll(r) if err != nil { return nil, err diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/layout/signatures.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/layout/signatures.go index c9f24866cf..1b0d4c0233 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/layout/signatures.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/layout/signatures.go @@ -21,6 +21,8 @@ import ( "github.com/sigstore/cosign/v2/pkg/oci/internal/signature" ) +const maxLayers = 1000 + type sigs struct { v1.Image } @@ -33,7 +35,11 @@ func (s *sigs) Get() ([]oci.Signature, error) { if err != nil { return nil, err } - signatures := make([]oci.Signature, 0, len(manifest.Layers)) + numLayers := int64(len(manifest.Layers)) + if numLayers > maxLayers { + return nil, oci.NewMaxLayersExceeded(numLayers, maxLayers) + } + signatures := make([]oci.Signature, 0, numLayers) for _, desc := range manifest.Layers { l, err := s.Image.LayerByDigest(desc.Digest) if err != nil { diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/mutate.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/mutate.go index f934cb358d..59ba2c0c0f 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/mutate.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/mutate.go @@ -377,5 +377,5 @@ func (so *signOpts) dedupeAndReplace(sig oci.Signature, basefn func() (oci.Signa } return ReplaceSignatures(replace) } - return AppendSignatures(base, sig) + return AppendSignatures(base, so.rct, sig) } diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/options.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/options.go index a4c41b6fa9..342eea4e7c 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/options.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/options.go @@ -33,8 +33,9 @@ type ReplaceOp interface { type SignOption func(*signOpts) type signOpts struct { - dd DupeDetector - ro ReplaceOp + dd DupeDetector + ro ReplaceOp + rct bool } func makeSignOpts(opts ...SignOption) *signOpts { @@ -59,6 +60,12 @@ func 
WithReplaceOp(ro ReplaceOp) SignOption { } } +func WithRecordCreationTimestamp(rct bool) SignOption { + return func(so *signOpts) { + so.rct = rct + } +} + type signatureOpts struct { annotations map[string]string bundle *bundle.RekorBundle diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/signatures.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/signatures.go index 648cab0414..2a7c077ec2 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/signatures.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/mutate/signatures.go @@ -19,12 +19,15 @@ import ( v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/empty" "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/sigstore/cosign/v2/internal/pkg/now" "github.com/sigstore/cosign/v2/pkg/oci" ) +const maxLayers = 1000 + // AppendSignatures produces a new oci.Signatures with the provided signatures // appended to the provided base signatures. -func AppendSignatures(base oci.Signatures, sigs ...oci.Signature) (oci.Signatures, error) { +func AppendSignatures(base oci.Signatures, recordCreationTimestamp bool, sigs ...oci.Signature) (oci.Signatures, error) { adds := make([]mutate.Addendum, 0, len(sigs)) for _, sig := range sigs { ann, err := sig.Annotations() @@ -42,6 +45,19 @@ func AppendSignatures(base oci.Signatures, sigs ...oci.Signature) (oci.Signature return nil, err } + if recordCreationTimestamp { + t, err := now.Now() + if err != nil { + return nil, err + } + + // Set the Created date to time of execution + img, err = mutate.CreatedAt(img, v1.Time{Time: t}) + if err != nil { + return nil, err + } + } + return &sigAppender{ Image: img, base: base, @@ -92,5 +108,9 @@ func (sa *sigAppender) Get() ([]oci.Signature, error) { if err != nil { return nil, err } + sumLayers := int64(len(sl) + len(sa.sigs)) + if sumLayers > maxLayers { + return nil, oci.NewMaxLayersExceeded(sumLayers, maxLayers) + } return append(sl, sa.sigs...), nil } diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/image.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/image.go index 5879833941..8c6eda5ff0 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/image.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/image.go @@ -21,6 +21,7 @@ import ( "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/remote/transport" "github.com/sigstore/cosign/v2/pkg/oci" ) @@ -48,6 +49,12 @@ type image struct { opt *options } +// The wrapped Image implements ConfigLayer, but the wrapping hides that from typechecks in pkg/v1/remote. +// Make image explicitly implement ConfigLayer so that this returns a mountable config layer for pkg/v1/remote. 
+func (i *image) ConfigLayer() (v1.Layer, error) { + return partial.ConfigLayer(i.Image) +} + var _ oci.SignedImage = (*image)(nil) // Signatures implements oci.SignedImage diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/remote.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/remote.go index 7827407ce3..eab4e1f9b0 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/remote.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/remote.go @@ -26,6 +26,7 @@ import ( "github.com/google/go-containerregistry/pkg/v1/remote" "github.com/google/go-containerregistry/pkg/v1/remote/transport" "github.com/google/go-containerregistry/pkg/v1/types" + payloadsize "github.com/sigstore/cosign/v2/internal/pkg/cosign/payload/size" ociexperimental "github.com/sigstore/cosign/v2/internal/pkg/oci/remote" "github.com/sigstore/cosign/v2/pkg/oci" ) @@ -226,6 +227,15 @@ func (f *attached) FileMediaType() (types.MediaType, error) { // Payload implements oci.File func (f *attached) Payload() ([]byte, error) { + size, err := f.layer.Size() + if err != nil { + return nil, err + } + err = payloadsize.CheckSize(uint64(size)) + if err != nil { + return nil, err + } + // remote layers are believed to be stored // compressed, but we don't compress attachments // so use "Compressed" to access the raw byte diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/signatures.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/signatures.go index 635b5e9e07..825d80727f 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/signatures.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/signatures.go @@ -21,12 +21,15 @@ import ( "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/remote/transport" "github.com/sigstore/cosign/v2/pkg/oci" "github.com/sigstore/cosign/v2/pkg/oci/empty" "github.com/sigstore/cosign/v2/pkg/oci/internal/signature" ) +const maxLayers = 1000 + // Signatures fetches the signatures image represented by the named reference. // If the tag is not found, this returns an empty oci.Signatures. func Signatures(ref name.Reference, opts ...Option) (oci.Signatures, error) { @@ -50,6 +53,12 @@ type sigs struct { v1.Image } +// The wrapped Image implements ConfigLayer, but the wrapping hides that from typechecks in pkg/v1/remote. +// Make sigs explicitly implement ConfigLayer so that this returns a mountable config layer for pkg/v1/remote. 
+func (s *sigs) ConfigLayer() (v1.Layer, error) { + return partial.ConfigLayer(s.Image) +} + var _ oci.Signatures = (*sigs)(nil) // Get implements oci.Signatures @@ -58,6 +67,10 @@ func (s *sigs) Get() ([]oci.Signature, error) { if err != nil { return nil, err } + numLayers := int64(len(m.Layers)) + if numLayers > maxLayers { + return nil, oci.NewMaxLayersExceeded(numLayers, maxLayers) + } signatures := make([]oci.Signature, 0, len(m.Layers)) for _, desc := range m.Layers { layer, err := s.Image.LayerByDigest(desc.Digest) diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/file.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/file.go index 6fc55d8311..18ec65c3af 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/file.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/file.go @@ -22,6 +22,8 @@ import ( "github.com/google/go-containerregistry/pkg/v1/empty" "github.com/google/go-containerregistry/pkg/v1/mutate" "github.com/google/go-containerregistry/pkg/v1/types" + payloadsize "github.com/sigstore/cosign/v2/internal/pkg/cosign/payload/size" + "github.com/sigstore/cosign/v2/internal/pkg/now" "github.com/sigstore/cosign/v2/pkg/oci" "github.com/sigstore/cosign/v2/pkg/oci/signed" ) @@ -48,6 +50,19 @@ func NewFile(payload []byte, opts ...Option) (oci.File, error) { // Add annotations from options img = mutate.Annotations(img, o.Annotations).(v1.Image) + if o.RecordCreationTimestamp { + t, err := now.Now() + if err != nil { + return nil, err + } + + // Set the Created date to time of execution + img, err = mutate.CreatedAt(img, v1.Time{Time: t}) + if err != nil { + return nil, err + } + } + return &file{ SignedImage: signed.Image(img), layer: layer, @@ -68,6 +83,14 @@ func (f *file) FileMediaType() (types.MediaType, error) { // Payload implements oci.File func (f *file) Payload() ([]byte, error) { + size, err := f.layer.Size() + if err != nil { + return nil, err + } + err = payloadsize.CheckSize(uint64(size)) + if err != nil { + return nil, err + } rc, err := f.layer.Uncompressed() if err != nil { return nil, err diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/options.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/options.go index 100c988e98..b240fb228a 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/options.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/static/options.go @@ -27,13 +27,14 @@ import ( type Option func(*options) type options struct { - LayerMediaType types.MediaType - ConfigMediaType types.MediaType - Bundle *bundle.RekorBundle - RFC3161Timestamp *bundle.RFC3161Timestamp - Cert []byte - Chain []byte - Annotations map[string]string + LayerMediaType types.MediaType + ConfigMediaType types.MediaType + Bundle *bundle.RekorBundle + RFC3161Timestamp *bundle.RFC3161Timestamp + Cert []byte + Chain []byte + Annotations map[string]string + RecordCreationTimestamp bool } func makeOptions(opts ...Option) (*options, error) { @@ -112,3 +113,10 @@ func WithCertChain(cert, chain []byte) Option { o.Chain = chain } } + +// WithRecordCreationTimestamp sets the feature flag to honor the creation timestamp to time of running +func WithRecordCreationTimestamp(rct bool) Option { + return func(o *options) { + o.RecordCreationTimestamp = rct + } +} diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/policy/attestation.go b/vendor/github.com/sigstore/cosign/v2/pkg/policy/attestation.go index 9d32d173e8..1fa82bf8a6 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/policy/attestation.go +++ 
b/vendor/github.com/sigstore/cosign/v2/pkg/policy/attestation.go @@ -29,6 +29,17 @@ import ( "github.com/sigstore/cosign/v2/pkg/cosign/attestation" ) +// PayloadProvider is a subset of oci.Signature that only provides the +// Payload() method. +type PayloadProvider interface { + // Payload fetches the opaque data that is being signed. + // This will always return data when there is no error. + Payload() ([]byte, error) +} + +// Assert that oci.Signature implements PayloadProvider +var _ PayloadProvider = (oci.Signature)(nil) + // AttestationToPayloadJSON takes in a verified Attestation (oci.Signature) and // marshals it into a JSON depending on the payload that's then consumable // by policy engine like cue, rego, etc. @@ -45,7 +56,7 @@ import ( // or the predicateType is not the one they are looking for. Without returning // this, it's hard for users to know which attestations/predicateTypes were // inspected. -func AttestationToPayloadJSON(_ context.Context, predicateType string, verifiedAttestation oci.Signature) ([]byte, string, error) { +func AttestationToPayloadJSON(_ context.Context, predicateType string, verifiedAttestation PayloadProvider) ([]byte, string, error) { if predicateType == "" { return nil, "", errors.New("missing predicate type") } diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/providers/interface.go b/vendor/github.com/sigstore/cosign/v2/pkg/providers/interface.go index 30d595aa5b..000b94908b 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/providers/interface.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/providers/interface.go @@ -24,12 +24,12 @@ import ( var ( m sync.Mutex - providers []providerEntry + providers []ProviderEntry ) -type providerEntry struct { - name string - p Interface +type ProviderEntry struct { + Name string + Provider Interface } // Interface is what providers need to implement to participate in furnishing OIDC tokens. @@ -47,11 +47,11 @@ func Register(name string, p Interface) { defer m.Unlock() for _, pe := range providers { - if pe.name == name { - panic(fmt.Sprintf("duplicate provider for name %q, %T and %T", name, pe.p, p)) + if pe.Name == name { + panic(fmt.Sprintf("duplicate provider for name %q, %T and %T", name, pe.Provider, p)) } } - providers = append(providers, providerEntry{name: name, p: p}) + providers = append(providers, ProviderEntry{Name: name, Provider: p}) } // Enabled checks whether any of the registered providers are enabled in this execution context. 
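Exporting ProviderEntry and adding the Providers() accessor (at the end of this file's diff, below) lets code outside the package enumerate the OIDC provider registry. A small sketch, under the assumption that provider implementations have already been registered through their usual blank imports:

package main

import (
	"context"
	"fmt"

	"github.com/sigstore/cosign/v2/pkg/providers"
	// Provider packages register themselves in init() via providers.Register;
	// their blank imports are omitted from this sketch.
)

func main() {
	ctx := context.Background()
	// Providers() snapshots the registry under the package mutex.
	for _, pe := range providers.Providers() {
		fmt.Printf("provider %q enabled: %v\n", pe.Name, pe.Provider.Enabled(ctx))
	}
}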
@@ -59,8 +59,8 @@ func Enabled(ctx context.Context) bool { m.Lock() defer m.Unlock() - for _, provider := range providers { - if provider.p.Enabled(ctx) { + for _, pe := range providers { + if pe.Provider.Enabled(ctx) { return true } } @@ -74,13 +74,14 @@ func Provide(ctx context.Context, audience string) (string, error) { var id string var err error - for _, provider := range providers { - if !provider.p.Enabled(ctx) { + for _, pe := range providers { + p := pe.Provider + if !p.Enabled(ctx) { continue } - id, err = provider.p.Provide(ctx, audience) + id, err = p.Provide(ctx, audience) if err == nil { - return id, err + return id, nil } } // return the last id/err combo, unless there wasn't an error in @@ -97,9 +98,16 @@ func ProvideFrom(_ context.Context, provider string) (Interface, error) { defer m.Unlock() for _, p := range providers { - if p.name == provider { - return p.p, nil + if p.Name == provider { + return p.Provider, nil } } return nil, fmt.Errorf("%s is not a valid provider", provider) } + +func Providers() []ProviderEntry { + m.Lock() + defer m.Unlock() + + return providers +} diff --git a/vendor/github.com/sigstore/protobuf-specs/COPYRIGHT.txt b/vendor/github.com/sigstore/protobuf-specs/COPYRIGHT.txt new file mode 100644 index 0000000000..8b16ae0122 --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/COPYRIGHT.txt @@ -0,0 +1,14 @@ + +Copyright 2022 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/sigstore/protobuf-specs/LICENSE b/vendor/github.com/sigstore/protobuf-specs/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go new file mode 100644 index 0000000000..b3f44d1f9f --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go @@ -0,0 +1,580 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.6 +// source: sigstore_bundle.proto + +package v1 + +import ( + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + dsse "github.com/sigstore/protobuf-specs/gen/pb-go/dsse" + v11 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Various timestamped countersignatures over the artifact's signature. +// Currently only RFC3161 signatures are provided. More formats may be added +// in the future. +type TimestampVerificationData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of RFC3161 signed timestamps provided by the user. + // This can be used when the entry has not been stored on a + // transparency log, or in conjunction with one for a stronger trust model. + // Clients MUST verify the hashed message in the message imprint + // against the signature in the bundle. + Rfc3161Timestamps []*v1.RFC3161SignedTimestamp `protobuf:"bytes,1,rep,name=rfc3161_timestamps,json=rfc3161Timestamps,proto3" json:"rfc3161_timestamps,omitempty"` +} + +func (x *TimestampVerificationData) Reset() { + *x = TimestampVerificationData{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_bundle_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimestampVerificationData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimestampVerificationData) ProtoMessage() {} + +func (x *TimestampVerificationData) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_bundle_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimestampVerificationData.ProtoReflect.Descriptor instead. +func (*TimestampVerificationData) Descriptor() ([]byte, []int) { + return file_sigstore_bundle_proto_rawDescGZIP(), []int{0} +} + +func (x *TimestampVerificationData) GetRfc3161Timestamps() []*v1.RFC3161SignedTimestamp { + if x != nil { + return x.Rfc3161Timestamps + } + return nil +} + +// VerificationMaterial captures details on the materials used to verify +// signatures. This message may be embedded in a DSSE envelope as a signature +// extension. Specifically, the `ext` field of the extension will expect this +// message when the signature extension is for Sigstore. This is identified by +// the `kind` field in the extension, which must be set to +// application/vnd.dev.sigstore.verificationmaterial;version=0.1 for Sigstore. +// When used as a DSSE extension, if the `public_key` field is used to indicate +// the key identifier, it MUST match the `keyid` field of the signature the +// extension is attached to. +type VerificationMaterial struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key material for verification purposes.
+ // + // This allows key material to be conveyed in one of three forms: + // + // 1. An unspecified public key identifier, for retrieving a key + // from an out-of-band mechanism (such as a keyring); + // + // 2. A sequence of one or more X.509 certificates, of which the first member + // MUST be a leaf certificate conveying the signing key. Subsequent members + // SHOULD be in issuing order, meaning that `n + 1` should be an issuer for `n`. + // + // Signers MUST NOT include root CA certificates in bundles, and SHOULD NOT + // include intermediate CA certificates that appear in an independent root of trust + // (such as the Public Good Instance's trusted root). + // + // Verifiers MUST validate the chain carefully to ensure that it chains up + // to a CA certificate that they independently trust. Verifiers SHOULD + // handle old or non-complying bundles that have superfluous intermediate and/or + // root CA certificates by either ignoring them or explicitly considering them + // untrusted for the purposes of chain building. + // + // 3. A single X.509 certificate, which MUST be a leaf certificate conveying + // the signing key. + // + // When used with the Public Good Instance (PGI) of Sigstore for "keyless" signing + // via Fulcio, form (1) MUST NOT be used, regardless of bundle version. Form (1) + // MAY be used with the PGI for self-managed keys. + // + // When used in a `0.1` or `0.2` bundle with the PGI and "keyless" signing, + // form (2) MUST be used. + // + // When used in a `0.3` bundle with the PGI and "keyless" signing, + // form (3) MUST be used. + // + // Types that are assignable to Content: + // + // *VerificationMaterial_PublicKey + // *VerificationMaterial_X509CertificateChain + // *VerificationMaterial_Certificate + Content isVerificationMaterial_Content `protobuf_oneof:"content"` + // An inclusion proof and an optional signed timestamp from the log. + // Client verification libraries MAY provide an option to support v0.1 + // bundles for backwards compatibility, which may contain an inclusion + // promise and not an inclusion proof. In this case, the client MUST + // validate the promise. + // Verifiers SHOULD NOT allow v0.1 bundles if they're used in an + // ecosystem which never produced them. + TlogEntries []*v11.TransparencyLogEntry `protobuf:"bytes,3,rep,name=tlog_entries,json=tlogEntries,proto3" json:"tlog_entries,omitempty"` + // Timestamp may also come from + // tlog_entries.inclusion_promise.signed_entry_timestamp. + TimestampVerificationData *TimestampVerificationData `protobuf:"bytes,4,opt,name=timestamp_verification_data,json=timestampVerificationData,proto3" json:"timestamp_verification_data,omitempty"` +} + +func (x *VerificationMaterial) Reset() { + *x = VerificationMaterial{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_bundle_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VerificationMaterial) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerificationMaterial) ProtoMessage() {} + +func (x *VerificationMaterial) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_bundle_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerificationMaterial.ProtoReflect.Descriptor instead. 
+func (*VerificationMaterial) Descriptor() ([]byte, []int) { + return file_sigstore_bundle_proto_rawDescGZIP(), []int{1} +} + +func (m *VerificationMaterial) GetContent() isVerificationMaterial_Content { + if m != nil { + return m.Content + } + return nil +} + +func (x *VerificationMaterial) GetPublicKey() *v1.PublicKeyIdentifier { + if x, ok := x.GetContent().(*VerificationMaterial_PublicKey); ok { + return x.PublicKey + } + return nil +} + +func (x *VerificationMaterial) GetX509CertificateChain() *v1.X509CertificateChain { + if x, ok := x.GetContent().(*VerificationMaterial_X509CertificateChain); ok { + return x.X509CertificateChain + } + return nil +} + +func (x *VerificationMaterial) GetCertificate() *v1.X509Certificate { + if x, ok := x.GetContent().(*VerificationMaterial_Certificate); ok { + return x.Certificate + } + return nil +} + +func (x *VerificationMaterial) GetTlogEntries() []*v11.TransparencyLogEntry { + if x != nil { + return x.TlogEntries + } + return nil +} + +func (x *VerificationMaterial) GetTimestampVerificationData() *TimestampVerificationData { + if x != nil { + return x.TimestampVerificationData + } + return nil +} + +type isVerificationMaterial_Content interface { + isVerificationMaterial_Content() +} + +type VerificationMaterial_PublicKey struct { + PublicKey *v1.PublicKeyIdentifier `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3,oneof"` +} + +type VerificationMaterial_X509CertificateChain struct { + X509CertificateChain *v1.X509CertificateChain `protobuf:"bytes,2,opt,name=x509_certificate_chain,json=x509CertificateChain,proto3,oneof"` +} + +type VerificationMaterial_Certificate struct { + Certificate *v1.X509Certificate `protobuf:"bytes,5,opt,name=certificate,proto3,oneof"` +} + +func (*VerificationMaterial_PublicKey) isVerificationMaterial_Content() {} + +func (*VerificationMaterial_X509CertificateChain) isVerificationMaterial_Content() {} + +func (*VerificationMaterial_Certificate) isVerificationMaterial_Content() {} + +type Bundle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // MUST be application/vnd.dev.sigstore.bundle.v0.3+json when + // encoded as JSON. + // Clients MUST be able to accept media types using the previously + // defined formats: + // * application/vnd.dev.sigstore.bundle+json;version=0.1 + // * application/vnd.dev.sigstore.bundle+json;version=0.2 + // * application/vnd.dev.sigstore.bundle+json;version=0.3 + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // When a signer is identified by an X.509 certificate, a verifier MUST + // verify that the signature was computed at the time the certificate + // was valid as described in the Sigstore client spec: "Verification + // using a Bundle". + // + // If the verification material contains a public key identifier + // (key hint) and the `content` is a DSSE envelope, the key hints + // MUST be exactly the same in the verification material and in the + // DSSE envelope.
+ VerificationMaterial *VerificationMaterial `protobuf:"bytes,2,opt,name=verification_material,json=verificationMaterial,proto3" json:"verification_material,omitempty"` + // Types that are assignable to Content: + // + // *Bundle_MessageSignature + // *Bundle_DsseEnvelope + Content isBundle_Content `protobuf_oneof:"content"` +} + +func (x *Bundle) Reset() { + *x = Bundle{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_bundle_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bundle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bundle) ProtoMessage() {} + +func (x *Bundle) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_bundle_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bundle.ProtoReflect.Descriptor instead. +func (*Bundle) Descriptor() ([]byte, []int) { + return file_sigstore_bundle_proto_rawDescGZIP(), []int{2} +} + +func (x *Bundle) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *Bundle) GetVerificationMaterial() *VerificationMaterial { + if x != nil { + return x.VerificationMaterial + } + return nil +} + +func (m *Bundle) GetContent() isBundle_Content { + if m != nil { + return m.Content + } + return nil +} + +func (x *Bundle) GetMessageSignature() *v1.MessageSignature { + if x, ok := x.GetContent().(*Bundle_MessageSignature); ok { + return x.MessageSignature + } + return nil +} + +func (x *Bundle) GetDsseEnvelope() *dsse.Envelope { + if x, ok := x.GetContent().(*Bundle_DsseEnvelope); ok { + return x.DsseEnvelope + } + return nil +} + +type isBundle_Content interface { + isBundle_Content() +} + +type Bundle_MessageSignature struct { + MessageSignature *v1.MessageSignature `protobuf:"bytes,3,opt,name=message_signature,json=messageSignature,proto3,oneof"` +} + +type Bundle_DsseEnvelope struct { + // A DSSE envelope can contain arbitrary payloads. + // Verifiers must verify that the payload type is a + // supported and expected type. This is part of the DSSE + // protocol which is defined here: + // + // DSSE envelopes in a bundle MUST have exactly one signature. + // This is a limitation from the DSSE spec, as it can contain + // multiple signatures. There are two primary reasons: + // 1. It simplifies the verification logic and policy + // 2. The bundle (currently) can only contain a single + // instance of the required verification materials + // + // During verification a client MUST reject an envelope if + // the number of signatures is not equal to one.
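+ // A sketch of that check, using this package's generated Envelope
+ // type (an illustration only, not cosign's actual verification path):
+ //
+ //	if env := bundle.GetDsseEnvelope(); env != nil && len(env.Signatures) != 1 {
+ //		return fmt.Errorf("expected exactly 1 signature, got %d", len(env.Signatures))
+ //	}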
+ DsseEnvelope *dsse.Envelope `protobuf:"bytes,4,opt,name=dsse_envelope,json=dsseEnvelope,proto3,oneof"` +} + +func (*Bundle_MessageSignature) isBundle_Content() {} + +func (*Bundle_DsseEnvelope) isBundle_Content() {} + +var File_sigstore_bundle_proto protoreflect.FileDescriptor + +var file_sigstore_bundle_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x0e, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7a, 0x0a, + 0x19, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x5d, 0x0a, 0x12, 0x72, 0x66, + 0x63, 0x33, 0x31, 0x36, 0x31, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x46, 0x43, 0x33, 0x31, 0x36, 0x31, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x11, 0x72, 0x66, 0x63, 0x33, 0x31, 0x36, 0x31, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x14, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x12, 0x51, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x69, 0x0a, 0x16, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58, + 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x14, 0x78, 0x35, 0x30, 0x39, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x12, 0x50, 0x0a, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 
0x2e, 0x58, + 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x74, 0x6c, 0x6f, 0x67, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x74, 0x6c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x12, 0x71, 0x0a, 0x1b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x52, 0x19, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x09, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x22, 0xbf, 0x02, 0x0a, 0x06, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x14, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x12, 0x5c, 0x0a, 0x11, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x10, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x12, 0x3f, 0x0a, 0x0d, 0x64, 0x73, 0x73, 0x65, 0x5f, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, + 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x48, 0x00, 0x52, 0x0c, 0x64, 0x73, 0x73, 0x65, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, + 0x65, 0x42, 0x09, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4a, 0x04, 0x08, 0x05, + 0x10, 0x33, 0x42, 0x7c, 0x0a, 0x1c, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, + 0x76, 0x31, 0x42, 0x0b, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, + 0x73, 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, + 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x14, 0x53, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sigstore_bundle_proto_rawDescOnce sync.Once + file_sigstore_bundle_proto_rawDescData = file_sigstore_bundle_proto_rawDesc +) + +func file_sigstore_bundle_proto_rawDescGZIP() []byte { + file_sigstore_bundle_proto_rawDescOnce.Do(func() { + file_sigstore_bundle_proto_rawDescData = protoimpl.X.CompressGZIP(file_sigstore_bundle_proto_rawDescData) + }) + return file_sigstore_bundle_proto_rawDescData +} + +var file_sigstore_bundle_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_sigstore_bundle_proto_goTypes = []interface{}{ + (*TimestampVerificationData)(nil), // 0: dev.sigstore.bundle.v1.TimestampVerificationData + (*VerificationMaterial)(nil), // 1: dev.sigstore.bundle.v1.VerificationMaterial + (*Bundle)(nil), // 2: dev.sigstore.bundle.v1.Bundle + (*v1.RFC3161SignedTimestamp)(nil), // 3: dev.sigstore.common.v1.RFC3161SignedTimestamp + (*v1.PublicKeyIdentifier)(nil), // 4: dev.sigstore.common.v1.PublicKeyIdentifier + (*v1.X509CertificateChain)(nil), // 5: dev.sigstore.common.v1.X509CertificateChain + (*v1.X509Certificate)(nil), // 6: dev.sigstore.common.v1.X509Certificate + (*v11.TransparencyLogEntry)(nil), // 7: dev.sigstore.rekor.v1.TransparencyLogEntry + (*v1.MessageSignature)(nil), // 8: dev.sigstore.common.v1.MessageSignature + (*dsse.Envelope)(nil), // 9: io.intoto.Envelope +} +var file_sigstore_bundle_proto_depIdxs = []int32{ + 3, // 0: dev.sigstore.bundle.v1.TimestampVerificationData.rfc3161_timestamps:type_name -> dev.sigstore.common.v1.RFC3161SignedTimestamp + 4, // 1: dev.sigstore.bundle.v1.VerificationMaterial.public_key:type_name -> dev.sigstore.common.v1.PublicKeyIdentifier + 5, // 2: dev.sigstore.bundle.v1.VerificationMaterial.x509_certificate_chain:type_name -> dev.sigstore.common.v1.X509CertificateChain + 6, // 3: dev.sigstore.bundle.v1.VerificationMaterial.certificate:type_name -> dev.sigstore.common.v1.X509Certificate + 7, // 4: dev.sigstore.bundle.v1.VerificationMaterial.tlog_entries:type_name -> dev.sigstore.rekor.v1.TransparencyLogEntry + 0, // 5: dev.sigstore.bundle.v1.VerificationMaterial.timestamp_verification_data:type_name -> dev.sigstore.bundle.v1.TimestampVerificationData + 1, // 6: dev.sigstore.bundle.v1.Bundle.verification_material:type_name -> dev.sigstore.bundle.v1.VerificationMaterial + 8, // 7: dev.sigstore.bundle.v1.Bundle.message_signature:type_name -> dev.sigstore.common.v1.MessageSignature + 9, // 8: dev.sigstore.bundle.v1.Bundle.dsse_envelope:type_name -> io.intoto.Envelope + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_sigstore_bundle_proto_init() } +func 
file_sigstore_bundle_proto_init() { + if File_sigstore_bundle_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sigstore_bundle_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TimestampVerificationData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_bundle_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VerificationMaterial); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_bundle_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bundle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_sigstore_bundle_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*VerificationMaterial_PublicKey)(nil), + (*VerificationMaterial_X509CertificateChain)(nil), + (*VerificationMaterial_Certificate)(nil), + } + file_sigstore_bundle_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Bundle_MessageSignature)(nil), + (*Bundle_DsseEnvelope)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sigstore_bundle_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sigstore_bundle_proto_goTypes, + DependencyIndexes: file_sigstore_bundle_proto_depIdxs, + MessageInfos: file_sigstore_bundle_proto_msgTypes, + }.Build() + File_sigstore_bundle_proto = out.File + file_sigstore_bundle_proto_rawDesc = nil + file_sigstore_bundle_proto_goTypes = nil + file_sigstore_bundle_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go new file mode 100644 index 0000000000..0982c674c7 --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go @@ -0,0 +1,1451 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.6 +// source: sigstore_common.proto + +package v1 + +import ( + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Only a subset of the secure hash standard algorithms are supported. +// See for more +// details. +// UNSPECIFIED SHOULD not be used; the primary reason for its inclusion is to +// force any proto JSON serialization to emit the used hash algorithm, as the +// default option is to *omit* the default value of an enum (which is the +// first value, represented by '0'). +type HashAlgorithm int32 + +const ( + HashAlgorithm_HASH_ALGORITHM_UNSPECIFIED HashAlgorithm = 0 + HashAlgorithm_SHA2_256 HashAlgorithm = 1 + HashAlgorithm_SHA2_384 HashAlgorithm = 2 + HashAlgorithm_SHA2_512 HashAlgorithm = 3 + HashAlgorithm_SHA3_256 HashAlgorithm = 4 + HashAlgorithm_SHA3_384 HashAlgorithm = 5 +) + +// Enum value maps for HashAlgorithm. +var ( + HashAlgorithm_name = map[int32]string{ + 0: "HASH_ALGORITHM_UNSPECIFIED", + 1: "SHA2_256", + 2: "SHA2_384", + 3: "SHA2_512", + 4: "SHA3_256", + 5: "SHA3_384", + } + HashAlgorithm_value = map[string]int32{ + "HASH_ALGORITHM_UNSPECIFIED": 0, + "SHA2_256": 1, + "SHA2_384": 2, + "SHA2_512": 3, + "SHA3_256": 4, + "SHA3_384": 5, + } +) + +func (x HashAlgorithm) Enum() *HashAlgorithm { + p := new(HashAlgorithm) + *p = x + return p +} + +func (x HashAlgorithm) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HashAlgorithm) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_common_proto_enumTypes[0].Descriptor() +} + +func (HashAlgorithm) Type() protoreflect.EnumType { + return &file_sigstore_common_proto_enumTypes[0] +} + +func (x HashAlgorithm) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HashAlgorithm.Descriptor instead. +func (HashAlgorithm) EnumDescriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{0} +} + +// Details of a specific public key, capturing the key encoding method +// and signature algorithm. +// +// PublicKeyDetails captures the public key/hash algorithm combinations +// recommended in the Sigstore ecosystem. +// +// This is modelled as a linear set as we want to provide a small number of +// opinionated options instead of allowing every possible permutation. +// +// Any changes to this enum MUST be reflected in the algorithm registry. +// See: docs/algorithm-registry.md +// +// To avoid the possibility of contradicting formats such as PKCS1 with +// ED25519 the valid permutations are listed as a linear set instead of a +// cartesian set (i.e. one combined variable instead of two, one for encoding +// and one for the signature algorithm). +type PublicKeyDetails int32 + +const ( + PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED PublicKeyDetails = 0 + // RSA + // + // Deprecated: Do not use. + PublicKeyDetails_PKCS1_RSA_PKCS1V5 PublicKeyDetails = 1 // See RFC8017 + // Deprecated: Do not use. + PublicKeyDetails_PKCS1_RSA_PSS PublicKeyDetails = 2 // See RFC8017 + // Deprecated: Do not use. + PublicKeyDetails_PKIX_RSA_PKCS1V5 PublicKeyDetails = 3 + // Deprecated: Do not use.
+ PublicKeyDetails_PKIX_RSA_PSS PublicKeyDetails = 4 + // RSA public key in PKIX format, PKCS#1v1.5 signature + PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256 PublicKeyDetails = 9 + PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256 PublicKeyDetails = 10 + PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256 PublicKeyDetails = 11 + // RSA public key in PKIX format, RSASSA-PSS signature + PublicKeyDetails_PKIX_RSA_PSS_2048_SHA256 PublicKeyDetails = 16 // See RFC4055 + PublicKeyDetails_PKIX_RSA_PSS_3072_SHA256 PublicKeyDetails = 17 + PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256 PublicKeyDetails = 18 + // ECDSA + // + // Deprecated: Do not use. + PublicKeyDetails_PKIX_ECDSA_P256_HMAC_SHA_256 PublicKeyDetails = 6 // See RFC6979 + PublicKeyDetails_PKIX_ECDSA_P256_SHA_256 PublicKeyDetails = 5 // See NIST FIPS 186-4 + PublicKeyDetails_PKIX_ECDSA_P384_SHA_384 PublicKeyDetails = 12 + PublicKeyDetails_PKIX_ECDSA_P521_SHA_512 PublicKeyDetails = 13 + // Ed 25519 + PublicKeyDetails_PKIX_ED25519 PublicKeyDetails = 7 // See RFC8032 + PublicKeyDetails_PKIX_ED25519_PH PublicKeyDetails = 8 + // LMS and LM-OTS + // + // These keys and signatures may be used by private Sigstore + // deployments, but are not currently supported by the public + // good instance. + // + // USER WARNING: LMS and LM-OTS are both stateful signature schemes. + // Using them correctly requires discretion and careful consideration + // to ensure that individual secret keys are not used more than once. + // In addition, LM-OTS is a single-use scheme, meaning that it + // MUST NOT be used for more than one signature per LM-OTS key. + // If you cannot maintain these invariants, you MUST NOT use these + // schemes. + PublicKeyDetails_LMS_SHA256 PublicKeyDetails = 14 + PublicKeyDetails_LMOTS_SHA256 PublicKeyDetails = 15 +) + +// Enum value maps for PublicKeyDetails. 
+var ( + PublicKeyDetails_name = map[int32]string{ + 0: "PUBLIC_KEY_DETAILS_UNSPECIFIED", + 1: "PKCS1_RSA_PKCS1V5", + 2: "PKCS1_RSA_PSS", + 3: "PKIX_RSA_PKCS1V5", + 4: "PKIX_RSA_PSS", + 9: "PKIX_RSA_PKCS1V15_2048_SHA256", + 10: "PKIX_RSA_PKCS1V15_3072_SHA256", + 11: "PKIX_RSA_PKCS1V15_4096_SHA256", + 16: "PKIX_RSA_PSS_2048_SHA256", + 17: "PKIX_RSA_PSS_3072_SHA256", + 18: "PKIX_RSA_PSS_4096_SHA256", + 6: "PKIX_ECDSA_P256_HMAC_SHA_256", + 5: "PKIX_ECDSA_P256_SHA_256", + 12: "PKIX_ECDSA_P384_SHA_384", + 13: "PKIX_ECDSA_P521_SHA_512", + 7: "PKIX_ED25519", + 8: "PKIX_ED25519_PH", + 14: "LMS_SHA256", + 15: "LMOTS_SHA256", + } + PublicKeyDetails_value = map[string]int32{ + "PUBLIC_KEY_DETAILS_UNSPECIFIED": 0, + "PKCS1_RSA_PKCS1V5": 1, + "PKCS1_RSA_PSS": 2, + "PKIX_RSA_PKCS1V5": 3, + "PKIX_RSA_PSS": 4, + "PKIX_RSA_PKCS1V15_2048_SHA256": 9, + "PKIX_RSA_PKCS1V15_3072_SHA256": 10, + "PKIX_RSA_PKCS1V15_4096_SHA256": 11, + "PKIX_RSA_PSS_2048_SHA256": 16, + "PKIX_RSA_PSS_3072_SHA256": 17, + "PKIX_RSA_PSS_4096_SHA256": 18, + "PKIX_ECDSA_P256_HMAC_SHA_256": 6, + "PKIX_ECDSA_P256_SHA_256": 5, + "PKIX_ECDSA_P384_SHA_384": 12, + "PKIX_ECDSA_P521_SHA_512": 13, + "PKIX_ED25519": 7, + "PKIX_ED25519_PH": 8, + "LMS_SHA256": 14, + "LMOTS_SHA256": 15, + } +) + +func (x PublicKeyDetails) Enum() *PublicKeyDetails { + p := new(PublicKeyDetails) + *p = x + return p +} + +func (x PublicKeyDetails) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PublicKeyDetails) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_common_proto_enumTypes[1].Descriptor() +} + +func (PublicKeyDetails) Type() protoreflect.EnumType { + return &file_sigstore_common_proto_enumTypes[1] +} + +func (x PublicKeyDetails) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PublicKeyDetails.Descriptor instead. +func (PublicKeyDetails) EnumDescriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{1} +} + +type SubjectAlternativeNameType int32 + +const ( + SubjectAlternativeNameType_SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED SubjectAlternativeNameType = 0 + SubjectAlternativeNameType_EMAIL SubjectAlternativeNameType = 1 + SubjectAlternativeNameType_URI SubjectAlternativeNameType = 2 + // OID 1.3.6.1.4.1.57264.1.7 + // See https://github.com/sigstore/fulcio/blob/main/docs/oid-info.md#1361415726417--othername-san + // for more details. + SubjectAlternativeNameType_OTHER_NAME SubjectAlternativeNameType = 3 +) + +// Enum value maps for SubjectAlternativeNameType. 
+var ( + SubjectAlternativeNameType_name = map[int32]string{ + 0: "SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED", + 1: "EMAIL", + 2: "URI", + 3: "OTHER_NAME", + } + SubjectAlternativeNameType_value = map[string]int32{ + "SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED": 0, + "EMAIL": 1, + "URI": 2, + "OTHER_NAME": 3, + } +) + +func (x SubjectAlternativeNameType) Enum() *SubjectAlternativeNameType { + p := new(SubjectAlternativeNameType) + *p = x + return p +} + +func (x SubjectAlternativeNameType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SubjectAlternativeNameType) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_common_proto_enumTypes[2].Descriptor() +} + +func (SubjectAlternativeNameType) Type() protoreflect.EnumType { + return &file_sigstore_common_proto_enumTypes[2] +} + +func (x SubjectAlternativeNameType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SubjectAlternativeNameType.Descriptor instead. +func (SubjectAlternativeNameType) EnumDescriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{2} +} + +// HashOutput captures a digest of a 'message' (generic octet sequence) +// and the corresponding hash algorithm used. +type HashOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Algorithm HashAlgorithm `protobuf:"varint,1,opt,name=algorithm,proto3,enum=dev.sigstore.common.v1.HashAlgorithm" json:"algorithm,omitempty"` + // This is the raw octets of the message digest as computed by + // the hash algorithm. + Digest []byte `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` +} + +func (x *HashOutput) Reset() { + *x = HashOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HashOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HashOutput) ProtoMessage() {} + +func (x *HashOutput) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HashOutput.ProtoReflect.Descriptor instead. +func (*HashOutput) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{0} +} + +func (x *HashOutput) GetAlgorithm() HashAlgorithm { + if x != nil { + return x.Algorithm + } + return HashAlgorithm_HASH_ALGORITHM_UNSPECIFIED +} + +func (x *HashOutput) GetDigest() []byte { + if x != nil { + return x.Digest + } + return nil +} + +// MessageSignature stores the computed signature over a message. +type MessageSignature struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Message digest can be used to identify the artifact. + // Clients MUST NOT attempt to use this digest to verify the associated + // signature; it is intended solely for identification. + MessageDigest *HashOutput `protobuf:"bytes,1,opt,name=message_digest,json=messageDigest,proto3" json:"message_digest,omitempty"` + // The raw bytes as returned from the signature algorithm. 
+ // The signature algorithm (and so the format of the signature bytes) + // is determined by the contents of the 'verification_material', + // either a key-pair or a certificate. If using a certificate, the + // certificate contains the required information on the signature + // algorithm. + // When using a key pair, the algorithm MUST be part of the public + // key, which MUST be communicated out-of-band. + Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (x *MessageSignature) Reset() { + *x = MessageSignature{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MessageSignature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageSignature) ProtoMessage() {} + +func (x *MessageSignature) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageSignature.ProtoReflect.Descriptor instead. +func (*MessageSignature) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{1} +} + +func (x *MessageSignature) GetMessageDigest() *HashOutput { + if x != nil { + return x.MessageDigest + } + return nil +} + +func (x *MessageSignature) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +// LogId captures the identity of a transparency log. +type LogId struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The unique identity of the log, represented by its public key. + KeyId []byte `protobuf:"bytes,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` +} + +func (x *LogId) Reset() { + *x = LogId{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogId) ProtoMessage() {} + +func (x *LogId) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogId.ProtoReflect.Descriptor instead. +func (*LogId) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{2} +} + +func (x *LogId) GetKeyId() []byte { + if x != nil { + return x.KeyId + } + return nil +} + +// This message holds an RFC 3161 timestamp. +type RFC3161SignedTimestamp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Signed timestamp is the DER encoded TimeStampResponse.
+ // See https://www.rfc-editor.org/rfc/rfc3161.html#section-2.4.2 + SignedTimestamp []byte `protobuf:"bytes,1,opt,name=signed_timestamp,json=signedTimestamp,proto3" json:"signed_timestamp,omitempty"` +} + +func (x *RFC3161SignedTimestamp) Reset() { + *x = RFC3161SignedTimestamp{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RFC3161SignedTimestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RFC3161SignedTimestamp) ProtoMessage() {} + +func (x *RFC3161SignedTimestamp) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RFC3161SignedTimestamp.ProtoReflect.Descriptor instead. +func (*RFC3161SignedTimestamp) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{3} +} + +func (x *RFC3161SignedTimestamp) GetSignedTimestamp() []byte { + if x != nil { + return x.SignedTimestamp + } + return nil +} + +type PublicKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // DER-encoded public key, encoding method is specified by the + // key_details attribute. + RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3,oneof" json:"raw_bytes,omitempty"` + // Key encoding and signature algorithm to use for this key. + KeyDetails PublicKeyDetails `protobuf:"varint,2,opt,name=key_details,json=keyDetails,proto3,enum=dev.sigstore.common.v1.PublicKeyDetails" json:"key_details,omitempty"` + // Optional validity period for this key, *inclusive* of the endpoints. + ValidFor *TimeRange `protobuf:"bytes,3,opt,name=valid_for,json=validFor,proto3,oneof" json:"valid_for,omitempty"` +} + +func (x *PublicKey) Reset() { + *x = PublicKey{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublicKey) ProtoMessage() {} + +func (x *PublicKey) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead. +func (*PublicKey) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{4} +} + +func (x *PublicKey) GetRawBytes() []byte { + if x != nil { + return x.RawBytes + } + return nil +} + +func (x *PublicKey) GetKeyDetails() PublicKeyDetails { + if x != nil { + return x.KeyDetails + } + return PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED +} + +func (x *PublicKey) GetValidFor() *TimeRange { + if x != nil { + return x.ValidFor + } + return nil +} + +// PublicKeyIdentifier can be used to identify an (out of band) delivered +// key, to verify a signature. +type PublicKeyIdentifier struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional unauthenticated hint on which key to use. 
+ // The format of the hint must be agreed upon out of band by the + // signer and the verifiers, and so is not subject to this + // specification. + // Example use-case is to specify the public key to use, from a + // trusted key-ring. + // Implementors are RECOMMENDED to derive the value from the public + // key as described in RFC 6962. + // See: + Hint string `protobuf:"bytes,1,opt,name=hint,proto3" json:"hint,omitempty"` +} + +func (x *PublicKeyIdentifier) Reset() { + *x = PublicKeyIdentifier{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublicKeyIdentifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublicKeyIdentifier) ProtoMessage() {} + +func (x *PublicKeyIdentifier) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublicKeyIdentifier.ProtoReflect.Descriptor instead. +func (*PublicKeyIdentifier) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{5} +} + +func (x *PublicKeyIdentifier) GetHint() string { + if x != nil { + return x.Hint + } + return "" +} + +// An ASN.1 OBJECT IDENTIFIER +type ObjectIdentifier struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id []int32 `protobuf:"varint,1,rep,packed,name=id,proto3" json:"id,omitempty"` +} + +func (x *ObjectIdentifier) Reset() { + *x = ObjectIdentifier{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_common_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectIdentifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectIdentifier) ProtoMessage() {} + +func (x *ObjectIdentifier) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectIdentifier.ProtoReflect.Descriptor instead. +func (*ObjectIdentifier) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{6} +} + +func (x *ObjectIdentifier) GetId() []int32 { + if x != nil { + return x.Id + } + return nil +} + +// An OID and the corresponding (byte) value. 
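+// For instance, the Fulcio OtherName SAN (OID 1.3.6.1.4.1.57264.1.7, noted
+// above) would be carried as Oid.Id = []int32{1, 3, 6, 1, 4, 1, 57264, 1, 9}[:9]
+// corrected to []int32{1, 3, 6, 1, 4, 1, 57264, 1, 7}, with its value bytes in
+// Value. (A hypothetical illustration, not generated from the proto.)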
+type ObjectIdentifierValuePair struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Oid *ObjectIdentifier `protobuf:"bytes,1,opt,name=oid,proto3" json:"oid,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *ObjectIdentifierValuePair) Reset() { + *x = ObjectIdentifierValuePair{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_common_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectIdentifierValuePair) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectIdentifierValuePair) ProtoMessage() {} + +func (x *ObjectIdentifierValuePair) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectIdentifierValuePair.ProtoReflect.Descriptor instead. +func (*ObjectIdentifierValuePair) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{7} +} + +func (x *ObjectIdentifierValuePair) GetOid() *ObjectIdentifier { + if x != nil { + return x.Oid + } + return nil +} + +func (x *ObjectIdentifierValuePair) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +type DistinguishedName struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Organization string `protobuf:"bytes,1,opt,name=organization,proto3" json:"organization,omitempty"` + CommonName string `protobuf:"bytes,2,opt,name=common_name,json=commonName,proto3" json:"common_name,omitempty"` +} + +func (x *DistinguishedName) Reset() { + *x = DistinguishedName{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_common_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DistinguishedName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistinguishedName) ProtoMessage() {} + +func (x *DistinguishedName) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistinguishedName.ProtoReflect.Descriptor instead. +func (*DistinguishedName) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{8} +} + +func (x *DistinguishedName) GetOrganization() string { + if x != nil { + return x.Organization + } + return "" +} + +func (x *DistinguishedName) GetCommonName() string { + if x != nil { + return x.CommonName + } + return "" +} + +type X509Certificate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // DER-encoded X.509 certificate. 
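+	// For illustration only (an assumption, not generated code): the raw
+	// bytes parse directly with Go's crypto/x509, e.g.
+	//
+	//	cert, err := x509.ParseCertificate(x.GetRawBytes())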
+ RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3" json:"raw_bytes,omitempty"` +} + +func (x *X509Certificate) Reset() { + *x = X509Certificate{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_common_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *X509Certificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509Certificate) ProtoMessage() {} + +func (x *X509Certificate) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509Certificate.ProtoReflect.Descriptor instead. +func (*X509Certificate) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{9} +} + +func (x *X509Certificate) GetRawBytes() []byte { + if x != nil { + return x.RawBytes + } + return nil +} + +type SubjectAlternativeName struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type SubjectAlternativeNameType `protobuf:"varint,1,opt,name=type,proto3,enum=dev.sigstore.common.v1.SubjectAlternativeNameType" json:"type,omitempty"` + // Types that are assignable to Identity: + // + // *SubjectAlternativeName_Regexp + // *SubjectAlternativeName_Value + Identity isSubjectAlternativeName_Identity `protobuf_oneof:"identity"` +} + +func (x *SubjectAlternativeName) Reset() { + *x = SubjectAlternativeName{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_common_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubjectAlternativeName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubjectAlternativeName) ProtoMessage() {} + +func (x *SubjectAlternativeName) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubjectAlternativeName.ProtoReflect.Descriptor instead. +func (*SubjectAlternativeName) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{10} +} + +func (x *SubjectAlternativeName) GetType() SubjectAlternativeNameType { + if x != nil { + return x.Type + } + return SubjectAlternativeNameType_SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED +} + +func (m *SubjectAlternativeName) GetIdentity() isSubjectAlternativeName_Identity { + if m != nil { + return m.Identity + } + return nil +} + +func (x *SubjectAlternativeName) GetRegexp() string { + if x, ok := x.GetIdentity().(*SubjectAlternativeName_Regexp); ok { + return x.Regexp + } + return "" +} + +func (x *SubjectAlternativeName) GetValue() string { + if x, ok := x.GetIdentity().(*SubjectAlternativeName_Value); ok { + return x.Value + } + return "" +} + +type isSubjectAlternativeName_Identity interface { + isSubjectAlternativeName_Identity() +} + +type SubjectAlternativeName_Regexp struct { + // A regular expression describing the expected value for + // the SAN. + Regexp string `protobuf:"bytes,2,opt,name=regexp,proto3,oneof"` +} + +type SubjectAlternativeName_Value struct { + // The exact value to match against. 
+	Value string `protobuf:"bytes,3,opt,name=value,proto3,oneof"`
+}
+
+func (*SubjectAlternativeName_Regexp) isSubjectAlternativeName_Identity() {}
+
+func (*SubjectAlternativeName_Value) isSubjectAlternativeName_Identity() {}
+
+// A collection of X.509 certificates.
+//
+// This "chain" can be used in multiple contexts, such as providing a root CA
+// certificate within a TUF root of trust or multiple untrusted certificates for
+// the purpose of chain building.
+type X509CertificateChain struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// One or more DER-encoded certificates.
+	//
+	// In some contexts (such as `VerificationMaterial.x509_certificate_chain`), this sequence
+	// has an imposed order. Unless explicitly specified, there is otherwise no
+	// guaranteed order.
+	Certificates []*X509Certificate `protobuf:"bytes,1,rep,name=certificates,proto3" json:"certificates,omitempty"`
+}
+
+func (x *X509CertificateChain) Reset() {
+	*x = X509CertificateChain{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_sigstore_common_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *X509CertificateChain) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*X509CertificateChain) ProtoMessage() {}
+
+func (x *X509CertificateChain) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_common_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use X509CertificateChain.ProtoReflect.Descriptor instead.
+func (*X509CertificateChain) Descriptor() ([]byte, []int) {
+	return file_sigstore_common_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *X509CertificateChain) GetCertificates() []*X509Certificate {
+	if x != nil {
+		return x.Certificates
+	}
+	return nil
+}
+
+// The time range is closed and includes both the start and end times
+// (i.e., [start, end]).
+// End is optional, so that a period that has started but has no known
+// end can be captured.
+type TimeRange struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Start *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"`
+	End   *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end,proto3,oneof" json:"end,omitempty"`
+}
+
+func (x *TimeRange) Reset() {
+	*x = TimeRange{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_sigstore_common_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *TimeRange) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeRange) ProtoMessage() {}
+
+func (x *TimeRange) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_common_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeRange.ProtoReflect.Descriptor instead.
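+
+// Illustrative sketch (not part of the generated file): checking whether a
+// time falls within a TimeRange, honoring the inclusive endpoints and the
+// optional open end. The helper name "inRange" is hypothetical.
+//
+//	func inRange(tr *TimeRange, t time.Time) bool {
+//		if tr.GetStart() != nil && t.Before(tr.GetStart().AsTime()) {
+//			return false
+//		}
+//		if tr.GetEnd() != nil && t.After(tr.GetEnd().AsTime()) {
+//			return false
+//		}
+//		return true
+//	}
+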
+func (*TimeRange) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{12} +} + +func (x *TimeRange) GetStart() *timestamppb.Timestamp { + if x != nil { + return x.Start + } + return nil +} + +func (x *TimeRange) GetEnd() *timestamppb.Timestamp { + if x != nil { + return x.End + } + return nil +} + +var File_sigstore_common_proto protoreflect.FileDescriptor + +var file_sigstore_common_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x69, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, + 0x43, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, + 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x80, 0x01, 0x0a, + 0x10, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x12, 0x49, 0x0a, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x69, 0x67, + 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x0d, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x09, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, + 0x23, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x06, 0x6b, 0x65, 0x79, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x6b, + 0x65, 0x79, 0x49, 0x64, 0x22, 0x48, 0x0a, 0x16, 0x52, 0x46, 0x43, 0x33, 0x31, 0x36, 0x31, 0x53, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, + 0x0a, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x73, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xd9, + 0x01, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x09, + 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 
0x88, 0x01, 0x01, 0x12, 0x49, + 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0a, 0x6b, + 0x65, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x43, 0x0a, 0x09, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, + 0x01, 0x52, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x42, 0x0c, 0x0a, 0x0a, + 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x22, 0x29, 0x0a, 0x13, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x68, 0x69, 0x6e, 0x74, 0x22, 0x27, 0x0a, 0x10, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x02, 0x69, 0x64, 0x22, 0x6d, + 0x0a, 0x19, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x12, 0x3a, 0x0a, 0x03, 0x6f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x52, 0x03, 0x6f, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x58, 0x0a, + 0x11, 0x44, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x75, 0x69, 0x73, 0x68, 0x65, 0x64, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x33, 0x0a, 0x0f, 0x58, 0x35, 0x30, 0x39, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x72, 0x61, + 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x9e, 0x01, 0x0a, + 0x16, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 
0x53, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x18, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x65, 0x78, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x06, 0x72, 0x65, 0x67, 0x65, 0x78, 0x70, 0x12, 0x16, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, 0x63, 0x0a, + 0x14, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, + 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x73, 0x22, 0x78, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x30, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x12, 0x31, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x03, 0x65, 0x6e, + 0x64, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x65, 0x6e, 0x64, 0x2a, 0x75, 0x0a, 0x0d, + 0x48, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1e, 0x0a, + 0x1a, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x41, 0x4c, 0x47, 0x4f, 0x52, 0x49, 0x54, 0x48, 0x4d, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, + 0x08, 0x53, 0x48, 0x41, 0x32, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x53, + 0x48, 0x41, 0x32, 0x5f, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, + 0x32, 0x5f, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x33, 0x5f, + 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x33, 0x5f, 0x33, 0x38, + 0x34, 0x10, 0x05, 0x2a, 0xa7, 0x04, 0x0a, 0x10, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, + 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x50, 0x55, 0x42, 0x4c, + 0x49, 0x43, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x53, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x11, + 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x56, + 0x35, 0x10, 0x01, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x15, 0x0a, 0x0d, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x10, 0x02, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x18, + 0x0a, 0x10, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x56, 0x35, 0x10, 0x03, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x14, 0x0a, 0x0c, 0x50, 0x4b, 0x49, 0x58, + 0x5f, 0x52, 0x53, 
0x41, 0x5f, 0x50, 0x53, 0x53, 0x10, 0x04, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x21, + 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x56, 0x31, 0x35, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x09, 0x12, 0x21, 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, + 0x43, 0x53, 0x31, 0x56, 0x31, 0x35, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x0a, 0x12, 0x21, 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x56, 0x31, 0x35, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0b, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x10, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, + 0x36, 0x10, 0x11, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, + 0x50, 0x53, 0x53, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x12, 0x12, 0x24, 0x0a, 0x1c, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, + 0x50, 0x32, 0x35, 0x36, 0x5f, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, + 0x36, 0x10, 0x06, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, + 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x50, 0x32, 0x35, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, + 0x35, 0x36, 0x10, 0x05, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, + 0x53, 0x41, 0x5f, 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x33, 0x38, 0x34, 0x10, + 0x0c, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, + 0x50, 0x35, 0x32, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x35, 0x31, 0x32, 0x10, 0x0d, 0x12, 0x10, + 0x0a, 0x0c, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x07, + 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, + 0x5f, 0x50, 0x48, 0x10, 0x08, 0x12, 0x0e, 0x0a, 0x0a, 0x4c, 0x4d, 0x53, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x0e, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x4d, 0x4f, 0x54, 0x53, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0f, 0x22, 0x04, 0x08, 0x13, 0x10, 0x32, 0x2a, 0x6f, 0x0a, + 0x1a, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x29, 0x53, + 0x55, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x41, 0x4c, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x54, 0x49, + 0x56, 0x45, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, + 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x52, 0x49, 0x10, 0x02, 0x12, 0x0e, + 0x0a, 0x0a, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x7c, + 0x0a, 0x1c, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x36, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 
0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, 0x65, 0x63, + 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x14, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x3a, 0x3a, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sigstore_common_proto_rawDescOnce sync.Once + file_sigstore_common_proto_rawDescData = file_sigstore_common_proto_rawDesc +) + +func file_sigstore_common_proto_rawDescGZIP() []byte { + file_sigstore_common_proto_rawDescOnce.Do(func() { + file_sigstore_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_sigstore_common_proto_rawDescData) + }) + return file_sigstore_common_proto_rawDescData +} + +var file_sigstore_common_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_sigstore_common_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_sigstore_common_proto_goTypes = []interface{}{ + (HashAlgorithm)(0), // 0: dev.sigstore.common.v1.HashAlgorithm + (PublicKeyDetails)(0), // 1: dev.sigstore.common.v1.PublicKeyDetails + (SubjectAlternativeNameType)(0), // 2: dev.sigstore.common.v1.SubjectAlternativeNameType + (*HashOutput)(nil), // 3: dev.sigstore.common.v1.HashOutput + (*MessageSignature)(nil), // 4: dev.sigstore.common.v1.MessageSignature + (*LogId)(nil), // 5: dev.sigstore.common.v1.LogId + (*RFC3161SignedTimestamp)(nil), // 6: dev.sigstore.common.v1.RFC3161SignedTimestamp + (*PublicKey)(nil), // 7: dev.sigstore.common.v1.PublicKey + (*PublicKeyIdentifier)(nil), // 8: dev.sigstore.common.v1.PublicKeyIdentifier + (*ObjectIdentifier)(nil), // 9: dev.sigstore.common.v1.ObjectIdentifier + (*ObjectIdentifierValuePair)(nil), // 10: dev.sigstore.common.v1.ObjectIdentifierValuePair + (*DistinguishedName)(nil), // 11: dev.sigstore.common.v1.DistinguishedName + (*X509Certificate)(nil), // 12: dev.sigstore.common.v1.X509Certificate + (*SubjectAlternativeName)(nil), // 13: dev.sigstore.common.v1.SubjectAlternativeName + (*X509CertificateChain)(nil), // 14: dev.sigstore.common.v1.X509CertificateChain + (*TimeRange)(nil), // 15: dev.sigstore.common.v1.TimeRange + (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp +} +var file_sigstore_common_proto_depIdxs = []int32{ + 0, // 0: dev.sigstore.common.v1.HashOutput.algorithm:type_name -> dev.sigstore.common.v1.HashAlgorithm + 3, // 1: dev.sigstore.common.v1.MessageSignature.message_digest:type_name -> dev.sigstore.common.v1.HashOutput + 1, // 2: dev.sigstore.common.v1.PublicKey.key_details:type_name -> dev.sigstore.common.v1.PublicKeyDetails + 15, // 3: dev.sigstore.common.v1.PublicKey.valid_for:type_name -> dev.sigstore.common.v1.TimeRange + 9, // 4: dev.sigstore.common.v1.ObjectIdentifierValuePair.oid:type_name -> dev.sigstore.common.v1.ObjectIdentifier + 2, // 5: dev.sigstore.common.v1.SubjectAlternativeName.type:type_name -> dev.sigstore.common.v1.SubjectAlternativeNameType + 12, // 6: dev.sigstore.common.v1.X509CertificateChain.certificates:type_name -> dev.sigstore.common.v1.X509Certificate + 16, // 7: dev.sigstore.common.v1.TimeRange.start:type_name -> google.protobuf.Timestamp + 16, // 8: dev.sigstore.common.v1.TimeRange.end:type_name -> google.protobuf.Timestamp + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is 
the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_sigstore_common_proto_init() } +func file_sigstore_common_proto_init() { + if File_sigstore_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sigstore_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HashOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageSignature); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogId); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RFC3161SignedTimestamp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublicKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublicKeyIdentifier); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_common_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObjectIdentifier); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_common_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObjectIdentifierValuePair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_common_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DistinguishedName); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_common_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*X509Certificate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_common_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubjectAlternativeName); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_common_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*X509CertificateChain); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_common_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TimeRange); i { + case 
0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_sigstore_common_proto_msgTypes[4].OneofWrappers = []interface{}{} + file_sigstore_common_proto_msgTypes[10].OneofWrappers = []interface{}{ + (*SubjectAlternativeName_Regexp)(nil), + (*SubjectAlternativeName_Value)(nil), + } + file_sigstore_common_proto_msgTypes[12].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sigstore_common_proto_rawDesc, + NumEnums: 3, + NumMessages: 13, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sigstore_common_proto_goTypes, + DependencyIndexes: file_sigstore_common_proto_depIdxs, + EnumInfos: file_sigstore_common_proto_enumTypes, + MessageInfos: file_sigstore_common_proto_msgTypes, + }.Build() + File_sigstore_common_proto = out.File + file_sigstore_common_proto_rawDesc = nil + file_sigstore_common_proto_goTypes = nil + file_sigstore_common_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go new file mode 100644 index 0000000000..01008e9980 --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go @@ -0,0 +1,273 @@ +// https://raw.githubusercontent.com/secure-systems-lab/dsse/9c813476bd36de70a5738c72e784f123ecea16af/envelope.proto + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.6 +// source: envelope.proto + +package dsse + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// An authenticated message of arbitrary type. +type Envelope struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Message to be signed. (In JSON, this is encoded as base64.) + // REQUIRED. + Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` + // String unambiguously identifying how to interpret payload. + // REQUIRED. 
+ PayloadType string `protobuf:"bytes,2,opt,name=payloadType,proto3" json:"payloadType,omitempty"` + // Signature over: + // + // PAE(type, payload) + // + // Where PAE is defined as: + // PAE(type, payload) = "DSSEv1" + SP + LEN(type) + SP + type + SP + LEN(payload) + SP + payload + // + = concatenation + // SP = ASCII space [0x20] + // "DSSEv1" = ASCII [0x44, 0x53, 0x53, 0x45, 0x76, 0x31] + // LEN(s) = ASCII decimal encoding of the byte length of s, with no leading zeros + // REQUIRED (length >= 1). + Signatures []*Signature `protobuf:"bytes,3,rep,name=signatures,proto3" json:"signatures,omitempty"` +} + +func (x *Envelope) Reset() { + *x = Envelope{} + if protoimpl.UnsafeEnabled { + mi := &file_envelope_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Envelope) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Envelope) ProtoMessage() {} + +func (x *Envelope) ProtoReflect() protoreflect.Message { + mi := &file_envelope_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Envelope.ProtoReflect.Descriptor instead. +func (*Envelope) Descriptor() ([]byte, []int) { + return file_envelope_proto_rawDescGZIP(), []int{0} +} + +func (x *Envelope) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *Envelope) GetPayloadType() string { + if x != nil { + return x.PayloadType + } + return "" +} + +func (x *Envelope) GetSignatures() []*Signature { + if x != nil { + return x.Signatures + } + return nil +} + +type Signature struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Signature itself. (In JSON, this is encoded as base64.) + // REQUIRED. + Sig []byte `protobuf:"bytes,1,opt,name=sig,proto3" json:"sig,omitempty"` + // *Unauthenticated* hint identifying which public key was used. + // OPTIONAL. + Keyid string `protobuf:"bytes,2,opt,name=keyid,proto3" json:"keyid,omitempty"` +} + +func (x *Signature) Reset() { + *x = Signature{} + if protoimpl.UnsafeEnabled { + mi := &file_envelope_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Signature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Signature) ProtoMessage() {} + +func (x *Signature) ProtoReflect() protoreflect.Message { + mi := &file_envelope_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Signature.ProtoReflect.Descriptor instead. 
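+
+// Illustrative sketch (not part of the generated file): computing the PAE
+// encoding defined above. The helper name "pae" is hypothetical; len() gives
+// the byte length, and %d prints it with no leading zeros.
+//
+//	func pae(payloadType string, payload []byte) []byte {
+//		return []byte(fmt.Sprintf("DSSEv1 %d %s %d %s",
+//			len(payloadType), payloadType, len(payload), payload))
+//	}
+//
+// Each Signature.sig is computed over pae(payloadType, payload), never over
+// the raw payload alone.
+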
+func (*Signature) Descriptor() ([]byte, []int) { + return file_envelope_proto_rawDescGZIP(), []int{1} +} + +func (x *Signature) GetSig() []byte { + if x != nil { + return x.Sig + } + return nil +} + +func (x *Signature) GetKeyid() string { + if x != nil { + return x.Keyid + } + return "" +} + +var File_envelope_proto protoreflect.FileDescriptor + +var file_envelope_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x09, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x74, 0x6f, 0x22, 0x7c, 0x0a, 0x08, 0x45, + 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, + 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x0a, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x33, 0x0a, 0x09, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x69, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x69, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x6b, 0x65, 0x79, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6b, 0x65, 0x79, 0x69, 0x64, 0x42, 0x44, + 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, + 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x64, + 0x73, 0x73, 0x65, 0xea, 0x02, 0x0e, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, + 0x44, 0x53, 0x53, 0x45, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envelope_proto_rawDescOnce sync.Once + file_envelope_proto_rawDescData = file_envelope_proto_rawDesc +) + +func file_envelope_proto_rawDescGZIP() []byte { + file_envelope_proto_rawDescOnce.Do(func() { + file_envelope_proto_rawDescData = protoimpl.X.CompressGZIP(file_envelope_proto_rawDescData) + }) + return file_envelope_proto_rawDescData +} + +var file_envelope_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envelope_proto_goTypes = []interface{}{ + (*Envelope)(nil), // 0: io.intoto.Envelope + (*Signature)(nil), // 1: io.intoto.Signature +} +var file_envelope_proto_depIdxs = []int32{ + 1, // 0: io.intoto.Envelope.signatures:type_name -> io.intoto.Signature + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envelope_proto_init() } +func file_envelope_proto_init() { + if File_envelope_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envelope_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Envelope); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envelope_proto_msgTypes[1].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*Signature); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envelope_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envelope_proto_goTypes, + DependencyIndexes: file_envelope_proto_depIdxs, + MessageInfos: file_envelope_proto_msgTypes, + }.Build() + File_envelope_proto = out.File + file_envelope_proto_rawDesc = nil + file_envelope_proto_goTypes = nil + file_envelope_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go new file mode 100644 index 0000000000..0ab50bf742 --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go @@ -0,0 +1,632 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.6 +// source: sigstore_rekor.proto + +package v1 + +import ( + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// KindVersion contains the entry's kind and api version. +type KindVersion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Kind is the type of entry being stored in the log. + // See here for a list: https://github.com/sigstore/rekor/tree/main/pkg/types + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // The specific api version of the type. 
+	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (x *KindVersion) Reset() {
+	*x = KindVersion{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_sigstore_rekor_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *KindVersion) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KindVersion) ProtoMessage() {}
+
+func (x *KindVersion) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_rekor_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use KindVersion.ProtoReflect.Descriptor instead.
+func (*KindVersion) Descriptor() ([]byte, []int) {
+	return file_sigstore_rekor_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *KindVersion) GetKind() string {
+	if x != nil {
+		return x.Kind
+	}
+	return ""
+}
+
+func (x *KindVersion) GetVersion() string {
+	if x != nil {
+		return x.Version
+	}
+	return ""
+}
+
+// The checkpoint MUST contain an origin string as a unique log identifier,
+// the tree size, and the root hash. It MAY also be followed by optional data,
+// and clients MUST NOT assume that the optional data is present. The checkpoint
+// MUST also contain a signature over the root hash (tree head). The checkpoint
+// MAY contain additional signatures, but the first SHOULD be the signature from
+// the log. Checkpoint contents are concatenated with newlines into a single string.
+// The checkpoint format is described in
+// https://github.com/transparency-dev/formats/blob/main/log/README.md
+// and https://github.com/C2SP/C2SP/blob/main/tlog-checkpoint.md.
+// An example implementation can be found in https://github.com/sigstore/rekor/blob/main/pkg/util/signed_note.go
+type Checkpoint struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Envelope string `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"`
+}
+
+func (x *Checkpoint) Reset() {
+	*x = Checkpoint{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_sigstore_rekor_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Checkpoint) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Checkpoint) ProtoMessage() {}
+
+func (x *Checkpoint) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_rekor_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Checkpoint.ProtoReflect.Descriptor instead.
+func (*Checkpoint) Descriptor() ([]byte, []int) {
+	return file_sigstore_rekor_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Checkpoint) GetEnvelope() string {
+	if x != nil {
+		return x.Envelope
+	}
+	return ""
+}
+
+// InclusionProof is the proof returned from the transparency log. It can
+// be used for offline or online verification against the log.
+type InclusionProof struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The index of the entry in the tree it was written to.
+	LogIndex int64 `protobuf:"varint,1,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"`
+	// The hash digest stored at the root of the Merkle tree at the time
+	// the proof was generated.
+	RootHash []byte `protobuf:"bytes,2,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"`
+	// The size of the Merkle tree at the time the proof was generated.
+	TreeSize int64 `protobuf:"varint,3,opt,name=tree_size,json=treeSize,proto3" json:"tree_size,omitempty"`
+	// A list of hashes required to compute the inclusion proof, sorted
+	// in order from leaf to root.
+	// Note that leaf and root hashes are not included.
+	// The root hash is available separately in this message, and the
+	// leaf hash should be calculated by the client.
+	Hashes [][]byte `protobuf:"bytes,4,rep,name=hashes,proto3" json:"hashes,omitempty"`
+	// Signature of the tree head, as of the time this proof was
+	// generated. See the 'Checkpoint' message above for more details.
+	Checkpoint *Checkpoint `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
+}
+
+func (x *InclusionProof) Reset() {
+	*x = InclusionProof{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_sigstore_rekor_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *InclusionProof) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InclusionProof) ProtoMessage() {}
+
+func (x *InclusionProof) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_rekor_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use InclusionProof.ProtoReflect.Descriptor instead.
+func (*InclusionProof) Descriptor() ([]byte, []int) {
+	return file_sigstore_rekor_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *InclusionProof) GetLogIndex() int64 {
+	if x != nil {
+		return x.LogIndex
+	}
+	return 0
+}
+
+func (x *InclusionProof) GetRootHash() []byte {
+	if x != nil {
+		return x.RootHash
+	}
+	return nil
+}
+
+func (x *InclusionProof) GetTreeSize() int64 {
+	if x != nil {
+		return x.TreeSize
+	}
+	return 0
+}
+
+func (x *InclusionProof) GetHashes() [][]byte {
+	if x != nil {
+		return x.Hashes
+	}
+	return nil
+}
+
+func (x *InclusionProof) GetCheckpoint() *Checkpoint {
+	if x != nil {
+		return x.Checkpoint
+	}
+	return nil
+}
+
+// The inclusion promise is calculated by Rekor. It's calculated as a
+// signature over a canonical JSON serialization of the persisted entry, the
+// log ID, log index and the integration timestamp.
+// See https://github.com/sigstore/rekor/blob/a6e58f72b6b18cc06cefe61808efd562b9726330/pkg/api/entries.go#L54
+// The format of the signature depends on the transparency log's public key.
+// If the signature algorithm requires a hash function and/or a signature
+// scheme (e.g., RSA), those have to be retrieved out-of-band from the log's
+// operators, together with the public key.
+// This is used to verify the integration timestamp's value and that the log
+// has promised to include the entry.
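+// For illustration only (this mirrors the JSON field set of the public-good
+// Rekor instance and is an assumption, not part of the generated API), the
+// signed payload can be rebuilt from a TransparencyLogEntry `e` roughly as:
+//
+//	set := fmt.Sprintf(`{"body":%q,"integratedTime":%d,"logID":%q,"logIndex":%d}`,
+//		base64.StdEncoding.EncodeToString(e.GetCanonicalizedBody()),
+//		e.GetIntegratedTime(),
+//		hex.EncodeToString(e.GetLogId().GetKeyId()),
+//		e.GetLogIndex())
+//
+// The signed_entry_timestamp below is then verified over []byte(set) with the
+// log's public key.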
+type InclusionPromise struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SignedEntryTimestamp []byte `protobuf:"bytes,1,opt,name=signed_entry_timestamp,json=signedEntryTimestamp,proto3" json:"signed_entry_timestamp,omitempty"` +} + +func (x *InclusionPromise) Reset() { + *x = InclusionPromise{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_rekor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InclusionPromise) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InclusionPromise) ProtoMessage() {} + +func (x *InclusionPromise) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_rekor_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InclusionPromise.ProtoReflect.Descriptor instead. +func (*InclusionPromise) Descriptor() ([]byte, []int) { + return file_sigstore_rekor_proto_rawDescGZIP(), []int{3} +} + +func (x *InclusionPromise) GetSignedEntryTimestamp() []byte { + if x != nil { + return x.SignedEntryTimestamp + } + return nil +} + +// TransparencyLogEntry captures all the details required from Rekor to +// reconstruct an entry, given that the payload is provided via other means. +// This type can easily be created from the existing response from Rekor. +// Future iterations could rely on Rekor returning the minimal set of +// attributes (excluding the payload) that are required for verifying the +// inclusion promise. The inclusion promise (called SignedEntryTimestamp in +// the response from Rekor) is similar to a Signed Certificate Timestamp +// as described here https://www.rfc-editor.org/rfc/rfc6962.html#section-3.2. +type TransparencyLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The global index of the entry, used when querying the log by index. + LogIndex int64 `protobuf:"varint,1,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"` + // The unique identifier of the log. + LogId *v1.LogId `protobuf:"bytes,2,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"` + // The kind (type) and version of the object associated with this + // entry. These values are required to construct the entry during + // verification. + KindVersion *KindVersion `protobuf:"bytes,3,opt,name=kind_version,json=kindVersion,proto3" json:"kind_version,omitempty"` + // The UNIX timestamp from the log when the entry was persisted. + IntegratedTime int64 `protobuf:"varint,4,opt,name=integrated_time,json=integratedTime,proto3" json:"integrated_time,omitempty"` + // The inclusion promise/signed entry timestamp from the log. + // Required for v0.1 bundles, and MUST be verified. + // Optional for >= v0.2 bundles, and SHOULD be verified when present. + // Also may be used as a signed timestamp. + InclusionPromise *InclusionPromise `protobuf:"bytes,5,opt,name=inclusion_promise,json=inclusionPromise,proto3" json:"inclusion_promise,omitempty"` + // The inclusion proof can be used for offline or online verification + // that the entry was appended to the log, and that the log has not been + // altered. + InclusionProof *InclusionProof `protobuf:"bytes,6,opt,name=inclusion_proof,json=inclusionProof,proto3" json:"inclusion_proof,omitempty"` + // Optional. 
The canonicalized transparency log entry, used to
+	// reconstruct the Signed Entry Timestamp (SET) during verification.
+	// The contents of this field are the same as the `body` field in
+	// a Rekor response, meaning that it does **not** include the "full"
+	// canonicalized form (of log index, ID, etc.) which are
+	// exposed as separate fields. The verifier is responsible for
+	// combining the `canonicalized_body`, `log_index`, `log_id`,
+	// and `integrated_time` into the payload that the SET's signature
+	// is generated over.
+	// This field is intended to be used in cases where the SET cannot be
+	// produced deterministically (e.g. inconsistent JSON field ordering,
+	// differing whitespace, etc.).
+	//
+	// If set, clients MUST verify that the signature referenced in the
+	// `canonicalized_body` matches the signature provided in the
+	// `Bundle.content`.
+	// If not set, clients are responsible for constructing an equivalent
+	// payload from other sources to verify the signature.
+	CanonicalizedBody []byte `protobuf:"bytes,7,opt,name=canonicalized_body,json=canonicalizedBody,proto3" json:"canonicalized_body,omitempty"`
+}
+
+func (x *TransparencyLogEntry) Reset() {
+	*x = TransparencyLogEntry{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_sigstore_rekor_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *TransparencyLogEntry) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TransparencyLogEntry) ProtoMessage() {}
+
+func (x *TransparencyLogEntry) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_rekor_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TransparencyLogEntry.ProtoReflect.Descriptor instead.
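+
+// Illustrative sketch (not part of the generated file): recomputing the root
+// from an InclusionProof per RFC 6962/RFC 9162. The hashes run from leaf to
+// root, the leaf hash is computed by the client, and the result is compared
+// against InclusionProof.root_hash. Helper names are hypothetical and SHA-256
+// is assumed as the tree hash.
+//
+//	func nodeHash(left, right []byte) []byte {
+//		h := sha256.New()
+//		h.Write([]byte{0x01}) // interior-node prefix
+//		h.Write(left)
+//		h.Write(right)
+//		return h.Sum(nil)
+//	}
+//
+//	func verifyInclusion(leaf []byte, index, size uint64, proof [][]byte, root []byte) bool {
+//		if index >= size {
+//			return false
+//		}
+//		fn, sn := index, size-1
+//		r := leaf
+//		for _, p := range proof {
+//			if sn == 0 {
+//				return false
+//			}
+//			if fn%2 == 1 || fn == sn {
+//				r = nodeHash(p, r)
+//				for fn%2 == 0 && fn != 0 {
+//					fn >>= 1
+//					sn >>= 1
+//				}
+//			} else {
+//				r = nodeHash(r, p)
+//			}
+//			fn >>= 1
+//			sn >>= 1
+//		}
+//		return sn == 0 && bytes.Equal(r, root)
+//	}
+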
+func (*TransparencyLogEntry) Descriptor() ([]byte, []int) { + return file_sigstore_rekor_proto_rawDescGZIP(), []int{4} +} + +func (x *TransparencyLogEntry) GetLogIndex() int64 { + if x != nil { + return x.LogIndex + } + return 0 +} + +func (x *TransparencyLogEntry) GetLogId() *v1.LogId { + if x != nil { + return x.LogId + } + return nil +} + +func (x *TransparencyLogEntry) GetKindVersion() *KindVersion { + if x != nil { + return x.KindVersion + } + return nil +} + +func (x *TransparencyLogEntry) GetIntegratedTime() int64 { + if x != nil { + return x.IntegratedTime + } + return 0 +} + +func (x *TransparencyLogEntry) GetInclusionPromise() *InclusionPromise { + if x != nil { + return x.InclusionPromise + } + return nil +} + +func (x *TransparencyLogEntry) GetInclusionProof() *InclusionProof { + if x != nil { + return x.InclusionProof + } + return nil +} + +func (x *TransparencyLogEntry) GetCanonicalizedBody() []byte { + if x != nil { + return x.CanonicalizedBody + } + return nil +} + +var File_sigstore_rekor_proto protoreflect.FileDescriptor + +var file_sigstore_rekor_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x45, 0x0a, 0x0b, 0x4b, 0x69, 0x6e, 0x64, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1d, 0x0a, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x2d, 0x0a, 0x0a, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1f, 0x0a, 0x08, 0x65, 0x6e, + 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x0e, + 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x20, + 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x12, 0x20, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x12, 0x20, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x74, 0x72, 0x65, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, + 0x73, 0x12, 0x46, 0x0a, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 
0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x10, 0x49, 0x6e, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x12, 0x39, 0x0a, + 0x16, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x14, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xc7, 0x03, 0x0a, 0x14, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x20, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x12, 0x39, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x67, + 0x49, 0x64, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x4a, + 0x0a, 0x0c, 0x6b, 0x69, 0x6e, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x69, 0x6e, + 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x6b, + 0x69, 0x6e, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x0f, 0x69, 0x6e, + 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x11, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x63, 0x6c, + 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x52, 0x10, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x12, 0x53, + 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6f, + 0x66, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x6f, 0x66, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x11, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x42, 0x6f, + 0x64, 0x79, 0x42, 
0x78, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, + 0x31, 0x42, 0x0a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, + 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x72, 0x65, + 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x13, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sigstore_rekor_proto_rawDescOnce sync.Once + file_sigstore_rekor_proto_rawDescData = file_sigstore_rekor_proto_rawDesc +) + +func file_sigstore_rekor_proto_rawDescGZIP() []byte { + file_sigstore_rekor_proto_rawDescOnce.Do(func() { + file_sigstore_rekor_proto_rawDescData = protoimpl.X.CompressGZIP(file_sigstore_rekor_proto_rawDescData) + }) + return file_sigstore_rekor_proto_rawDescData +} + +var file_sigstore_rekor_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_sigstore_rekor_proto_goTypes = []interface{}{ + (*KindVersion)(nil), // 0: dev.sigstore.rekor.v1.KindVersion + (*Checkpoint)(nil), // 1: dev.sigstore.rekor.v1.Checkpoint + (*InclusionProof)(nil), // 2: dev.sigstore.rekor.v1.InclusionProof + (*InclusionPromise)(nil), // 3: dev.sigstore.rekor.v1.InclusionPromise + (*TransparencyLogEntry)(nil), // 4: dev.sigstore.rekor.v1.TransparencyLogEntry + (*v1.LogId)(nil), // 5: dev.sigstore.common.v1.LogId +} +var file_sigstore_rekor_proto_depIdxs = []int32{ + 1, // 0: dev.sigstore.rekor.v1.InclusionProof.checkpoint:type_name -> dev.sigstore.rekor.v1.Checkpoint + 5, // 1: dev.sigstore.rekor.v1.TransparencyLogEntry.log_id:type_name -> dev.sigstore.common.v1.LogId + 0, // 2: dev.sigstore.rekor.v1.TransparencyLogEntry.kind_version:type_name -> dev.sigstore.rekor.v1.KindVersion + 3, // 3: dev.sigstore.rekor.v1.TransparencyLogEntry.inclusion_promise:type_name -> dev.sigstore.rekor.v1.InclusionPromise + 2, // 4: dev.sigstore.rekor.v1.TransparencyLogEntry.inclusion_proof:type_name -> dev.sigstore.rekor.v1.InclusionProof + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_sigstore_rekor_proto_init() } +func file_sigstore_rekor_proto_init() { + if File_sigstore_rekor_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sigstore_rekor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KindVersion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_rekor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Checkpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_rekor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InclusionProof); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil
+			}
+		}
+		file_sigstore_rekor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*InclusionPromise); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_sigstore_rekor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*TransparencyLogEntry); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_sigstore_rekor_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   5,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_sigstore_rekor_proto_goTypes,
+		DependencyIndexes: file_sigstore_rekor_proto_depIdxs,
+		MessageInfos:      file_sigstore_rekor_proto_msgTypes,
+	}.Build()
+	File_sigstore_rekor_proto = out.File
+	file_sigstore_rekor_proto_rawDesc = nil
+	file_sigstore_rekor_proto_goTypes = nil
+	file_sigstore_rekor_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go
new file mode 100644
index 0000000000..c8e0075581
--- /dev/null
+++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go
@@ -0,0 +1,765 @@
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.21.6
+// source: sigstore_trustroot.proto
+
+package v1
+
+import (
+	v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// TransparencyLogInstance describes the immutable parameters from a
+// transparency log.
+// See https://www.rfc-editor.org/rfc/rfc9162.html#name-log-parameters
+// for more details.
+// The included parameters are the minimal set required to identify a log,
+// and verify an inclusion proof/promise.
+type TransparencyLogInstance struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The base URL which can be used to construct URLs for the client.
+	BaseUrl string `protobuf:"bytes,1,opt,name=base_url,json=baseUrl,proto3" json:"base_url,omitempty"`
+	// The hash algorithm used for the Merkle Tree.
+ HashAlgorithm v1.HashAlgorithm `protobuf:"varint,2,opt,name=hash_algorithm,json=hashAlgorithm,proto3,enum=dev.sigstore.common.v1.HashAlgorithm" json:"hash_algorithm,omitempty"` + // The public key used to verify signatures generated by the log. + // This attribute contains the signature algorithm used by the log. + PublicKey *v1.PublicKey `protobuf:"bytes,3,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + // The unique identifier for this transparency log. + // Represented as the SHA-256 hash of the log's public key, + // calculated over the DER encoding of the key represented as + // SubjectPublicKeyInfo. + // See https://www.rfc-editor.org/rfc/rfc6962#section-3.2 + LogId *v1.LogId `protobuf:"bytes,4,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"` + // The checkpoint key identifier for the log used in a checkpoint. + // Optional, not provided for logs that do not generate checkpoints. + // For logs that do generate checkpoints, if not set, assume + // log_id equals checkpoint_key_id. + // Follows the specification described here + // for ECDSA and Ed25519 signatures: + // https://github.com/C2SP/C2SP/blob/main/signed-note.md#signatures + // For RSA signatures, the key ID will match the ECDSA format, the + // hashed DER-encoded SPKI public key. Publicly witnessed logs MUST NOT + // use RSA-signed checkpoints, since witnesses do not support + // RSA signatures. + // This is provided for convenience. Clients can also calculate the + // checkpoint key ID given the log's public key. + // SHOULD be set for logs generating Ed25519 signatures. + // SHOULD be 4 bytes long, as a truncated hash. + CheckpointKeyId *v1.LogId `protobuf:"bytes,5,opt,name=checkpoint_key_id,json=checkpointKeyId,proto3" json:"checkpoint_key_id,omitempty"` +} + +func (x *TransparencyLogInstance) Reset() { + *x = TransparencyLogInstance{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_trustroot_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransparencyLogInstance) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransparencyLogInstance) ProtoMessage() {} + +func (x *TransparencyLogInstance) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransparencyLogInstance.ProtoReflect.Descriptor instead. +func (*TransparencyLogInstance) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{0} +} + +func (x *TransparencyLogInstance) GetBaseUrl() string { + if x != nil { + return x.BaseUrl + } + return "" +} + +func (x *TransparencyLogInstance) GetHashAlgorithm() v1.HashAlgorithm { + if x != nil { + return x.HashAlgorithm + } + return v1.HashAlgorithm(0) +} + +func (x *TransparencyLogInstance) GetPublicKey() *v1.PublicKey { + if x != nil { + return x.PublicKey + } + return nil +} + +func (x *TransparencyLogInstance) GetLogId() *v1.LogId { + if x != nil { + return x.LogId + } + return nil +} + +func (x *TransparencyLogInstance) GetCheckpointKeyId() *v1.LogId { + if x != nil { + return x.CheckpointKeyId + } + return nil +} + +// CertificateAuthority enlists the information required to identify which +// CA to use and perform signature verification. 
+type CertificateAuthority struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The root certificate MUST be self-signed, and so the subject and + // issuer are the same. + Subject *v1.DistinguishedName `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` + // The URI identifies the certificate authority. + // + // It is RECOMMENDED that the URI is the base URL for the certificate + // authority, that can be provided to any SDK/client provided + // by the certificate authority to interact with the certificate + // authority. + Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"` + // The certificate chain for this CA. The last certificate in the chain + // MUST be the trust anchor. The trust anchor MAY be a self-signed root + // CA certificate or MAY be an intermediate CA certificate. + CertChain *v1.X509CertificateChain `protobuf:"bytes,3,opt,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"` + // The time the *entire* chain was valid. This is at max the + // longest interval when *all* certificates in the chain were valid, + // but it MAY be shorter. Clients MUST check timestamps against *both* + // the `valid_for` time range *and* the entire certificate chain. + // + // The TimeRange should be considered valid *inclusive* of the + // endpoints. + ValidFor *v1.TimeRange `protobuf:"bytes,4,opt,name=valid_for,json=validFor,proto3" json:"valid_for,omitempty"` +} + +func (x *CertificateAuthority) Reset() { + *x = CertificateAuthority{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_trustroot_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CertificateAuthority) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateAuthority) ProtoMessage() {} + +func (x *CertificateAuthority) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateAuthority.ProtoReflect.Descriptor instead. +func (*CertificateAuthority) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{1} +} + +func (x *CertificateAuthority) GetSubject() *v1.DistinguishedName { + if x != nil { + return x.Subject + } + return nil +} + +func (x *CertificateAuthority) GetUri() string { + if x != nil { + return x.Uri + } + return "" +} + +func (x *CertificateAuthority) GetCertChain() *v1.X509CertificateChain { + if x != nil { + return x.CertChain + } + return nil +} + +func (x *CertificateAuthority) GetValidFor() *v1.TimeRange { + if x != nil { + return x.ValidFor + } + return nil +} + +// TrustedRoot describes the client's complete set of trusted entities. +// How the TrustedRoot is populated is not specified, but can be a +// combination of many sources such as TUF repositories, files on disk etc. +// +// The TrustedRoot is not meant to be used for any artifact verification, only +// to capture the complete/global set of trusted verification materials. +// When verifying an artifact, based on the artifact and policies, a selection +// of keys/authorities are expected to be extracted and provided to the +// verification function. 
This way the set of keys/authorities can be kept to
+// a minimal set by the policy, to gain better control over what
+// signatures are allowed.
+//
+// The embedded transparency logs, CT logs, CAs and TSAs MUST include any
+// previously used instance -- otherwise signatures made in the past cannot
+// be verified.
+//
+// All the listed instances SHOULD be sorted by the 'valid_for' in ascending
+// order, that is, the oldest instance first. Only the last instance is
+// allowed to have its 'end' timestamp unset. All previous instances MUST
+// have a closed interval of validity. The last instance MAY have a closed
+// interval. Clients MUST accept instances that overlap in time; otherwise
+// clients may experience problems during rotations of verification
+// materials.
+//
+// To be able to manage planned rotations of either transparency logs or
+// certificate authorities, clients MUST accept lists of instances where
+// the last instance has a 'valid_for' that lies in the future.
+// This should not be a problem as clients SHOULD first seek the trust root
+// for a suitable instance before creating a per-artifact trust root (that
+// is, a subset of the complete trust root) that is used for verification.
+type TrustedRoot struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// MUST be application/vnd.dev.sigstore.trustedroot.v0.1+json
+	// when encoded as JSON.
+	// Clients MUST be able to process and parse content with the media
+	// type defined in the old format:
+	// application/vnd.dev.sigstore.trustedroot+json;version=0.1
+	MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
+	// A set of trusted Rekor servers.
+	Tlogs []*TransparencyLogInstance `protobuf:"bytes,2,rep,name=tlogs,proto3" json:"tlogs,omitempty"`
+	// A set of trusted certificate authorities (e.g. Fulcio), and any
+	// intermediate certificates they provide.
+	// If a CA is issuing multiple intermediate certificates, each
+	// combination shall be represented as a separate chain. I.e., a single
+	// root cert may appear in multiple chains but with different
+	// intermediate and/or leaf certificates.
+	// The certificates are intended to be used for verifying artifact
+	// signatures.
+	CertificateAuthorities []*CertificateAuthority `protobuf:"bytes,3,rep,name=certificate_authorities,json=certificateAuthorities,proto3" json:"certificate_authorities,omitempty"`
+	// A set of trusted certificate transparency logs.
+	Ctlogs []*TransparencyLogInstance `protobuf:"bytes,4,rep,name=ctlogs,proto3" json:"ctlogs,omitempty"`
+	// A set of trusted timestamping authorities.
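+	// Like certificate_authorities, each entry carries a 'valid_for'
+	// window. An illustrative, non-normative sketch of the window check
+	// described above (coveredBy is a hypothetical helper; start/end are
+	// assumed to be time.Time values extracted from the TimeRange, with a
+	// zero end meaning the interval is still open):
+	//
+	//	func coveredBy(t, start, end time.Time) bool {
+	//		return !t.Before(start) && (end.IsZero() || !t.After(end))
+	//	}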
+	TimestampAuthorities []*CertificateAuthority `protobuf:"bytes,5,rep,name=timestamp_authorities,json=timestampAuthorities,proto3" json:"timestamp_authorities,omitempty"`
+}
+
+func (x *TrustedRoot) Reset() {
+	*x = TrustedRoot{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_sigstore_trustroot_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *TrustedRoot) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TrustedRoot) ProtoMessage() {}
+
+func (x *TrustedRoot) ProtoReflect() protoreflect.Message {
+	mi := &file_sigstore_trustroot_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TrustedRoot.ProtoReflect.Descriptor instead.
+func (*TrustedRoot) Descriptor() ([]byte, []int) {
+	return file_sigstore_trustroot_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *TrustedRoot) GetMediaType() string {
+	if x != nil {
+		return x.MediaType
+	}
+	return ""
+}
+
+func (x *TrustedRoot) GetTlogs() []*TransparencyLogInstance {
+	if x != nil {
+		return x.Tlogs
+	}
+	return nil
+}
+
+func (x *TrustedRoot) GetCertificateAuthorities() []*CertificateAuthority {
+	if x != nil {
+		return x.CertificateAuthorities
+	}
+	return nil
+}
+
+func (x *TrustedRoot) GetCtlogs() []*TransparencyLogInstance {
+	if x != nil {
+		return x.Ctlogs
+	}
+	return nil
+}
+
+func (x *TrustedRoot) GetTimestampAuthorities() []*CertificateAuthority {
+	if x != nil {
+		return x.TimestampAuthorities
+	}
+	return nil
+}
+
+// SigningConfig represents the trusted entities/state needed by Sigstore
+// signing. In particular, it primarily contains service URLs that a Sigstore
+// signer may need to connect to for the online aspects of signing.
+type SigningConfig struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A URL to a Fulcio-compatible CA, capable of receiving
+	// Certificate Signing Requests (CSRs) and responding with
+	// issued certificates.
+	//
+	// This URL **MUST** be the "base" URL for the CA, which clients
+	// should construct an appropriate CSR endpoint on top of.
+	// For example, if `ca_url` is `https://example.com/ca`, then
+	// the client **MAY** construct the CSR endpoint as
+	// `https://example.com/ca/api/v2/signingCert`.
+	CaUrl string `protobuf:"bytes,1,opt,name=ca_url,json=caUrl,proto3" json:"ca_url,omitempty"`
+	// A URL to an OpenID Connect identity provider.
+	//
+	// This URL **MUST** be the "base" URL for the OIDC IdP, which clients
+	// should perform well-known OpenID Connect discovery against.
+	OidcUrl string `protobuf:"bytes,2,opt,name=oidc_url,json=oidcUrl,proto3" json:"oidc_url,omitempty"`
+	// One or more URLs to Rekor-compatible transparency logs.
+	//
+	// Each URL **MUST** be the "base" URL for the transparency log,
+	// which clients should construct appropriate API endpoints on top of.
+	TlogUrls []string `protobuf:"bytes,3,rep,name=tlog_urls,json=tlogUrls,proto3" json:"tlog_urls,omitempty"`
+	// One or more URLs to RFC 3161 Time Stamping Authorities (TSAs).
+	//
+	// Each URL **MUST** be the **full** URL for the TSA, meaning that it
+	// should be suitable for submitting Time Stamp Requests (TSRs) to
+	// via HTTP, per RFC 3161.
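+	//
+	// Illustrative, non-normative sketch of using these service URLs
+	// (the CSR endpoint suffix is the example from the ca_url comment
+	// above, not a guaranteed path; tsq is an already-encoded RFC 3161
+	// TimeStampReq; assumes net/url, net/http, and bytes):
+	//
+	//	csrEndpoint, _ := url.JoinPath(cfg.GetCaUrl(), "api", "v2", "signingCert")
+	//	resp, err := http.Post(tsaURL, "application/timestamp-query", bytes.NewReader(tsq))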
+ TsaUrls []string `protobuf:"bytes,4,rep,name=tsa_urls,json=tsaUrls,proto3" json:"tsa_urls,omitempty"` +} + +func (x *SigningConfig) Reset() { + *x = SigningConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_trustroot_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SigningConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SigningConfig) ProtoMessage() {} + +func (x *SigningConfig) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SigningConfig.ProtoReflect.Descriptor instead. +func (*SigningConfig) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{3} +} + +func (x *SigningConfig) GetCaUrl() string { + if x != nil { + return x.CaUrl + } + return "" +} + +func (x *SigningConfig) GetOidcUrl() string { + if x != nil { + return x.OidcUrl + } + return "" +} + +func (x *SigningConfig) GetTlogUrls() []string { + if x != nil { + return x.TlogUrls + } + return nil +} + +func (x *SigningConfig) GetTsaUrls() []string { + if x != nil { + return x.TsaUrls + } + return nil +} + +// ClientTrustConfig describes the complete state needed by a client +// to perform both signing and verification operations against a particular +// instance of Sigstore. +type ClientTrustConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // MUST be application/vnd.dev.sigstore.clienttrustconfig.v0.1+json + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // The root of trust, which MUST be present. + TrustedRoot *TrustedRoot `protobuf:"bytes,2,opt,name=trusted_root,json=trustedRoot,proto3" json:"trusted_root,omitempty"` + // Configuration for signing clients, which MUST be present. + SigningConfig *SigningConfig `protobuf:"bytes,3,opt,name=signing_config,json=signingConfig,proto3" json:"signing_config,omitempty"` +} + +func (x *ClientTrustConfig) Reset() { + *x = ClientTrustConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_trustroot_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientTrustConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientTrustConfig) ProtoMessage() {} + +func (x *ClientTrustConfig) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_trustroot_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientTrustConfig.ProtoReflect.Descriptor instead. 
+func (*ClientTrustConfig) Descriptor() ([]byte, []int) { + return file_sigstore_trustroot_proto_rawDescGZIP(), []int{4} +} + +func (x *ClientTrustConfig) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *ClientTrustConfig) GetTrustedRoot() *TrustedRoot { + if x != nil { + return x.TrustedRoot + } + return nil +} + +func (x *ClientTrustConfig) GetSigningConfig() *SigningConfig { + if x != nil { + return x.SigningConfig + } + return nil +} + +var File_sigstore_trustroot_proto protoreflect.FileDescriptor + +var file_sigstore_trustroot_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x72, 0x75, 0x73, 0x74, + 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, + 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, 0x02, + 0x0a, 0x17, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, + 0x67, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x61, 0x73, + 0x65, 0x55, 0x72, 0x6c, 0x12, 0x4c, 0x0a, 0x0e, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x61, 0x6c, 0x67, + 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x52, 0x0d, 0x68, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, + 0x68, 0x6d, 0x12, 0x40, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, + 0x63, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, + 0x67, 0x49, 0x64, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x49, 0x0a, 0x11, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, + 0x6f, 0x67, 0x49, 0x64, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x4b, 0x65, 0x79, 0x49, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x14, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x43, + 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x29, 
0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, + 0x75, 0x69, 0x73, 0x68, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x4b, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x09, 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x12, 0x3e, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, + 0x6f, 0x72, 0x22, 0x92, 0x03, 0x0a, 0x0b, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x52, 0x6f, + 0x6f, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x48, 0x0a, 0x05, 0x74, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x32, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x74, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x68, 0x0a, 0x17, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x16, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x06, 0x63, 0x74, 0x6c, 0x6f, 0x67, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, + 0x67, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x63, 0x74, 0x6c, 0x6f, 0x67, + 0x73, 0x12, 0x64, 0x0a, 0x15, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 
0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x52, 0x14, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0x79, 0x0a, 0x0d, 0x53, 0x69, 0x67, 0x6e, 0x69, + 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x15, 0x0a, 0x06, 0x63, 0x61, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x61, 0x55, 0x72, 0x6c, 0x12, + 0x19, 0x0a, 0x08, 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6f, 0x69, 0x64, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6c, + 0x6f, 0x67, 0x5f, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x6c, 0x6f, 0x67, 0x55, 0x72, 0x6c, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x73, 0x61, 0x5f, 0x75, + 0x72, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x73, 0x61, 0x55, 0x72, + 0x6c, 0x73, 0x22, 0xd8, 0x01, 0x0a, 0x11, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x72, 0x75, + 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, + 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, + 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, + 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, + 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x54, 0x0a, 0x0e, 0x73, 0x69, 0x67, 0x6e, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, + 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, + 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x88, 0x01, + 0x0a, 0x1f, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, + 0x31, 0x42, 0x0e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2d, 0x73, 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, + 0x6f, 0x2f, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2f, 0x76, 0x31, 0xea, 0x02, + 0x17, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x54, 0x72, 0x75, 0x73, 0x74, + 0x52, 0x6f, 0x6f, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sigstore_trustroot_proto_rawDescOnce sync.Once + file_sigstore_trustroot_proto_rawDescData = file_sigstore_trustroot_proto_rawDesc +) + +func file_sigstore_trustroot_proto_rawDescGZIP() []byte { + file_sigstore_trustroot_proto_rawDescOnce.Do(func() { + file_sigstore_trustroot_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_sigstore_trustroot_proto_rawDescData) + }) + return file_sigstore_trustroot_proto_rawDescData +} + +var file_sigstore_trustroot_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_sigstore_trustroot_proto_goTypes = []interface{}{ + (*TransparencyLogInstance)(nil), // 0: dev.sigstore.trustroot.v1.TransparencyLogInstance + (*CertificateAuthority)(nil), // 1: dev.sigstore.trustroot.v1.CertificateAuthority + (*TrustedRoot)(nil), // 2: dev.sigstore.trustroot.v1.TrustedRoot + (*SigningConfig)(nil), // 3: dev.sigstore.trustroot.v1.SigningConfig + (*ClientTrustConfig)(nil), // 4: dev.sigstore.trustroot.v1.ClientTrustConfig + (v1.HashAlgorithm)(0), // 5: dev.sigstore.common.v1.HashAlgorithm + (*v1.PublicKey)(nil), // 6: dev.sigstore.common.v1.PublicKey + (*v1.LogId)(nil), // 7: dev.sigstore.common.v1.LogId + (*v1.DistinguishedName)(nil), // 8: dev.sigstore.common.v1.DistinguishedName + (*v1.X509CertificateChain)(nil), // 9: dev.sigstore.common.v1.X509CertificateChain + (*v1.TimeRange)(nil), // 10: dev.sigstore.common.v1.TimeRange +} +var file_sigstore_trustroot_proto_depIdxs = []int32{ + 5, // 0: dev.sigstore.trustroot.v1.TransparencyLogInstance.hash_algorithm:type_name -> dev.sigstore.common.v1.HashAlgorithm + 6, // 1: dev.sigstore.trustroot.v1.TransparencyLogInstance.public_key:type_name -> dev.sigstore.common.v1.PublicKey + 7, // 2: dev.sigstore.trustroot.v1.TransparencyLogInstance.log_id:type_name -> dev.sigstore.common.v1.LogId + 7, // 3: dev.sigstore.trustroot.v1.TransparencyLogInstance.checkpoint_key_id:type_name -> dev.sigstore.common.v1.LogId + 8, // 4: dev.sigstore.trustroot.v1.CertificateAuthority.subject:type_name -> dev.sigstore.common.v1.DistinguishedName + 9, // 5: dev.sigstore.trustroot.v1.CertificateAuthority.cert_chain:type_name -> dev.sigstore.common.v1.X509CertificateChain + 10, // 6: dev.sigstore.trustroot.v1.CertificateAuthority.valid_for:type_name -> dev.sigstore.common.v1.TimeRange + 0, // 7: dev.sigstore.trustroot.v1.TrustedRoot.tlogs:type_name -> dev.sigstore.trustroot.v1.TransparencyLogInstance + 1, // 8: dev.sigstore.trustroot.v1.TrustedRoot.certificate_authorities:type_name -> dev.sigstore.trustroot.v1.CertificateAuthority + 0, // 9: dev.sigstore.trustroot.v1.TrustedRoot.ctlogs:type_name -> dev.sigstore.trustroot.v1.TransparencyLogInstance + 1, // 10: dev.sigstore.trustroot.v1.TrustedRoot.timestamp_authorities:type_name -> dev.sigstore.trustroot.v1.CertificateAuthority + 2, // 11: dev.sigstore.trustroot.v1.ClientTrustConfig.trusted_root:type_name -> dev.sigstore.trustroot.v1.TrustedRoot + 3, // 12: dev.sigstore.trustroot.v1.ClientTrustConfig.signing_config:type_name -> dev.sigstore.trustroot.v1.SigningConfig + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_sigstore_trustroot_proto_init() } +func file_sigstore_trustroot_proto_init() { + if File_sigstore_trustroot_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sigstore_trustroot_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransparencyLogInstance); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_trustroot_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*CertificateAuthority); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_trustroot_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TrustedRoot); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_trustroot_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SigningConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sigstore_trustroot_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientTrustConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sigstore_trustroot_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sigstore_trustroot_proto_goTypes, + DependencyIndexes: file_sigstore_trustroot_proto_depIdxs, + MessageInfos: file_sigstore_trustroot_proto_msgTypes, + }.Build() + File_sigstore_trustroot_proto = out.File + file_sigstore_trustroot_proto_rawDesc = nil + file_sigstore_trustroot_proto_goTypes = nil + file_sigstore_trustroot_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/tle/tle.go b/vendor/github.com/sigstore/rekor/pkg/tle/tle.go new file mode 100644 index 0000000000..2e51f5bcc3 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/tle/tle.go @@ -0,0 +1,106 @@ +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package tle
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+
+	"github.com/go-openapi/runtime"
+	rekor_pb_common "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+	rekor_pb "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1"
+	"github.com/sigstore/rekor/pkg/generated/models"
+	"github.com/sigstore/rekor/pkg/types"
+	"google.golang.org/protobuf/encoding/protojson"
+)
+
+// GenerateTransparencyLogEntry returns a sigstore/protobuf-specs compliant message containing a
+// TransparencyLogEntry as defined at https://github.com/sigstore/protobuf-specs/blob/main/protos/sigstore_rekor.proto
+func GenerateTransparencyLogEntry(anon models.LogEntryAnon) (*rekor_pb.TransparencyLogEntry, error) {
+	logIDHash, err := hex.DecodeString(*anon.LogID)
+	if err != nil {
+		return nil, fmt.Errorf("decoding logID string: %w", err)
+	}
+
+	rootHash, err := hex.DecodeString(*anon.Verification.InclusionProof.RootHash)
+	if err != nil {
+		return nil, fmt.Errorf("decoding inclusion proof root hash: %w", err)
+	}
+
+	inclusionProofHashes := make([][]byte, len(anon.Verification.InclusionProof.Hashes))
+	for i, hash := range anon.Verification.InclusionProof.Hashes {
+		hashBytes, err := hex.DecodeString(hash)
+		if err != nil {
+			return nil, fmt.Errorf("decoding inclusion proof hash: %w", err)
+		}
+		inclusionProofHashes[i] = hashBytes
+	}
+
+	// Different call paths may supply string or []byte. If string, it is base64 encoded.
+	var body []byte
+	switch v := anon.Body.(type) {
+	case string:
+		b, err := base64.StdEncoding.DecodeString(v)
+		if err != nil {
+			return nil, fmt.Errorf("base64 decoding body: %w", err)
+		}
+		body = b
+	case []byte:
+		body = v
+	default:
+		return nil, fmt.Errorf("body is not string or []byte: (%T)%v", v, v)
+	}
+
+	pe, err := models.UnmarshalProposedEntry(bytes.NewReader(body), runtime.JSONConsumer())
+	if err != nil {
+		return nil, err
+	}
+	eimpl, err := types.UnmarshalEntry(pe)
+	if err != nil {
+		return nil, err
+	}
+
+	return &rekor_pb.TransparencyLogEntry{
+		LogIndex: *anon.LogIndex, // the global log index
+		LogId: &rekor_pb_common.LogId{
+			KeyId: logIDHash,
+		},
+		KindVersion: &rekor_pb.KindVersion{
+			Kind:    pe.Kind(),
+			Version: eimpl.APIVersion(),
+		},
+		IntegratedTime: *anon.IntegratedTime,
+		InclusionPromise: &rekor_pb.InclusionPromise{
+			SignedEntryTimestamp: anon.Verification.SignedEntryTimestamp,
+		},
+		InclusionProof: &rekor_pb.InclusionProof{
+			LogIndex: *anon.Verification.InclusionProof.LogIndex, // relative to the specific tree the entry is found in
+			RootHash: rootHash,
+			TreeSize: *anon.Verification.InclusionProof.TreeSize,
+			Hashes:   inclusionProofHashes,
+			Checkpoint: &rekor_pb.Checkpoint{
+				Envelope: *anon.Verification.InclusionProof.Checkpoint,
+			},
+		},
+		CanonicalizedBody: body, // we deliberately do not call eimpl.Canonicalize, in case its canonicalization logic differs from the logic used when the entry was persisted in the log
+	}, nil
+}
+
+// MarshalTLEToJSON marshals a TransparencyLogEntry message to JSON according to the protobuf JSON encoding rules
+func MarshalTLEToJSON(tle *rekor_pb.TransparencyLogEntry) ([]byte, error) {
+	return protojson.Marshal(tle)
+}
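+
+// Illustrative usage (not part of the upstream file): converting a
+// LogEntryAnon fetched from Rekor into a protobuf-specs
+// TransparencyLogEntry and printing its protojson form; `anon` is assumed
+// to come from a Rekor API response:
+//
+//	tle, err := GenerateTransparencyLogEntry(anon)
+//	if err != nil {
+//		return err
+//	}
+//	jsonBytes, err := MarshalTLEToJSON(tle)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(string(jsonBytes))
diff --git a/vendor/github.com/sigstore/rekor/pkg/verify/verify.go b/vendor/github.com/sigstore/rekor/pkg/verify/verify.go
new file mode 100644
index 0000000000..7b519b58e8
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/verify/verify.go
@@ -0,0 +1,234 @@
+//
+// Copyright 2022 The Sigstore Authors.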
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
+	"github.com/sigstore/rekor/pkg/generated/client"
+	"github.com/sigstore/rekor/pkg/generated/client/tlog"
+	"github.com/sigstore/rekor/pkg/generated/models"
+	"github.com/sigstore/rekor/pkg/util"
+	"github.com/sigstore/sigstore/pkg/signature"
+	"github.com/sigstore/sigstore/pkg/signature/options"
+	"github.com/transparency-dev/merkle/proof"
+	"github.com/transparency-dev/merkle/rfc6962"
+)
+
+// ProveConsistency verifies consistency between an initial, trusted STH
+// and a second, new STH. Callers MUST verify the signatures on the STHs.
+func ProveConsistency(ctx context.Context, rClient *client.Rekor,
+	oldSTH *util.SignedCheckpoint, newSTH *util.SignedCheckpoint, treeID string) error {
+	oldTreeSize := int64(oldSTH.Size)
+	switch {
+	case oldTreeSize == 0:
+		return errors.New("consistency proofs cannot be computed starting from an empty log")
+	case oldTreeSize == int64(newSTH.Size):
+		if !bytes.Equal(oldSTH.Hash, newSTH.Hash) {
+			return errors.New("old root hash does not match STH hash")
+		}
+	case oldTreeSize < int64(newSTH.Size):
+		consistencyParams := tlog.NewGetLogProofParamsWithContext(ctx)
+		consistencyParams.FirstSize = &oldTreeSize      // Root size at the old, or trusted state.
+		consistencyParams.LastSize = int64(newSTH.Size) // Root size at the new state to verify against.
+		consistencyParams.TreeID = &treeID
+		consistencyProof, err := rClient.Tlog.GetLogProof(consistencyParams)
+		if err != nil {
+			return err
+		}
+		var hashes [][]byte
+		for _, h := range consistencyProof.Payload.Hashes {
+			b, err := hex.DecodeString(h)
+			if err != nil {
+				return errors.New("error decoding consistency proof hashes")
+			}
+			hashes = append(hashes, b)
+		}
+		if err := proof.VerifyConsistency(rfc6962.DefaultHasher,
+			oldSTH.Size, newSTH.Size, hashes, oldSTH.Hash, newSTH.Hash); err != nil {
+			return err
+		}
+	case oldTreeSize > int64(newSTH.Size):
+		return errors.New("old tree size is larger than the new tree size; consistency cannot be proven")
+	}
+	return nil
+}
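+
+// Illustrative usage (not part of the upstream file): a client that has
+// pinned a previously verified checkpoint can prove that the log only
+// appended entries since then. rekorClient, verifier, and pinnedSTH are
+// assumed to exist already:
+//
+//	newSTH, err := VerifyCurrentCheckpoint(ctx, rekorClient, verifier, pinnedSTH)
+//	if err != nil {
+//		return err // the log was inconsistent or a signature failed
+//	}
+//	// persist newSTH as the new trusted checkpoint
+
+// VerifyCurrentCheckpoint verifies the provided checkpoint by verifying consistency
+// against a newly fetched Checkpoint.
+// nolint
+func VerifyCurrentCheckpoint(ctx context.Context, rClient *client.Rekor, verifier signature.Verifier,
+	oldSTH *util.SignedCheckpoint) (*util.SignedCheckpoint, error) {
+	// The oldSTH should already be verified, but check for robustness.
+	if !oldSTH.Verify(verifier) {
+		return nil, errors.New("signature on old tree head did not verify")
+	}
+
+	// Get and verify against the current STH.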
+	infoParams := tlog.NewGetLogInfoParamsWithContext(ctx)
+	result, err := rClient.Tlog.GetLogInfo(infoParams)
+	if err != nil {
+		return nil, err
+	}
+
+	logInfo := result.GetPayload()
+	sth := util.SignedCheckpoint{}
+	if err := sth.UnmarshalText([]byte(*logInfo.SignedTreeHead)); err != nil {
+		return nil, err
+	}
+
+	// Verify the signature on the SignedCheckpoint.
+	if !sth.Verify(verifier) {
+		return nil, errors.New("signature on tree head did not verify")
+	}
+
+	// Now verify consistency up to the STH.
+	if err := ProveConsistency(ctx, rClient, oldSTH, &sth, *logInfo.TreeID); err != nil {
+		return nil, err
+	}
+	return &sth, nil
+}
+
+// VerifyCheckpointSignature verifies the signature on a checkpoint (signed tree head). It does
+// not verify consistency against other checkpoints.
+// nolint
+func VerifyCheckpointSignature(e *models.LogEntryAnon, verifier signature.Verifier) error {
+	sth := &util.SignedCheckpoint{}
+	if err := sth.UnmarshalText([]byte(*e.Verification.InclusionProof.Checkpoint)); err != nil {
+		return fmt.Errorf("unmarshalling log entry checkpoint to SignedCheckpoint: %w", err)
+	}
+	if !sth.Verify(verifier) {
+		return errors.New("signature on checkpoint did not verify")
+	}
+	rootHash, err := hex.DecodeString(*e.Verification.InclusionProof.RootHash)
+	if err != nil {
+		return errors.New("decoding inclusion proof root hash")
+	}
+
+	if !bytes.EqualFold(rootHash, sth.Hash) {
+		return fmt.Errorf("proof root hash does not match signed tree head, expected %s got %s",
+			*e.Verification.InclusionProof.RootHash,
+			hex.EncodeToString(sth.Hash))
+	}
+	return nil
+}
+
+// VerifyInclusion verifies an entry's inclusion proof. Clients MUST either verify
+// the root hash against a new STH (via VerifyCurrentCheckpoint) or against a
+// trusted, existing STH (via ProveConsistency).
+// nolint
+func VerifyInclusion(ctx context.Context, e *models.LogEntryAnon) error {
+	if e.Verification == nil || e.Verification.InclusionProof == nil {
+		return errors.New("inclusion proof not provided")
+	}
+
+	hashes := [][]byte{}
+	for _, h := range e.Verification.InclusionProof.Hashes {
+		hb, _ := hex.DecodeString(h)
+		hashes = append(hashes, hb)
+	}
+
+	rootHash, err := hex.DecodeString(*e.Verification.InclusionProof.RootHash)
+	if err != nil {
+		return err
+	}
+
+	// Verify the inclusion proof.
+	entryBytes, err := base64.StdEncoding.DecodeString(e.Body.(string))
+	if err != nil {
+		return err
+	}
+	leafHash := rfc6962.DefaultHasher.HashLeaf(entryBytes)
+
+	if err := proof.VerifyInclusion(rfc6962.DefaultHasher, uint64(*e.Verification.InclusionProof.LogIndex),
+		uint64(*e.Verification.InclusionProof.TreeSize), leafHash, hashes, rootHash); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// VerifySignedEntryTimestamp verifies the entry's SET against the provided
+// public key.
+// nolint
+func VerifySignedEntryTimestamp(ctx context.Context, e *models.LogEntryAnon, verifier signature.Verifier) error {
+	if e.Verification == nil {
+		return errors.New("missing verification")
+	}
+	if e.Verification.SignedEntryTimestamp == nil {
+		return errors.New("signature missing")
+	}
+
+	type bundle struct {
+		Body           interface{} `json:"body"`
+		IntegratedTime int64       `json:"integratedTime"`
+		// Note that this is the virtual index.
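+		// For illustration only: the canonicalized (RFC 8785) form of
+		// this struct is the exact byte string the SET signature covers;
+		// the shape, with example values, is:
+		//
+		//	{"body":"<base64>","integratedTime":1234567890,"logID":"<hex>","logIndex":42}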
+ LogIndex int64 `json:"logIndex"` + LogID string `json:"logID"` + } + bundlePayload := bundle{ + Body: e.Body, + IntegratedTime: *e.IntegratedTime, + LogIndex: *e.LogIndex, + LogID: *e.LogID, + } + contents, err := json.Marshal(bundlePayload) + if err != nil { + return fmt.Errorf("marshaling bundle: %w", err) + } + canonicalized, err := jsoncanonicalizer.Transform(contents) + if err != nil { + return fmt.Errorf("canonicalizing bundle: %w", err) + } + + // verify the SET against the public key + if err := verifier.VerifySignature(bytes.NewReader(e.Verification.SignedEntryTimestamp), + bytes.NewReader(canonicalized), options.WithContext(ctx)); err != nil { + return fmt.Errorf("unable to verify bundle: %w", err) + } + return nil +} + +// VerifyLogEntry performs verification of a LogEntry given a Rekor verifier. +// Performs inclusion proof verification up to a verified root hash, +// SignedEntryTimestamp verification, and checkpoint verification. +// nolint +func VerifyLogEntry(ctx context.Context, e *models.LogEntryAnon, verifier signature.Verifier) error { + // Verify the inclusion proof using the body's leaf hash. + if err := VerifyInclusion(ctx, e); err != nil { + return err + } + + // Verify checkpoint, which includes a signed root hash. + if err := VerifyCheckpointSignature(e, verifier); err != nil { + return err + } + + // Verify the Signed Entry Timestamp. + if err := VerifySignedEntryTimestamp(ctx, e, verifier); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt b/vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt new file mode 100644 index 0000000000..406ee26060 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt @@ -0,0 +1,13 @@ +Copyright 2023 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/sigstore/sigstore-go/LICENSE b/vendor/github.com/sigstore/sigstore-go/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/bundle/bundle.go b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/bundle.go new file mode 100644 index 0000000000..4ce5df86c4 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/bundle.go @@ -0,0 +1,406 @@ +// Copyright 2023 The Sigstore Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bundle + +import ( + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "os" + "strings" + + "github.com/secure-systems-lab/go-securesystemslib/dsse" + protobundle "github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1" + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + protodsse "github.com/sigstore/protobuf-specs/gen/pb-go/dsse" + "golang.org/x/mod/semver" + "google.golang.org/protobuf/encoding/protojson" + + "github.com/sigstore/sigstore-go/pkg/tlog" + "github.com/sigstore/sigstore-go/pkg/verify" +) + +var ErrValidation = errors.New("validation error") +var ErrUnsupportedMediaType = fmt.Errorf("%w: unsupported media type", ErrValidation) +var ErrMissingVerificationMaterial = fmt.Errorf("%w: missing verification material", ErrValidation) +var ErrUnimplemented = errors.New("unimplemented") +var ErrInvalidAttestation = fmt.Errorf("%w: invalid attestation", ErrValidation) +var ErrMissingEnvelope = fmt.Errorf("%w: missing valid envelope", ErrInvalidAttestation) +var ErrDecodingJSON = fmt.Errorf("%w: decoding json", ErrInvalidAttestation) +var ErrDecodingB64 = fmt.Errorf("%w: decoding base64", ErrInvalidAttestation) + +const mediaTypeBase = "application/vnd.dev.sigstore.bundle" + +func ErrValidationError(err error) error { + return fmt.Errorf("%w: %w", ErrValidation, err) +} + +type Bundle struct { + *protobundle.Bundle + hasInclusionPromise bool + hasInclusionProof bool +} + +func NewBundle(pbundle *protobundle.Bundle) (*Bundle, error) { + bundle := &Bundle{ + Bundle: pbundle, + hasInclusionPromise: false, + hasInclusionProof: false, + } + + err := bundle.validate() + if err != nil { + return nil, err + } + + return bundle, nil +} + +// Deprecated: use Bundle instead +type ProtobufBundle = Bundle + +// Deprecated: use NewBundle instead +func NewProtobufBundle(b *protobundle.Bundle) (*ProtobufBundle, error) { + return NewBundle(b) +} + +func (b *Bundle) validate() error { + bundleVersion, err := getBundleVersion(b.Bundle.MediaType) + if err != nil { + return fmt.Errorf("error getting bundle version: %w", err) + } + + // if bundle version is < 0.1, return error + if semver.Compare(bundleVersion, "v0.1") < 0 { + return fmt.Errorf("%w: bundle version %s is not supported", ErrUnsupportedMediaType, bundleVersion) + } + + // fetch tlog entries, as next check needs to check them for inclusion proof/promise + entries, err := b.TlogEntries() + if err != nil { + return err + } + + // if bundle version == v0.1, require inclusion promise + if semver.Compare(bundleVersion, "v0.1") == 0 { + if len(entries) > 0 && !b.hasInclusionPromise { + return errors.New("inclusion promises missing in bundle (required for bundle v0.1)") + } + } else { + // if bundle version >= v0.2, require inclusion proof + if len(entries) > 0 && !b.hasInclusionProof { + return errors.New("inclusion proof missing in bundle (required for bundle v0.2)") + } + } + + // if bundle version >= v0.3, require verification material to not be X.509 
certificate chain (only single certificate is allowed)
+	if semver.Compare(bundleVersion, "v0.3") >= 0 {
+		certs := b.Bundle.VerificationMaterial.GetX509CertificateChain()
+
+		if certs != nil {
+			return errors.New("verification material cannot be X.509 certificate chain (for bundle v0.3)")
+		}
+	}
+
+	// if bundle version is >= v0.4, return error as this version is not supported
+	if semver.Compare(bundleVersion, "v0.4") >= 0 {
+		return fmt.Errorf("%w: bundle version %s is not yet supported", ErrUnsupportedMediaType, bundleVersion)
+	}
+
+	err = validateBundle(b.Bundle)
+	if err != nil {
+		return fmt.Errorf("invalid bundle: %w", err)
+	}
+	return nil
+}
+
+// MediaTypeString returns a mediatype string for the specified bundle version.
+// The function returns an error if the resulting string does not validate.
+func MediaTypeString(version string) (string, error) {
+	if version == "" {
+		return "", fmt.Errorf("unable to build media type string, no version defined")
+	}
+
+	version = strings.TrimPrefix(version, "v")
+	mtString := fmt.Sprintf("%s.v%s+json", mediaTypeBase, version)
+
+	if version == "0.1" || version == "0.2" {
+		mtString = fmt.Sprintf("%s+json;version=%s", mediaTypeBase, version)
+	}
+
+	if _, err := getBundleVersion(mtString); err != nil {
+		return "", fmt.Errorf("unable to build mediatype: %w", err)
+	}
+
+	return mtString, nil
+}
+
+func getBundleVersion(mediaType string) (string, error) {
+	switch mediaType {
+	case mediaTypeBase + "+json;version=0.1":
+		return "v0.1", nil
+	case mediaTypeBase + "+json;version=0.2":
+		return "v0.2", nil
+	case mediaTypeBase + "+json;version=0.3":
+		return "v0.3", nil
+	}
+	if strings.HasPrefix(mediaType, mediaTypeBase+".v") && strings.HasSuffix(mediaType, "+json") {
+		version := strings.TrimPrefix(mediaType, mediaTypeBase+".")
+		version = strings.TrimSuffix(version, "+json")
+		if semver.IsValid(version) {
+			return version, nil
+		}
+		return "", fmt.Errorf("%w: invalid bundle version: %s", ErrUnsupportedMediaType, version)
+	}
+	return "", fmt.Errorf("%w: %s", ErrUnsupportedMediaType, mediaType)
+}
+
+func validateBundle(b *protobundle.Bundle) error {
+	if b == nil {
+		return fmt.Errorf("empty protobuf bundle")
+	}
+
+	if b.Content == nil {
+		return fmt.Errorf("missing bundle content")
+	}
+
+	switch b.Content.(type) {
+	case *protobundle.Bundle_DsseEnvelope, *protobundle.Bundle_MessageSignature:
+	default:
+		return fmt.Errorf("invalid bundle content: bundle content must be either a message signature or dsse envelope")
+	}
+
+	if b.VerificationMaterial == nil {
+		return fmt.Errorf("missing verification material")
+	}
+
+	if b.VerificationMaterial.Content == nil {
+		return fmt.Errorf("missing verification material content")
+	}
+
+	switch b.VerificationMaterial.Content.(type) {
+	case *protobundle.VerificationMaterial_PublicKey, *protobundle.VerificationMaterial_Certificate, *protobundle.VerificationMaterial_X509CertificateChain:
+	default:
+		return fmt.Errorf("invalid verification material content: verification material must be one of public key, x509 certificate and x509 certificate chain")
+	}
+
+	return nil
+}
+
+func LoadJSONFromPath(path string) (*Bundle, error) {
+	var bundle Bundle
+	bundle.Bundle = new(protobundle.Bundle)
+
+	contents, err := os.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+
+	err = bundle.UnmarshalJSON(contents)
+	if err != nil {
+		return nil, err
+	}
+
+	return &bundle, nil
+}
+
+func (b *Bundle) MarshalJSON() ([]byte, error) {
+	return
protojson.Marshal(b.Bundle) +} + +func (b *Bundle) UnmarshalJSON(data []byte) error { + b.Bundle = new(protobundle.Bundle) + err := protojson.Unmarshal(data, b.Bundle) + if err != nil { + return err + } + + err = b.validate() + if err != nil { + return err + } + + return nil +} + +func (b *Bundle) VerificationContent() (verify.VerificationContent, error) { + if b.VerificationMaterial == nil { + return nil, ErrMissingVerificationMaterial + } + + switch content := b.VerificationMaterial.GetContent().(type) { + case *protobundle.VerificationMaterial_X509CertificateChain: + if content.X509CertificateChain == nil { + return nil, ErrMissingVerificationMaterial + } + certs := content.X509CertificateChain.GetCertificates() + if len(certs) == 0 || certs[0].RawBytes == nil { + return nil, ErrMissingVerificationMaterial + } + parsedCert, err := x509.ParseCertificate(certs[0].RawBytes) + if err != nil { + return nil, ErrValidationError(err) + } + cert := &Certificate{ + Certificate: parsedCert, + } + return cert, nil + case *protobundle.VerificationMaterial_Certificate: + if content.Certificate == nil || content.Certificate.RawBytes == nil { + return nil, ErrMissingVerificationMaterial + } + parsedCert, err := x509.ParseCertificate(content.Certificate.RawBytes) + if err != nil { + return nil, ErrValidationError(err) + } + cert := &Certificate{ + Certificate: parsedCert, + } + return cert, nil + case *protobundle.VerificationMaterial_PublicKey: + if content.PublicKey == nil { + return nil, ErrMissingVerificationMaterial + } + pk := &PublicKey{ + hint: content.PublicKey.Hint, + } + return pk, nil + + default: + return nil, ErrMissingVerificationMaterial + } +} + +func (b *Bundle) HasInclusionPromise() bool { + return b.hasInclusionPromise +} + +func (b *Bundle) HasInclusionProof() bool { + return b.hasInclusionProof +} + +func (b *Bundle) TlogEntries() ([]*tlog.Entry, error) { + if b.VerificationMaterial == nil { + return nil, nil + } + + tlogEntries := make([]*tlog.Entry, len(b.VerificationMaterial.TlogEntries)) + var err error + for i, entry := range b.VerificationMaterial.TlogEntries { + tlogEntries[i], err = tlog.ParseEntry(entry) + if err != nil { + return nil, ErrValidationError(err) + } + + if tlogEntries[i].HasInclusionPromise() { + b.hasInclusionPromise = true + } + if tlogEntries[i].HasInclusionProof() { + b.hasInclusionProof = true + } + } + + return tlogEntries, nil +} + +func (b *Bundle) SignatureContent() (verify.SignatureContent, error) { + switch content := b.Bundle.Content.(type) { //nolint:gocritic + case *protobundle.Bundle_DsseEnvelope: + envelope, err := parseEnvelope(content.DsseEnvelope) + if err != nil { + return nil, err + } + return envelope, nil + case *protobundle.Bundle_MessageSignature: + if content.MessageSignature == nil || content.MessageSignature.MessageDigest == nil { + return nil, ErrMissingVerificationMaterial + } + return NewMessageSignature( + content.MessageSignature.MessageDigest.Digest, + protocommon.HashAlgorithm_name[int32(content.MessageSignature.MessageDigest.Algorithm)], + content.MessageSignature.Signature, + ), nil + } + return nil, ErrMissingVerificationMaterial +} + +func (b *Bundle) Envelope() (*Envelope, error) { + switch content := b.Bundle.Content.(type) { //nolint:gocritic + case *protobundle.Bundle_DsseEnvelope: + envelope, err := parseEnvelope(content.DsseEnvelope) + if err != nil { + return nil, err + } + return envelope, nil + } + return nil, ErrMissingVerificationMaterial +} + +func (b *Bundle) Timestamps() ([][]byte, error) { + if 
b.VerificationMaterial == nil { + return nil, ErrMissingVerificationMaterial + } + + signedTimestamps := make([][]byte, 0) + + if b.VerificationMaterial.TimestampVerificationData == nil { + return signedTimestamps, nil + } + + for _, timestamp := range b.VerificationMaterial.TimestampVerificationData.Rfc3161Timestamps { + signedTimestamps = append(signedTimestamps, timestamp.SignedTimestamp) + } + + return signedTimestamps, nil +} + +// MinVersion returns true if the bundle version is greater than or equal to the expected version. +func (b *Bundle) MinVersion(expectVersion string) bool { + version, err := getBundleVersion(b.Bundle.MediaType) + if err != nil { + return false + } + + if !strings.HasPrefix(expectVersion, "v") { + expectVersion = "v" + expectVersion + } + + return semver.Compare(version, expectVersion) >= 0 +} + +func parseEnvelope(input *protodsse.Envelope) (*Envelope, error) { + if input == nil { + return nil, ErrMissingEnvelope + } + output := &dsse.Envelope{} + payload := input.GetPayload() + if payload == nil { + return nil, ErrMissingEnvelope + } + output.Payload = base64.StdEncoding.EncodeToString([]byte(payload)) + output.PayloadType = string(input.GetPayloadType()) + output.Signatures = make([]dsse.Signature, len(input.GetSignatures())) + for i, sig := range input.GetSignatures() { + if sig == nil { + return nil, ErrMissingEnvelope + } + output.Signatures[i].KeyID = sig.GetKeyid() + output.Signatures[i].Sig = base64.StdEncoding.EncodeToString(sig.GetSig()) + } + return &Envelope{Envelope: output}, nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/bundle/signature_content.go b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/signature_content.go new file mode 100644 index 0000000000..17b26e937e --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/signature_content.go @@ -0,0 +1,106 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
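+
+// Illustrative usage sketch (editorial addition, not part of the upstream
+// file; assumes a *Bundle value b obtained elsewhere, e.g. via
+// LoadJSONFromPath, with error handling elided):
+//
+//	env, err := b.Envelope()             // DSSE-based bundles only
+//	stmt, err := env.Statement()         // decodes the in-toto statement payload
+//	fmt.Println(stmt.GetPredicateType())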
+ +package bundle + +import ( + "encoding/base64" + + in_toto "github.com/in-toto/attestation/go/v1" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + "github.com/sigstore/sigstore-go/pkg/verify" + "google.golang.org/protobuf/encoding/protojson" +) + +const IntotoMediaType = "application/vnd.in-toto+json" + +type MessageSignature struct { + digest []byte + digestAlgorithm string + signature []byte +} + +func (m *MessageSignature) Digest() []byte { + return m.digest +} + +func (m *MessageSignature) DigestAlgorithm() string { + return m.digestAlgorithm +} + +func NewMessageSignature(digest []byte, digestAlgorithm string, signature []byte) *MessageSignature { + return &MessageSignature{ + digest: digest, + digestAlgorithm: digestAlgorithm, + signature: signature, + } +} + +type Envelope struct { + *dsse.Envelope +} + +func (e *Envelope) Statement() (*in_toto.Statement, error) { + if e.PayloadType != IntotoMediaType { + return nil, ErrUnsupportedMediaType + } + + var statement in_toto.Statement + raw, err := e.DecodeB64Payload() + if err != nil { + return nil, ErrDecodingB64 + } + err = protojson.Unmarshal(raw, &statement) + if err != nil { + return nil, ErrDecodingJSON + } + return &statement, nil +} + +func (e *Envelope) EnvelopeContent() verify.EnvelopeContent { + return e +} + +func (e *Envelope) RawEnvelope() *dsse.Envelope { + return e.Envelope +} + +func (m *MessageSignature) EnvelopeContent() verify.EnvelopeContent { + return nil +} + +func (e *Envelope) MessageSignatureContent() verify.MessageSignatureContent { + return nil +} + +func (m *MessageSignature) MessageSignatureContent() verify.MessageSignatureContent { + return m +} + +func (m *MessageSignature) Signature() []byte { + return m.signature +} + +func (e *Envelope) Signature() []byte { + if len(e.Envelope.Signatures) == 0 { + return []byte{} + } + + sigBytes, err := base64.StdEncoding.DecodeString(e.Envelope.Signatures[0].Sig) + if err != nil { + return []byte{} + } + + return sigBytes +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/bundle/verification_content.go b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/verification_content.go new file mode 100644 index 0000000000..b775295dea --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/verification_content.go @@ -0,0 +1,88 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
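+
+// Illustrative sketch (editorial addition, not part of the upstream file;
+// assumes a *Bundle value b and a root.TrustedMaterial value tm obtained
+// elsewhere): checking the bundle's verification content against trusted
+// material.
+//
+//	vc, err := b.VerificationContent()
+//	if err == nil && vc.GetCertificate() != nil {
+//		ok := vc.ValidAtTime(time.Now(), tm) // false outside the certificate's validity window
+//		_ = ok
+//	}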
+ +package bundle + +import ( + "crypto" + "crypto/x509" + "time" + + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/verify" +) + +type Certificate struct { + *x509.Certificate +} + +type PublicKey struct { + hint string +} + +func (pk PublicKey) Hint() string { + return pk.hint +} + +func (c *Certificate) CompareKey(key any, _ root.TrustedMaterial) bool { + x509Key, ok := key.(*x509.Certificate) + if !ok { + return false + } + + return c.Certificate.Equal(x509Key) +} + +func (c *Certificate) ValidAtTime(t time.Time, _ root.TrustedMaterial) bool { + return !(c.Certificate.NotAfter.Before(t) || c.Certificate.NotBefore.After(t)) +} + +func (c *Certificate) GetCertificate() *x509.Certificate { + return c.Certificate +} + +func (c *Certificate) HasPublicKey() (verify.PublicKeyProvider, bool) { + return PublicKey{}, false +} + +func (pk *PublicKey) CompareKey(key any, tm root.TrustedMaterial) bool { + verifier, err := tm.PublicKeyVerifier(pk.hint) + if err != nil { + return false + } + pubKey, err := verifier.PublicKey() + if err != nil { + return false + } + if equaler, ok := key.(interface{ Equal(x crypto.PublicKey) bool }); ok { + return equaler.Equal(pubKey) + } + return false +} + +func (pk *PublicKey) ValidAtTime(t time.Time, tm root.TrustedMaterial) bool { + verifier, err := tm.PublicKeyVerifier(pk.hint) + if err != nil { + return false + } + return verifier.ValidAtTime(t) +} + +func (pk *PublicKey) GetCertificate() *x509.Certificate { + return nil +} + +func (pk *PublicKey) HasPublicKey() (verify.PublicKeyProvider, bool) { + return *pk, true +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go new file mode 100644 index 0000000000..e9a0680d16 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go @@ -0,0 +1,239 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
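+
+// Illustrative sketch (editorial addition, not part of the upstream file;
+// assumes an *x509.Certificate value cert issued by Fulcio): reading the
+// custom Sigstore OID extensions from a consuming package.
+//
+//	exts, err := certificate.ParseExtensions(cert.Extensions)
+//	if err == nil {
+//		fmt.Println(exts.Issuer, exts.SourceRepositoryURI, exts.BuildTrigger)
+//	}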
+
+// This file is a verbatim copy of https://github.com/sigstore/fulcio/blob/3707d80bb25330bc7ffbd9702fb401cd643e36fa/pkg/certificate/extensions.go,
+// EXCEPT:
+// - the parseExtensions func has been renamed ParseExtensions
+
+package certificate
+
+import (
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+)
+
+var (
+	// Deprecated: Use OIDIssuerV2
+	OIDIssuer = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 1}
+	// Deprecated: Use OIDBuildTrigger
+	OIDGitHubWorkflowTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 2}
+	// Deprecated: Use OIDSourceRepositoryDigest
+	OIDGitHubWorkflowSHA = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 3}
+	// Deprecated: Use OIDBuildConfigURI or OIDBuildConfigDigest
+	OIDGitHubWorkflowName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 4}
+	// Deprecated: Use OIDSourceRepositoryURI
+	OIDGitHubWorkflowRepository = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 5}
+	// Deprecated: Use OIDSourceRepositoryRef
+	OIDGitHubWorkflowRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 6}
+
+	OIDOtherName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7}
+	OIDIssuerV2  = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 8}
+
+	// CI extensions
+	OIDBuildSignerURI                      = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 9}
+	OIDBuildSignerDigest                   = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 10}
+	OIDRunnerEnvironment                   = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 11}
+	OIDSourceRepositoryURI                 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 12}
+	OIDSourceRepositoryDigest              = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 13}
+	OIDSourceRepositoryRef                 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 14}
+	OIDSourceRepositoryIdentifier          = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 15}
+	OIDSourceRepositoryOwnerURI            = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 16}
+	OIDSourceRepositoryOwnerIdentifier     = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 17}
+	OIDBuildConfigURI                      = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 18}
+	OIDBuildConfigDigest                   = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 19}
+	OIDBuildTrigger                        = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 20}
+	OIDRunInvocationURI                    = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 21}
+	OIDSourceRepositoryVisibilityAtSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 22}
+)
+
+// Extensions contains all custom x509 extensions defined by Fulcio
+type Extensions struct {
+	// NB: New extensions must be added here and documented
+	// at docs/oidc-info.md
+
+	// The OIDC issuer. Should match the `iss` claim of the ID token or, in the
+	// case of a federated login like Dex, it should match the issuer URL of
+	// the upstream issuer. If the issuer is not set, the extensions are
+	// invalid and will fail to render.
+	Issuer string `json:"issuer,omitempty"` // OID 1.3.6.1.4.1.57264.1.8 and 1.3.6.1.4.1.57264.1.1 (Deprecated)
+
+	// Deprecated
+	// Triggering event of the Github Workflow. Matches the `event_name` claim of ID
+	// tokens from Github Actions
+	GithubWorkflowTrigger string `json:"githubWorkflowTrigger,omitempty"` // OID 1.3.6.1.4.1.57264.1.2
+
+	// Deprecated
+	// SHA of git commit being built in Github Actions. Matches the `sha` claim of ID
+	// tokens from Github Actions
+	GithubWorkflowSHA string `json:"githubWorkflowSHA,omitempty"` //nolint:tagliatelle // OID 1.3.6.1.4.1.57264.1.3
+
+	// Deprecated
+	// Name of Github Actions Workflow.
Matches the `workflow` claim of the ID + // tokens from Github Actions + GithubWorkflowName string `json:"githubWorkflowName,omitempty"` // OID 1.3.6.1.4.1.57264.1.4 + + // Deprecated + // Repository of the Github Actions Workflow. Matches the `repository` claim of the ID + // tokens from Github Actions + GithubWorkflowRepository string `json:"githubWorkflowRepository,omitempty"` // OID 1.3.6.1.4.1.57264.1.5 + + // Deprecated + // Git Ref of the Github Actions Workflow. Matches the `ref` claim of the ID tokens + // from Github Actions + GithubWorkflowRef string `json:"githubWorkflowRef,omitempty"` // 1.3.6.1.4.1.57264.1.6 + + // Reference to specific build instructions that are responsible for signing. + BuildSignerURI string `json:"buildSignerURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.9 + + // Immutable reference to the specific version of the build instructions that is responsible for signing. + BuildSignerDigest string `json:"buildSignerDigest,omitempty"` // 1.3.6.1.4.1.57264.1.10 + + // Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure. + RunnerEnvironment string `json:"runnerEnvironment,omitempty"` // 1.3.6.1.4.1.57264.1.11 + + // Source repository URL that the build was based on. + SourceRepositoryURI string `json:"sourceRepositoryURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.12 + + // Immutable reference to a specific version of the source code that the build was based upon. + SourceRepositoryDigest string `json:"sourceRepositoryDigest,omitempty"` // 1.3.6.1.4.1.57264.1.13 + + // Source Repository Ref that the build run was based upon. + SourceRepositoryRef string `json:"sourceRepositoryRef,omitempty"` // 1.3.6.1.4.1.57264.1.14 + + // Immutable identifier for the source repository the workflow was based upon. + SourceRepositoryIdentifier string `json:"sourceRepositoryIdentifier,omitempty"` // 1.3.6.1.4.1.57264.1.15 + + // Source repository owner URL of the owner of the source repository that the build was based on. + SourceRepositoryOwnerURI string `json:"sourceRepositoryOwnerURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.16 + + // Immutable identifier for the owner of the source repository that the workflow was based upon. + SourceRepositoryOwnerIdentifier string `json:"sourceRepositoryOwnerIdentifier,omitempty"` // 1.3.6.1.4.1.57264.1.17 + + // Build Config URL to the top-level/initiating build instructions. + BuildConfigURI string `json:"buildConfigURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.18 + + // Immutable reference to the specific version of the top-level/initiating build instructions. + BuildConfigDigest string `json:"buildConfigDigest,omitempty"` // 1.3.6.1.4.1.57264.1.19 + + // Event or action that initiated the build. + BuildTrigger string `json:"buildTrigger,omitempty"` // 1.3.6.1.4.1.57264.1.20 + + // Run Invocation URL to uniquely identify the build execution. + RunInvocationURI string `json:"runInvocationURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.21 + + // Source repository visibility at the time of signing the certificate. 
+ SourceRepositoryVisibilityAtSigning string `json:"sourceRepositoryVisibilityAtSigning,omitempty"` // 1.3.6.1.4.1.57264.1.22 +} + +func ParseExtensions(ext []pkix.Extension) (Extensions, error) { + out := Extensions{} + + for _, e := range ext { + switch { + // BEGIN: Deprecated + case e.Id.Equal(OIDIssuer): + out.Issuer = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowTrigger): + out.GithubWorkflowTrigger = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowSHA): + out.GithubWorkflowSHA = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowName): + out.GithubWorkflowName = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowRepository): + out.GithubWorkflowRepository = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowRef): + out.GithubWorkflowRef = string(e.Value) + // END: Deprecated + case e.Id.Equal(OIDIssuerV2): + if err := ParseDERString(e.Value, &out.Issuer); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildSignerURI): + if err := ParseDERString(e.Value, &out.BuildSignerURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildSignerDigest): + if err := ParseDERString(e.Value, &out.BuildSignerDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDRunnerEnvironment): + if err := ParseDERString(e.Value, &out.RunnerEnvironment); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryURI): + if err := ParseDERString(e.Value, &out.SourceRepositoryURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryDigest): + if err := ParseDERString(e.Value, &out.SourceRepositoryDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryRef): + if err := ParseDERString(e.Value, &out.SourceRepositoryRef); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryIdentifier): + if err := ParseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryOwnerURI): + if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryOwnerIdentifier): + if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildConfigURI): + if err := ParseDERString(e.Value, &out.BuildConfigURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildConfigDigest): + if err := ParseDERString(e.Value, &out.BuildConfigDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildTrigger): + if err := ParseDERString(e.Value, &out.BuildTrigger); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDRunInvocationURI): + if err := ParseDERString(e.Value, &out.RunInvocationURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryVisibilityAtSigning): + if err := ParseDERString(e.Value, &out.SourceRepositoryVisibilityAtSigning); err != nil { + return Extensions{}, err + } + } + } + + // We only ever return nil, but leaving error in place so that we can add + // more complex parsing of fields in a backwards compatible way if needed. + return out, nil +} + +// ParseDERString decodes a DER-encoded string and puts the value in parsedVal. +// Returns an error if the unmarshalling fails or if there are trailing bytes in the encoding. 
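+//
+// For illustration (editorial example; "push" is an arbitrary value), a
+// round-trip through encoding/asn1 looks like:
+//
+//	der, _ := asn1.Marshal("push") // DER-encoded PrintableString
+//	var s string
+//	err := ParseDERString(der, &s) // err == nil, s == "push"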
+func ParseDERString(val []byte, parsedVal *string) error {
+	rest, err := asn1.Unmarshal(val, parsedVal)
+	if err != nil {
+		return fmt.Errorf("unexpected error unmarshalling DER-encoded string: %w", err)
+	}
+	if len(rest) != 0 {
+		return errors.New("unexpected trailing bytes in DER-encoded string")
+	}
+	return nil
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go
new file mode 100644
index 0000000000..78b7fee037
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go
@@ -0,0 +1,90 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package certificate
+
+import (
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"reflect"
+
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+type Summary struct {
+	CertificateIssuer      string `json:"certificateIssuer"`
+	SubjectAlternativeName string `json:"subjectAlternativeName"`
+	Extensions
+}
+
+type ErrCompareExtensions struct {
+	field    string
+	expected string
+	actual   string
+}
+
+func (e *ErrCompareExtensions) Error() string {
+	return fmt.Sprintf("expected %s to be \"%s\", got \"%s\"", e.field, e.expected, e.actual)
+}
+
+func SummarizeCertificate(cert *x509.Certificate) (Summary, error) {
+	extensions, err := ParseExtensions(cert.Extensions)
+	if err != nil {
+		return Summary{}, err
+	}
+
+	var san string
+
+	switch {
+	case len(cert.URIs) > 0:
+		san = cert.URIs[0].String()
+	case len(cert.EmailAddresses) > 0:
+		san = cert.EmailAddresses[0]
+	}
+	if san == "" {
+		san, _ = cryptoutils.UnmarshalOtherNameSAN(cert.Extensions)
+	}
+	if san == "" {
+		return Summary{}, errors.New("no Subject Alternative Name found")
+	}
+
+	return Summary{CertificateIssuer: cert.Issuer.String(), SubjectAlternativeName: san, Extensions: extensions}, nil
+}
+
+// CompareExtensions compares two Extensions structs and returns an error if
+// any set values in the expected struct are not equal. Empty fields in the
+// expectedExt struct are ignored.
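+//
+// For illustration (editorial example; the expected issuer shown is the
+// GitHub Actions OIDC issuer, and any other set field works the same way):
+//
+//	err := CompareExtensions(
+//		Extensions{Issuer: "https://token.actions.githubusercontent.com"},
+//		actual, // e.g. as returned by ParseExtensions
+//	)
+//	// err is non-nil if actual.Issuer differs; zero-valued expected fields are ignored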
+func CompareExtensions(expectedExt, actualExt Extensions) error { + expExtValue := reflect.ValueOf(expectedExt) + actExtValue := reflect.ValueOf(actualExt) + + fields := reflect.VisibleFields(expExtValue.Type()) + for _, field := range fields { + expectedFieldVal := expExtValue.FieldByName(field.Name) + + // if the expected field is empty, skip it + if expectedFieldVal.IsValid() && !expectedFieldVal.IsZero() { + actualFieldVal := actExtValue.FieldByName(field.Name) + if actualFieldVal.IsValid() { + if expectedFieldVal.Interface() != actualFieldVal.Interface() { + return &ErrCompareExtensions{field.Name, fmt.Sprintf("%v", expectedFieldVal.Interface()), fmt.Sprintf("%v", actualFieldVal.Interface())} + } + } + } + } + + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go new file mode 100644 index 0000000000..4e47f0ab7b --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go @@ -0,0 +1,172 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package root + +import ( + "fmt" + "time" + + "github.com/sigstore/sigstore/pkg/signature" +) + +type TrustedMaterial interface { + TimestampingAuthorities() []CertificateAuthority + FulcioCertificateAuthorities() []CertificateAuthority + RekorLogs() map[string]*TransparencyLog + CTLogs() map[string]*TransparencyLog + PublicKeyVerifier(string) (TimeConstrainedVerifier, error) +} + +type BaseTrustedMaterial struct{} + +func (b *BaseTrustedMaterial) TimestampingAuthorities() []CertificateAuthority { + return []CertificateAuthority{} +} + +func (b *BaseTrustedMaterial) FulcioCertificateAuthorities() []CertificateAuthority { + return []CertificateAuthority{} +} + +func (b *BaseTrustedMaterial) RekorLogs() map[string]*TransparencyLog { + return map[string]*TransparencyLog{} +} + +func (b *BaseTrustedMaterial) CTLogs() map[string]*TransparencyLog { + return map[string]*TransparencyLog{} +} + +func (b *BaseTrustedMaterial) PublicKeyVerifier(_ string) (TimeConstrainedVerifier, error) { + return nil, fmt.Errorf("public key verifier not found") +} + +type TrustedMaterialCollection []TrustedMaterial + +// Ensure types implement interfaces +var _ TrustedMaterial = &BaseTrustedMaterial{} +var _ TrustedMaterial = TrustedMaterialCollection{} + +func (tmc TrustedMaterialCollection) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) { + for _, tm := range tmc { + verifier, err := tm.PublicKeyVerifier(keyID) + if err == nil { + return verifier, nil + } + } + return nil, fmt.Errorf("public key verifier not found for keyID: %s", keyID) +} + +func (tmc TrustedMaterialCollection) TimestampingAuthorities() []CertificateAuthority { + var certAuthorities []CertificateAuthority + for _, tm := range tmc { + certAuthorities = append(certAuthorities, tm.TimestampingAuthorities()...) 
+ } + return certAuthorities +} + +func (tmc TrustedMaterialCollection) FulcioCertificateAuthorities() []CertificateAuthority { + var certAuthorities []CertificateAuthority + for _, tm := range tmc { + certAuthorities = append(certAuthorities, tm.FulcioCertificateAuthorities()...) + } + return certAuthorities +} + +func (tmc TrustedMaterialCollection) RekorLogs() map[string]*TransparencyLog { + rekorLogs := make(map[string]*TransparencyLog) + for _, tm := range tmc { + for keyID, tlogVerifier := range tm.RekorLogs() { + rekorLogs[keyID] = tlogVerifier + } + } + return rekorLogs +} + +func (tmc TrustedMaterialCollection) CTLogs() map[string]*TransparencyLog { + rekorLogs := make(map[string]*TransparencyLog) + for _, tm := range tmc { + for keyID, tlogVerifier := range tm.CTLogs() { + rekorLogs[keyID] = tlogVerifier + } + } + return rekorLogs +} + +type ValidityPeriodChecker interface { + ValidAtTime(time.Time) bool +} + +type TimeConstrainedVerifier interface { + ValidityPeriodChecker + signature.Verifier +} + +type TrustedPublicKeyMaterial struct { + BaseTrustedMaterial + publicKeyVerifier func(string) (TimeConstrainedVerifier, error) +} + +func (tr *TrustedPublicKeyMaterial) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) { + return tr.publicKeyVerifier(keyID) +} + +func NewTrustedPublicKeyMaterial(publicKeyVerifier func(string) (TimeConstrainedVerifier, error)) *TrustedPublicKeyMaterial { + return &TrustedPublicKeyMaterial{ + publicKeyVerifier: publicKeyVerifier, + } +} + +// ExpiringKey is a TimeConstrainedVerifier with a static validity period. +type ExpiringKey struct { + signature.Verifier + validityPeriodStart time.Time + validityPeriodEnd time.Time +} + +var _ TimeConstrainedVerifier = &ExpiringKey{} + +// ValidAtTime returns true if the key is valid at the given time. If the +// validity period start time is not set, the key is considered valid for all +// times before the end time. Likewise, if the validity period end time is not +// set, the key is considered valid for all times after the start time. +func (k *ExpiringKey) ValidAtTime(t time.Time) bool { + if !k.validityPeriodStart.IsZero() && t.Before(k.validityPeriodStart) { + return false + } + if !k.validityPeriodEnd.IsZero() && t.After(k.validityPeriodEnd) { + return false + } + return true +} + +// NewExpiringKey returns a new ExpiringKey with the given validity period +func NewExpiringKey(verifier signature.Verifier, validityPeriodStart, validityPeriodEnd time.Time) *ExpiringKey { + return &ExpiringKey{ + Verifier: verifier, + validityPeriodStart: validityPeriodStart, + validityPeriodEnd: validityPeriodEnd, + } +} + +// NewTrustedPublicKeyMaterialFromMapping returns a TrustedPublicKeyMaterial from a map of key IDs to +// ExpiringKeys. +func NewTrustedPublicKeyMaterialFromMapping(trustedPublicKeys map[string]*ExpiringKey) *TrustedPublicKeyMaterial { + return NewTrustedPublicKeyMaterial(func(keyID string) (TimeConstrainedVerifier, error) { + expiringKey, ok := trustedPublicKeys[keyID] + if !ok { + return nil, fmt.Errorf("public key not found for keyID: %s", keyID) + } + return expiringKey, nil + }) +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go new file mode 100644 index 0000000000..3112aebb53 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go @@ -0,0 +1,459 @@ +// Copyright 2023 The Sigstore Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package root + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/hex" + "fmt" + "log" + "os" + "sync" + "time" + + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1" + "github.com/sigstore/sigstore-go/pkg/tuf" + "google.golang.org/protobuf/encoding/protojson" +) + +const TrustedRootMediaType01 = "application/vnd.dev.sigstore.trustedroot+json;version=0.1" + +type TrustedRoot struct { + BaseTrustedMaterial + trustedRoot *prototrustroot.TrustedRoot + rekorLogs map[string]*TransparencyLog + fulcioCertAuthorities []CertificateAuthority + ctLogs map[string]*TransparencyLog + timestampingAuthorities []CertificateAuthority +} + +type CertificateAuthority struct { + Root *x509.Certificate + Intermediates []*x509.Certificate + Leaf *x509.Certificate + ValidityPeriodStart time.Time + ValidityPeriodEnd time.Time + URI string +} + +type TransparencyLog struct { + BaseURL string + ID []byte + ValidityPeriodStart time.Time + ValidityPeriodEnd time.Time + // This is the hash algorithm used by the Merkle tree + HashFunc crypto.Hash + PublicKey crypto.PublicKey + // The hash algorithm used during signature creation + SignatureHashFunc crypto.Hash +} + +func (tr *TrustedRoot) TimestampingAuthorities() []CertificateAuthority { + return tr.timestampingAuthorities +} + +func (tr *TrustedRoot) FulcioCertificateAuthorities() []CertificateAuthority { + return tr.fulcioCertAuthorities +} + +func (tr *TrustedRoot) RekorLogs() map[string]*TransparencyLog { + return tr.rekorLogs +} + +func (tr *TrustedRoot) CTLogs() map[string]*TransparencyLog { + return tr.ctLogs +} + +func (tr *TrustedRoot) MarshalJSON() ([]byte, error) { + err := tr.constructProtoTrustRoot() + if err != nil { + return nil, fmt.Errorf("failed constructing protobuf TrustRoot representation: %w", err) + } + + return protojson.Marshal(tr.trustedRoot) +} + +func NewTrustedRootFromProtobuf(protobufTrustedRoot *prototrustroot.TrustedRoot) (trustedRoot *TrustedRoot, err error) { + if protobufTrustedRoot.GetMediaType() != TrustedRootMediaType01 { + return nil, fmt.Errorf("unsupported TrustedRoot media type: %s", protobufTrustedRoot.GetMediaType()) + } + + trustedRoot = &TrustedRoot{trustedRoot: protobufTrustedRoot} + trustedRoot.rekorLogs, err = ParseTransparencyLogs(protobufTrustedRoot.GetTlogs()) + if err != nil { + return nil, err + } + + trustedRoot.fulcioCertAuthorities, err = ParseCertificateAuthorities(protobufTrustedRoot.GetCertificateAuthorities()) + if err != nil { + return nil, err + } + + trustedRoot.timestampingAuthorities, err = ParseCertificateAuthorities(protobufTrustedRoot.GetTimestampAuthorities()) + if err != nil { + return nil, err + } + + trustedRoot.ctLogs, err = ParseTransparencyLogs(protobufTrustedRoot.GetCtlogs()) + if err != nil { + return nil, err + } + + return trustedRoot, nil +} + +func 
ParseTransparencyLogs(tlogs []*prototrustroot.TransparencyLogInstance) (transparencyLogs map[string]*TransparencyLog, err error) {
+	transparencyLogs = make(map[string]*TransparencyLog)
+	for _, tlog := range tlogs {
+		if tlog.GetHashAlgorithm() != protocommon.HashAlgorithm_SHA2_256 {
+			return nil, fmt.Errorf("unsupported tlog hash algorithm: %s", tlog.GetHashAlgorithm())
+		}
+		if tlog.GetLogId() == nil {
+			return nil, fmt.Errorf("tlog missing log ID")
+		}
+		if tlog.GetLogId().GetKeyId() == nil {
+			return nil, fmt.Errorf("tlog missing log ID key ID")
+		}
+		encodedKeyID := hex.EncodeToString(tlog.GetLogId().GetKeyId())
+
+		if tlog.GetPublicKey() == nil {
+			return nil, fmt.Errorf("tlog missing public key")
+		}
+		if tlog.GetPublicKey().GetRawBytes() == nil {
+			return nil, fmt.Errorf("tlog missing public key raw bytes")
+		}
+
+		var hashFunc crypto.Hash
+		switch tlog.GetHashAlgorithm() {
+		case protocommon.HashAlgorithm_SHA2_256:
+			hashFunc = crypto.SHA256
+		default:
+			return nil, fmt.Errorf("unsupported hash function for the tlog")
+		}
+
+		tlogEntry := &TransparencyLog{
+			BaseURL:           tlog.GetBaseUrl(),
+			ID:                tlog.GetLogId().GetKeyId(),
+			HashFunc:          hashFunc,
+			SignatureHashFunc: crypto.SHA256,
+		}
+
+		switch tlog.GetPublicKey().GetKeyDetails() {
+		case protocommon.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256,
+			protocommon.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384,
+			protocommon.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512:
+			key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes())
+			if err != nil {
+				return nil, err
+			}
+			var ecKey *ecdsa.PublicKey
+			var ok bool
+			if ecKey, ok = key.(*ecdsa.PublicKey); !ok {
+				return nil, fmt.Errorf("tlog public key is not ECDSA: %s", tlog.GetPublicKey().GetKeyDetails())
+			}
+			tlogEntry.PublicKey = ecKey
+		// This key format has public key in PKIX RSA format and PKCS#1 v1.5 or RSASSA-PSS signature
+		case protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256,
+			protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256,
+			protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256:
+			key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes())
+			if err != nil {
+				return nil, err
+			}
+			var rsaKey *rsa.PublicKey
+			var ok bool
+			if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+				return nil, fmt.Errorf("tlog public key is not RSA: %s", tlog.GetPublicKey().GetKeyDetails())
+			}
+			tlogEntry.PublicKey = rsaKey
+		case protocommon.PublicKeyDetails_PKIX_ED25519: //nolint:staticcheck
+			key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes())
+			if err != nil {
+				return nil, err
+			}
+			var edKey ed25519.PublicKey
+			var ok bool
+			if edKey, ok = key.(ed25519.PublicKey); !ok {
+				return nil, fmt.Errorf("tlog public key is not Ed25519: %s", tlog.GetPublicKey().GetKeyDetails())
+			}
+			tlogEntry.PublicKey = edKey
+		// This key format is deprecated, but currently in use for Sigstore staging instance
+		case protocommon.PublicKeyDetails_PKCS1_RSA_PKCS1V5: //nolint:staticcheck
+			key, err := x509.ParsePKCS1PublicKey(tlog.GetPublicKey().GetRawBytes())
+			if err != nil {
+				return nil, err
+			}
+			tlogEntry.PublicKey = key
+		default:
+			return nil, fmt.Errorf("unsupported tlog public key type: %s", tlog.GetPublicKey().GetKeyDetails())
+		}
+
+		tlogEntry.SignatureHashFunc = getSignatureHashAlgo(tlogEntry.PublicKey)
+		transparencyLogs[encodedKeyID] = tlogEntry
+
+		if validFor := tlog.GetPublicKey().GetValidFor(); validFor != nil {
+			if validFor.GetStart() != nil {
+				transparencyLogs[encodedKeyID].ValidityPeriodStart = validFor.GetStart().AsTime()
+			} else {
+				return nil,
fmt.Errorf("tlog missing public key validity period start time") + } + if validFor.GetEnd() != nil { + transparencyLogs[encodedKeyID].ValidityPeriodEnd = validFor.GetEnd().AsTime() + } + } else { + return nil, fmt.Errorf("tlog missing public key validity period") + } + } + return transparencyLogs, nil +} + +func ParseCertificateAuthorities(certAuthorities []*prototrustroot.CertificateAuthority) (certificateAuthorities []CertificateAuthority, err error) { + certificateAuthorities = make([]CertificateAuthority, len(certAuthorities)) + for i, certAuthority := range certAuthorities { + certificateAuthority, err := ParseCertificateAuthority(certAuthority) + if err != nil { + return nil, err + } + certificateAuthorities[i] = *certificateAuthority + } + return certificateAuthorities, nil +} + +func ParseCertificateAuthority(certAuthority *prototrustroot.CertificateAuthority) (certificateAuthority *CertificateAuthority, err error) { + if certAuthority == nil { + return nil, fmt.Errorf("CertificateAuthority is nil") + } + certChain := certAuthority.GetCertChain() + if certChain == nil { + return nil, fmt.Errorf("CertificateAuthority missing cert chain") + } + chainLen := len(certChain.GetCertificates()) + if chainLen < 1 { + return nil, fmt.Errorf("CertificateAuthority cert chain is empty") + } + + certificateAuthority = &CertificateAuthority{ + URI: certAuthority.Uri, + } + for i, cert := range certChain.GetCertificates() { + parsedCert, err := x509.ParseCertificate(cert.RawBytes) + if err != nil { + return nil, err + } + switch { + case i == 0 && !parsedCert.IsCA: + certificateAuthority.Leaf = parsedCert + case i < chainLen-1: + certificateAuthority.Intermediates = append(certificateAuthority.Intermediates, parsedCert) + case i == chainLen-1: + certificateAuthority.Root = parsedCert + } + } + validFor := certAuthority.GetValidFor() + if validFor != nil { + start := validFor.GetStart() + if start != nil { + certificateAuthority.ValidityPeriodStart = start.AsTime() + } + end := validFor.GetEnd() + if end != nil { + certificateAuthority.ValidityPeriodEnd = end.AsTime() + } + } + + // TODO: Should we inspect/enforce ca.Subject and ca.Uri? + // TODO: Handle validity period (ca.ValidFor) + + return certificateAuthority, nil +} + +func NewTrustedRootFromPath(path string) (*TrustedRoot, error) { + trustedrootJSON, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + return NewTrustedRootFromJSON(trustedrootJSON) +} + +// NewTrustedRootFromJSON returns the Sigstore trusted root. +func NewTrustedRootFromJSON(rootJSON []byte) (*TrustedRoot, error) { + pbTrustedRoot, err := NewTrustedRootProtobuf(rootJSON) + if err != nil { + return nil, err + } + + return NewTrustedRootFromProtobuf(pbTrustedRoot) +} + +// NewTrustedRootProtobuf returns the Sigstore trusted root as a protobuf. +func NewTrustedRootProtobuf(rootJSON []byte) (*prototrustroot.TrustedRoot, error) { + pbTrustedRoot := &prototrustroot.TrustedRoot{} + err := protojson.Unmarshal(rootJSON, pbTrustedRoot) + if err != nil { + return nil, err + } + return pbTrustedRoot, nil +} + +// NewTrustedRoot initializes a TrustedRoot object from a mediaType string, list of Fulcio +// certificate authorities, list of timestamp authorities and maps of ctlogs and rekor +// transparency log instances. 
+func NewTrustedRoot(mediaType string,
+	certificateAuthorities []CertificateAuthority,
+	certificateTransparencyLogs map[string]*TransparencyLog,
+	timestampAuthorities []CertificateAuthority,
+	transparencyLogs map[string]*TransparencyLog) (*TrustedRoot, error) {
+	// Note: assumes one certificate chain per authority, with certificates
+	// already ordered from leaf to root.
+	if mediaType != TrustedRootMediaType01 {
+		return nil, fmt.Errorf("unsupported TrustedRoot media type: %s", mediaType)
+	}
+	tr := &TrustedRoot{
+		fulcioCertAuthorities:   certificateAuthorities,
+		ctLogs:                  certificateTransparencyLogs,
+		timestampingAuthorities: timestampAuthorities,
+		rekorLogs:               transparencyLogs,
+	}
+	return tr, nil
+}
+
+// FetchTrustedRoot fetches the Sigstore trusted root from TUF and returns it.
+func FetchTrustedRoot() (*TrustedRoot, error) {
+	return FetchTrustedRootWithOptions(tuf.DefaultOptions())
+}
+
+// FetchTrustedRootWithOptions fetches the trusted root from TUF with the given options and returns it.
+func FetchTrustedRootWithOptions(opts *tuf.Options) (*TrustedRoot, error) {
+	client, err := tuf.New(opts)
+	if err != nil {
+		return nil, err
+	}
+	return GetTrustedRoot(client)
+}
+
+// GetTrustedRoot returns the trusted root fetched via the given TUF client.
+func GetTrustedRoot(c *tuf.Client) (*TrustedRoot, error) {
+	jsonBytes, err := c.GetTarget("trusted_root.json")
+	if err != nil {
+		return nil, err
+	}
+	return NewTrustedRootFromJSON(jsonBytes)
+}
+
+func getSignatureHashAlgo(pubKey crypto.PublicKey) crypto.Hash {
+	var h crypto.Hash
+	switch pk := pubKey.(type) {
+	case *rsa.PublicKey:
+		h = crypto.SHA256
+	case *ecdsa.PublicKey:
+		switch pk.Curve {
+		case elliptic.P256():
+			h = crypto.SHA256
+		case elliptic.P384():
+			h = crypto.SHA384
+		case elliptic.P521():
+			h = crypto.SHA512
+		default:
+			h = crypto.SHA256
+		}
+	case ed25519.PublicKey:
+		h = crypto.SHA512
+	default:
+		h = crypto.SHA256
+	}
+	return h
+}
+
+// LiveTrustedRoot is a wrapper around TrustedRoot that periodically
+// refreshes the trusted root from TUF. This is needed for long-running
+// processes to ensure that the trusted root does not expire.
+type LiveTrustedRoot struct {
+	*TrustedRoot
+	mu sync.RWMutex
+}
+
+// NewLiveTrustedRoot returns a LiveTrustedRoot that will periodically
+// refresh the trusted root from TUF.
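+//
+// Illustrative usage (editorial sketch; the refresh runs in a background
+// goroutine every 24 hours):
+//
+//	ltr, err := NewLiveTrustedRoot(tuf.DefaultOptions())
+//	if err == nil {
+//		logs := ltr.RekorLogs() // accessors take a read-lock against concurrent refresh
+//		_ = logs
+//	}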
+func NewLiveTrustedRoot(opts *tuf.Options) (*LiveTrustedRoot, error) {
+	client, err := tuf.New(opts)
+	if err != nil {
+		return nil, err
+	}
+	tr, err := GetTrustedRoot(client)
+	if err != nil {
+		return nil, err
+	}
+	ltr := &LiveTrustedRoot{
+		TrustedRoot: tr,
+		mu:          sync.RWMutex{},
+	}
+	ticker := time.NewTicker(time.Hour * 24)
+	go func() {
+		for {
+			select {
+			case <-ticker.C:
+				client, err = tuf.New(opts)
+				if err != nil {
+					log.Printf("error creating TUF client: %v", err)
+					// Skip this round rather than calling into a nil client.
+					continue
+				}
+				newTr, err := GetTrustedRoot(client)
+				if err != nil {
+					log.Printf("error fetching trusted root: %v", err)
+					continue
+				}
+				ltr.mu.Lock()
+				ltr.TrustedRoot = newTr
+				ltr.mu.Unlock()
+			}
+		}
+	}()
+	return ltr, nil
+}
+
+func (l *LiveTrustedRoot) TimestampingAuthorities() []CertificateAuthority {
+	l.mu.RLock()
+	defer l.mu.RUnlock()
+	return l.TrustedRoot.TimestampingAuthorities()
+}
+
+func (l *LiveTrustedRoot) FulcioCertificateAuthorities() []CertificateAuthority {
+	l.mu.RLock()
+	defer l.mu.RUnlock()
+	return l.TrustedRoot.FulcioCertificateAuthorities()
+}
+
+func (l *LiveTrustedRoot) RekorLogs() map[string]*TransparencyLog {
+	l.mu.RLock()
+	defer l.mu.RUnlock()
+	return l.TrustedRoot.RekorLogs()
+}
+
+func (l *LiveTrustedRoot) CTLogs() map[string]*TransparencyLog {
+	l.mu.RLock()
+	defer l.mu.RUnlock()
+	return l.TrustedRoot.CTLogs()
+}
+
+func (l *LiveTrustedRoot) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) {
+	l.mu.RLock()
+	defer l.mu.RUnlock()
+	return l.TrustedRoot.PublicKeyVerifier(keyID)
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go
new file mode 100644
index 0000000000..b9de08b29f
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go
@@ -0,0 +1,235 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
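A usage sketch for the trusted-root API above (not part of the vendored sources; the local file path and variable names are illustrative assumptions):

	package main

	import (
		"log"

		"github.com/sigstore/sigstore-go/pkg/root"
		"github.com/sigstore/sigstore-go/pkg/tuf"
	)

	func main() {
		// One-shot: load a trusted root from a local file (hypothetical path).
		tr, err := root.NewTrustedRootFromPath("trusted_root.json")
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("loaded %d Fulcio CAs", len(tr.FulcioCertificateAuthorities()))

		// Long-running processes can use LiveTrustedRoot instead, which
		// re-fetches the root from TUF every 24 hours in a background goroutine.
		ltr, err := root.NewLiveTrustedRoot(tuf.DefaultOptions())
		if err != nil {
			log.Fatal(err)
		}
		_ = ltr.RekorLogs() // accessors take a read lock internally
	}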
+
+package root
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/elliptic"
+	"crypto/rsa"
+	"crypto/x509"
+	"fmt"
+	"sort"
+	"time"
+
+	protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+	prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+func (tr *TrustedRoot) constructProtoTrustRoot() error {
+	tr.trustedRoot = &prototrustroot.TrustedRoot{}
+	tr.trustedRoot.MediaType = TrustedRootMediaType01
+
+	for logID, transparencyLog := range tr.rekorLogs {
+		tlProto, err := transparencyLogToProtobufTL(transparencyLog)
+		if err != nil {
+			return fmt.Errorf("failed converting rekor log %s to protobuf: %w", logID, err)
+		}
+		tr.trustedRoot.Tlogs = append(tr.trustedRoot.Tlogs, tlProto)
+	}
+	// ensure stable sorting of the slice
+	sortTlogSlice(tr.trustedRoot.Tlogs)
+
+	for logID, ctLog := range tr.ctLogs {
+		ctProto, err := transparencyLogToProtobufTL(ctLog)
+		if err != nil {
+			return fmt.Errorf("failed converting ctlog %s to protobuf: %w", logID, err)
+		}
+		tr.trustedRoot.Ctlogs = append(tr.trustedRoot.Ctlogs, ctProto)
+	}
+	// ensure stable sorting of the slice
+	sortTlogSlice(tr.trustedRoot.Ctlogs)
+
+	for _, ca := range tr.fulcioCertAuthorities {
+		caProto, err := certificateAuthorityToProtobufCA(&ca)
+		if err != nil {
+			return fmt.Errorf("failed converting fulcio cert chain to protobuf: %w", err)
+		}
+		tr.trustedRoot.CertificateAuthorities = append(tr.trustedRoot.CertificateAuthorities, caProto)
+	}
+	// ensure stable sorting of the slice
+	sortCASlice(tr.trustedRoot.CertificateAuthorities)
+
+	for _, ca := range tr.timestampingAuthorities {
+		caProto, err := certificateAuthorityToProtobufCA(&ca)
+		if err != nil {
+			return fmt.Errorf("failed converting TSA cert chain to protobuf: %w", err)
+		}
+		tr.trustedRoot.TimestampAuthorities = append(tr.trustedRoot.TimestampAuthorities, caProto)
+	}
+	// ensure stable sorting of the slice
+	sortCASlice(tr.trustedRoot.TimestampAuthorities)
+
+	return nil
+}
+
+func sortCASlice(slc []*prototrustroot.CertificateAuthority) {
+	sort.Slice(slc, func(i, j int) bool {
+		iTime := time.Unix(0, 0)
+		jTime := time.Unix(0, 0)
+
+		if slc[i].ValidFor.Start != nil {
+			iTime = slc[i].ValidFor.Start.AsTime()
+		}
+		if slc[j].ValidFor.Start != nil {
+			jTime = slc[j].ValidFor.Start.AsTime()
+		}
+
+		return iTime.Before(jTime)
+	})
+}
+
+func sortTlogSlice(slc []*prototrustroot.TransparencyLogInstance) {
+	sort.Slice(slc, func(i, j int) bool {
+		iTime := time.Unix(0, 0)
+		jTime := time.Unix(0, 0)
+
+		if slc[i].PublicKey.ValidFor.Start != nil {
+			iTime = slc[i].PublicKey.ValidFor.Start.AsTime()
+		}
+		if slc[j].PublicKey.ValidFor.Start != nil {
+			jTime = slc[j].PublicKey.ValidFor.Start.AsTime()
+		}
+
+		return iTime.Before(jTime)
+	})
+}
+
+func certificateAuthorityToProtobufCA(ca *CertificateAuthority) (*prototrustroot.CertificateAuthority, error) {
+	// Check the root certificate before it is dereferenced below.
+	if ca.Root == nil {
+		return nil, fmt.Errorf("root certificate is nil")
+	}
+	org := ""
+	if len(ca.Root.Subject.Organization) > 0 {
+		org = ca.Root.Subject.Organization[0]
+	}
+	var allCerts []*protocommon.X509Certificate
+	if ca.Leaf != nil {
+		allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Leaf.Raw})
+	}
+	for _, intermed := range ca.Intermediates {
+		allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: intermed.Raw})
+	}
+	allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Root.Raw})
+
+	caProto := prototrustroot.CertificateAuthority{
+		Uri: ca.URI,
+		Subject: &protocommon.DistinguishedName{
+			Organization: org,
+			CommonName:   ca.Root.Subject.CommonName,
+		},
+		ValidFor: &protocommon.TimeRange{
+			Start: timestamppb.New(ca.ValidityPeriodStart),
+		},
+		CertChain: &protocommon.X509CertificateChain{
+			Certificates: allCerts,
+		},
+	}
+
+	if !ca.ValidityPeriodEnd.IsZero() {
+		caProto.ValidFor.End = timestamppb.New(ca.ValidityPeriodEnd)
+	}
+
+	return &caProto, nil
+}
+
+func transparencyLogToProtobufTL(tl *TransparencyLog) (*prototrustroot.TransparencyLogInstance, error) {
+	hashAlgo, err := hashAlgorithmToProtobufHashAlgorithm(tl.HashFunc)
+	if err != nil {
+		return nil, fmt.Errorf("failed converting hash algorithm to protobuf: %w", err)
+	}
+	publicKey, err := publicKeyToProtobufPublicKey(tl.PublicKey, tl.ValidityPeriodStart, tl.ValidityPeriodEnd)
+	if err != nil {
+		return nil, fmt.Errorf("failed converting public key to protobuf: %w", err)
+	}
+	trProto := prototrustroot.TransparencyLogInstance{
+		BaseUrl:       tl.BaseURL,
+		HashAlgorithm: hashAlgo,
+		PublicKey:     publicKey,
+		LogId: &protocommon.LogId{
+			KeyId: tl.ID,
+		},
+	}
+
+	return &trProto, nil
+}
+
+func hashAlgorithmToProtobufHashAlgorithm(hashAlgorithm crypto.Hash) (protocommon.HashAlgorithm, error) {
+	switch hashAlgorithm {
+	case crypto.SHA256:
+		return protocommon.HashAlgorithm_SHA2_256, nil
+	case crypto.SHA384:
+		return protocommon.HashAlgorithm_SHA2_384, nil
+	case crypto.SHA512:
+		return protocommon.HashAlgorithm_SHA2_512, nil
+	case crypto.SHA3_256:
+		return protocommon.HashAlgorithm_SHA3_256, nil
+	case crypto.SHA3_384:
+		return protocommon.HashAlgorithm_SHA3_384, nil
+	default:
+		return 0, fmt.Errorf("unsupported hash algorithm for Merkle tree: %v", hashAlgorithm)
+	}
+}
+
+func publicKeyToProtobufPublicKey(publicKey crypto.PublicKey, start time.Time, end time.Time) (*protocommon.PublicKey, error) {
+	pkd := protocommon.PublicKey{
+		ValidFor: &protocommon.TimeRange{
+			Start: timestamppb.New(start),
+		},
+	}
+
+	if !end.IsZero() {
+		pkd.ValidFor.End = timestamppb.New(end)
+	}
+
+	rawBytes, err := x509.MarshalPKIXPublicKey(publicKey)
+	if err != nil {
+		return nil, fmt.Errorf("failed marshalling public key: %w", err)
+	}
+	pkd.RawBytes = rawBytes
+
+	switch p := publicKey.(type) {
+	case *ecdsa.PublicKey:
+		switch p.Curve {
+		case elliptic.P256():
+			pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256
+		case elliptic.P384():
+			pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384
+		case elliptic.P521():
+			pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512
+		default:
+			return nil, fmt.Errorf("unsupported curve for ecdsa key: %T", p.Curve)
+		}
+	case *rsa.PublicKey:
+		switch p.Size() * 8 {
+		case 2048:
+			pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256
+		case 3072:
+			pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256
+		case 4096:
+			pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256
+		default:
+			return nil, fmt.Errorf("unsupported public modulus for RSA key: %d bits", p.Size()*8)
+		}
+	case ed25519.PublicKey:
+		// ed25519 public keys are value types; keys returned by
+		// x509.ParsePKIXPublicKey match the value, not *ed25519.PublicKey.
+		pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ED25519
+	default:
+		return nil, fmt.Errorf("unknown public key type: %T", p)
+	}
+
+	return &pkd, nil
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go b/vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go
new file mode 100644
index 0000000000..939e25f0d0
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go
@@ -0,0 +1,302 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tlog
+
+import (
+	"bytes"
+	"context"
+	"crypto/ecdsa"
+	"crypto/sha256"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+	v1 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1"
+	"github.com/sigstore/rekor/pkg/generated/models"
+	"github.com/sigstore/rekor/pkg/types"
+	dsse_v001 "github.com/sigstore/rekor/pkg/types/dsse/v0.0.1"
+	hashedrekord_v001 "github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1"
+	intoto_v002 "github.com/sigstore/rekor/pkg/types/intoto/v0.0.2"
+	rekorVerify "github.com/sigstore/rekor/pkg/verify"
+	"github.com/sigstore/sigstore/pkg/signature"
+
+	"github.com/sigstore/sigstore-go/pkg/root"
+)
+
+type Entry struct {
+	kind                 string
+	version              string
+	rekorEntry           types.EntryImpl
+	logEntryAnon         models.LogEntryAnon
+	signedEntryTimestamp []byte
+}
+
+type RekorPayload struct {
+	Body           interface{} `json:"body"`
+	IntegratedTime int64       `json:"integratedTime"`
+	LogIndex       int64       `json:"logIndex"`
+	LogID          string      `json:"logID"` //nolint:tagliatelle
+}
+
+var ErrNilValue = errors.New("validation error: nil value in transparency log entry")
+
+func NewEntry(body []byte, integratedTime int64, logIndex int64, logID []byte, signedEntryTimestamp []byte, inclusionProof *models.InclusionProof) (*Entry, error) {
+	pe, err := models.UnmarshalProposedEntry(bytes.NewReader(body), runtime.JSONConsumer())
+	if err != nil {
+		return nil, err
+	}
+	rekorEntry, err := types.UnmarshalEntry(pe)
+	if err != nil {
+		return nil, err
+	}
+
+	entry := &Entry{
+		rekorEntry: rekorEntry,
+		logEntryAnon: models.LogEntryAnon{
+			Body:           base64.StdEncoding.EncodeToString(body),
+			IntegratedTime: swag.Int64(integratedTime),
+			LogIndex:       swag.Int64(logIndex),
+			LogID:          swag.String(string(logID)),
+		},
+		kind:    pe.Kind(),
+		version: rekorEntry.APIVersion(),
+	}
+
+	if len(signedEntryTimestamp) > 0 {
+		entry.signedEntryTimestamp = signedEntryTimestamp
+	}
+
+	if inclusionProof != nil {
+		entry.logEntryAnon.Verification = &models.LogEntryAnonVerification{
+			InclusionProof: inclusionProof,
+		}
+	}
+
+	return entry, nil
+}
+
+// ParseEntry decodes the entry bytes to a specific entry type (types.EntryImpl).
+func ParseEntry(protoEntry *v1.TransparencyLogEntry) (entry *Entry, err error) { + if protoEntry == nil || + protoEntry.CanonicalizedBody == nil || + protoEntry.IntegratedTime == 0 || + protoEntry.LogIndex == 0 || + protoEntry.LogId == nil || + protoEntry.LogId.KeyId == nil || + protoEntry.KindVersion == nil { + return nil, ErrNilValue + } + + signedEntryTimestamp := []byte{} + if protoEntry.InclusionPromise != nil && protoEntry.InclusionPromise.SignedEntryTimestamp != nil { + signedEntryTimestamp = protoEntry.InclusionPromise.SignedEntryTimestamp + } + + var inclusionProof *models.InclusionProof + + if protoEntry.InclusionProof != nil { + var hashes []string + + for _, v := range protoEntry.InclusionProof.Hashes { + hashes = append(hashes, hex.EncodeToString(v)) + } + + rootHash := hex.EncodeToString(protoEntry.InclusionProof.RootHash) + + if protoEntry.InclusionProof.Checkpoint == nil { + return nil, fmt.Errorf("inclusion proof missing required checkpoint") + } + if protoEntry.InclusionProof.Checkpoint.Envelope == "" { + return nil, fmt.Errorf("inclusion proof checkpoint empty") + } + + inclusionProof = &models.InclusionProof{ + LogIndex: swag.Int64(protoEntry.InclusionProof.LogIndex), + RootHash: &rootHash, + TreeSize: swag.Int64(protoEntry.InclusionProof.TreeSize), + Hashes: hashes, + Checkpoint: swag.String(protoEntry.InclusionProof.Checkpoint.Envelope), + } + } + + entry, err = NewEntry(protoEntry.CanonicalizedBody, protoEntry.IntegratedTime, protoEntry.LogIndex, protoEntry.LogId.KeyId, signedEntryTimestamp, inclusionProof) + if err != nil { + return nil, err + } + + if entry.kind != protoEntry.KindVersion.Kind || entry.version != protoEntry.KindVersion.Version { + return nil, fmt.Errorf("kind and version mismatch: %s/%s != %s/%s", entry.kind, entry.version, protoEntry.KindVersion.Kind, protoEntry.KindVersion.Version) + } + + return entry, nil +} + +func ValidateEntry(entry *Entry) error { + switch e := entry.rekorEntry.(type) { + case *dsse_v001.V001Entry: + err := e.DSSEObj.Validate(strfmt.Default) + if err != nil { + return err + } + case *hashedrekord_v001.V001Entry: + err := e.HashedRekordObj.Validate(strfmt.Default) + if err != nil { + return err + } + case *intoto_v002.V002Entry: + err := e.IntotoObj.Validate(strfmt.Default) + if err != nil { + return err + } + default: + return fmt.Errorf("unsupported entry type: %T", e) + } + + return nil +} + +func (entry *Entry) IntegratedTime() time.Time { + return time.Unix(*entry.logEntryAnon.IntegratedTime, 0) +} + +func (entry *Entry) Signature() []byte { + switch e := entry.rekorEntry.(type) { + case *dsse_v001.V001Entry: + sigBytes, err := base64.StdEncoding.DecodeString(*e.DSSEObj.Signatures[0].Signature) + if err != nil { + return []byte{} + } + return sigBytes + case *hashedrekord_v001.V001Entry: + return e.HashedRekordObj.Signature.Content + case *intoto_v002.V002Entry: + sigBytes, err := base64.StdEncoding.DecodeString(string(*e.IntotoObj.Content.Envelope.Signatures[0].Sig)) + if err != nil { + return []byte{} + } + return sigBytes + } + + return []byte{} +} + +func (entry *Entry) PublicKey() any { + var pemString []byte + + switch e := entry.rekorEntry.(type) { + case *dsse_v001.V001Entry: + pemString = []byte(*e.DSSEObj.Signatures[0].Verifier) + case *hashedrekord_v001.V001Entry: + pemString = []byte(e.HashedRekordObj.Signature.PublicKey.Content) + case *intoto_v002.V002Entry: + pemString = []byte(*e.IntotoObj.Content.Envelope.Signatures[0].PublicKey) + } + + certBlock, _ := pem.Decode(pemString) + + var pk any + var err 
error
+
+	if certBlock == nil {
+		// No PEM block could be decoded; there is no key to return.
+		return nil
+	}
+
+	pk, err = x509.ParseCertificate(certBlock.Bytes)
+	if err != nil {
+		pk, err = x509.ParsePKIXPublicKey(certBlock.Bytes)
+		if err != nil {
+			return nil
+		}
+	}
+
+	return pk
+}
+
+func (entry *Entry) LogKeyID() string {
+	return *entry.logEntryAnon.LogID
+}
+
+func (entry *Entry) LogIndex() int64 {
+	return *entry.logEntryAnon.LogIndex
+}
+
+func (entry *Entry) Body() any {
+	return entry.logEntryAnon.Body
+}
+
+func (entry *Entry) HasInclusionPromise() bool {
+	return entry.signedEntryTimestamp != nil
+}
+
+func (entry *Entry) HasInclusionProof() bool {
+	return entry.logEntryAnon.Verification != nil
+}
+
+func VerifyInclusion(entry *Entry, verifier signature.Verifier) error {
+	err := rekorVerify.VerifyInclusion(context.TODO(), &entry.logEntryAnon)
+	if err != nil {
+		return err
+	}
+
+	err = rekorVerify.VerifyCheckpointSignature(&entry.logEntryAnon, verifier)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func VerifySET(entry *Entry, verifiers map[string]*root.TransparencyLog) error {
+	rekorPayload := RekorPayload{
+		Body:           entry.logEntryAnon.Body,
+		IntegratedTime: *entry.logEntryAnon.IntegratedTime,
+		LogIndex:       *entry.logEntryAnon.LogIndex,
+		LogID:          hex.EncodeToString([]byte(*entry.logEntryAnon.LogID)),
+	}
+
+	verifier, ok := verifiers[hex.EncodeToString([]byte(*entry.logEntryAnon.LogID))]
+	if !ok {
+		return errors.New("rekor log public key not found for payload")
+	}
+	if verifier.ValidityPeriodStart.IsZero() {
+		return errors.New("rekor validity period start time not set")
+	}
+	if (verifier.ValidityPeriodStart.After(entry.IntegratedTime())) ||
+		(!verifier.ValidityPeriodEnd.IsZero() && verifier.ValidityPeriodEnd.Before(entry.IntegratedTime())) {
+		return errors.New("rekor log public key not valid at payload integrated time")
+	}
+
+	contents, err := json.Marshal(rekorPayload)
+	if err != nil {
+		return fmt.Errorf("marshaling: %w", err)
+	}
+	canonicalized, err := jsoncanonicalizer.Transform(contents)
+	if err != nil {
+		return fmt.Errorf("canonicalizing: %w", err)
+	}
+
+	hash := sha256.Sum256(canonicalized)
+	if ecdsaPublicKey, ok := verifier.PublicKey.(*ecdsa.PublicKey); !ok {
+		return fmt.Errorf("unsupported public key type: %T", verifier.PublicKey)
+	} else if !ecdsa.VerifyASN1(ecdsaPublicKey, hash[:], entry.signedEntryTimestamp) {
+		return errors.New("unable to verify SET")
+	}
+	return nil
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go
new file mode 100644
index 0000000000..134b2d73fc
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go
@@ -0,0 +1,207 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
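A sketch of how the entry API above is typically driven (assumes a function returning error, with protoEntry, a *v1.TransparencyLogEntry, and trustedRoot, a *root.TrustedRoot, already in scope; none of this is vendored code):

	entry, err := tlog.ParseEntry(protoEntry)
	if err != nil {
		return err
	}
	// An inclusion promise (SET) is verified against the rekor log keys,
	// which VerifySET looks up by hex-encoded log key ID.
	if entry.HasInclusionPromise() {
		if err := tlog.VerifySET(entry, trustedRoot.RekorLogs()); err != nil {
			return err
		}
	}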
+
+package tuf
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/theupdateframework/go-tuf/v2/metadata/config"
+	"github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
+	"github.com/theupdateframework/go-tuf/v2/metadata/updater"
+
+	"github.com/sigstore/sigstore-go/pkg/util"
+)
+
+// Client is a Sigstore TUF client
+type Client struct {
+	cfg  *config.UpdaterConfig
+	up   *updater.Updater
+	opts *Options
+}
+
+// New returns a new client with custom options
+func New(opts *Options) (*Client, error) {
+	var c = Client{
+		opts: opts,
+	}
+	dir := filepath.Join(opts.CachePath, URLToPath(opts.RepositoryBaseURL))
+	var err error
+
+	if c.cfg, err = config.New(opts.RepositoryBaseURL, opts.Root); err != nil {
+		return nil, fmt.Errorf("failed to create TUF client: %w", err)
+	}
+
+	c.cfg.LocalMetadataDir = dir
+	c.cfg.LocalTargetsDir = filepath.Join(dir, "targets")
+	c.cfg.DisableLocalCache = c.opts.DisableLocalCache
+	c.cfg.PrefixTargetsWithHash = !c.opts.DisableConsistentSnapshot
+
+	if c.cfg.DisableLocalCache {
+		c.opts.CachePath = ""
+		c.opts.CacheValidity = 0
+		c.opts.ForceCache = false
+	}
+
+	if opts.Fetcher != nil {
+		c.cfg.Fetcher = opts.Fetcher
+	} else {
+		fetcher := fetcher.DefaultFetcher{}
+		fetcher.SetHTTPUserAgent(util.ConstructUserAgent())
+		c.cfg.Fetcher = &fetcher
+	}
+
+	// Upon client creation, we may not perform a full TUF update,
+	// based on the cache control configuration. Start with a local
+	// client (only reads content on disk) and then decide if we
+	// must perform a full TUF update.
+	tmpCfg := *c.cfg
+	// Create a temporary config for the first use where UnsafeLocalMode
+	// is true. This means that when we first initialize the client,
+	// we are guaranteed to only read the metadata on disk.
+	// Based on that metadata we decide whether a full TUF refresh
+	// should be done. As such, the tmpCfg is only needed here and
+	// not in future invocations.
+	tmpCfg.UnsafeLocalMode = true
+	c.up, err = updater.New(&tmpCfg)
+	if err != nil {
+		return nil, err
+	}
+	if err = c.loadMetadata(); err != nil {
+		return nil, err
+	}
+
+	return &c, nil
+}
+
+// DefaultClient returns a Sigstore TUF client for the public good instance
+func DefaultClient() (*Client, error) {
+	opts := DefaultOptions()
+
+	return New(opts)
+}
+
+// loadMetadata controls whether the client should actually perform a TUF
+// refresh. The TUF specification mandates a refresh, but for certain
+// Sigstore clients it may be beneficial to rely on the cache, and in
+// air-gapped deployments a refresh may not even be possible.
+func (c *Client) loadMetadata() error {
+	// Load the metadata into memory and verify it
+	if err := c.up.Refresh(); err != nil {
+		// This is most likely due to the lack of metadata files
+		// on disk. Perform a full update and return.
+		return c.Refresh()
+	}
+
+	if c.opts.ForceCache {
+		return nil
+	} else if c.opts.CacheValidity > 0 {
+		cfg, err := LoadConfig(c.configPath())
+		if err != nil {
+			// The config may not exist yet; don't error,
+			// create a new empty config instead.
+			cfg = &Config{}
+		}
+
+		cacheValidUntil := cfg.LastTimestamp.AddDate(0, 0, c.opts.CacheValidity)
+		if time.Now().Before(cacheValidUntil) {
+			// No need to update
+			return nil
+		}
+	}
+
+	return c.Refresh()
+}
+
+func (c *Client) configPath() string {
+	var p = filepath.Join(
+		c.opts.CachePath,
+		fmt.Sprintf("%s.json", URLToPath(c.opts.RepositoryBaseURL)),
+	)
+
+	return p
+}
+
+// Refresh forces a refresh of the underlying TUF client.
+// As the tuf client updater does not support multiple refreshes during
+// its life-time, this will replace the TUF client updater with a new one.
+func (c *Client) Refresh() error {
+	var err error
+
+	c.up, err = updater.New(c.cfg)
+	if err != nil {
+		return fmt.Errorf("failed to create tuf updater: %w", err)
+	}
+	err = c.up.Refresh()
+	if err != nil {
+		return fmt.Errorf("tuf refresh failed: %w", err)
+	}
+
+	// Update config with last update
+	cfg, err := LoadConfig(c.configPath())
+	if err != nil {
+		// Likely the config file did not exist, create it
+		cfg = &Config{}
+	}
+	cfg.LastTimestamp = time.Now()
+	// ignore error writing the updated config file
+	_ = cfg.Persist(c.configPath())
+
+	return nil
+}
+
+// GetTarget returns a target file from the TUF repository
+func (c *Client) GetTarget(target string) ([]byte, error) {
+	// Set filepath to the empty string. When we get targets,
+	// we rely on the target info struct instead.
+	const filePath = ""
+	ti, err := c.up.GetTargetInfo(target)
+	if err != nil {
+		return nil, fmt.Errorf("getting info for target \"%s\": %w", target, err)
+	}
+
+	path, tb, err := c.up.FindCachedTarget(ti, filePath)
+	if err != nil {
+		return nil, fmt.Errorf("getting target cache: %w", err)
+	}
+	if path != "" {
+		// Cached version found
+		return tb, nil
+	}
+
+	// Download of target is needed
+	// Ignore targetsBaseURL, set to empty string
+	const targetsBaseURL = ""
+	_, tb, err = c.up.DownloadTarget(ti, filePath, targetsBaseURL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to download target file %s - %w", target, err)
+	}
+
+	return tb, nil
+}
+
+// URLToPath converts a URL to a filename-compatible string
+func URLToPath(url string) string {
+	// Strip the scheme and replace slashes with dashes, e.g.
+	// https://github.github.com/prod-tuf-root -> github.github.com-prod-tuf-root
+	fn := url
+	if len(fn) > 8 && fn[:8] == "https://" {
+		fn = fn[8:]
+	}
+	fn = strings.ReplaceAll(fn, "/", "-")
+	return fn
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go
new file mode 100644
index 0000000000..3f5a81f1e5
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go
@@ -0,0 +1,54 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
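A minimal usage sketch for the client above (illustrative only; the target name matches the one used by the root package earlier in this patch):

	client, err := tuf.DefaultClient()
	if err != nil {
		return err
	}
	// Served from the local cache when still valid, otherwise downloaded.
	data, err := client.GetTarget("trusted_root.json")
	if err != nil {
		return err
	}
	_ = data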
+
+package tuf
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"time"
+)
+
+type Config struct {
+	LastTimestamp time.Time `json:"lastTimestamp"`
+}
+
+func LoadConfig(p string) (*Config, error) {
+	var c Config
+
+	b, err := os.ReadFile(p)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read config: %w", err)
+	}
+	err = json.Unmarshal(b, &c)
+	if err != nil {
+		return nil, fmt.Errorf("malformed config file: %w", err)
+	}
+
+	return &c, nil
+}
+
+func (c *Config) Persist(p string) error {
+	b, err := json.Marshal(c)
+	if err != nil {
+		return fmt.Errorf("failed to JSON marshal config: %w", err)
+	}
+	err = os.WriteFile(p, b, 0600)
+	if err != nil {
+		return fmt.Errorf("failed to write config: %w", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go
new file mode 100644
index 0000000000..14b5ae8957
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go
@@ -0,0 +1,173 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tuf
+
+import (
+	"embed"
+	"math"
+	"os"
+	"path/filepath"
+
+	"github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
+)
+
+//go:embed repository
+var embeddedRepo embed.FS
+
+const (
+	DefaultMirror = "https://tuf-repo-cdn.sigstore.dev"
+	StagingMirror = "https://tuf-repo-cdn.sigstage.dev"
+
+	// The following caching values can be used for the CacheValidity option
+	NoCache  = 0
+	MaxCache = math.MaxInt
+)
+
+// Options represent the various options for a Sigstore TUF Client
+type Options struct {
+	// CacheValidity period in days (default 0). The client will persist a
+	// timestamp with the cache after refresh. Note that the client will
+	// always refresh the cache if the metadata is expired or if the client is
+	// unable to find a persisted timestamp, so this is not an optimal control
+	// for air-gapped environments. Use const MaxCache to update the cache when
+	// the metadata is expired, though the first initialization will still
+	// refresh the cache.
+	CacheValidity int
+	// ForceCache controls if the cache should be used without update
+	// as long as the metadata is valid. Use ForceCache over CacheValidity
+	// if you want to always use the cache up until its expiration. Note that
+	// the client will refresh the cache once the metadata has expired, so this
+	// is not an optimal control for air-gapped environments. Clients instead
+	// should provide a trust root file directly to the client to bypass TUF.
+	ForceCache bool
+	// Root is the TUF trust anchor
+	Root []byte
+	// CachePath is the location on disk for the TUF cache
+	// (default $HOME/.sigstore/root)
+	CachePath string
+	// RepositoryBaseURL is the TUF repository location URL
+	// (default https://tuf-repo-cdn.sigstore.dev)
+	RepositoryBaseURL string
+	// DisableLocalCache mode allows a client to work on a read-only
+	// file system. If this is set, the cache path is ignored.
+	DisableLocalCache bool
+	// DisableConsistentSnapshot disables consistent-snapshot handling,
+	// i.e. target files will not be prefixed with their hash.
+	DisableConsistentSnapshot bool
+	// Fetcher is the metadata fetcher
+	Fetcher fetcher.Fetcher
+}
+
+// WithCacheValidity sets the cache validity period in days
+func (o *Options) WithCacheValidity(days int) *Options {
+	o.CacheValidity = days
+	return o
+}
+
+// WithForceCache forces the client to use the cache without updating
+func (o *Options) WithForceCache() *Options {
+	o.ForceCache = true
+	return o
+}
+
+// WithRoot sets the TUF trust anchor
+func (o *Options) WithRoot(root []byte) *Options {
+	o.Root = root
+	return o
+}
+
+// WithCachePath sets the location on disk for TUF cache
+func (o *Options) WithCachePath(path string) *Options {
+	o.CachePath = path
+	return o
+}
+
+// WithRepositoryBaseURL sets the TUF repository location URL
+func (o *Options) WithRepositoryBaseURL(url string) *Options {
+	o.RepositoryBaseURL = url
+	return o
+}
+
+// WithDisableLocalCache sets the client to work on a read-only file system
+func (o *Options) WithDisableLocalCache() *Options {
+	o.DisableLocalCache = true
+	return o
+}
+
+// WithDisableConsistentSnapshot sets the client to disable consistent snapshot
+func (o *Options) WithDisableConsistentSnapshot() *Options {
+	o.DisableConsistentSnapshot = true
+	return o
+}
+
+// WithFetcher sets the metadata fetcher
+func (o *Options) WithFetcher(f fetcher.Fetcher) *Options {
+	o.Fetcher = f
+	return o
+}
+
+// DefaultOptions returns an options struct for the public good instance
+func DefaultOptions() *Options {
+	var opts Options
+	var err error
+
+	opts.Root = DefaultRoot()
+	home, err := os.UserHomeDir()
+	if err != nil {
+		// Fall back to using a TUF repository in the temp location
+		home = os.TempDir()
+	}
+	opts.CachePath = filepath.Join(home, ".sigstore", "root")
+	opts.RepositoryBaseURL = DefaultMirror
+
+	return &opts
+}
+
+// DefaultRoot returns the root.json for the public good instance
+func DefaultRoot() []byte {
+	// The embed file system always uses forward slashes as path separators,
+	// even on Windows
+	p := "repository/root.json"
+
+	b, err := embeddedRepo.ReadFile(p)
+	if err != nil {
+		// This should never happen.
+		// ReadFile from an embedded FS will never fail as long as
+		// the path is correct. If it fails, it would mean that the
+		// binary was not assembled as it should have been, and there
+		// is no way to recover from that.
+		panic(err)
+	}
+
+	return b
+}
+
+// StagingRoot returns the root.json for the staging instance
+func StagingRoot() []byte {
+	// The embed file system always uses forward slashes as path separators,
+	// even on Windows
+	p := "repository/staging_root.json"
+
+	b, err := embeddedRepo.ReadFile(p)
+	if err != nil {
+		// This should never happen.
+		// ReadFile from an embedded FS will never fail as long as
+		// the path is correct. If it fails, it would mean that the
+		// binary was not assembled as it should have been, and there
+		// is no way to recover from that.
+ panic(err) + } + + return b +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json new file mode 100644 index 0000000000..8c12a0c0a9 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json @@ -0,0 +1,164 @@ +{ + "signed": { + "_type": "root", + "spec_version": "1.0", + "version": 9, + "expires": "2024-09-12T06:53:10Z", + "keys": { + "1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849": { + "keytype": "ecdsa", + "scheme": "ecdsa-sha2-nistp256", + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n" + } + }, + "230e212616274a4195cdc28e9fce782c20e6c720f1a811b40f98228376bdd3ac": { + "keytype": "ecdsa", + "scheme": "ecdsa-sha2-nistp256", + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAELrWvNt94v4R085ELeeCMxHp7PldF\n0/T1GxukUh2ODuggLGJE0pc1e8CSBf6CS91Fwo9FUOuRsjBUld+VqSyCdQ==\n-----END PUBLIC KEY-----\n" + } + }, + "3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e": { + "keytype": "ecdsa", + "scheme": "ecdsa-sha2-nistp256", + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEy8XKsmhBYDI8Jc0GwzBxeKax0cm5\nSTKEU65HPFunUn41sT8pi0FjM4IkHz/YUmwmLUO0Wt7lxhj6BkLIK4qYAw==\n-----END PUBLIC KEY-----\n" + } + }, + "923bb39e60dd6fa2c31e6ea55473aa93b64dd4e53e16fbe42f6a207d3f97de2d": { + "keytype": "ecdsa", + "scheme": "ecdsa-sha2-nistp256", + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n" + } + }, + "e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523": { + "keytype": "ecdsa", + "scheme": "ecdsa-sha2-nistp256", + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n" + } + }, + "ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e": { + "keytype": "ecdsa", + "scheme": "ecdsa-sha2-nistp256", + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n" + } + }, + "fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f": { + "keytype": "ecdsa", + "scheme": "ecdsa-sha2-nistp256", + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n" + } + } + }, + "roles": { + "root": { + "keyids": [ + "3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e", + "ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e", + 
"1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849", + "e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523", + "fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f" + ], + "threshold": 3 + }, + "snapshot": { + "keyids": [ + "230e212616274a4195cdc28e9fce782c20e6c720f1a811b40f98228376bdd3ac" + ], + "threshold": 1 + }, + "targets": { + "keyids": [ + "3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e", + "ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e", + "1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849", + "e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523", + "fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f" + ], + "threshold": 3 + }, + "timestamp": { + "keyids": [ + "923bb39e60dd6fa2c31e6ea55473aa93b64dd4e53e16fbe42f6a207d3f97de2d" + ], + "threshold": 1 + } + }, + "consistent_snapshot": true + }, + "signatures": [ + { + "keyid": "ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c", + "sig": "30450221008b78f894c3cfed3bd486379c4e0e0dfb3e7dd8cbc4d5598d2818eea1ba3c7550022029d3d06e89d04d37849985dc46c0e10dc5b1fc68dc70af1ec9910303a1f3ee2f" + }, + { + "keyid": "25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99", + "sig": "30450221009e6b90b935e09b837a90d4402eaa27d5ea26eb7891948ba0ed7090841248f436022003dc2251c4d4a7999b91e9ad0868765ae09ac7269279f2a7899bafef7a2d9260" + }, + { + "keyid": "f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f", + "sig": "30440220099e907dcf90b7b6e109fd1d6e442006fccbb48894aaaff47ab824b03fb35d0d02202aa0a06c21a4233f37900a48bc8777d3b47f59e3a38616ce631a04df57f96736" + }, + { + "keyid": "3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e", + "sig": "30450221008b78f894c3cfed3bd486379c4e0e0dfb3e7dd8cbc4d5598d2818eea1ba3c7550022029d3d06e89d04d37849985dc46c0e10dc5b1fc68dc70af1ec9910303a1f3ee2f" + }, + { + "keyid": "ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e", + "sig": "30450221009e6b90b935e09b837a90d4402eaa27d5ea26eb7891948ba0ed7090841248f436022003dc2251c4d4a7999b91e9ad0868765ae09ac7269279f2a7899bafef7a2d9260" + }, + { + "keyid": "e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523", + "sig": "304502200e5613b901e0f3e08eceabddc73f98b50ddf892e998d0b369c6e3d451ac48875022100940cf92d1f43ee2e5cdbb22572bb52925ed3863a688f7ffdd4bd2e2e56f028b3" + }, + { + "keyid": "2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de", + "sig": "304502202cff44f2215d7a47b28b8f5f580c2cfbbd1bfcfcbbe78de323045b2c0badc5e9022100c743949eb3f4ea5a4b9ae27ac6eddea1f0ff9bfd004f8a9a9d18c6e4142b6e75" + }, + { + "keyid": "1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849", + "sig": "30440220099e907dcf90b7b6e109fd1d6e442006fccbb48894aaaff47ab824b03fb35d0d02202aa0a06c21a4233f37900a48bc8777d3b47f59e3a38616ce631a04df57f96736" + }, + { + "keyid": "fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f", + "sig": "304502202cff44f2215d7a47b28b8f5f580c2cfbbd1bfcfcbbe78de323045b2c0badc5e9022100c743949eb3f4ea5a4b9ae27ac6eddea1f0ff9bfd004f8a9a9d18c6e4142b6e75" + }, + { + "keyid": "7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b", + "sig": "304502200e5613b901e0f3e08eceabddc73f98b50ddf892e998d0b369c6e3d451ac48875022100940cf92d1f43ee2e5cdbb22572bb52925ed3863a688f7ffdd4bd2e2e56f028b3" + } + ] +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json new file mode 
100644 index 0000000000..a9174b1f18 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json @@ -0,0 +1,107 @@ +{ + "signatures": [ + { + "keyid": "762cb22caca65de5e9b7b6baecb84ca989d337280ce6914b6440aea95769ad93", + "sig": "3045022100ac48110076c9264a95e9cfdb7dc72fdf2aeefa6f0c06919f6780933ef00d8f33022040bcef86bfbe246a603b4d6def14ba9b3bd245b134257d570dd79ef52e8de134" + }, + { + "keyid": "d7d2d47a3f644fc3a685bac7b39c81ed9f9cee48ff861b44fbd86b91e34e7829", + "sig": "3046022100872bef41303c3ca2a7174f9b62c3999c05a2f4f79f0eb6a11d0196bc7e2b5068022100ecd664cf3cd5d280dd1ce479b3a9175ea4347e67e18f44db3f9872267cc20c5e" + }, + { + "keyid": "b78c9e4ff9048a1d9876a20f97fa1b3cb03223a0c520c7de730cfa9f5c7b77e5", + "sig": "3045022100c2e73ee944df991aa88fc9bdb6caaa94e0ca3b7d8c963bf3460eafc23f6ac1ce02202dfcf29fd52c768f9482511ed8382d42634a255e3ac435ca36928db81667e81d" + }, + { + "keyid": "afd6a6ebad62a0dd091db368c1806eeb172c893c80bece1098fed116e985ba35", + "sig": "30440220594071728ae3cc8751caf2f506f4a594b0b38d14eb0f244fc96bd54eba345f0d022069c155f8c98ada28ccf28a1420bb6e4fbed13689ac028c13d23142fd6799cd69" + } + ], + "signed": { + "_type": "root", + "consistent_snapshot": true, + "expires": "2024-06-26T12:37:39Z", + "keys": { + "5416a7a35ef827abc651e200ac11f3d23e9db74ef890b1fedb69fb2a152ebac5": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAExxmEtmhF5U+i+v/6he4BcSLzCgMx\n/0qSrvDg6bUWwUrkSKS2vDpcJrhGy5fmmhRrGawjPp1ALpC3y1kqFTpXDg==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-online-uri": "gcpkms:projects/projectsigstore-staging/locations/global/keyRings/tuf-keyring/cryptoKeys/tuf-key/cryptoKeyVersions/2" + }, + "762cb22caca65de5e9b7b6baecb84ca989d337280ce6914b6440aea95769ad93": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEohqIdE+yTl4OxpX8ZxNUPrg3SL9H\nBDnhZuceKkxy2oMhUOxhWweZeG3bfM1T4ZLnJimC6CAYVU5+F5jZCoftRw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@jku" + }, + "afd6a6ebad62a0dd091db368c1806eeb172c893c80bece1098fed116e985ba35": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEoxkvDOmtGEknB3M+ZkPts8joDM0X\nIH5JZwPlgC2CXs/eqOuNF8AcEWwGYRiDhV/IMlQw5bg8PLICQcgsbrDiKg==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@mnm678" + }, + "b78c9e4ff9048a1d9876a20f97fa1b3cb03223a0c520c7de730cfa9f5c7b77e5": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFHDb85JH+JYR1LQmxiz4UMokVMnP\nxKoWpaEnFCKXH8W4Fc/DfIxMnkpjCuvWUBdJXkO0aDIxwsij8TOFh2R7dw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@joshuagl" + }, + "d7d2d47a3f644fc3a685bac7b39c81ed9f9cee48ff861b44fbd86b91e34e7829": { + "keytype": "ecdsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE++Wv+DcLRk+mfkmlpCwl1GUi9EMh\npBUTz8K0fH7bE4mQuViGSyWA/eyMc0HvzZi6Xr0diHw0/lUPBvok214YQw==\n-----END PUBLIC KEY-----\n" + }, + "scheme": "ecdsa-sha2-nistp256", + "x-tuf-on-ci-keyowner": "@kommendorkapten" + } + }, + "roles": { + "root": { + "keyids": [ + "762cb22caca65de5e9b7b6baecb84ca989d337280ce6914b6440aea95769ad93", + "d7d2d47a3f644fc3a685bac7b39c81ed9f9cee48ff861b44fbd86b91e34e7829", + 
"b78c9e4ff9048a1d9876a20f97fa1b3cb03223a0c520c7de730cfa9f5c7b77e5", + "afd6a6ebad62a0dd091db368c1806eeb172c893c80bece1098fed116e985ba35" + ], + "threshold": 2 + }, + "snapshot": { + "keyids": [ + "5416a7a35ef827abc651e200ac11f3d23e9db74ef890b1fedb69fb2a152ebac5" + ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 3650, + "x-tuf-on-ci-signing-period": 365 + }, + "targets": { + "keyids": [ + "762cb22caca65de5e9b7b6baecb84ca989d337280ce6914b6440aea95769ad93", + "d7d2d47a3f644fc3a685bac7b39c81ed9f9cee48ff861b44fbd86b91e34e7829", + "b78c9e4ff9048a1d9876a20f97fa1b3cb03223a0c520c7de730cfa9f5c7b77e5", + "afd6a6ebad62a0dd091db368c1806eeb172c893c80bece1098fed116e985ba35" + ], + "threshold": 1 + }, + "timestamp": { + "keyids": [ + "5416a7a35ef827abc651e200ac11f3d23e9db74ef890b1fedb69fb2a152ebac5" + ], + "threshold": 1, + "x-tuf-on-ci-expiry-period": 7, + "x-tuf-on-ci-signing-period": 4 + } + }, + "spec_version": "1.0", + "version": 7, + "x-tuf-on-ci-expiry-period": 91, + "x-tuf-on-ci-signing-period": 35 + } +} \ No newline at end of file diff --git a/vendor/cuelang.org/go/cue/scanner/fuzz.go b/vendor/github.com/sigstore/sigstore-go/pkg/util/util.go similarity index 61% rename from vendor/cuelang.org/go/cue/scanner/fuzz.go rename to vendor/github.com/sigstore/sigstore-go/pkg/util/util.go index a8b560e4b2..4ab68528b1 100644 --- a/vendor/cuelang.org/go/cue/scanner/fuzz.go +++ b/vendor/github.com/sigstore/sigstore-go/pkg/util/util.go @@ -1,4 +1,4 @@ -// Copyright 2019 CUE Authors +// Copyright 2024 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,28 +12,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build gofuzz - -package scanner +package util import ( - "cuelang.org/go/cue/token" + "runtime/debug" ) -func Fuzz(b []byte) int { - retCode := 1 - eh := func(_ token.Pos, msg string, args []interface{}) { - retCode = 0 - } +func ConstructUserAgent() string { + userAgent := "sigstore-go" - var s Scanner - s.Init(token.NewFile("", 1, len(b)), b, eh, ScanComments) + buildInfo, ok := debug.ReadBuildInfo() + if !ok { + return userAgent + } - for { - _, tok, _ := s.Scan() - if tok == token.EOF { - break + for _, eachDep := range buildInfo.Deps { + if eachDep.Path == "github.com/sigstore/sigstore-go" { + userAgent += "/" + userAgent += eachDep.Version } } - return retCode + + return userAgent } diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go new file mode 100644 index 0000000000..4ce2dff287 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go @@ -0,0 +1,61 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package verify + +import ( + "crypto/x509" + "errors" + "time" + + "github.com/sigstore/sigstore-go/pkg/root" +) + +func VerifyLeafCertificate(observerTimestamp time.Time, leafCert *x509.Certificate, trustedMaterial root.TrustedMaterial) error { // nolint: revive + for _, ca := range trustedMaterial.FulcioCertificateAuthorities() { + if !ca.ValidityPeriodStart.IsZero() && observerTimestamp.Before(ca.ValidityPeriodStart) { + continue + } + if !ca.ValidityPeriodEnd.IsZero() && observerTimestamp.After(ca.ValidityPeriodEnd) { + continue + } + + rootCertPool := x509.NewCertPool() + rootCertPool.AddCert(ca.Root) + intermediateCertPool := x509.NewCertPool() + for _, cert := range ca.Intermediates { + intermediateCertPool.AddCert(cert) + } + + // From spec: + // > ## Certificate + // > For a signature with a given certificate to be considered valid, it must have a timestamp while every certificate in the chain up to the root is valid (the so-called “hybrid model” of certificate verification per Braun et al. (2013)). + + opts := x509.VerifyOptions{ + CurrentTime: observerTimestamp, + Roots: rootCertPool, + Intermediates: intermediateCertPool, + KeyUsages: []x509.ExtKeyUsage{ + x509.ExtKeyUsageCodeSigning, + }, + } + + _, err := leafCert.Verify(opts) + if err == nil { + return nil + } + } + + return errors.New("leaf certificate verification failed") +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go new file mode 100644 index 0000000000..68fa7ad637 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go @@ -0,0 +1,217 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
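A hedged sketch of calling the leaf-certificate check above (leafCert and trustedMaterial are assumed to be in scope; the observer timestamp would normally come from a verified signed timestamp or log entry rather than the wall clock):

	observer := time.Now() // illustrative; prefer a verified timestamp
	if err := verify.VerifyLeafCertificate(observer, leafCert, trustedMaterial); err != nil {
		return err
	}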
+ +package verify + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + + "github.com/sigstore/sigstore-go/pkg/fulcio/certificate" +) + +type SubjectAlternativeNameMatcher struct { + SubjectAlternativeName string `json:"subjectAlternativeName"` + Regexp regexp.Regexp `json:"regexp,omitempty"` +} + +type IssuerMatcher struct { + Issuer string `json:"issuer"` + Regexp regexp.Regexp `json:"regexp,omitempty"` +} + +type CertificateIdentity struct { + SubjectAlternativeName SubjectAlternativeNameMatcher `json:"subjectAlternativeName"` + Issuer IssuerMatcher `json:"issuer"` + certificate.Extensions +} + +type CertificateIdentities []CertificateIdentity + +type ErrValueMismatch struct { + object string + expected string + actual string +} + +func (e *ErrValueMismatch) Error() string { + return fmt.Sprintf("expected %s value \"%s\", got \"%s\"", e.object, e.expected, e.actual) +} + +type ErrValueRegexMismatch struct { + object string + regex string + value string +} + +func (e *ErrValueRegexMismatch) Error() string { + return fmt.Sprintf("expected %s value to match regex \"%s\", got \"%s\"", e.object, e.regex, e.value) +} + +type ErrNoMatchingCertificateIdentity struct { + errors []error +} + +func (e *ErrNoMatchingCertificateIdentity) Error() string { + if len(e.errors) > 0 { + return fmt.Sprintf("no matching CertificateIdentity found, last error: %v", e.errors[len(e.errors)-1]) + } + return "no matching CertificateIdentity found" +} + +func (e *ErrNoMatchingCertificateIdentity) Unwrap() []error { + return e.errors +} + +// NewSANMatcher provides an easier way to create a SubjectAlternativeNameMatcher. +// If the regexpStr fails to compile into a Regexp, an error is returned. +func NewSANMatcher(sanValue string, regexpStr string) (SubjectAlternativeNameMatcher, error) { + r, err := regexp.Compile(regexpStr) + if err != nil { + return SubjectAlternativeNameMatcher{}, err + } + + return SubjectAlternativeNameMatcher{ + SubjectAlternativeName: sanValue, + Regexp: *r}, nil +} + +// The default Regexp json marshal is quite ugly, so we override it here. +func (s *SubjectAlternativeNameMatcher) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + SubjectAlternativeName string `json:"subjectAlternativeName"` + Regexp string `json:"regexp,omitempty"` + }{ + SubjectAlternativeName: s.SubjectAlternativeName, + Regexp: s.Regexp.String(), + }) +} + +// Verify checks if the actualCert matches the SANMatcher's Value and +// Regexp – if those values have been provided. 
+func (s SubjectAlternativeNameMatcher) Verify(actualCert certificate.Summary) error {
+	if s.SubjectAlternativeName != "" &&
+		actualCert.SubjectAlternativeName != s.SubjectAlternativeName {
+		return &ErrValueMismatch{"SAN", s.SubjectAlternativeName, actualCert.SubjectAlternativeName}
+	}
+
+	if s.Regexp.String() != "" &&
+		!s.Regexp.MatchString(actualCert.SubjectAlternativeName) {
+		return &ErrValueRegexMismatch{"SAN", s.Regexp.String(), actualCert.SubjectAlternativeName}
+	}
+	return nil
+}
+
+func NewIssuerMatcher(issuerValue, regexpStr string) (IssuerMatcher, error) {
+	r, err := regexp.Compile(regexpStr)
+	if err != nil {
+		return IssuerMatcher{}, err
+	}
+
+	return IssuerMatcher{Issuer: issuerValue, Regexp: *r}, nil
+}
+
+func (i *IssuerMatcher) MarshalJSON() ([]byte, error) {
+	return json.Marshal(&struct {
+		Issuer string `json:"issuer"`
+		Regexp string `json:"regexp,omitempty"`
+	}{
+		Issuer: i.Issuer,
+		Regexp: i.Regexp.String(),
+	})
+}
+
+func (i IssuerMatcher) Verify(actualCert certificate.Summary) error {
+	if i.Issuer != "" &&
+		actualCert.Extensions.Issuer != i.Issuer {
+		return &ErrValueMismatch{"issuer", i.Issuer, actualCert.Extensions.Issuer}
+	}
+
+	if i.Regexp.String() != "" &&
+		!i.Regexp.MatchString(actualCert.Extensions.Issuer) {
+		return &ErrValueRegexMismatch{"issuer", i.Regexp.String(), actualCert.Extensions.Issuer}
+	}
+	return nil
+}
+
+func NewCertificateIdentity(sanMatcher SubjectAlternativeNameMatcher, issuerMatcher IssuerMatcher, extensions certificate.Extensions) (CertificateIdentity, error) {
+	if sanMatcher.SubjectAlternativeName == "" && sanMatcher.Regexp.String() == "" {
+		return CertificateIdentity{}, errors.New("when verifying a certificate identity, there must be subject alternative name criteria")
+	}
+
+	if issuerMatcher.Issuer == "" && issuerMatcher.Regexp.String() == "" {
+		return CertificateIdentity{}, errors.New("when verifying a certificate identity, must specify Issuer criteria")
+	}
+
+	if extensions.Issuer != "" {
+		return CertificateIdentity{}, errors.New("please specify issuer in IssuerMatcher, not Extensions")
+	}
+
+	certID := CertificateIdentity{
+		SubjectAlternativeName: sanMatcher,
+		Issuer:                 issuerMatcher,
+		Extensions:             extensions,
+	}
+
+	return certID, nil
+}
+
+// NewShortCertificateIdentity provides a more convenient way of initializing
+// a CertificateIdentity with a SAN and the issuer OID extension. If you need
+// to check more OID extensions, use NewCertificateIdentity instead.
+func NewShortCertificateIdentity(issuer, issuerRegex, sanValue, sanRegex string) (CertificateIdentity, error) {
+	sanMatcher, err := NewSANMatcher(sanValue, sanRegex)
+	if err != nil {
+		return CertificateIdentity{}, err
+	}
+
+	issuerMatcher, err := NewIssuerMatcher(issuer, issuerRegex)
+	if err != nil {
+		return CertificateIdentity{}, err
+	}
+
+	return NewCertificateIdentity(sanMatcher, issuerMatcher, certificate.Extensions{})
+}
+
+// Verify verifies the CertificateIdentities, and if ANY of them match the cert,
+// it returns the CertificateIdentity that matched. If none match, it returns an
+// error.
+func (i CertificateIdentities) Verify(cert certificate.Summary) (*CertificateIdentity, error) { + multierr := &ErrNoMatchingCertificateIdentity{} + var err error + for _, ci := range i { + if err = ci.Verify(cert); err == nil { + return &ci, nil + } + multierr.errors = append(multierr.errors, err) + } + return nil, multierr +} + +// Verify checks if the actualCert matches the CertificateIdentity's SAN and +// any of the provided OID extension values. Any empty values are ignored. +func (c CertificateIdentity) Verify(actualCert certificate.Summary) error { + var err error + if err = c.SubjectAlternativeName.Verify(actualCert); err != nil { + return err + } + + if err = c.Issuer.Verify(actualCert); err != nil { + return err + } + + return certificate.CompareExtensions(c.Extensions, actualCert.Extensions) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/errors.go similarity index 56% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go rename to vendor/github.com/sigstore/sigstore-go/pkg/verify/errors.go index ef9bbd37a8..18263c0983 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/errors.go @@ -1,4 +1,4 @@ -// Copyright The OpenTelemetry Authors +// Copyright 2023 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,9 +12,28 @@ // See the License for the specific language governing permissions and // limitations under the License. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package verify -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" +import ( + "fmt" ) + +type ErrVerification struct { + err error +} + +func NewVerificationError(e error) ErrVerification { + return ErrVerification{e} +} + +func (e ErrVerification) Unwrap() error { + return e.err +} + +func (e ErrVerification) String() string { + return fmt.Sprintf("verification error: %s", e.err.Error()) +} + +func (e ErrVerification) Error() string { + return e.String() +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go new file mode 100644 index 0000000000..6440554c5c --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go @@ -0,0 +1,120 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
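A sketch of building and applying a matcher with the certificate-identity API above (the issuer and SAN values are made-up examples):

	ci, err := verify.NewShortCertificateIdentity(
		"https://token.actions.githubusercontent.com", "", // exact issuer, no regex
		"", "^https://github.com/acme/widgets/",           // SAN matched by regex
	)
	if err != nil {
		return err
	}
	// summary is a certificate.Summary extracted from the leaf certificate.
	match, err := verify.CertificateIdentities{ci}.Verify(summary)
	if err != nil {
		return err
	}
	_ = match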
+ +package verify + +import ( + "crypto/x509" + "errors" + "time" + + in_toto "github.com/in-toto/attestation/go/v1" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/tlog" +) + +var errNotImplemented = errors.New("not implemented") + +type HasInclusionPromise interface { + HasInclusionPromise() bool +} + +type HasInclusionProof interface { + HasInclusionProof() bool +} + +type SignatureProvider interface { + SignatureContent() (SignatureContent, error) +} + +type SignedTimestampProvider interface { + Timestamps() ([][]byte, error) +} + +type TlogEntryProvider interface { + TlogEntries() ([]*tlog.Entry, error) +} + +type VerificationProvider interface { + VerificationContent() (VerificationContent, error) +} + +type SignedEntity interface { + HasInclusionPromise + HasInclusionProof + SignatureProvider + SignedTimestampProvider + TlogEntryProvider + VerificationProvider +} + +type VerificationContent interface { + CompareKey(any, root.TrustedMaterial) bool + ValidAtTime(time.Time, root.TrustedMaterial) bool + GetCertificate() *x509.Certificate + HasPublicKey() (PublicKeyProvider, bool) +} + +type SignatureContent interface { + Signature() []byte + EnvelopeContent() EnvelopeContent + MessageSignatureContent() MessageSignatureContent +} + +type PublicKeyProvider interface { + Hint() string +} + +type MessageSignatureContent interface { + Digest() []byte + DigestAlgorithm() string + Signature() []byte +} + +type EnvelopeContent interface { + RawEnvelope() *dsse.Envelope + Statement() (*in_toto.Statement, error) +} + +// BaseSignedEntity is a helper struct that implements all the interfaces +// of SignedEntity. It can be embedded in a struct to implement the SignedEntity +// interface. This may be useful for testing, or for implementing a SignedEntity +// that only implements a subset of the interfaces. +type BaseSignedEntity struct{} + +func (b *BaseSignedEntity) VerificationProvider() (VerificationContent, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) Envelope() (*dsse.Envelope, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) MessageSignature() (*protocommon.MessageSignature, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) Signature() ([]byte, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) Timestamps() ([][]byte, error) { + return nil, errNotImplemented +} + +func (b *BaseSignedEntity) TlogEntries() ([]*tlog.Entry, error) { + return nil, errNotImplemented +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go new file mode 100644 index 0000000000..7b6edf67bc --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go @@ -0,0 +1,85 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package verify + +import ( + "crypto/x509" + "encoding/hex" + "fmt" + + "github.com/google/certificate-transparency-go/ctutil" + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/google/certificate-transparency-go/x509util" + "github.com/sigstore/sigstore-go/pkg/root" +) + +// VerifySignedCertificateTimestamp, given a threshold, TrustedMaterial, and a +// leaf certificate, will extract SCTs from the leaf certificate and verify the +// timestamps using the TrustedMaterial's FulcioCertificateAuthorities() and +// CTLogs() +func VerifySignedCertificateTimestamp(leafCert *x509.Certificate, threshold int, trustedMaterial root.TrustedMaterial) error { // nolint: revive + ctlogs := trustedMaterial.CTLogs() + fulcioCerts := trustedMaterial.FulcioCertificateAuthorities() + + scts, err := x509util.ParseSCTsFromCertificate(leafCert.Raw) + if err != nil { + return err + } + + leafCTCert, err := ctx509.ParseCertificates(leafCert.Raw) + if err != nil { + return err + } + + verified := 0 + for _, sct := range scts { + encodedKeyID := hex.EncodeToString(sct.LogID.KeyID[:]) + key, ok := ctlogs[encodedKeyID] + if !ok { + // skip entries the trust root cannot verify + continue + } + + for _, fulcioCa := range fulcioCerts { + fulcioChain := make([]*ctx509.Certificate, len(leafCTCert)) + copy(fulcioChain, leafCTCert) + + var parentCert []byte + + if len(fulcioCa.Intermediates) == 0 { + parentCert = fulcioCa.Root.Raw + } else { + parentCert = fulcioCa.Intermediates[0].Raw + } + + fulcioIssuer, err := ctx509.ParseCertificates(parentCert) + if err != nil { + continue + } + fulcioChain = append(fulcioChain, fulcioIssuer...) + + err = ctutil.VerifySCT(key.PublicKey, fulcioChain, sct, true) + if err == nil { + verified++ + } + } + } + + if verified < threshold { + return fmt.Errorf("only able to verify %d SCT entries; unable to meet threshold of %d", verified, threshold) + } + + return nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go new file mode 100644 index 0000000000..1d0ddfabbd --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go @@ -0,0 +1,267 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package verify
+
+import (
+	"bytes"
+	"context"
+	"crypto"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/secure-systems-lab/go-securesystemslib/dsse"
+	"github.com/sigstore/sigstore-go/pkg/root"
+	"github.com/sigstore/sigstore/pkg/signature"
+	sigdsse "github.com/sigstore/sigstore/pkg/signature/dsse"
+	"github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+const maxAllowedSubjects = 1024
+const maxAllowedSubjectDigests = 32
+
+var ErrDSSEInvalidSignatureCount = errors.New("exactly one signature is required")
+
+func VerifySignature(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial) error { // nolint: revive
+	var verifier signature.Verifier
+	var err error
+
+	verifier, err = getSignatureVerifier(verificationContent, trustedMaterial)
+	if err != nil {
+		return fmt.Errorf("could not load signature verifier: %w", err)
+	}
+
+	if envelope := sigContent.EnvelopeContent(); envelope != nil {
+		return verifyEnvelope(verifier, envelope)
+	} else if msg := sigContent.MessageSignatureContent(); msg != nil {
+		return errors.New("artifact must be provided to verify message signature")
+	}
+
+	// handle an invalid signature content message
+	return errors.New("signature content has neither an envelope nor a message")
+}
+
+func VerifySignatureWithArtifact(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, artifact io.Reader) error { // nolint: revive
+	var verifier signature.Verifier
+	var err error
+
+	verifier, err = getSignatureVerifier(verificationContent, trustedMaterial)
+	if err != nil {
+		return fmt.Errorf("could not load signature verifier: %w", err)
+	}
+
+	if envelope := sigContent.EnvelopeContent(); envelope != nil {
+		return verifyEnvelopeWithArtifact(verifier, envelope, artifact)
+	} else if msg := sigContent.MessageSignatureContent(); msg != nil {
+		return verifyMessageSignature(verifier, msg, artifact)
+	}
+
+	// handle an invalid signature content message
+	return errors.New("signature content has neither an envelope nor a message")
+}
+
+func VerifySignatureWithArtifactDigest(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, artifactDigest []byte, artifactDigestAlgorithm string) error { // nolint: revive
+	var verifier signature.Verifier
+	var err error
+
+	verifier, err = getSignatureVerifier(verificationContent, trustedMaterial)
+	if err != nil {
+		return fmt.Errorf("could not load signature verifier: %w", err)
+	}
+
+	if envelope := sigContent.EnvelopeContent(); envelope != nil {
+		return verifyEnvelopeWithArtifactDigest(verifier, envelope, artifactDigest, artifactDigestAlgorithm)
+	} else if msg := sigContent.MessageSignatureContent(); msg != nil {
+		return verifyMessageSignatureWithArtifactDigest(verifier, msg, artifactDigest)
+	}
+
+	// handle an invalid signature content message
+	return errors.New("signature content has neither an envelope nor a message")
+}
+
+func getSignatureVerifier(verificationContent VerificationContent, tm root.TrustedMaterial) (signature.Verifier, error) {
+	if leafCert := verificationContent.GetCertificate(); leafCert != nil {
+		// TODO: Inspect certificate's SignatureAlgorithm to determine hash function
+		return signature.LoadVerifier(leafCert.PublicKey, crypto.SHA256)
+	} else if pk, ok := verificationContent.HasPublicKey(); ok {
+		return tm.PublicKeyVerifier(pk.Hint())
+	}
+
+	return nil, errors.New("no public key or certificate found")
+}
+
+func verifyEnvelope(verifier signature.Verifier, envelope EnvelopeContent) error {
+	dsseEnv := envelope.RawEnvelope()
+
+	// A DSSE envelope in a Sigstore bundle MUST only contain one
+	// signature, even though DSSE is more permissive.
+	if len(dsseEnv.Signatures) != 1 {
+		return ErrDSSEInvalidSignatureCount
+	}
+	pub, err := verifier.PublicKey()
+	if err != nil {
+		return fmt.Errorf("could not fetch verifier public key: %w", err)
+	}
+	envVerifier, err := dsse.NewEnvelopeVerifier(&sigdsse.VerifierAdapter{
+		SignatureVerifier: verifier,
+		Pub:               pub,
+	})
+
+	if err != nil {
+		return fmt.Errorf("could not load envelope verifier: %w", err)
+	}
+
+	_, err = envVerifier.Verify(context.TODO(), dsseEnv)
+	if err != nil {
+		return fmt.Errorf("could not verify envelope: %w", err)
+	}
+
+	return nil
+}
+
+func verifyEnvelopeWithArtifact(verifier signature.Verifier, envelope EnvelopeContent, artifact io.Reader) error {
+	err := verifyEnvelope(verifier, envelope)
+	if err != nil {
+		return err
+	}
+	statement, err := envelope.Statement()
+	if err != nil {
+		return fmt.Errorf("could not verify artifact: unable to extract statement from envelope: %w", err)
+	}
+	var artifactDigestAlgorithm string
+	var artifactDigest []byte
+
+	// Determine artifact digest algorithm by looking at the first subject's
+	// digests. This assumes that if a statement contains multiple subjects,
+	// they all use the same digest algorithm(s).
+	if len(statement.Subject) == 0 {
+		return errors.New("no subjects found in statement")
+	}
+	if len(statement.Subject[0].Digest) == 0 {
+		return errors.New("no digests found in statement")
+	}
+
+	// Select the strongest digest algorithm available; stop at the first
+	// match, since the candidates are ordered strongest-first.
+	for _, alg := range []string{"sha512", "sha384", "sha256"} {
+		if _, ok := statement.Subject[0].Digest[alg]; ok {
+			artifactDigestAlgorithm = alg
+			break
+		}
+	}
+	if artifactDigestAlgorithm == "" {
+		return errors.New("could not verify artifact: unsupported digest algorithm")
+	}
+
+	// Compute digest of the artifact.
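+	// As an illustration (hypothetical values), a statement subject carrying
+	// the digest map {"sha256": "...", "sha512": "..."} would select "sha512"
+	// above, since that is the strongest algorithm this function supports.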
+	var hasher hash.Hash
+	switch artifactDigestAlgorithm {
+	case "sha512":
+		hasher = crypto.SHA512.New()
+	case "sha384":
+		hasher = crypto.SHA384.New()
+	case "sha256":
+		hasher = crypto.SHA256.New()
+	}
+	_, err = io.Copy(hasher, artifact)
+	if err != nil {
+		return fmt.Errorf("could not verify artifact: unable to calculate digest: %w", err)
+	}
+	artifactDigest = hasher.Sum(nil)
+
+	// limit the number of subjects to prevent DoS
+	if len(statement.Subject) > maxAllowedSubjects {
+		return fmt.Errorf("too many subjects: %d > %d", len(statement.Subject), maxAllowedSubjects)
+	}
+
+	// Look for artifact digest in statement
+	for _, subject := range statement.Subject {
+		// limit the number of digests to prevent DoS
+		if len(subject.Digest) > maxAllowedSubjectDigests {
+			return fmt.Errorf("too many digests: %d > %d", len(subject.Digest), maxAllowedSubjectDigests)
+		}
+		for alg, digest := range subject.Digest {
+			hexdigest, err := hex.DecodeString(digest)
+			if err != nil {
+				return fmt.Errorf("could not verify artifact: unable to decode subject digest: %w", err)
+			}
+			if alg == artifactDigestAlgorithm && bytes.Equal(artifactDigest, hexdigest) {
+				return nil
+			}
+		}
+	}
+	return errors.New("could not verify artifact: unable to confirm artifact digest is present in subject digests")
+}
+
+func verifyEnvelopeWithArtifactDigest(verifier signature.Verifier, envelope EnvelopeContent, artifactDigest []byte, artifactDigestAlgorithm string) error {
+	err := verifyEnvelope(verifier, envelope)
+	if err != nil {
+		return err
+	}
+	statement, err := envelope.Statement()
+	if err != nil {
+		return fmt.Errorf("could not verify artifact: unable to extract statement from envelope: %w", err)
+	}
+
+	// limit the number of subjects to prevent DoS
+	if len(statement.Subject) > maxAllowedSubjects {
+		return fmt.Errorf("too many subjects: %d > %d", len(statement.Subject), maxAllowedSubjects)
+	}
+
+	for _, subject := range statement.Subject {
+		// limit the number of digests to prevent DoS
+		if len(subject.Digest) > maxAllowedSubjectDigests {
+			return fmt.Errorf("too many digests: %d > %d", len(subject.Digest), maxAllowedSubjectDigests)
+		}
+		for alg, digest := range subject.Digest {
+			if alg == artifactDigestAlgorithm {
+				hexdigest, err := hex.DecodeString(digest)
+				if err != nil {
+					return fmt.Errorf("could not verify artifact: unable to decode subject digest: %w", err)
+				}
+				if bytes.Equal(hexdigest, artifactDigest) {
+					return nil
+				}
+			}
+		}
+	}
+	return errors.New("provided artifact digest does not match any digest in statement")
+}
+
+func verifyMessageSignature(verifier signature.Verifier, msg MessageSignatureContent, artifact io.Reader) error {
+	err := verifier.VerifySignature(bytes.NewReader(msg.Signature()), artifact)
+	if err != nil {
+		return fmt.Errorf("could not verify message: %w", err)
+	}
+
+	return nil
+}
+
+func verifyMessageSignatureWithArtifactDigest(verifier signature.Verifier, msg MessageSignatureContent, artifactDigest []byte) error {
+	if !bytes.Equal(artifactDigest, msg.Digest()) {
+		return errors.New("artifact does not match digest")
+	}
+	if _, ok := verifier.(*signature.ED25519Verifier); ok {
+		return errors.New("message signatures with ed25519 signatures can only be verified with artifacts, and not just their digest")
+	}
+	err := verifier.VerifySignature(bytes.NewReader(msg.Signature()), bytes.NewReader([]byte{}), options.WithDigest(artifactDigest))
+
+	if err != nil {
+		return fmt.Errorf("could not verify message: %w", err)
+	}
+
+	return nil
+}
diff --git
a/vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go
new file mode 100644
index 0000000000..2ab826e911
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go
@@ -0,0 +1,746 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"io"
+	"time"
+
+	in_toto "github.com/in-toto/attestation/go/v1"
+	"github.com/sigstore/sigstore-go/pkg/fulcio/certificate"
+	"github.com/sigstore/sigstore-go/pkg/root"
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+const (
+	VerificationResultMediaType01 = "application/vnd.dev.sigstore.verificationresult+json;version=0.1"
+)
+
+type SignedEntityVerifier struct {
+	trustedMaterial root.TrustedMaterial
+	config          VerifierConfig
+}
+
+type VerifierConfig struct { // nolint: revive
+	// performOnlineVerification queries logs during verification.
+	// Default is offline
+	performOnlineVerification bool
+	// weExpectSignedTimestamps requires RFC3161 timestamps to verify
+	// short-lived certificates
+	weExpectSignedTimestamps bool
+	// signedTimestampThreshold is the minimum number of verified
+	// RFC3161 timestamps in a bundle
+	signedTimestampThreshold int
+	// requireIntegratedTimestamps requires log entry integrated timestamps to
+	// verify short-lived certificates
+	requireIntegratedTimestamps bool
+	// integratedTimeThreshold is the minimum number of log entry
+	// integrated timestamps in a bundle
+	integratedTimeThreshold int
+	// requireObserverTimestamps requires RFC3161 timestamps and/or log
+	// integrated timestamps to verify short-lived certificates
+	requireObserverTimestamps bool
+	// observerTimestampThreshold is the minimum number of verified
+	// RFC3161 timestamps and/or log integrated timestamps in a bundle
+	observerTimestampThreshold int
+	// weExpectTlogEntries requires log inclusion proofs in a bundle
+	weExpectTlogEntries bool
+	// tlogEntriesThreshold is the minimum number of verified inclusion
+	// proofs in a bundle
+	tlogEntriesThreshold int
+	// weExpectSCTs requires SCTs in Fulcio certificates
+	weExpectSCTs bool
+	// ctlogEntriesThreshold is the minimum number of verified SCTs in
+	// a Fulcio certificate
+	ctlogEntriesThreshold int
+	// weDoNotExpectAnyObserverTimestamps uses the certificate's lifetime
+	// rather than a provided signed or log timestamp. Most workflows will
+	// not use this option
+	weDoNotExpectAnyObserverTimestamps bool
+}
+
+type VerifierOption func(*VerifierConfig) error
+
+// NewSignedEntityVerifier creates a new SignedEntityVerifier. It takes a
+// root.TrustedMaterial, which contains a set of trusted public keys and
+// certificates, and a set of VerifierOptions, which set the config
+// that determines the behaviour of the Verify function.
+//
+// VerifierConfig's set of options should match the properties of a given
+// Sigstore deployment, i.e.
whether to expect SCTs, Tlog entries, or signed +// timestamps. +func NewSignedEntityVerifier(trustedMaterial root.TrustedMaterial, options ...VerifierOption) (*SignedEntityVerifier, error) { + var err error + c := VerifierConfig{} + + for _, opt := range options { + err = opt(&c) + if err != nil { + return nil, fmt.Errorf("failed to configure verifier: %w", err) + } + } + + err = c.Validate() + if err != nil { + return nil, err + } + + v := &SignedEntityVerifier{ + trustedMaterial: trustedMaterial, + config: c, + } + + return v, nil +} + +// WithOnlineVerification configures the SignedEntityVerifier to perform +// online verification when verifying Transparency Log entries and +// Signed Certificate Timestamps. +func WithOnlineVerification() VerifierOption { + return func(c *VerifierConfig) error { + c.performOnlineVerification = true + return nil + } +} + +// WithSignedTimestamps configures the SignedEntityVerifier to expect RFC 3161 +// timestamps from a Timestamp Authority, verify them using the TrustedMaterial's +// TimestampingAuthorities(), and, if it exists, use the resulting timestamp(s) +// to verify the Fulcio certificate. +func WithSignedTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("signed timestamp threshold must be at least 1") + } + c.weExpectSignedTimestamps = true + c.signedTimestampThreshold = threshold + return nil + } +} + +// WithObserverTimestamps configures the SignedEntityVerifier to expect +// timestamps from either an RFC3161 timestamp authority or a log's +// SignedEntryTimestamp. These are verified using the TrustedMaterial's +// TimestampingAuthorities() or RekorLogs(), and used to verify +// the Fulcio certificate. +func WithObserverTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("observer timestamp threshold must be at least 1") + } + c.requireObserverTimestamps = true + c.observerTimestampThreshold = threshold + return nil + } +} + +// WithTransparencyLog configures the SignedEntityVerifier to expect +// Transparency Log inclusion proofs or SignedEntryTimestamps, verifying them +// using the TrustedMaterial's RekorLogs(). +func WithTransparencyLog(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("transparency log entry threshold must be at least 1") + } + c.weExpectTlogEntries = true + c.tlogEntriesThreshold = threshold + return nil + } +} + +// WithIntegratedTimestamps configures the SignedEntityVerifier to +// expect log entry integrated timestamps from either SignedEntryTimestamps +// or live log lookups. +func WithIntegratedTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + c.requireIntegratedTimestamps = true + c.integratedTimeThreshold = threshold + return nil + } +} + +// WithSignedCertificateTimestamps configures the SignedEntityVerifier to +// expect the Fulcio certificate to have a SignedCertificateTimestamp, and +// verify it using the TrustedMaterial's CTLogAuthorities(). 
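+//
+// A hedged sketch of wiring several options together (the thresholds of 1
+// are an illustrative assumption, not a recommendation):
+//
+//	v, err := NewSignedEntityVerifier(trustedMaterial,
+//		WithSignedCertificateTimestamps(1),
+//		WithTransparencyLog(1),
+//		WithObserverTimestamps(1),
+//	)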
+func WithSignedCertificateTimestamps(threshold int) VerifierOption { + return func(c *VerifierConfig) error { + if threshold < 1 { + return errors.New("ctlog entry threshold must be at least 1") + } + c.weExpectSCTs = true + c.ctlogEntriesThreshold = threshold + return nil + } +} + +// WithoutAnyObserverTimestampsUnsafe configures the SignedEntityVerifier to not expect +// any timestamps from either a Timestamp Authority or a Transparency Log. +// +// A SignedEntity without a trusted "observer" timestamp to verify the attached +// Fulcio certificate can't provide the same kind of integrity guarantee. +// +// Do not enable this if you don't know what you are doing; as the name implies, +// using it defeats part of the security guarantees offered by Sigstore. This +// option is only useful for testing. +func WithoutAnyObserverTimestampsUnsafe() VerifierOption { + return func(c *VerifierConfig) error { + c.weDoNotExpectAnyObserverTimestamps = true + return nil + } +} + +func (c *VerifierConfig) Validate() error { + if !c.requireObserverTimestamps && !c.weExpectSignedTimestamps && !c.requireIntegratedTimestamps && !c.weDoNotExpectAnyObserverTimestamps { + return errors.New("when initializing a new SignedEntityVerifier, you must specify at least one of " + + "WithObserverTimestamps(), WithSignedTimestamps(), WithIntegratedTimestamps(), or WithoutAnyObserverTimestampsUnsafe()") + } + + return nil +} + +type VerificationResult struct { + MediaType string `json:"mediaType"` + Statement *in_toto.Statement `json:"statement,omitempty"` + Signature *SignatureVerificationResult `json:"signature,omitempty"` + VerifiedTimestamps []TimestampVerificationResult `json:"verifiedTimestamps"` + VerifiedIdentity *CertificateIdentity `json:"verifiedIdentity,omitempty"` +} + +type SignatureVerificationResult struct { + PublicKeyID *[]byte `json:"publicKeyId,omitempty"` + Certificate *certificate.Summary `json:"certificate,omitempty"` +} + +type TimestampVerificationResult struct { + Type string `json:"type"` + URI string `json:"uri"` + Timestamp time.Time `json:"timestamp"` +} + +func NewVerificationResult() *VerificationResult { + return &VerificationResult{ + MediaType: VerificationResultMediaType01, + } +} + +type PolicyOption func(*PolicyConfig) error +type ArtifactPolicyOption func(*PolicyConfig) error + +// PolicyBuilder is responsible for building & validating a PolicyConfig +type PolicyBuilder struct { + artifactPolicy ArtifactPolicyOption + policyOptions []PolicyOption +} + +func (pc PolicyBuilder) Options() []PolicyOption { + arr := []PolicyOption{PolicyOption(pc.artifactPolicy)} + return append(arr, pc.policyOptions...) 
+}
+
+func (pc PolicyBuilder) BuildConfig() (*PolicyConfig, error) {
+	var err error
+
+	policy := &PolicyConfig{}
+	for _, applyOption := range pc.Options() {
+		err = applyOption(policy)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if err := policy.Validate(); err != nil {
+		return nil, err
+	}
+
+	return policy, nil
+}
+
+type PolicyConfig struct {
+	weDoNotExpectAnArtifact bool
+	weDoNotExpectIdentities bool
+	weExpectSigningKey      bool
+	certificateIdentities   CertificateIdentities
+	verifyArtifact          bool
+	artifact                io.Reader
+	verifyArtifactDigest    bool
+	artifactDigest          []byte
+	artifactDigestAlgorithm string
+}
+
+func (p *PolicyConfig) Validate() error {
+	if p.WeExpectIdentities() && len(p.certificateIdentities) == 0 {
+		return errors.New("can't verify identities without providing at least one identity")
+	}
+
+	return nil
+}
+
+// WeExpectAnArtifact returns true if the Verify algorithm should perform
+// signature verification with an artifact provided by either the
+// WithArtifact or the WithArtifactDigest functions.
+//
+// By default, unless explicitly turned off, we should always expect to verify
+// a SignedEntity's signature using an artifact. Bools are initialized to
+// false; this behaviour is therefore controlled by the
+// weDoNotExpectAnArtifact field.
+//
+// Double negatives are confusing, though. To aid with comprehension of the
+// main Verify loop, this function therefore just wraps the double negative.
+func (p *PolicyConfig) WeExpectAnArtifact() bool {
+	return !p.weDoNotExpectAnArtifact
+}
+
+// WeExpectIdentities returns true if the Verify algorithm should check
+// whether the SignedEntity's certificate was created by one of the identities
+// provided by the WithCertificateIdentity function.
+//
+// By default, unless explicitly turned off, we should always expect to enforce
+// that a SignedEntity's certificate was created by an Identity we trust. Bools
+// are initialized to false; this behaviour is therefore controlled by the
+// weDoNotExpectIdentities field.
+//
+// Double negatives are confusing, though. To aid with comprehension of the
+// main Verify loop, this function therefore just wraps the double negative.
+func (p *PolicyConfig) WeExpectIdentities() bool {
+	return !p.weDoNotExpectIdentities
+}
+
+// WeExpectSigningKey returns true if we expect the SignedEntity to be signed
+// with a key and not a certificate.
+func (p *PolicyConfig) WeExpectSigningKey() bool {
+	return p.weExpectSigningKey
+}
+
+func NewPolicy(artifactOpt ArtifactPolicyOption, options ...PolicyOption) PolicyBuilder {
+	return PolicyBuilder{artifactPolicy: artifactOpt, policyOptions: options}
+}
+
+// WithoutIdentitiesUnsafe allows the caller of Verify to skip enforcing any
+// checks on the identity that created the SignedEntity being verified.
+//
+// Do not use this option unless you know what you are doing!
+//
+// As the name implies, using WithoutIdentitiesUnsafe is not safe: outside of
+// exceptional circumstances, we should always enforce that the SignedEntity
+// being verified was signed by a trusted CertificateIdentity.
+//
+// For more information, consult WithCertificateIdentity.
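+//
+// For contrast, a hedged sketch of the usual, safer policy construction
+// (certID and artifactReader are assumed to exist in the caller's code):
+//
+//	policy := NewPolicy(WithArtifact(artifactReader), WithCertificateIdentity(certID))
+//
+// WithoutIdentitiesUnsafe would replace WithCertificateIdentity(certID) above
+// only in exceptional, well-understood circumstances.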
+func WithoutIdentitiesUnsafe() PolicyOption {
+	return func(p *PolicyConfig) error {
+		if len(p.certificateIdentities) > 0 {
+			return errors.New("can't use WithoutIdentitiesUnsafe while specifying CertificateIdentities")
+		}
+
+		p.weDoNotExpectIdentities = true
+		return nil
+	}
+}
+
+// WithCertificateIdentity allows the caller of Verify to enforce that the
+// SignedEntity being verified was signed by the given identity, as defined by
+// the Fulcio certificate embedded in the entity. If this policy is enabled,
+// but the SignedEntity does not have a certificate, verification will fail.
+//
+// Providing this function multiple times will concatenate the provided
+// CertificateIdentity to the list of identities being checked.
+//
+// If all of the provided CertificateIdentities fail to match the Fulcio
+// certificate, then verification will fail. If *any* CertificateIdentity
+// matches, then verification will succeed. Therefore, each CertificateIdentity
+// provided to this function must define a "sufficient" identity to trust.
+//
+// The CertificateIdentity struct allows callers to specify:
+// - The exact value, or Regexp, of the SubjectAlternativeName
+// - The exact value of any Fulcio OID X.509 extension, i.e. Issuer
+//
+// For convenience, consult the NewShortCertificateIdentity function.
+func WithCertificateIdentity(identity CertificateIdentity) PolicyOption {
+	return func(p *PolicyConfig) error {
+		if p.weDoNotExpectIdentities {
+			return errors.New("can't use WithCertificateIdentity while using WithoutIdentitiesUnsafe")
+		}
+		if p.weExpectSigningKey {
+			return errors.New("can't use WithCertificateIdentity while using WithKey")
+		}
+
+		p.certificateIdentities = append(p.certificateIdentities, identity)
+		return nil
+	}
+}
+
+// WithKey allows the caller of Verify to require the SignedEntity being
+// verified was signed with a key and not a certificate.
+func WithKey() PolicyOption {
+	return func(p *PolicyConfig) error {
+		if len(p.certificateIdentities) > 0 {
+			return errors.New("can't use WithKey while using WithCertificateIdentity")
+		}
+
+		p.weExpectSigningKey = true
+		p.weDoNotExpectIdentities = true
+		return nil
+	}
+}
+
+// WithoutArtifactUnsafe allows the caller of Verify to skip checking whether
+// the SignedEntity was created from, or references, an artifact.
+//
+// WithoutArtifactUnsafe can only be used with SignedEntities that contain a
+// DSSE envelope. If the SignedEntity has a MessageSignature, providing
+// this policy option will cause verification to always fail, since
+// MessageSignatures can only be verified in the presence of an Artifact or
+// artifact digest. See WithArtifact/WithArtifactDigest for more information.
+//
+// Do not use this function unless you know what you are doing!
+//
+// As the name implies, using WithoutArtifactUnsafe is not safe: outside of
+// exceptional circumstances, SignedEntities should always be verified with
+// an artifact.
+func WithoutArtifactUnsafe() ArtifactPolicyOption {
+	return func(p *PolicyConfig) error {
+		if p.verifyArtifact || p.verifyArtifactDigest {
+			return errors.New("can't use WithoutArtifactUnsafe while using WithArtifact or WithArtifactDigest")
+		}
+
+		p.weDoNotExpectAnArtifact = true
+		return nil
+	}
+}
+
+// WithArtifact allows the caller of Verify to enforce that the SignedEntity
+// being verified was created from, or references, a given artifact.
+//
+// If the SignedEntity contains a DSSE envelope, then the artifact digest is
+// calculated from the given artifact, and compared to the digest in the
+// envelope's statement.
+func WithArtifact(artifact io.Reader) ArtifactPolicyOption {
+	return func(p *PolicyConfig) error {
+		if p.verifyArtifact || p.verifyArtifactDigest {
+			return errors.New("only one invocation of WithArtifact/WithArtifactDigest is allowed")
+		}
+
+		if p.weDoNotExpectAnArtifact {
+			return errors.New("can't use WithArtifact while using WithoutArtifactUnsafe")
+		}
+
+		p.verifyArtifact = true
+		p.artifact = artifact
+		return nil
+	}
+}
+
+// WithArtifactDigest allows the caller of Verify to enforce that the
+// SignedEntity being verified was created for a given artifact digest.
+//
+// If the SignedEntity contains a MessageSignature that was signed using the
+// ED25519 algorithm, then providing only an artifactDigest will fail; the
+// whole artifact must be provided. Use WithArtifact instead.
+//
+// If the SignedEntity contains a DSSE envelope, then the artifact digest is
+// compared to the digest in the envelope's statement.
+func WithArtifactDigest(algorithm string, artifactDigest []byte) ArtifactPolicyOption {
+	return func(p *PolicyConfig) error {
+		if p.verifyArtifact || p.verifyArtifactDigest {
+			return errors.New("only one invocation of WithArtifact/WithArtifactDigest is allowed")
+		}
+
+		if p.weDoNotExpectAnArtifact {
+			return errors.New("can't use WithArtifactDigest while using WithoutArtifactUnsafe")
+		}
+
+		p.verifyArtifactDigest = true
+		p.artifactDigestAlgorithm = algorithm
+		p.artifactDigest = artifactDigest
+		return nil
+	}
+}
+
+// Verify checks the cryptographic integrity of a given SignedEntity according
+// to the options configured in the NewSignedEntityVerifier. Its purpose is to
+// determine whether the SignedEntity was created by a Sigstore deployment we
+// trust, as defined by keys in our TrustedMaterial.
+//
+// If the SignedEntity contains a MessageSignature, then the artifact or its
+// digest must be provided to the Verify function, as it is required to verify
+// the signature. See WithArtifact and WithArtifactDigest for more details.
+//
+// If and only if verification is successful, Verify will return a
+// VerificationResult struct whose contents' integrity have been verified.
+// Verify may then verify the contents of the VerificationResults using supplied
+// PolicyOptions. See WithCertificateIdentity for more details.
+//
+// Callers of this function SHOULD ALWAYS:
+// - (if the signed entity has a certificate) verify that its Subject Alternative
+// Name matches a trusted identity, and that its OID Issuer field matches an
+// expected value
+// - (if the signed entity has a dsse envelope) verify that the envelope's
+// statement's subject matches the artifact being verified
+func (v *SignedEntityVerifier) Verify(entity SignedEntity, pb PolicyBuilder) (*VerificationResult, error) {
+	policy, err := pb.BuildConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed to build policy: %w", err)
+	}
+
+	// Let's go by the spec: https://docs.google.com/document/d/1kbhK2qyPPk8SLavHzYSDM8-Ueul9_oxIMVFuWMWKz0E/edit#heading=h.g11ovq2s1jxh
+	// > ## Transparency Log Entry
+	verifiedTlogTimestamps, err := v.VerifyTransparencyLogInclusion(entity)
+	if err != nil {
+		return nil, fmt.Errorf("failed to verify log inclusion: %w", err)
+	}
+
+	// > ## Establishing a Time for the Signature
+	// > First, establish a time for the signature.
This timestamp is required to validate the certificate chain, so this step comes first. + verifiedTimestamps, err := v.VerifyObserverTimestamps(entity, verifiedTlogTimestamps) + if err != nil { + return nil, fmt.Errorf("failed to verify timestamps: %w", err) + } + + verificationContent, err := entity.VerificationContent() + if err != nil { + return nil, fmt.Errorf("failed to fetch verification content: %w", err) + } + + var signedWithCertificate bool + var certSummary certificate.Summary + + // If the bundle was signed with a long-lived key, and does not have a Fulcio certificate, + // then skip the certificate verification steps + if leafCert := verificationContent.GetCertificate(); leafCert != nil { + if policy.WeExpectSigningKey() { + return nil, errors.New("expected key signature, not certificate") + } + + signedWithCertificate = true + + // Get the summary before modifying the cert extensions + certSummary, err = certificate.SummarizeCertificate(leafCert) + if err != nil { + return nil, fmt.Errorf("failed to summarize certificate: %w", err) + } + + // From spec: + // > ## Certificate + // > … + // > The Verifier MUST perform certification path validation (RFC 5280 §6) of the certificate chain with the pre-distributed Fulcio root certificate(s) as a trust anchor, but with a fake “current time.” If a timestamp from the timestamping service is available, the Verifier MUST perform path validation using the timestamp from the Timestamping Service. If a timestamp from the Transparency Service is available, the Verifier MUST perform path validation using the timestamp from the Transparency Service. If both are available, the Verifier performs path validation twice. If either fails, verification fails. + + // Go does not support the OtherName GeneralName SAN extension. If + // Fulcio issued the certificate with an OtherName SAN, it will be + // handled by SummarizeCertificate above, and it must be removed here + // or the X.509 verification will fail. + if len(leafCert.UnhandledCriticalExtensions) > 0 { + var unhandledExts []asn1.ObjectIdentifier + for _, oid := range leafCert.UnhandledCriticalExtensions { + if !oid.Equal(cryptoutils.SANOID) { + unhandledExts = append(unhandledExts, oid) + } + } + leafCert.UnhandledCriticalExtensions = unhandledExts + } + + for _, verifiedTs := range verifiedTimestamps { + // verify the leaf certificate against the root + err = VerifyLeafCertificate(verifiedTs.Timestamp, leafCert, v.trustedMaterial) + if err != nil { + return nil, fmt.Errorf("failed to verify leaf certificate: %w", err) + } + } + + // From spec: + // > Unless performing online verification (see §Alternative Workflows), the Verifier MUST extract the SignedCertificateTimestamp embedded in the leaf certificate, and verify it as in RFC 9162 §8.1.3, using the verification key from the Certificate Transparency Log. + + if v.config.weExpectSCTs { + err = VerifySignedCertificateTimestamp(leafCert, v.config.ctlogEntriesThreshold, v.trustedMaterial) + if err != nil { + return nil, fmt.Errorf("failed to verify signed certificate timestamp: %w", err) + } + } + } + + // From spec: + // > ## Signature Verification + // > The Verifier MUST verify the provided signature for the constructed payload against the key in the leaf of the certificate chain. 
+ + sigContent, err := entity.SignatureContent() + if err != nil { + return nil, fmt.Errorf("failed to fetch signature content: %w", err) + } + + if policy.WeExpectAnArtifact() { + switch { + case policy.verifyArtifact: + err = VerifySignatureWithArtifact(sigContent, verificationContent, v.trustedMaterial, policy.artifact) + case policy.verifyArtifactDigest: + err = VerifySignatureWithArtifactDigest(sigContent, verificationContent, v.trustedMaterial, policy.artifactDigest, policy.artifactDigestAlgorithm) + default: + // should never happen, but just in case: + err = errors.New("no artifact or artifact digest provided") + } + } else { + // verifying with artifact has been explicitly turned off, so just check + // the signature on the dsse envelope: + err = VerifySignature(sigContent, verificationContent, v.trustedMaterial) + } + + if err != nil { + return nil, fmt.Errorf("failed to verify signature: %w", err) + } + + // Hooray! We've verified all of the entity's constituent parts! 🎉 🥳 + // Now we can construct the results object accordingly. + result := NewVerificationResult() + if signedWithCertificate { + result.Signature = &SignatureVerificationResult{ + Certificate: &certSummary, + } + } + + // SignatureContent can be either an Envelope or a MessageSignature. + // If it's an Envelope, let's pop the Statement for our results: + if envelope := sigContent.EnvelopeContent(); envelope != nil { + stmt, err := envelope.Statement() + if err != nil { + return nil, fmt.Errorf("failed to fetch envelope statement: %w", err) + } + + result.Statement = stmt + } + + result.VerifiedTimestamps = verifiedTimestamps + + // Now that the signed entity's crypto material has been verified, and the + // result struct has been constructed, we can optionally enforce some + // additional policies: + // -------------------- + + // From ## Certificate section, + // >The Verifier MUST then check the certificate against the verification policy. Details on how to do this depend on the verification policy, but the Verifier SHOULD check the Issuer X.509 extension (OID 1.3.6.1.4.1.57264.1.1) at a minimum, and will in most cases check the SubjectAlternativeName as well. See Spec: Fulcio §TODO for example checks on the certificate. + if policy.WeExpectIdentities() { + if !signedWithCertificate { + // We got asked to verify identities, but the entity was not signed with + // a certificate. That's a problem! + return nil, errors.New("can't verify certificate identities: entity was not signed with a certificate") + } + + if len(policy.certificateIdentities) == 0 { + return nil, errors.New("can't verify certificate identities: no identities provided") + } + + matchingCertID, err := policy.certificateIdentities.Verify(certSummary) + if err != nil { + return nil, fmt.Errorf("failed to verify certificate identity: %w", err) + } + + result.VerifiedIdentity = matchingCertID + } + + return result, nil +} + +// VerifyTransparencyLogInclusion verifies TlogEntries if expected. Optionally returns +// a list of verified timestamps from the log integrated timestamps when verifying +// with observer timestamps. 
+// TODO: Return a different verification result for logs specifically (also for #48)
+func (v *SignedEntityVerifier) VerifyTransparencyLogInclusion(entity SignedEntity) ([]TimestampVerificationResult, error) {
+	verifiedTimestamps := []TimestampVerificationResult{}
+
+	if v.config.weExpectTlogEntries {
+		// log timestamps should be verified if WithIntegratedTimestamps or WithObserverTimestamps is used
+		verifiedTlogTimestamps, err := VerifyArtifactTransparencyLog(entity, v.trustedMaterial, v.config.tlogEntriesThreshold,
+			v.config.requireIntegratedTimestamps || v.config.requireObserverTimestamps, v.config.performOnlineVerification)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, vts := range verifiedTlogTimestamps {
+			verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "Tlog", URI: "TODO", Timestamp: vts})
+		}
+	}
+
+	return verifiedTimestamps, nil
+}
+
+// VerifyObserverTimestamps verifies RFC3161 signed timestamps, and verifies
+// that timestamp thresholds are met with log entry integrated timestamps,
+// signed timestamps, or a combination of both. The returned timestamps
+// can be used to verify short-lived certificates.
+// logTimestamps may be populated with verified log entry integrated timestamps.
+// In order to be verifiable, a SignedEntity must have at least one verified
+// "observer timestamp".
+func (v *SignedEntityVerifier) VerifyObserverTimestamps(entity SignedEntity, logTimestamps []TimestampVerificationResult) ([]TimestampVerificationResult, error) {
+	verifiedTimestamps := []TimestampVerificationResult{}
+
+	// From spec:
+	// > … if verification or timestamp parsing fails, the Verifier MUST abort
+	if v.config.weExpectSignedTimestamps {
+		verifiedSignedTimestamps, err := VerifyTimestampAuthorityWithThreshold(entity, v.trustedMaterial, v.config.signedTimestampThreshold)
+		if err != nil {
+			return nil, err
+		}
+		for _, vts := range verifiedSignedTimestamps {
+			verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "TimestampAuthority", URI: "TODO", Timestamp: vts})
+		}
+	}
+
+	if v.config.requireIntegratedTimestamps {
+		if len(logTimestamps) < v.config.integratedTimeThreshold {
+			return nil, fmt.Errorf("threshold not met for verified log entry integrated timestamps: %d < %d", len(logTimestamps), v.config.integratedTimeThreshold)
+		}
+		verifiedTimestamps = append(verifiedTimestamps, logTimestamps...)
+	}
+
+	if v.config.requireObserverTimestamps {
+		verifiedSignedTimestamps, err := VerifyTimestampAuthority(entity, v.trustedMaterial)
+		if err != nil {
+			return nil, err
+		}
+
+		// check threshold for both RFC3161 and log timestamps
+		tsCount := len(verifiedSignedTimestamps) + len(logTimestamps)
+		if tsCount < v.config.observerTimestampThreshold {
+			return nil, fmt.Errorf("threshold not met for verified signed & log entry integrated timestamps: %d < %d",
+				tsCount, v.config.observerTimestampThreshold)
+		}
+
+		// append all timestamps
+		verifiedTimestamps = append(verifiedTimestamps, logTimestamps...)
+		for _, vts := range verifiedSignedTimestamps {
+			verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "TimestampAuthority", URI: "TODO", Timestamp: vts})
+		}
+	}
+
+	if v.config.weDoNotExpectAnyObserverTimestamps {
+		// if we have a cert, let's pop the leaf cert's NotBefore
+		verificationContent, err := entity.VerificationContent()
+		if err != nil {
+			return nil, err
+		}
+
+		if leafCert := verificationContent.GetCertificate(); leafCert != nil {
+			verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "LeafCert.NotBefore", URI: "", Timestamp: leafCert.NotBefore})
+		} else {
+			// no cert? use current time
+			verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "CurrentTime", URI: "", Timestamp: time.Now()})
+		}
+	}
+
+	if len(verifiedTimestamps) == 0 {
+		return nil, errors.New("no valid observer timestamps found")
+	}
+
+	return verifiedTimestamps, nil
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go
new file mode 100644
index 0000000000..e91baafde1
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go
@@ -0,0 +1,210 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+	"bytes"
+	"context"
+	"crypto"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"time"
+
+	rekorClient "github.com/sigstore/rekor/pkg/client"
+	rekorGeneratedClient "github.com/sigstore/rekor/pkg/generated/client"
+	rekorEntries "github.com/sigstore/rekor/pkg/generated/client/entries"
+	rekorVerify "github.com/sigstore/rekor/pkg/verify"
+	"github.com/sigstore/sigstore/pkg/signature"
+
+	"github.com/sigstore/sigstore-go/pkg/root"
+	"github.com/sigstore/sigstore-go/pkg/tlog"
+	"github.com/sigstore/sigstore-go/pkg/util"
+)
+
+const maxAllowedTlogEntries = 32
+
+// VerifyArtifactTransparencyLog verifies that the given entity has been logged
+// in the transparency log and that the log entry is valid.
+//
+// The threshold parameter is the number of unique transparency log entries
+// that must be verified.
+//
+// If online is true, the log entry is verified against the Rekor server.
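+//
+// Hedged call sketch (the threshold and flag values are illustrative
+// assumptions only):
+//
+//	integratedTimes, err := VerifyArtifactTransparencyLog(entity, trustedMaterial, 1, true, false)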
+func VerifyArtifactTransparencyLog(entity SignedEntity, trustedMaterial root.TrustedMaterial, logThreshold int, trustIntegratedTime, online bool) ([]time.Time, error) { //nolint:revive
+	entries, err := entity.TlogEntries()
+	if err != nil {
+		return nil, err
+	}
+
+	// limit the number of tlog entries to prevent DoS
+	if len(entries) > maxAllowedTlogEntries {
+		return nil, fmt.Errorf("too many tlog entries: %d > %d", len(entries), maxAllowedTlogEntries)
+	}
+
+	// disallow duplicate entries, as a malicious actor could use duplicates to bypass the threshold
+	for i := 0; i < len(entries); i++ {
+		for j := i + 1; j < len(entries); j++ {
+			if entries[i].LogKeyID() == entries[j].LogKeyID() && entries[i].LogIndex() == entries[j].LogIndex() {
+				return nil, errors.New("duplicate tlog entries found")
+			}
+		}
+	}
+
+	sigContent, err := entity.SignatureContent()
+	if err != nil {
+		return nil, err
+	}
+
+	entitySignature := sigContent.Signature()
+
+	verificationContent, err := entity.VerificationContent()
+	if err != nil {
+		return nil, err
+	}
+
+	verifiedTimestamps := []time.Time{}
+	logEntriesVerified := 0
+
+	for _, entry := range entries {
+		err := tlog.ValidateEntry(entry)
+		if err != nil {
+			return nil, err
+		}
+
+		if !online {
+			if !entry.HasInclusionPromise() && !entry.HasInclusionProof() {
+				return nil, errors.New("entry must contain an inclusion proof and/or promise")
+			}
+			if entry.HasInclusionPromise() {
+				err = tlog.VerifySET(entry, trustedMaterial.RekorLogs())
+				if err != nil {
+					// skip entries the trust root cannot verify
+					continue
+				}
+				if trustIntegratedTime {
+					verifiedTimestamps = append(verifiedTimestamps, entry.IntegratedTime())
+				}
+			}
+			if entry.HasInclusionProof() {
+				keyID := entry.LogKeyID()
+				hex64Key := hex.EncodeToString([]byte(keyID))
+				tlogVerifier, ok := trustedMaterial.RekorLogs()[hex64Key]
+				if !ok {
+					// skip entries the trust root cannot verify
+					continue
+				}
+
+				verifier, err := getVerifier(tlogVerifier.PublicKey, tlogVerifier.SignatureHashFunc)
+				if err != nil {
+					return nil, err
+				}
+
+				err = tlog.VerifyInclusion(entry, *verifier)
+				if err != nil {
+					return nil, err
+				}
+				// DO NOT use timestamp with only an inclusion proof, because it is not signed metadata
+			}
+		} else {
+			keyID := entry.LogKeyID()
+			hex64Key := hex.EncodeToString([]byte(keyID))
+			tlogVerifier, ok := trustedMaterial.RekorLogs()[hex64Key]
+			if !ok {
+				// skip entries the trust root cannot verify
+				continue
+			}
+
+			client, err := getRekorClient(tlogVerifier.BaseURL)
+			if err != nil {
+				return nil, err
+			}
+			verifier, err := getVerifier(tlogVerifier.PublicKey, tlogVerifier.SignatureHashFunc)
+			if err != nil {
+				return nil, err
+			}
+
+			logIndex := entry.LogIndex()
+
+			searchParams := rekorEntries.NewGetLogEntryByIndexParams()
+			searchParams.LogIndex = logIndex
+
+			resp, err := client.Entries.GetLogEntryByIndex(searchParams)
+			if err != nil {
+				return nil, err
+			}
+
+			if len(resp.Payload) == 0 {
+				return nil, fmt.Errorf("unable to locate log entry %d", logIndex)
+			}
+
+			logEntry := resp.Payload
+
+			for _, v := range logEntry {
+				v := v
+				err = rekorVerify.VerifyLogEntry(context.TODO(), &v, *verifier)
+				if err != nil {
+					return nil, err
+				}
+			}
+			if trustIntegratedTime {
+				verifiedTimestamps = append(verifiedTimestamps, entry.IntegratedTime())
+			}
+		}
+		// Ensure entry signature matches signature from bundle
+		if !bytes.Equal(entry.Signature(), entitySignature) {
+			return nil, errors.New("transparency log signature does not match")
+		}
+
+		// Ensure entry certificate matches bundle
certificate + if !verificationContent.CompareKey(entry.PublicKey(), trustedMaterial) { + return nil, errors.New("transparency log certificate does not match") + } + + // TODO: if you have access to artifact, check that it matches body subject + + // Check tlog entry time against bundle certificates + if !verificationContent.ValidAtTime(entry.IntegratedTime(), trustedMaterial) { + return nil, errors.New("integrated time outside certificate validity") + } + + // successful log entry verification + logEntriesVerified++ + } + + if logEntriesVerified < logThreshold { + return nil, fmt.Errorf("not enough verified log entries from transparency log: %d < %d", logEntriesVerified, logThreshold) + } + + return verifiedTimestamps, nil +} + +func getVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (*signature.Verifier, error) { + verifier, err := signature.LoadVerifier(publicKey, hashFunc) + if err != nil { + return nil, err + } + + return &verifier, nil +} + +func getRekorClient(baseURL string) (*rekorGeneratedClient.Rekor, error) { + client, err := rekorClient.GetRekorClient(baseURL, rekorClient.WithUserAgent(util.ConstructUserAgent())) + if err != nil { + return nil, err + } + + return client, nil +} diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go new file mode 100644 index 0000000000..966fad20af --- /dev/null +++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go @@ -0,0 +1,131 @@ +// Copyright 2023 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "bytes" + "crypto/x509" + "errors" + "fmt" + "time" + + tsaverification "github.com/sigstore/timestamp-authority/pkg/verification" + + "github.com/sigstore/sigstore-go/pkg/root" +) + +const maxAllowedTimestamps = 32 + +// VerifyTimestampAuthority verifies that the given entity has been timestamped +// by a trusted timestamp authority and that the timestamp is valid. 
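+//
+// Hedged usage sketch; note this variant enforces no threshold (see
+// VerifyTimestampAuthorityWithThreshold for that):
+//
+//	times, err := VerifyTimestampAuthority(entity, trustedMaterial)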
+func VerifyTimestampAuthority(entity SignedEntity, trustedMaterial root.TrustedMaterial) ([]time.Time, error) { //nolint:revive
+	signedTimestamps, err := entity.Timestamps()
+	if err != nil {
+		return nil, err
+	}
+
+	// limit the number of timestamps to prevent DoS
+	if len(signedTimestamps) > maxAllowedTimestamps {
+		return nil, fmt.Errorf("too many signed timestamps: %d > %d", len(signedTimestamps), maxAllowedTimestamps)
+	}
+
+	// disallow duplicate timestamps, as a malicious actor could use duplicates to bypass the threshold
+	for i := 0; i < len(signedTimestamps); i++ {
+		for j := i + 1; j < len(signedTimestamps); j++ {
+			if bytes.Equal(signedTimestamps[i], signedTimestamps[j]) {
+				return nil, errors.New("duplicate timestamps found")
+			}
+		}
+	}
+
+	sigContent, err := entity.SignatureContent()
+	if err != nil {
+		return nil, err
+	}
+
+	signatureBytes := sigContent.Signature()
+
+	verificationContent, err := entity.VerificationContent()
+	if err != nil {
+		return nil, err
+	}
+
+	verifiedTimestamps := []time.Time{}
+	for _, timestamp := range signedTimestamps {
+		verifiedSignedTimestamp, err := verifySignedTimestamp(timestamp, signatureBytes, trustedMaterial, verificationContent)
+
+		// Timestamps from unknown sources are okay, but don't count as verified
+		if err != nil {
+			continue
+		}
+
+		verifiedTimestamps = append(verifiedTimestamps, verifiedSignedTimestamp)
+	}
+
+	return verifiedTimestamps, nil
+}
+
+// VerifyTimestampAuthorityWithThreshold verifies that the given entity has
+// been timestamped by a trusted timestamp authority and that the timestamp
+// is valid.
+//
+// The threshold parameter is the number of unique timestamps that must be
+// verified.
+func VerifyTimestampAuthorityWithThreshold(entity SignedEntity, trustedMaterial root.TrustedMaterial, threshold int) ([]time.Time, error) { //nolint:revive
+	verifiedTimestamps, err := VerifyTimestampAuthority(entity, trustedMaterial)
+	if err != nil {
+		return nil, err
+	}
+	if len(verifiedTimestamps) < threshold {
+		return nil, fmt.Errorf("threshold not met for verified signed timestamps: %d < %d", len(verifiedTimestamps), threshold)
+	}
+	return verifiedTimestamps, nil
+}
+
+func verifySignedTimestamp(signedTimestamp []byte, dsseSignatureBytes []byte, trustedMaterial root.TrustedMaterial, verificationContent VerificationContent) (time.Time, error) {
+	certAuthorities := trustedMaterial.TimestampingAuthorities()
+
+	// Iterate through TSA certificate authorities to find one that verifies
+	for _, ca := range certAuthorities {
+		trustedRootVerificationOptions := tsaverification.VerifyOpts{
+			Roots:          []*x509.Certificate{ca.Root},
+			Intermediates:  ca.Intermediates,
+			TSACertificate: ca.Leaf,
+		}
+
+		// Ensure timestamp responses are from trusted sources
+		timestamp, err := tsaverification.VerifyTimestampResponse(signedTimestamp, bytes.NewReader(dsseSignatureBytes), trustedRootVerificationOptions)
+		if err != nil {
+			continue
+		}
+
+		if !ca.ValidityPeriodStart.IsZero() && timestamp.Time.Before(ca.ValidityPeriodStart) {
+			continue
+		}
+		if !ca.ValidityPeriodEnd.IsZero() && timestamp.Time.After(ca.ValidityPeriodEnd) {
+			continue
+		}
+
+		// Check tlog entry time against bundle certificates
+		// TODO: technically no longer needed since we check the cert validity period in the main Verify loop
+		if !verificationContent.ValidAtTime(timestamp.Time, trustedMaterial) {
+			continue
+		}
+
+		// All verifications above succeeded, so return the verified timestamp
+		return timestamp.Time, nil
+	}
+
+	return time.Time{}, errors.New("unable to verify signed timestamps")
+}
diff
--git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go index a8b2805e64..1e2fa031be 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go @@ -20,7 +20,6 @@ import ( "crypto" "crypto/ecdsa" "crypto/ed25519" - "crypto/elliptic" "crypto/rsa" "crypto/sha1" // nolint:gosec "crypto/x509" @@ -104,15 +103,15 @@ func EqualKeys(first, second crypto.PublicKey) error { switch pub := first.(type) { case *rsa.PublicKey: if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "rsa")) + return errors.New(genErrMsg(first, second, "rsa")) } case *ecdsa.PublicKey: if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "ecdsa")) + return errors.New(genErrMsg(first, second, "ecdsa")) } case ed25519.PublicKey: if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "ed25519")) + return errors.New(genErrMsg(first, second, "ed25519")) } default: return errors.New("unsupported key type") @@ -137,47 +136,50 @@ func genErrMsg(first, second crypto.PublicKey, keyType string) string { // ValidatePubKey validates the parameters of an RSA, ECDSA, or ED25519 public key. func ValidatePubKey(pub crypto.PublicKey) error { + // goodkey policy enforces: + // * RSA + // * Size of key: 2048 <= size <= 4096, size % 8 = 0 + // * Exponent E = 65537 (Default exponent for OpenSSL and Golang) + // * Small primes check for modulus + // * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17) + // * Key is easily factored with Fermat's factorization method + // * EC + // * Public key Q is not the identity element (Ø) + // * Public key Q's x and y are within [0, p-1] + // * Public key Q is on the curve + // * Public key Q's order matches the subgroups (nQ = Ø) + allowedKeys := &goodkey.AllowedKeys{ + RSA2048: true, + RSA3072: true, + RSA4096: true, + ECDSAP256: true, + ECDSAP384: true, + ECDSAP521: true, + } + cfg := &goodkey.Config{ + FermatRounds: 100, + AllowedKeys: allowedKeys, + } + p, err := goodkey.NewPolicy(cfg, nil) + if err != nil { + // Should not occur, only chances to return errors are if fermat rounds + // are <0 or when loading blocked/weak keys from disk (not used here) + return errors.New("unable to initialize key policy") + } + switch pk := pub.(type) { case *rsa.PublicKey: - // goodkey policy enforces: - // * Size of key: 2048 <= size <= 4096, size % 8 = 0 - // * Exponent E = 65537 (Default exponent for OpenSSL and Golang) - // * Small primes check for modulus - // * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17) - // * Key is easily factored with Fermat's factorization method - p, err := goodkey.NewKeyPolicy(&goodkey.Config{FermatRounds: 100}, nil) - if err != nil { - // Should not occur, only chances to return errors are if fermat rounds - // are <0 or when loading blocked/weak keys from disk (not used here) - return errors.New("unable to initialize key policy") - } // ctx is unused return p.GoodKey(context.Background(), pub) case *ecdsa.PublicKey: - // Unable to use goodkey policy because P-521 curve is not supported - return validateEcdsaKey(pk) + // ctx is unused + return p.GoodKey(context.Background(), pub) case ed25519.PublicKey: return validateEd25519Key(pk) } return errors.New("unsupported public key type") } -// Enforce that the ECDSA key curve is one of: -// * NIST P-256 (secp256r1, prime256v1) -// * 
NIST P-384 -// * NIST P-521. -// Other EC curves, like secp256k1, are not supported by Go. -func validateEcdsaKey(pub *ecdsa.PublicKey) error { - switch pub.Curve { - case elliptic.P224(): - return fmt.Errorf("unsupported ec curve, expected NIST P-256, P-384, or P-521") - case elliptic.P256(), elliptic.P384(), elliptic.P521(): - return nil - default: - return fmt.Errorf("unexpected ec curve") - } -} - // No validations currently, ED25519 supports only one key size. func validateEd25519Key(_ ed25519.PublicKey) error { return nil diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go index de56f8e216..3dad8c34f4 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go @@ -135,8 +135,9 @@ func (d *DeviceFlowTokenGetter) deviceFlow(p *oidc.Provider, clientID, redirectU // Some providers use a secret here, we don't need for sigstore oauth one so leave it off. data := url.Values{ "grant_type": []string{"urn:ietf:params:oauth:grant-type:device_code"}, + "client_id": []string{clientID}, "device_code": []string{parsed.DeviceCode}, - "scope": []string{"openid", "email"}, + "scope": []string{"openid email"}, "code_verifier": []string{pkce.Value}, } diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go index 28abcac508..c1b6ef6b7a 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go @@ -114,10 +114,24 @@ func OIDConnect(issuer, id, secret, redirectURL string, tg TokenGetter) (*OIDCID return tg.GetIDToken(provider, config) } +type stringAsBool bool + +func (sb *stringAsBool) UnmarshalJSON(b []byte) error { + switch string(b) { + case "true", `"true"`, "True", `"True"`: + *sb = true + case "false", `"false"`, "False", `"False"`: + *sb = false + default: + return errors.New("invalid value for boolean") + } + return nil +} + type claims struct { - Email string `json:"email"` - Verified bool `json:"email_verified"` - Subject string `json:"sub"` + Email string `json:"email"` + Verified stringAsBool `json:"email_verified"` + Subject string `json:"sub"` } // SubjectFromToken extracts the subject claim from an OIDC Identity Token @@ -129,6 +143,16 @@ func SubjectFromToken(tok *oidc.IDToken) (string, error) { return subjectFromClaims(claims) } +// SubjectFromUnverifiedToken extracts the subject claim from the raw bytes of +// an OIDC identity token. 
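The `stringAsBool` shim above exists because some OIDC providers emit `email_verified` as the string `"true"`/`"false"` rather than a JSON boolean. Here is a self-contained check that both shapes decode; the types are copied from the hunk, and `SubjectFromUnverifiedToken` (continued below) applies the same `claims` struct to raw claim bytes. The device-flow fix above is related housekeeping: `url.Values` encodes each slice element as its own `scope` parameter, so the scopes are now joined into one space-delimited value as OAuth expects.

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type stringAsBool bool

func (sb *stringAsBool) UnmarshalJSON(b []byte) error {
	switch string(b) {
	case "true", `"true"`, "True", `"True"`:
		*sb = true
	case "false", `"false"`, "False", `"False"`:
		*sb = false
	default:
		return errors.New("invalid value for boolean")
	}
	return nil
}

type claims struct {
	Email    string       `json:"email"`
	Verified stringAsBool `json:"email_verified"`
	Subject  string       `json:"sub"`
}

func main() {
	for _, raw := range []string{
		`{"email":"user@example.com","email_verified":true,"sub":"abc"}`,   // boolean form
		`{"email":"user@example.com","email_verified":"true","sub":"abc"}`, // string form some IdPs emit
	} {
		var c claims
		if err := json.Unmarshal([]byte(raw), &c); err != nil {
			panic(err)
		}
		fmt.Println(c.Email, bool(c.Verified)) // user@example.com true, in both cases
	}
}
```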
+func SubjectFromUnverifiedToken(tok []byte) (string, error) { + claims := claims{} + if err := json.Unmarshal(tok, &claims); err != nil { + return "", err + } + return subjectFromClaims(claims) +} + func subjectFromClaims(c claims) (string, error) { if c.Email != "" { if !c.Verified { diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index b96180b3b9..3fc7d84f16 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -11,7 +11,7 @@ [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.19-61CFDD.svg?style=flat-square) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) **Go configuration with fangs!** @@ -39,7 +39,7 @@ Many Go projects are built using Viper including: go get github.com/spf13/viper ``` -**Note:** Viper uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies. +**Note:** Viper uses [Go Modules](https://go.dev/wiki/Modules) to manage dependencies. ## What is Viper? @@ -420,7 +420,7 @@ flags, or environment variables. Viper supports multiple hosts. To use, pass a list of endpoints separated by `;`. For example `http://127.0.0.1:4001;http://127.0.0.1:4002`. -Viper uses [crypt](https://github.com/bketelsen/crypt) to retrieve +Viper uses [crypt](https://github.com/sagikazarmark/crypt) to retrieve configuration from the K/V store, which means that you can store your configuration values encrypted and have them automatically decrypted if you have the correct gpg keyring. Encryption is optional. @@ -432,7 +432,7 @@ independently of it. K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001. ```bash -$ go get github.com/bketelsen/crypt/bin/crypt +$ go get github.com/sagikazarmark/crypt/bin/crypt $ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json ``` diff --git a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md index c4e36c6860..b68993d412 100644 --- a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md +++ b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md @@ -15,10 +15,10 @@ cannot find package "github.com/hashicorp/hcl/tree/hcl1" in any of: ``` As the error message suggests, Go tries to look up dependencies in `GOPATH` mode (as it's commonly called) from the `GOPATH`. -Viper opted to use [Go Modules](https://github.com/golang/go/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch). +Viper opted to use [Go Modules](https://go.dev/wiki/Modules) to manage its dependencies. 
While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch). The solution is easy: switch to using Go Modules. -Please refer to the [wiki](https://github.com/golang/go/wiki/Modules) on how to do that. +Please refer to the [wiki](https://go.dev/wiki/Modules) on how to do that. **tl;dr* `export GO111MODULE=on` diff --git a/vendor/github.com/spf13/viper/flake.lock b/vendor/github.com/spf13/viper/flake.lock index 78da51090b..3840614fa2 100644 --- a/vendor/github.com/spf13/viper/flake.lock +++ b/vendor/github.com/spf13/viper/flake.lock @@ -8,11 +8,11 @@ "pre-commit-hooks": "pre-commit-hooks" }, "locked": { - "lastModified": 1687972261, - "narHash": "sha256-+mxvZfwMVoaZYETmuQWqTi/7T9UKoAE+WpdSQkOVJ2g=", + "lastModified": 1707817777, + "narHash": "sha256-vHyIs1OULQ3/91wD6xOiuayfI71JXALGA5KLnDKAcy0=", "owner": "cachix", "repo": "devenv", - "rev": "e85df562088573305e55906eaa964341f8cb0d9f", + "rev": "5a30b9e5ac7c6167e61b1f4193d5130bb9f8defa", "type": "github" }, "original": { @@ -42,11 +42,11 @@ "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1687762428, - "narHash": "sha256-DIf7mi45PKo+s8dOYF+UlXHzE0Wl/+k3tXUyAoAnoGE=", + "lastModified": 1706830856, + "narHash": "sha256-a0NYyp+h9hlb7ddVz4LUn1vT/PLwqfrWYcHMvFB1xYg=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "37dd7bb15791c86d55c5121740a1887ab55ee836", + "rev": "b253292d9c0a5ead9bc98c4e9a26c6312e27d69f", "type": "github" }, "original": { @@ -56,12 +56,15 @@ } }, "flake-utils": { + "inputs": { + "systems": "systems" + }, "locked": { - "lastModified": 1667395993, - "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", + "lastModified": 1685518550, + "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", "owner": "numtide", "repo": "flake-utils", - "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", + "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", "type": "github" }, "original": { @@ -151,11 +154,11 @@ "nixpkgs-lib": { "locked": { "dir": "lib", - "lastModified": 1685564631, - "narHash": "sha256-8ywr3AkblY4++3lIVxmrWZFzac7+f32ZEhH/A8pNscI=", + "lastModified": 1706550542, + "narHash": "sha256-UcsnCG6wx++23yeER4Hg18CXWbgNpqNXcHIo5/1Y+hc=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "4f53efe34b3a8877ac923b9350c874e3dcd5dc0a", + "rev": "97b17f32362e475016f942bbdfda4a4a72a8a652", "type": "github" }, "original": { @@ -184,27 +187,27 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1678872516, - "narHash": "sha256-/E1YwtMtFAu2KUQKV/1+KFuReYPANM2Rzehk84VxVoc=", + "lastModified": 1685801374, + "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "9b8e5abb18324c7fe9f07cb100c3cd4a29cda8b8", + "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-22.11", + "ref": "nixos-23.05", "repo": "nixpkgs", "type": "github" } }, "nixpkgs_2": { "locked": { - "lastModified": 1687886075, - "narHash": "sha256-PeayJDDDy+uw1Ats4moZnRdL1OFuZm1Tj+KiHlD67+o=", + "lastModified": 1707939175, + "narHash": "sha256-D1xan0lgxbmXDyzVqXTiSYHLmAMrMRdD+alKzEO/p3w=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "a565059a348422af5af9026b5174dc5c0dcefdae", + "rev": "f7e8132daca31b1e3859ac0fb49741754375ac3d", "type": "github" }, "original": { @@ -229,11 +232,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": 
{ - "lastModified": 1686050334, - "narHash": "sha256-R0mczWjDzBpIvM3XXhO908X5e2CQqjyh/gFbwZk/7/Q=", + "lastModified": 1704725188, + "narHash": "sha256-qq8NbkhRZF1vVYQFt1s8Mbgo8knj+83+QlL5LBnYGpI=", "owner": "cachix", "repo": "pre-commit-hooks.nix", - "rev": "6881eb2ae5d8a3516e34714e7a90d9d95914c4dc", + "rev": "ea96f0c05924341c551a797aaba8126334c505d2", "type": "github" }, "original": { @@ -248,6 +251,21 @@ "flake-parts": "flake-parts", "nixpkgs": "nixpkgs_2" } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } } }, "root": "root", diff --git a/vendor/github.com/spf13/viper/flake.nix b/vendor/github.com/spf13/viper/flake.nix index 9b26c3fcf5..0230668cff 100644 --- a/vendor/github.com/spf13/viper/flake.nix +++ b/vendor/github.com/spf13/viper/flake.nix @@ -20,6 +20,7 @@ default = { languages = { go.enable = true; + go.package = pkgs.go_1_22; }; pre-commit.hooks = { diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index 20eb4da177..da68d9944c 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -624,7 +624,7 @@ func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { // To retrieve a config file called myapp.json from /configs/myapp.json // you should set path to /configs and set config name (SetConfigName()) to // "myapp". -// Secure Remote Providers are implemented with github.com/bketelsen/crypt. +// Secure Remote Providers are implemented with github.com/sagikazarmark/crypt. func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) } @@ -1791,12 +1791,6 @@ func (v *Viper) writeConfig(filename string, force bool) error { return f.Sync() } -// Unmarshal a Reader into a map. -// Should probably be an unexported function. -func unmarshalReader(in io.Reader, c map[string]any) error { - return v.unmarshalReader(in, c) -} - func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error { buf := new(bytes.Buffer) buf.ReadFrom(in) diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go index 507c372dc3..ff2fcd71e4 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go @@ -7,15 +7,13 @@ import ( "os" "sync" - "github.com/go-jose/go-jose/v3" + "github.com/go-jose/go-jose/v4" "github.com/spiffe/go-spiffe/v2/internal/jwtutil" "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/zeebo/errs" ) -var ( - jwtbundleErr = errs.Class("jwtbundle") -) +var jwtbundleErr = errs.Class("jwtbundle") // Bundle is a collection of trusted JWT authorities for a trust domain. 
type Bundle struct { diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go index 56856fdf96..be176423c1 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go @@ -9,7 +9,7 @@ import ( "sync" "time" - "github.com/go-jose/go-jose/v3" + "github.com/go-jose/go-jose/v4" "github.com/spiffe/go-spiffe/v2/bundle/jwtbundle" "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" "github.com/spiffe/go-spiffe/v2/internal/jwtutil" @@ -23,9 +23,7 @@ const ( jwtSVIDUse = "jwt-svid" ) -var ( - spiffebundleErr = errs.Class("spiffebundle") -) +var spiffebundleErr = errs.Class("spiffebundle") type bundleDoc struct { jose.JSONWebKeySet diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/bundle.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/bundle.go index ffe28561c0..a70bb62fd7 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/bundle.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/bundle.go @@ -63,13 +63,14 @@ func Read(trustDomain spiffeid.TrustDomain, r io.Reader) (*Bundle, error) { // blocks. func Parse(trustDomain spiffeid.TrustDomain, b []byte) (*Bundle, error) { bundle := New(trustDomain) + if len(b) == 0 { + return bundle, nil + } + certs, err := pemutil.ParseCertificates(b) if err != nil { return nil, x509bundleErr.New("cannot parse certificate: %v", err) } - if len(certs) == 0 { - return nil, x509bundleErr.New("no certificates found") - } for _, cert := range certs { bundle.AddX509Authority(cert) } @@ -80,13 +81,14 @@ func Parse(trustDomain spiffeid.TrustDomain, b []byte) (*Bundle, error) { // with no intermediate padding if there are more than one certificate) func ParseRaw(trustDomain spiffeid.TrustDomain, b []byte) (*Bundle, error) { bundle := New(trustDomain) + if len(b) == 0 { + return bundle, nil + } + certs, err := x509.ParseCertificates(b) if err != nil { return nil, x509bundleErr.New("cannot parse certificate: %v", err) } - if len(certs) == 0 { - return nil, x509bundleErr.New("no certificates found") - } for _, cert := range certs { bundle.AddX509Authority(cert) } diff --git a/vendor/github.com/spiffe/go-spiffe/v2/internal/cryptoutil/keys.go b/vendor/github.com/spiffe/go-spiffe/v2/internal/cryptoutil/keys.go index 7b34480cd0..8e4e210226 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/internal/cryptoutil/keys.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/internal/cryptoutil/keys.go @@ -1,8 +1,10 @@ package cryptoutil import ( + "bytes" "crypto" "crypto/ecdsa" + "crypto/ed25519" "crypto/rsa" "fmt" ) @@ -15,6 +17,9 @@ func PublicKeyEqual(a, b crypto.PublicKey) (bool, error) { case *ecdsa.PublicKey: ecdsaPublicKey, ok := b.(*ecdsa.PublicKey) return ok && ECDSAPublicKeyEqual(a, ecdsaPublicKey), nil + case ed25519.PublicKey: + ed25519PublicKey, ok := b.(ed25519.PublicKey) + return ok && bytes.Equal(a, ed25519PublicKey), nil default: return false, fmt.Errorf("unsupported public key type %T", a) } diff --git a/vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/svid.go b/vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/svid.go index ddbfac34f7..d46f80035d 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/svid.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/svid.go @@ -1,17 +1,28 @@ package jwtsvid import ( - "fmt" "time" - "github.com/go-jose/go-jose/v3" - "github.com/go-jose/go-jose/v3/jwt" + 
"github.com/go-jose/go-jose/v4" + "github.com/go-jose/go-jose/v4/jwt" "github.com/spiffe/go-spiffe/v2/bundle/jwtbundle" "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/zeebo/errs" ) var ( + allowedSignatureAlgorithms = []jose.SignatureAlgorithm{ + jose.RS256, + jose.RS384, + jose.RS512, + jose.ES256, + jose.ES384, + jose.ES512, + jose.PS256, + jose.PS384, + jose.PS512, + } + jwtsvidErr = errs.Class("jwtsvid") ) @@ -90,16 +101,11 @@ func (svid *SVID) Marshal() string { func parse(token string, audience []string, getClaims tokenValidator) (*SVID, error) { // Parse serialized token - tok, err := jwt.ParseSigned(token) + tok, err := jwt.ParseSigned(token, allowedSignatureAlgorithms) if err != nil { return nil, jwtsvidErr.New("unable to parse JWT token") } - // Validates supported token signed algorithm - if err := validateTokenAlgorithm(tok); err != nil { - return nil, err - } - // Parse out the unverified claims. We need to look up the key by the trust // domain of the SPIFFE ID. var claims jwt.Claims @@ -127,8 +133,8 @@ func parse(token string, audience []string, getClaims tokenValidator) (*SVID, er // Validate the standard claims. if err := claims.Validate(jwt.Expected{ - Audience: audience, - Time: time.Now(), + AnyAudience: audience, + Time: time.Now(), }); err != nil { // Convert expected validation errors for pretty errors switch err { @@ -148,23 +154,3 @@ func parse(token string, audience []string, getClaims tokenValidator) (*SVID, er token: token, }, nil } - -// validateTokenAlgorithm json web token have only one header, and it is signed for a supported algorithm -func validateTokenAlgorithm(tok *jwt.JSONWebToken) error { - // Only one header is expected - if len(tok.Headers) != 1 { - return fmt.Errorf("expected a single token header; got %d", len(tok.Headers)) - } - - // Make sure it has an algorithm supported by JWT-SVID - alg := tok.Headers[0].Algorithm - switch jose.SignatureAlgorithm(alg) { - case jose.RS256, jose.RS384, jose.RS512, - jose.ES256, jose.ES384, jose.ES512, - jose.PS256, jose.PS384, jose.PS512: - default: - return jwtsvidErr.New("unsupported token signature algorithm %q", alg) - } - - return nil -} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/svid.go b/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/svid.go index eba43f568e..7302f3a573 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/svid.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/svid.go @@ -1,8 +1,10 @@ package x509svid import ( + "bytes" "crypto" "crypto/ecdsa" + "crypto/ed25519" "crypto/rsa" "crypto/x509" "os" @@ -229,6 +231,9 @@ func keyMatches(privateKey crypto.PrivateKey, publicKey crypto.PublicKey) (bool, case *ecdsa.PrivateKey: ecdsaPublicKey, ok := publicKey.(*ecdsa.PublicKey) return ok && ecdsaPublicKeyEqual(&privateKey.PublicKey, ecdsaPublicKey), nil + case ed25519.PrivateKey: + ed25519PublicKey, ok := publicKey.(ed25519.PublicKey) + return ok && bytes.Equal(privateKey.Public().(ed25519.PublicKey), ed25519PublicKey), nil default: return false, errs.New("unsupported private key type %T", privateKey) } diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go index b357468fad..4d5de5d59f 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go @@ -4,7 +4,6 @@ import ( "context" "crypto/x509" "errors" - "fmt" "time" "github.com/spiffe/go-spiffe/v2/bundle/jwtbundle" @@ -256,7 +255,7 @@ func 
(c *Client) ValidateJWTSVID(ctx context.Context, token, audience string) (* func (c *Client) newConn(ctx context.Context) (*grpc.ClientConn, error) { c.config.dialOptions = append(c.config.dialOptions, grpc.WithTransportCredentials(insecure.NewCredentials())) c.appendDialOptionsOS() - return grpc.DialContext(ctx, c.config.address, c.config.dialOptions...) + return grpc.DialContext(ctx, c.config.address, c.config.dialOptions...) //nolint:staticcheck // preserve backcompat with WithDialOptions option } func (c *Client) handleWatchError(ctx context.Context, err error, backoff *backoff) error { @@ -489,9 +488,6 @@ func parseX509Bundle(spiffeID string, bundle []byte) (*x509bundle.Bundle, error) if err != nil { return nil, err } - if len(certs) == 0 { - return nil, fmt.Errorf("empty X.509 bundle for trust domain %q", td) - } return x509bundle.FromX509Authorities(td, certs), nil } diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/LICENSE b/vendor/github.com/theupdateframework/go-tuf/v2/LICENSE new file mode 100644 index 0000000000..85541be2e1 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2024 The Update Framework Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/NOTICE b/vendor/github.com/theupdateframework/go-tuf/v2/NOTICE new file mode 100644 index 0000000000..0900521996 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/NOTICE @@ -0,0 +1,9 @@ +Copyright 2024 The Update Framework Authors + +Apache 2.0 License +Copyright 2024 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (/). 
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go
new file mode 100644
index 0000000000..cd8cc98a2c
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go
@@ -0,0 +1,89 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package config
+
+import (
+	"net/url"
+	"os"
+
+	"github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
+)
+
+type UpdaterConfig struct {
+	// TUF configuration
+	MaxRootRotations   int64
+	MaxDelegations     int
+	RootMaxLength      int64
+	TimestampMaxLength int64
+	SnapshotMaxLength  int64
+	TargetsMaxLength   int64
+	// Updater configuration
+	Fetcher               fetcher.Fetcher
+	LocalTrustedRoot      []byte
+	LocalMetadataDir      string
+	LocalTargetsDir       string
+	RemoteMetadataURL     string
+	RemoteTargetsURL      string
+	DisableLocalCache     bool
+	PrefixTargetsWithHash bool
+	// UnsafeLocalMode only uses the metadata as written on disk;
+	// if the metadata is incomplete, calling updater.Refresh will fail
+	UnsafeLocalMode bool
+}
+
+// New creates a new UpdaterConfig instance used by the Updater to
+// store configuration
+func New(remoteURL string, rootBytes []byte) (*UpdaterConfig, error) {
+	// Default URL for target files - <metadata-url>/targets
+	targetsURL, err := url.JoinPath(remoteURL, "targets")
+	if err != nil {
+		return nil, err
+	}
+
+	return &UpdaterConfig{
+		// TUF configuration
+		MaxRootRotations:   256,
+		MaxDelegations:     32,
+		RootMaxLength:      512000,  // bytes
+		TimestampMaxLength: 16384,   // bytes
+		SnapshotMaxLength:  2000000, // bytes
+		TargetsMaxLength:   5000000, // bytes
+		// Updater configuration
+		Fetcher:               &fetcher.DefaultFetcher{}, // use the default built-in download fetcher
+		LocalTrustedRoot:      rootBytes,                 // trusted root.json
+		RemoteMetadataURL:     remoteURL,                 // URL of where the TUF metadata is
+		RemoteTargetsURL:      targetsURL,                // URL of where the target files should be downloaded from
+		DisableLocalCache:     false,                     // enable local caching of trusted metadata
+		PrefixTargetsWithHash: true,                      // use hash-prefixed target files with consistent snapshots
+		UnsafeLocalMode:       false,
+	}, nil
+}
+
+func (cfg *UpdaterConfig) EnsurePathsExist() error {
+	if cfg.DisableLocalCache {
+		return nil
+	}
+
+	for _, path := range []string{cfg.LocalMetadataDir, cfg.LocalTargetsDir} {
+		if err := os.MkdirAll(path, os.ModePerm); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
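Since config.go is new in this bump, a short usage sketch may help: `config.New` fills in the defaults shown above, and callers override individual fields before handing the config to the updater. The repository URL and local paths here are hypothetical.

```go
package main

import (
	"fmt"
	"os"

	"github.com/theupdateframework/go-tuf/v2/metadata/config"
)

func main() {
	// Assumption: a trusted root.json was obtained out of band.
	root, err := os.ReadFile("root.json")
	if err != nil {
		panic(err)
	}
	cfg, err := config.New("https://tuf-repo.example.org/metadata", root)
	if err != nil {
		panic(err)
	}
	cfg.LocalMetadataDir = "/tmp/tuf/metadata"
	cfg.LocalTargetsDir = "/tmp/tuf/targets"
	if err := cfg.EnsurePathsExist(); err != nil {
		panic(err)
	}
	fmt.Println(cfg.RemoteTargetsURL) // https://tuf-repo.example.org/metadata/targets
}
```

diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go
new file mode 100644
index 0000000000..3bd8e5ea71
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go
@@ -0,0 +1,246 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in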
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "fmt" +) + +// Define TUF error types used inside the new modern implementation. +// The names chosen for TUF error types should start in 'Err' except where +// there is a good reason not to, and provide that reason in those cases. + +// Repository errors + +// ErrRepository - an error with a repository's state, such as a missing file. +// It covers all exceptions that come from the repository side when +// looking from the perspective of users of metadata API or client +type ErrRepository struct { + Msg string +} + +func (e *ErrRepository) Error() string { + return fmt.Sprintf("repository error: %s", e.Msg) +} + +func (e *ErrRepository) Is(target error) bool { + _, ok := target.(*ErrRepository) + return ok +} + +// ErrUnsignedMetadata - An error about metadata object with insufficient threshold of signatures +type ErrUnsignedMetadata struct { + Msg string +} + +func (e *ErrUnsignedMetadata) Error() string { + return fmt.Sprintf("unsigned metadata error: %s", e.Msg) +} + +// ErrUnsignedMetadata is a subset of ErrRepository +func (e *ErrUnsignedMetadata) Is(target error) bool { + if _, ok := target.(*ErrUnsignedMetadata); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrBadVersionNumber - An error for metadata that contains an invalid version number +type ErrBadVersionNumber struct { + Msg string +} + +func (e *ErrBadVersionNumber) Error() string { + return fmt.Sprintf("bad version number error: %s", e.Msg) +} + +// ErrBadVersionNumber is a subset of ErrRepository +func (e *ErrBadVersionNumber) Is(target error) bool { + if _, ok := target.(*ErrBadVersionNumber); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrEqualVersionNumber - An error for metadata containing a previously verified version number +type ErrEqualVersionNumber struct { + Msg string +} + +func (e *ErrEqualVersionNumber) Error() string { + return fmt.Sprintf("equal version number error: %s", e.Msg) +} + +// ErrEqualVersionNumber is a subset of both ErrRepository and ErrBadVersionNumber +func (e *ErrEqualVersionNumber) Is(target error) bool { + if _, ok := target.(*ErrEqualVersionNumber); ok { + return true + } + if _, ok := target.(*ErrBadVersionNumber); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrExpiredMetadata - Indicate that a TUF Metadata file has expired +type ErrExpiredMetadata struct { + Msg string +} + +func (e *ErrExpiredMetadata) Error() string { + return fmt.Sprintf("expired metadata error: %s", e.Msg) +} + +// ErrExpiredMetadata is a subset of ErrRepository +func (e *ErrExpiredMetadata) Is(target error) bool { + if _, ok := target.(*ErrExpiredMetadata); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// ErrLengthOrHashMismatch - An error while checking the length and hash values of an object +type ErrLengthOrHashMismatch struct { + Msg 
string +} + +func (e *ErrLengthOrHashMismatch) Error() string { + return fmt.Sprintf("length/hash verification error: %s", e.Msg) +} + +// ErrLengthOrHashMismatch is a subset of ErrRepository +func (e *ErrLengthOrHashMismatch) Is(target error) bool { + if _, ok := target.(*ErrLengthOrHashMismatch); ok { + return true + } + if _, ok := target.(*ErrRepository); ok { + return true + } + return false +} + +// Download errors + +// ErrDownload - An error occurred while attempting to download a file +type ErrDownload struct { + Msg string +} + +func (e *ErrDownload) Error() string { + return fmt.Sprintf("download error: %s", e.Msg) +} + +func (e *ErrDownload) Is(target error) bool { + _, ok := target.(*ErrDownload) + return ok +} + +// ErrDownloadLengthMismatch - Indicate that a mismatch of lengths was seen while downloading a file +type ErrDownloadLengthMismatch struct { + Msg string +} + +func (e *ErrDownloadLengthMismatch) Error() string { + return fmt.Sprintf("download length mismatch error: %s", e.Msg) +} + +// ErrDownloadLengthMismatch is a subset of ErrDownload +func (e *ErrDownloadLengthMismatch) Is(target error) bool { + if _, ok := target.(*ErrDownloadLengthMismatch); ok { + return true + } + if _, ok := target.(*ErrDownload); ok { + return true + } + return false +} + +// ErrDownloadHTTP - Returned by Fetcher interface implementations for HTTP errors +type ErrDownloadHTTP struct { + StatusCode int + URL string +} + +func (e *ErrDownloadHTTP) Error() string { + return fmt.Sprintf("failed to download %s, http status code: %d", e.URL, e.StatusCode) +} + +// ErrDownloadHTTP is a subset of ErrDownload +func (e *ErrDownloadHTTP) Is(target error) bool { + if _, ok := target.(*ErrDownloadHTTP); ok { + return true + } + if _, ok := target.(*ErrDownload); ok { + return true + } + return false +} + +// ValueError +type ErrValue struct { + Msg string +} + +func (e *ErrValue) Error() string { + return fmt.Sprintf("value error: %s", e.Msg) +} + +func (e *ErrValue) Is(err error) bool { + _, ok := err.(*ErrValue) + return ok +} + +// TypeError +type ErrType struct { + Msg string +} + +func (e *ErrType) Error() string { + return fmt.Sprintf("type error: %s", e.Msg) +} + +func (e *ErrType) Is(err error) bool { + _, ok := err.(*ErrType) + return ok +} + +// RuntimeError +type ErrRuntime struct { + Msg string +} + +func (e *ErrRuntime) Error() string { + return fmt.Sprintf("runtime error: %s", e.Msg) +} + +func (e *ErrRuntime) Is(err error) bool { + _, ok := err.(*ErrRuntime) + return ok +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go new file mode 100644 index 0000000000..dfe2bc14bf --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go @@ -0,0 +1,93 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
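The custom `Is` methods defined throughout errors.go above build a small error taxonomy: each concrete error also matches its broader category under `errors.Is`, so callers can test at whatever granularity they need. A quick demonstration against the vendored package:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/theupdateframework/go-tuf/v2/metadata"
)

func main() {
	var err error = &metadata.ErrExpiredMetadata{Msg: "root.json expired"}
	fmt.Println(errors.Is(err, &metadata.ErrExpiredMetadata{})) // true: exact type
	fmt.Println(errors.Is(err, &metadata.ErrRepository{}))      // true: broader repository category
	fmt.Println(errors.Is(err, &metadata.ErrDownload{}))        // false: unrelated category
}
```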
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package fetcher
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"time"
+
+	"github.com/theupdateframework/go-tuf/v2/metadata"
+)
+
+// Fetcher interface
+type Fetcher interface {
+	DownloadFile(urlPath string, maxLength int64, timeout time.Duration) ([]byte, error)
+}
+
+// DefaultFetcher implements Fetcher
+type DefaultFetcher struct {
+	httpUserAgent string
+}
+
+func (d *DefaultFetcher) SetHTTPUserAgent(httpUserAgent string) {
+	d.httpUserAgent = httpUserAgent
+}
+
+// DownloadFile downloads a file from urlPath, errors out if it failed,
+// its length is larger than maxLength or the timeout is reached.
+func (d *DefaultFetcher) DownloadFile(urlPath string, maxLength int64, timeout time.Duration) ([]byte, error) {
+	client := &http.Client{Timeout: timeout}
+	req, err := http.NewRequest("GET", urlPath, nil)
+	if err != nil {
+		return nil, err
+	}
+	// Use in case of multiple sessions.
+	if d.httpUserAgent != "" {
+		req.Header.Set("User-Agent", d.httpUserAgent)
+	}
+	// Execute the request.
+	res, err := client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	// Handle HTTP status codes.
+	if res.StatusCode != http.StatusOK {
+		return nil, &metadata.ErrDownloadHTTP{StatusCode: res.StatusCode, URL: urlPath}
+	}
+	var length int64
+	// Get content length from header (might not be accurate, -1 or not set).
+	if header := res.Header.Get("Content-Length"); header != "" {
+		length, err = strconv.ParseInt(header, 10, 0)
+		if err != nil {
+			return nil, err
+		}
+		// Error if the reported size is greater than what is expected.
+		if length > maxLength {
+			return nil, &metadata.ErrDownloadLengthMismatch{Msg: fmt.Sprintf("download failed for %s, length %d is larger than expected %d", urlPath, length, maxLength)}
+		}
+	}
+	// Although the size has been checked above, use a LimitReader in case
+	// the reported size is inaccurate, or size is -1 which indicates an
+	// unknown length. We read maxLength + 1 in order to check if the read data
+	// surpassed our set limit.
+	data, err := io.ReadAll(io.LimitReader(res.Body, maxLength+1))
+	if err != nil {
+		return nil, err
+	}
+	// Error if the reported size is greater than what is expected.
+	length = int64(len(data))
+	if length > maxLength {
+		return nil, &metadata.ErrDownloadLengthMismatch{Msg: fmt.Sprintf("download failed for %s, length %d is larger than expected %d", urlPath, length, maxLength)}
+	}
+
+	return data, nil
+}
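With fetcher.go complete above, a short usage sketch: the repository URL is hypothetical, and 512000 matches the `RootMaxLength` default from config.go.

```go
package main

import (
	"fmt"
	"time"

	"github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
)

func main() {
	f := &fetcher.DefaultFetcher{}
	f.SetHTTPUserAgent("example-updater/0.1") // optional; hypothetical agent string
	// Downloads fail with ErrDownloadHTTP on non-200 responses and with
	// ErrDownloadLengthMismatch if the body exceeds the 512000-byte cap.
	data, err := f.DownloadFile("https://tuf-repo.example.org/metadata/1.root.json", 512000, 15*time.Second)
	if err != nil {
		panic(err)
	}
	fmt.Printf("fetched %d bytes\n", len(data))
}
```

diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go
new file mode 100644
index 0000000000..57e38612be
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go
@@ -0,0 +1,133 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.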
+// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/hex" + "fmt" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +const ( + KeyTypeEd25519 = "ed25519" + KeyTypeECDSA_SHA2_P256_COMPAT = "ecdsa-sha2-nistp256" + KeyTypeECDSA_SHA2_P256 = "ecdsa" + KeyTypeRSASSA_PSS_SHA256 = "rsa" + KeySchemeEd25519 = "ed25519" + KeySchemeECDSA_SHA2_P256 = "ecdsa-sha2-nistp256" + KeySchemeECDSA_SHA2_P384 = "ecdsa-sha2-nistp384" + KeySchemeRSASSA_PSS_SHA256 = "rsassa-pss-sha256" +) + +// ToPublicKey generate crypto.PublicKey from metadata type Key +func (k *Key) ToPublicKey() (crypto.PublicKey, error) { + switch k.Type { + case KeyTypeRSASSA_PSS_SHA256: + publicKey, err := cryptoutils.UnmarshalPEMToPublicKey([]byte(k.Value.PublicKey)) + if err != nil { + return nil, err + } + rsaKey, ok := publicKey.(*rsa.PublicKey) + if !ok { + return nil, fmt.Errorf("invalid rsa public key") + } + // done for verification - ref. https://github.com/theupdateframework/go-tuf/pull/357 + if _, err := x509.MarshalPKIXPublicKey(rsaKey); err != nil { + return nil, err + } + return rsaKey, nil + case KeyTypeECDSA_SHA2_P256, KeyTypeECDSA_SHA2_P256_COMPAT: // handle "ecdsa" too as python-tuf/sslib keys are using it for keytype instead of https://theupdateframework.github.io/specification/latest/index.html#keytype-ecdsa-sha2-nistp256 + publicKey, err := cryptoutils.UnmarshalPEMToPublicKey([]byte(k.Value.PublicKey)) + if err != nil { + return nil, err + } + ecdsaKey, ok := publicKey.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("invalid ecdsa public key") + } + // done for verification - ref. https://github.com/theupdateframework/go-tuf/pull/357 + if _, err := x509.MarshalPKIXPublicKey(ecdsaKey); err != nil { + return nil, err + } + return ecdsaKey, nil + case KeyTypeEd25519: + publicKey, err := hex.DecodeString(k.Value.PublicKey) + if err != nil { + return nil, err + } + ed25519Key := ed25519.PublicKey(publicKey) + // done for verification - ref. 
https://github.com/theupdateframework/go-tuf/pull/357 + if _, err := x509.MarshalPKIXPublicKey(ed25519Key); err != nil { + return nil, err + } + return ed25519Key, nil + } + return nil, fmt.Errorf("unsupported public key type") +} + +// KeyFromPublicKey generate metadata type Key from crypto.PublicKey +func KeyFromPublicKey(k crypto.PublicKey) (*Key, error) { + key := &Key{} + switch k := k.(type) { + case *rsa.PublicKey: + key.Type = KeyTypeRSASSA_PSS_SHA256 + key.Scheme = KeySchemeRSASSA_PSS_SHA256 + pemKey, err := cryptoutils.MarshalPublicKeyToPEM(k) + if err != nil { + return nil, err + } + key.Value.PublicKey = string(pemKey) + case *ecdsa.PublicKey: + key.Type = KeyTypeECDSA_SHA2_P256 + key.Scheme = KeySchemeECDSA_SHA2_P256 + pemKey, err := cryptoutils.MarshalPublicKeyToPEM(k) + if err != nil { + return nil, err + } + key.Value.PublicKey = string(pemKey) + case ed25519.PublicKey: + key.Type = KeyTypeEd25519 + key.Scheme = KeySchemeEd25519 + key.Value.PublicKey = hex.EncodeToString(k) + default: + return nil, fmt.Errorf("unsupported public key type") + } + return key, nil +} + +// ID returns the keyID value for the given Key +func (k *Key) ID() string { + // the identifier is a hexdigest of the SHA-256 hash of the canonical form of the key + if k.id == "" { + data, err := cjson.EncodeCanonical(k) + if err != nil { + panic(fmt.Errorf("error creating key ID: %w", err)) + } + digest := sha256.Sum256(data) + k.id = hex.EncodeToString(digest[:]) + } + return k.id +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go new file mode 100644 index 0000000000..c7cab38389 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go @@ -0,0 +1,45 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +var log Logger = DiscardLogger{} + +// Logger partially implements the go-log/logr's interface: +// https://github.com/go-logr/logr/blob/master/logr.go +type Logger interface { + // Info logs a non-error message with key/value pairs + Info(msg string, kv ...any) + // Error logs an error with a given message and key/value pairs. 
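The `Key` helpers above round-trip cleanly with standard crypto types. A small sketch generating an ed25519 key, converting it with `KeyFromPublicKey`, and deriving its TUF key ID (a SHA-256 hexdigest of the canonical JSON form, per `Key.ID` above):

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"github.com/theupdateframework/go-tuf/v2/metadata"
)

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	key, err := metadata.KeyFromPublicKey(pub)
	if err != nil {
		panic(err)
	}
	fmt.Println(key.Type, key.Scheme) // ed25519 ed25519
	fmt.Println(key.ID())             // SHA-256 hexdigest of the canonical JSON form
}
```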
+	Error(err error, msg string, kv ...any)
+}
+
+type DiscardLogger struct{}
+
+func (d DiscardLogger) Info(msg string, kv ...any) {
+}
+
+func (d DiscardLogger) Error(err error, msg string, kv ...any) {
+}
+
+func SetLogger(logger Logger) {
+	log = logger
+}
+
+func GetLogger() Logger {
+	return log
+}
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go
new file mode 100644
index 0000000000..bd3f1e44bd
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go
@@ -0,0 +1,567 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package metadata
+
+import (
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+)
+
+// The following marshal/unmarshal methods override the default behavior for each TUF type
+// in order to support unrecognized fields
+
+func (signed RootType) MarshalJSON() ([]byte, error) {
+	dict := map[string]any{}
+	if len(signed.UnrecognizedFields) != 0 {
+		copyMapValues(signed.UnrecognizedFields, dict)
+	}
+	dict["_type"] = signed.Type
+	dict["spec_version"] = signed.SpecVersion
+	dict["consistent_snapshot"] = signed.ConsistentSnapshot
+	dict["version"] = signed.Version
+	dict["expires"] = signed.Expires
+	dict["keys"] = signed.Keys
+	dict["roles"] = signed.Roles
+	return json.Marshal(dict)
+}
+
+func (signed *RootType) UnmarshalJSON(data []byte) error {
+	type Alias RootType
+	var s Alias
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	*signed = RootType(s)
+
+	var dict map[string]any
+	if err := json.Unmarshal(data, &dict); err != nil {
+		return err
+	}
+	delete(dict, "_type")
+	delete(dict, "spec_version")
+	delete(dict, "consistent_snapshot")
+	delete(dict, "version")
+	delete(dict, "expires")
+	delete(dict, "keys")
+	delete(dict, "roles")
+	signed.UnrecognizedFields = dict
+	return nil
+}
+
+func (signed SnapshotType) MarshalJSON() ([]byte, error) {
+	dict := map[string]any{}
+	if len(signed.UnrecognizedFields) != 0 {
+		copyMapValues(signed.UnrecognizedFields, dict)
+	}
+	dict["_type"] = signed.Type
+	dict["spec_version"] = signed.SpecVersion
+	dict["version"] = signed.Version
+	dict["expires"] = signed.Expires
+	dict["meta"] = signed.Meta
+	return json.Marshal(dict)
+}
+
+func (signed *SnapshotType) UnmarshalJSON(data []byte) error {
+	type Alias SnapshotType
+	var s Alias
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	*signed = SnapshotType(s)
+
+	var dict map[string]any
+	if err := json.Unmarshal(data, &dict); err != nil {
+		return err
+	}
+	delete(dict, "_type")
+	delete(dict, "spec_version")
+	delete(dict, "version")
+	delete(dict, "expires")
+	delete(dict, "meta")
+	signed.UnrecognizedFields = dict
+	return nil
+}
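The pattern established above (copy unrecognized fields into the output dict, strip known fields on the way in) is what lets unknown metadata fields survive a serialization round trip. A small demonstration using the `Key` marshalers defined later in this file; the `x-custom` field is hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/theupdateframework/go-tuf/v2/metadata"
)

func main() {
	raw := []byte(`{"keytype":"ed25519","scheme":"ed25519","keyval":{"public":"aabbcc"},"x-custom":"kept"}`)
	var key metadata.Key
	if err := json.Unmarshal(raw, &key); err != nil {
		panic(err)
	}
	out, err := json.Marshal(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // the unrecognized "x-custom" field is preserved
}
```

+func (signed TimestampType) MarshalJSON() ([]byte, error) {
+	dict := map[string]any{}
+	if len(signed.UnrecognizedFields) != 0 {
+		copyMapValues(signed.UnrecognizedFields, dict)
+	}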
+ dict["_type"] = signed.Type + dict["spec_version"] = signed.SpecVersion + dict["version"] = signed.Version + dict["expires"] = signed.Expires + dict["meta"] = signed.Meta + return json.Marshal(dict) +} + +func (signed *TimestampType) UnmarshalJSON(data []byte) error { + type Alias TimestampType + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = TimestampType(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "_type") + delete(dict, "spec_version") + delete(dict, "version") + delete(dict, "expires") + delete(dict, "meta") + signed.UnrecognizedFields = dict + return nil +} + +func (signed TargetsType) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + dict["_type"] = signed.Type + dict["spec_version"] = signed.SpecVersion + dict["version"] = signed.Version + dict["expires"] = signed.Expires + dict["targets"] = signed.Targets + if signed.Delegations != nil { + dict["delegations"] = signed.Delegations + } + return json.Marshal(dict) +} + +func (signed *TargetsType) UnmarshalJSON(data []byte) error { + type Alias TargetsType + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = TargetsType(s) + + // populate the path field for each target + for name, targetFile := range signed.Targets { + targetFile.Path = name + } + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "_type") + delete(dict, "spec_version") + delete(dict, "version") + delete(dict, "expires") + delete(dict, "targets") + delete(dict, "delegations") + signed.UnrecognizedFields = dict + return nil +} + +func (signed MetaFiles) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + // length and hashes are optional + if signed.Length != 0 { + dict["length"] = signed.Length + } + if len(signed.Hashes) != 0 { + dict["hashes"] = signed.Hashes + } + dict["version"] = signed.Version + return json.Marshal(dict) +} + +func (signed *MetaFiles) UnmarshalJSON(data []byte) error { + type Alias MetaFiles + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = MetaFiles(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "length") + delete(dict, "hashes") + delete(dict, "version") + signed.UnrecognizedFields = dict + return nil +} + +func (signed TargetFiles) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(signed.UnrecognizedFields) != 0 { + copyMapValues(signed.UnrecognizedFields, dict) + } + dict["length"] = signed.Length + dict["hashes"] = signed.Hashes + if signed.Custom != nil { + dict["custom"] = signed.Custom + } + return json.Marshal(dict) +} + +func (signed *TargetFiles) UnmarshalJSON(data []byte) error { + type Alias TargetFiles + var s Alias + if err := json.Unmarshal(data, &s); err != nil { + return err + } + *signed = TargetFiles(s) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "length") + delete(dict, "hashes") + delete(dict, "custom") + signed.UnrecognizedFields = dict + return nil +} + +func (key Key) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(key.UnrecognizedFields) != 0 { + 
copyMapValues(key.UnrecognizedFields, dict) + } + dict["keytype"] = key.Type + dict["scheme"] = key.Scheme + dict["keyval"] = key.Value + return json.Marshal(dict) +} + +func (key *Key) UnmarshalJSON(data []byte) error { + type Alias Key + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + // nolint + *key = Key(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keytype") + delete(dict, "scheme") + delete(dict, "keyval") + key.UnrecognizedFields = dict + return nil +} + +func (meta Metadata[T]) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(meta.UnrecognizedFields) != 0 { + copyMapValues(meta.UnrecognizedFields, dict) + } + dict["signed"] = meta.Signed + dict["signatures"] = meta.Signatures + return json.Marshal(dict) +} + +func (meta *Metadata[T]) UnmarshalJSON(data []byte) error { + tmp := any(new(T)) + var m map[string]any + if err := json.Unmarshal(data, &m); err != nil { + return err + } + switch tmp.(type) { + case *RootType: + dict := struct { + Signed RootType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + case *SnapshotType: + dict := struct { + Signed SnapshotType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + case *TimestampType: + dict := struct { + Signed TimestampType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + case *TargetsType: + dict := struct { + Signed TargetsType `json:"signed"` + Signatures []Signature `json:"signatures"` + }{} + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + var i interface{} = dict.Signed + meta.Signed = i.(T) + meta.Signatures = dict.Signatures + default: + return &ErrValue{Msg: "unrecognized metadata type"} + } + delete(m, "signed") + delete(m, "signatures") + meta.UnrecognizedFields = m + return nil +} + +func (s Signature) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(s.UnrecognizedFields) != 0 { + copyMapValues(s.UnrecognizedFields, dict) + } + dict["keyid"] = s.KeyID + dict["sig"] = s.Signature + return json.Marshal(dict) +} + +func (s *Signature) UnmarshalJSON(data []byte) error { + type Alias Signature + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *s = Signature(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keyid") + delete(dict, "sig") + s.UnrecognizedFields = dict + return nil +} + +func (kv KeyVal) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(kv.UnrecognizedFields) != 0 { + copyMapValues(kv.UnrecognizedFields, dict) + } + dict["public"] = kv.PublicKey + return json.Marshal(dict) +} + +func (kv *KeyVal) UnmarshalJSON(data []byte) error { + type Alias KeyVal + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *kv = KeyVal(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "public") + kv.UnrecognizedFields 
= dict + return nil +} + +func (role Role) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(role.UnrecognizedFields) != 0 { + copyMapValues(role.UnrecognizedFields, dict) + } + dict["keyids"] = role.KeyIDs + dict["threshold"] = role.Threshold + return json.Marshal(dict) +} + +func (role *Role) UnmarshalJSON(data []byte) error { + type Alias Role + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *role = Role(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keyids") + delete(dict, "threshold") + role.UnrecognizedFields = dict + return nil +} + +func (d Delegations) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(d.UnrecognizedFields) != 0 { + copyMapValues(d.UnrecognizedFields, dict) + } + // only one is allowed + dict["keys"] = d.Keys + if d.Roles != nil { + dict["roles"] = d.Roles + } else if d.SuccinctRoles != nil { + dict["succinct_roles"] = d.SuccinctRoles + } + return json.Marshal(dict) +} + +func (d *Delegations) UnmarshalJSON(data []byte) error { + type Alias Delegations + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *d = Delegations(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keys") + delete(dict, "roles") + delete(dict, "succinct_roles") + d.UnrecognizedFields = dict + return nil +} + +func (role DelegatedRole) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(role.UnrecognizedFields) != 0 { + copyMapValues(role.UnrecognizedFields, dict) + } + dict["name"] = role.Name + dict["keyids"] = role.KeyIDs + dict["threshold"] = role.Threshold + dict["terminating"] = role.Terminating + // make sure we have only one of the two (per spec) + if role.Paths != nil && role.PathHashPrefixes != nil { + return nil, &ErrValue{Msg: "failed to marshal: not allowed to have both \"paths\" and \"path_hash_prefixes\" present"} + } + if role.Paths != nil { + dict["paths"] = role.Paths + } else if role.PathHashPrefixes != nil { + dict["path_hash_prefixes"] = role.PathHashPrefixes + } + return json.Marshal(dict) +} + +func (role *DelegatedRole) UnmarshalJSON(data []byte) error { + type Alias DelegatedRole + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *role = DelegatedRole(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "name") + delete(dict, "keyids") + delete(dict, "threshold") + delete(dict, "terminating") + delete(dict, "paths") + delete(dict, "path_hash_prefixes") + role.UnrecognizedFields = dict + return nil +} + +func (role SuccinctRoles) MarshalJSON() ([]byte, error) { + dict := map[string]any{} + if len(role.UnrecognizedFields) != 0 { + copyMapValues(role.UnrecognizedFields, dict) + } + dict["keyids"] = role.KeyIDs + dict["threshold"] = role.Threshold + dict["bit_length"] = role.BitLength + dict["name_prefix"] = role.NamePrefix + return json.Marshal(dict) +} + +func (role *SuccinctRoles) UnmarshalJSON(data []byte) error { + type Alias SuccinctRoles + var a Alias + if err := json.Unmarshal(data, &a); err != nil { + return err + } + *role = SuccinctRoles(a) + + var dict map[string]any + if err := json.Unmarshal(data, &dict); err != nil { + return err + } + delete(dict, "keyids") + delete(dict, "threshold") + delete(dict, "bit_length") + delete(dict, "name_prefix") + role.UnrecognizedFields = dict + return nil +} + +func (b 
*HexBytes) UnmarshalJSON(data []byte) error {
+	if len(data) < 2 || len(data)%2 != 0 || data[0] != '"' || data[len(data)-1] != '"' {
+		return errors.New("tuf: invalid JSON hex bytes")
+	}
+	res := make([]byte, hex.DecodedLen(len(data)-2))
+	_, err := hex.Decode(res, data[1:len(data)-1])
+	if err != nil {
+		return err
+	}
+	*b = res
+	return nil
+}
+
+func (b HexBytes) MarshalJSON() ([]byte, error) {
+	res := make([]byte, hex.EncodedLen(len(b))+2)
+	res[0] = '"'
+	res[len(res)-1] = '"'
+	hex.Encode(res[1:], b)
+	return res, nil
+}
+
+func (b HexBytes) String() string {
+	return hex.EncodeToString(b)
+}
+
+// copyMapValues copies the values of the src map to dst
+func copyMapValues(src, dst map[string]any) {
+	for k, v := range src {
+		dst[k] = v
+	}
+}
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go
new file mode 100644
index 0000000000..3e0a9e1ab0
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go
@@ -0,0 +1,922 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package metadata
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/hmac"
+	"crypto/sha256"
+	"crypto/sha512"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"hash"
+	"io"
+	"math"
+	"os"
+	"path/filepath"
+	"slices"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/secure-systems-lab/go-securesystemslib/cjson"
+	"github.com/sigstore/sigstore/pkg/signature"
+)
+
+// Root return new metadata instance of type Root
+func Root(expires ...time.Time) *Metadata[RootType] {
+	// expire now if there's nothing set
+	if len(expires) == 0 {
+		expires = []time.Time{time.Now().UTC()}
+	}
+	// populate Roles
+	roles := map[string]*Role{}
+	for _, r := range []string{ROOT, SNAPSHOT, TARGETS, TIMESTAMP} {
+		roles[r] = &Role{
+			KeyIDs:    []string{},
+			Threshold: 1,
+		}
+	}
+	log.Info("Created metadata", "type", ROOT)
+	return &Metadata[RootType]{
+		Signed: RootType{
+			Type:               ROOT,
+			SpecVersion:        SPECIFICATION_VERSION,
+			Version:            1,
+			Expires:            expires[0],
+			Keys:               map[string]*Key{},
+			Roles:              roles,
+			ConsistentSnapshot: true,
+		},
+		Signatures: []Signature{},
+	}
+}
+
+// Snapshot return new metadata instance of type Snapshot
+func Snapshot(expires ...time.Time) *Metadata[SnapshotType] {
+	// expire now if there's nothing set
+	if len(expires) == 0 {
+		expires = []time.Time{time.Now().UTC()}
+	}
+	log.Info("Created metadata", "type", SNAPSHOT)
+	return &Metadata[SnapshotType]{
+		Signed: SnapshotType{
+			Type:        SNAPSHOT,
+			SpecVersion: SPECIFICATION_VERSION,
+			Version:     1,
+			Expires:     expires[0],
+			Meta: map[string]*MetaFiles{
+				"targets.json": {
+					Version: 1,
+				},
+			},
+		},
+		Signatures: []Signature{},
+	}
+}
+
+// Timestamp return new metadata instance of type Timestamp
+func Timestamp(expires ...time.Time) *Metadata[TimestampType] {
+	// expire now if there's nothing set
+	if
len(expires) == 0 { + expires = []time.Time{time.Now().UTC()} + } + log.Info("Created metadata", "type", TIMESTAMP) + return &Metadata[TimestampType]{ + Signed: TimestampType{ + Type: TIMESTAMP, + SpecVersion: SPECIFICATION_VERSION, + Version: 1, + Expires: expires[0], + Meta: map[string]*MetaFiles{ + "snapshot.json": { + Version: 1, + }, + }, + }, + Signatures: []Signature{}, + } +} + +// Targets return new metadata instance of type Targets +func Targets(expires ...time.Time) *Metadata[TargetsType] { + // expire now if there's nothing set + if len(expires) == 0 { + expires = []time.Time{time.Now().UTC()} + } + log.Info("Created metadata", "type", TARGETS) + return &Metadata[TargetsType]{ + Signed: TargetsType{ + Type: TARGETS, + SpecVersion: SPECIFICATION_VERSION, + Version: 1, + Expires: expires[0], + Targets: map[string]*TargetFiles{}, + }, + Signatures: []Signature{}, + } +} + +// TargetFile return new metadata instance of type TargetFiles +func TargetFile() *TargetFiles { + return &TargetFiles{ + Length: 0, + Hashes: Hashes{}, + } +} + +// MetaFile return new metadata instance of type MetaFile +func MetaFile(version int64) *MetaFiles { + if version < 1 { + // attempting to set incorrect version + log.Info("Attempting to set incorrect version for MetaFile", "version", version) + version = 1 + } + return &MetaFiles{ + Length: 0, + Hashes: Hashes{}, + Version: version, + } +} + +// FromFile load metadata from file +func (meta *Metadata[T]) FromFile(name string) (*Metadata[T], error) { + in, err := os.Open(name) + if err != nil { + return nil, err + } + defer in.Close() + data, err := io.ReadAll(in) + if err != nil { + return nil, err + } + m, err := fromBytes[T](data) + if err != nil { + return nil, err + } + *meta = *m + log.Info("Loaded metadata from file", "name", name) + return meta, nil +} + +// FromBytes deserialize metadata from bytes +func (meta *Metadata[T]) FromBytes(data []byte) (*Metadata[T], error) { + m, err := fromBytes[T](data) + if err != nil { + return nil, err + } + *meta = *m + log.Info("Loaded metadata from bytes") + return meta, nil +} + +// ToBytes serialize metadata to bytes +func (meta *Metadata[T]) ToBytes(pretty bool) ([]byte, error) { + log.Info("Writing metadata to bytes") + if pretty { + return json.MarshalIndent(*meta, "", "\t") + } + return json.Marshal(*meta) +} + +// ToFile save metadata to file +func (meta *Metadata[T]) ToFile(name string, pretty bool) error { + log.Info("Writing metadata to file", "name", name) + data, err := meta.ToBytes(pretty) + if err != nil { + return err + } + return os.WriteFile(name, data, 0644) +} + +// Sign create signature over Signed and assign it to Signatures +func (meta *Metadata[T]) Sign(signer signature.Signer) (*Signature, error) { + // encode the Signed part to canonical JSON so signatures are consistent + payload, err := cjson.EncodeCanonical(meta.Signed) + if err != nil { + return nil, err + } + // sign the Signed part + sb, err := signer.SignMessage(bytes.NewReader(payload)) + if err != nil { + return nil, &ErrUnsignedMetadata{Msg: "problem signing metadata"} + } + // get the signer's PublicKey + publ, err := signer.PublicKey() + if err != nil { + return nil, err + } + // convert to TUF Key type to get keyID + key, err := KeyFromPublicKey(publ) + if err != nil { + return nil, err + } + // build signature + sig := &Signature{ + KeyID: key.ID(), + Signature: sb, + } + // update the Signatures part + meta.Signatures = append(meta.Signatures, *sig) + // return the new signature + log.Info("Signed metadata with key", 
"ID", key.ID()) + return sig, nil +} + +// VerifyDelegate verifies that delegatedMetadata is signed with the required +// threshold of keys for the delegated role delegatedRole +func (meta *Metadata[T]) VerifyDelegate(delegatedRole string, delegatedMetadata any) error { + i := any(meta) + signingKeys := map[string]bool{} + var keys map[string]*Key + var roleKeyIDs []string + var roleThreshold int + + log.Info("Verifying", "role", delegatedRole) + + // collect keys, keyIDs and threshold based on delegator type + switch i := i.(type) { + // Root delegator + case *Metadata[RootType]: + keys = i.Signed.Keys + if role, ok := (*i).Signed.Roles[delegatedRole]; ok { + roleKeyIDs = role.KeyIDs + roleThreshold = role.Threshold + } else { + // the delegated role was not found, no need to proceed + return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)} + } + // Targets delegator + case *Metadata[TargetsType]: + if i.Signed.Delegations == nil { + return &ErrValue{Msg: "no delegations found"} + } + keys = i.Signed.Delegations.Keys + if i.Signed.Delegations.Roles != nil { + found := false + for _, v := range i.Signed.Delegations.Roles { + if v.Name == delegatedRole { + found = true + roleKeyIDs = v.KeyIDs + roleThreshold = v.Threshold + break + } + } + // the delegated role was not found, no need to proceed + if !found { + return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)} + } + } else if i.Signed.Delegations.SuccinctRoles != nil { + roleKeyIDs = i.Signed.Delegations.SuccinctRoles.KeyIDs + roleThreshold = i.Signed.Delegations.SuccinctRoles.Threshold + } + default: + return &ErrType{Msg: "call is valid only on delegator metadata (should be either root or targets)"} + } + // if there are no keyIDs for that role it means there's no delegation found + if len(roleKeyIDs) == 0 { + return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)} + } + // loop through each role keyID + for _, keyID := range roleKeyIDs { + key, ok := keys[keyID] + if !ok { + return &ErrValue{Msg: fmt.Sprintf("key with ID %s not found in %s keyids", keyID, delegatedRole)} + } + sign := Signature{} + var payload []byte + // convert to a PublicKey type + publicKey, err := key.ToPublicKey() + if err != nil { + return err + } + // use corresponding hash function for key type + hash := crypto.Hash(0) + if key.Type != KeyTypeEd25519 { + switch key.Scheme { + case KeySchemeECDSA_SHA2_P256: + hash = crypto.SHA256 + case KeySchemeECDSA_SHA2_P384: + hash = crypto.SHA384 + default: + hash = crypto.SHA256 + } + } + // load a verifier based on that key + verifier, err := signature.LoadVerifier(publicKey, hash) + if err != nil { + return err + } + // collect the signature for that key and build the payload we'll verify + // based on the Signed part of the delegated metadata + switch d := delegatedMetadata.(type) { + case *Metadata[RootType]: + for _, signature := range d.Signatures { + if signature.KeyID == keyID { + sign = signature + } + } + payload, err = cjson.EncodeCanonical(d.Signed) + if err != nil { + return err + } + case *Metadata[SnapshotType]: + for _, signature := range d.Signatures { + if signature.KeyID == keyID { + sign = signature + } + } + payload, err = cjson.EncodeCanonical(d.Signed) + if err != nil { + return err + } + case *Metadata[TimestampType]: + for _, signature := range d.Signatures { + if signature.KeyID == keyID { + sign = signature + } + } + payload, err = cjson.EncodeCanonical(d.Signed) + if err != nil { + return err + } + case 
*Metadata[TargetsType]:
+			for _, signature := range d.Signatures {
+				if signature.KeyID == keyID {
+					sign = signature
+				}
+			}
+			payload, err = cjson.EncodeCanonical(d.Signed)
+			if err != nil {
+				return err
+			}
+		default:
+			return &ErrType{Msg: "unknown delegated metadata type"}
+		}
+		// verify if the signature for that payload corresponds to the given key
+		if err := verifier.VerifySignature(bytes.NewReader(sign.Signature), bytes.NewReader(payload)); err != nil {
+			// failed to verify the metadata with that key ID
+			log.Info("Failed to verify", "role", delegatedRole, "ID", keyID)
+		} else {
+			// save the verified keyID only if verification passed
+			signingKeys[keyID] = true
+			log.Info("Verified with key", "role", delegatedRole, "ID", keyID)
+		}
+	}
+	// check if the number of valid signatures is enough
+	if len(signingKeys) < roleThreshold {
+		log.Info("Verifying failed, not enough signatures", "role", delegatedRole, "got", len(signingKeys), "want", roleThreshold)
+		return &ErrUnsignedMetadata{Msg: fmt.Sprintf("Verifying %s failed, not enough signatures, got %d, want %d", delegatedRole, len(signingKeys), roleThreshold)}
+	}
+	log.Info("Verified successfully", "role", delegatedRole)
+	return nil
+}
+
+// IsExpired returns true if metadata is expired.
+// It checks if referenceTime is after Signed.Expires
+func (signed *RootType) IsExpired(referenceTime time.Time) bool {
+	return referenceTime.After(signed.Expires)
+}
+
+// IsExpired returns true if metadata is expired.
+// It checks if referenceTime is after Signed.Expires
+func (signed *SnapshotType) IsExpired(referenceTime time.Time) bool {
+	return referenceTime.After(signed.Expires)
+}
+
+// IsExpired returns true if metadata is expired.
+// It checks if referenceTime is after Signed.Expires
+func (signed *TimestampType) IsExpired(referenceTime time.Time) bool {
+	return referenceTime.After(signed.Expires)
+}
+
+// IsExpired returns true if metadata is expired.
+// It checks if referenceTime is after Signed.Expires +func (signed *TargetsType) IsExpired(referenceTime time.Time) bool { + return referenceTime.After(signed.Expires) +} + +// VerifyLengthHashes checks whether the MetaFiles data matches its corresponding +// length and hashes +func (f *MetaFiles) VerifyLengthHashes(data []byte) error { + // hashes and length are optional for MetaFiles + if len(f.Hashes) > 0 { + err := verifyHashes(data, f.Hashes) + if err != nil { + return err + } + } + if f.Length != 0 { + err := verifyLength(data, f.Length) + if err != nil { + return err + } + } + return nil +} + +// VerifyLengthHashes checks whether the TargetFiles data matches its corresponding +// length and hashes +func (f *TargetFiles) VerifyLengthHashes(data []byte) error { + err := verifyHashes(data, f.Hashes) + if err != nil { + return err + } + err = verifyLength(data, f.Length) + if err != nil { + return err + } + return nil +} + +// Equal checks whether the source target file matches another +func (source *TargetFiles) Equal(expected TargetFiles) bool { + if source.Length == expected.Length && source.Hashes.Equal(expected.Hashes) { + return true + } + return false +} + +// FromFile generate TargetFiles from file +func (t *TargetFiles) FromFile(localPath string, hashes ...string) (*TargetFiles, error) { + log.Info("Generating target file from file", "path", localPath) + // open file + in, err := os.Open(localPath) + if err != nil { + return nil, err + } + defer in.Close() + // read file + data, err := io.ReadAll(in) + if err != nil { + return nil, err + } + return t.FromBytes(localPath, data, hashes...) +} + +// FromBytes generate TargetFiles from bytes +func (t *TargetFiles) FromBytes(localPath string, data []byte, hashes ...string) (*TargetFiles, error) { + log.Info("Generating target file from bytes", "path", localPath) + var hasher hash.Hash + targetFile := &TargetFiles{ + Hashes: map[string]HexBytes{}, + } + // use default hash algorithm if not set + if len(hashes) == 0 { + hashes = []string{"sha256"} + } + // calculate length + len, err := io.Copy(io.Discard, bytes.NewReader(data)) + if err != nil { + return nil, err + } + targetFile.Length = len + for _, v := range hashes { + switch v { + case "sha256": + hasher = sha256.New() + case "sha512": + hasher = sha512.New() + default: + return nil, &ErrValue{Msg: fmt.Sprintf("failed generating TargetFile - unsupported hashing algorithm - %s", v)} + } + _, err := hasher.Write(data) + if err != nil { + return nil, err + } + targetFile.Hashes[v] = hasher.Sum(nil) + } + targetFile.Path = localPath + return targetFile, nil +} + +// ClearSignatures clears Signatures +func (meta *Metadata[T]) ClearSignatures() { + log.Info("Cleared signatures") + meta.Signatures = []Signature{} +} + +// IsDelegatedPath determines whether the given "targetFilepath" is in one of +// the paths that "DelegatedRole" is trusted to provide +func (role *DelegatedRole) IsDelegatedPath(targetFilepath string) (bool, error) { + if len(role.Paths) > 0 { + // standard delegations + for _, pathPattern := range role.Paths { + // A delegated role path may be an explicit path or glob + // pattern (Unix shell-style wildcards). + if isTargetInPathPattern(targetFilepath, pathPattern) { + return true, nil + } + } + } else if len(role.PathHashPrefixes) > 0 { + // hash bin delegations - calculate the hash of the filepath to determine in which bin to find the target. 
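+		// illustrative example (not from the spec text): with
+		// path_hash_prefixes = ["00", "01"], a target whose SHA-256
+		// hex digest starts with "01a4..." is delegated to this role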
+		targetFilepathHash := sha256.Sum256([]byte(targetFilepath))
+		for _, pathHashPrefix := range role.PathHashPrefixes {
+			if strings.HasPrefix(hex.EncodeToString(targetFilepathHash[:]), pathHashPrefix) {
+				return true, nil
+			}
+		}
+	}
+	return false, nil
+}
+
+// Determine whether "targetpath" matches the "pathpattern".
+func isTargetInPathPattern(targetpath string, pathpattern string) bool {
+	// We need to make sure that targetpath and pathpattern are pointing to
+	// the same directory as fnmatch doesn't treat "/" as a special symbol.
+	targetParts := strings.Split(targetpath, "/")
+	patternParts := strings.Split(pathpattern, "/")
+	if len(targetParts) != len(patternParts) {
+		return false
+	}
+
+	// Every part in the pathpattern could include a glob pattern, that's why
+	// each of the target and pathpattern parts should match.
+	for i := 0; i < len(targetParts); i++ {
+		if ok, _ := filepath.Match(patternParts[i], targetParts[i]); !ok {
+			return false
+		}
+	}
+
+	return true
+}
+
+// GetRolesForTarget return the names and terminating status of all
+// delegated roles that are responsible for targetFilepath
+// Note the result should be an ordered list, ref. https://github.com/theupdateframework/go-tuf/security/advisories/GHSA-4f8r-qqr9-fq8j
+func (role *Delegations) GetRolesForTarget(targetFilepath string) []RoleResult {
+	var res []RoleResult
+	// Standard delegations
+	if role.Roles != nil {
+		for _, r := range role.Roles {
+			ok, err := r.IsDelegatedPath(targetFilepath)
+			if err == nil && ok {
+				res = append(res, RoleResult{Name: r.Name, Terminating: r.Terminating})
+			}
+		}
+	} else if role.SuccinctRoles != nil {
+		// SuccinctRoles delegations
+		res = role.SuccinctRoles.GetRolesForTarget(targetFilepath)
+	}
+	// We preserve the same order as the actual roles list
+	return res
+}
+
+// GetRolesForTarget calculate the name of the delegated role responsible for "targetFilepath".
+// The target at path "targetFilepath" is assigned to a bin by casting
+// the left-most "BitLength" of bits of the file path hash digest to
+// int, using it as bin index between 0 and "2**BitLength-1".
+func (role *SuccinctRoles) GetRolesForTarget(targetFilepath string) []RoleResult {
+	// calculate the suffixLen value based on the total number of bins in
+	// hex. If bit_length = 10 then numberOfBins = 1024 or bin names will
+	// have a suffix between "000" and "3ff" in hex and suffixLen will be 3
+	// meaning the third bin will have a suffix of "003"
+	numberOfBins := math.Pow(2, float64(role.BitLength))
+	// suffixLen is calculated based on "numberOfBins - 1" as the name
+	// of the last bin contains the number "numberOfBins -1" as a suffix.
+	suffixLen := len(strconv.FormatInt(int64(numberOfBins-1), 16))
+
+	targetFilepathHash := sha256.Sum256([]byte(targetFilepath))
+	// we can't ever need more than 4 bytes (32 bits)
+	hashBytes := targetFilepathHash[:4]
+
+	// right shift hash bytes, so that we only have the leftmost
+	// bit_length bits that we care about
+	shiftValue := 32 - role.BitLength
+	binNumber := binary.BigEndian.Uint32(hashBytes) >> shiftValue
+	// add zero padding if necessary and cast to hex the suffix
+	suffix := fmt.Sprintf("%0*x", suffixLen, binNumber)
+	// we consider all succinct_roles as terminating.
+	// for more information, read TAP 15.
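+	// worked example (illustrative): BitLength=10 keeps the top 10 bits,
+	// so a digest starting 0xDEADBEEF gives binNumber 0xDEADBEEF>>22 = 890
+	// (hex "37a"), and the resulting role name is "<NamePrefix>-37a"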
+	return []RoleResult{{Name: fmt.Sprintf("%s-%s", role.NamePrefix, suffix), Terminating: true}}
+}
+
+// GetRoles returns the names of all different delegated roles
+func (role *SuccinctRoles) GetRoles() []string {
+	res := []string{}
+	suffixLen, numberOfBins := role.GetSuffixLen()
+
+	for binNumber := 0; binNumber < numberOfBins; binNumber++ {
+		suffix := fmt.Sprintf("%0*x", suffixLen, binNumber)
+		res = append(res, fmt.Sprintf("%s-%s", role.NamePrefix, suffix))
+	}
+	return res
+}
+
+func (role *SuccinctRoles) GetSuffixLen() (int, int) {
+	numberOfBins := int(math.Pow(2, float64(role.BitLength)))
+	return len(strconv.FormatInt(int64(numberOfBins-1), 16)), numberOfBins
+}
+
+// IsDelegatedRole returns whether the given roleName is in one of
+// the delegated roles that "SuccinctRoles" represents
+func (role *SuccinctRoles) IsDelegatedRole(roleName string) bool {
+	suffixLen, numberOfBins := role.GetSuffixLen()
+
+	expectedPrefix := fmt.Sprintf("%s-", role.NamePrefix)
+
+	// check if the roleName prefix is what we would expect
+	if !strings.HasPrefix(roleName, expectedPrefix) {
+		return false
+	}
+
+	// check if the roleName suffix length is what we would expect
+	suffix := roleName[len(expectedPrefix):]
+	if len(suffix) != suffixLen {
+		return false
+	}
+
+	// make sure suffix is hex value and get bin number
+	value, err := strconv.ParseInt(suffix, 16, 64)
+	if err != nil {
+		return false
+	}
+
+	// check if the bin we calculated is indeed within the range of what we support
+	return (value >= 0) && (value < int64(numberOfBins))
+}
+
+// AddKey adds new signing key for role "role"
+// keyID: Identifier of the key to be added for "role".
+// key: Signing key to be added for "role".
+// role: Name of the role, for which "key" is added.
+func (signed *RootType) AddKey(key *Key, role string) error {
+	// verify role is present
+	if _, ok := signed.Roles[role]; !ok {
+		return &ErrValue{Msg: fmt.Sprintf("role %s doesn't exist", role)}
+	}
+	// add keyID to role
+	if !slices.Contains(signed.Roles[role].KeyIDs, key.ID()) {
+		signed.Roles[role].KeyIDs = append(signed.Roles[role].KeyIDs, key.ID())
+	}
+	// update Keys
+	signed.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
+	return nil
+}
+
+// RevokeKey revokes key from "role" and updates the Keys store.
+// keyID: Identifier of the key to be removed for "role".
+// role: Name of the role, for which a signing key is removed.
+func (signed *RootType) RevokeKey(keyID, role string) error {
+	// verify role is present
+	if _, ok := signed.Roles[role]; !ok {
+		return &ErrValue{Msg: fmt.Sprintf("role %s doesn't exist", role)}
+	}
+	// verify keyID is present for given role
+	if !slices.Contains(signed.Roles[role].KeyIDs, keyID) {
+		return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by %s", keyID, role)}
+	}
+	// remove keyID from role
+	filteredKeyIDs := []string{}
+	for _, k := range signed.Roles[role].KeyIDs {
+		if k != keyID {
+			filteredKeyIDs = append(filteredKeyIDs, k)
+		}
+	}
+	// overwrite the old keyID slice
+	signed.Roles[role].KeyIDs = filteredKeyIDs
+	// check if keyID is used by other roles too
+	for _, r := range signed.Roles {
+		if slices.Contains(r.KeyIDs, keyID) {
+			return nil
+		}
+	}
+	// delete the keyID from Keys if it's not used anywhere else
+	delete(signed.Keys, keyID)
+	return nil
+}
+
+// AddKey adds new signing key for delegated role "role"
+// key: Signing key to be added for "role".
+// role: Name of the role, for which "key" is added.
+// If SuccinctRoles is used then the "role" argument can be ignored.
+func (signed *TargetsType) AddKey(key *Key, role string) error {
+	// check if Delegations are even present
+	if signed.Delegations == nil {
+		return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
+	}
+	// standard delegated roles
+	if signed.Delegations.Roles != nil {
+		// loop through all delegated roles
+		isDelegatedRole := false
+		for i, d := range signed.Delegations.Roles {
+			// if role is found
+			if d.Name == role {
+				isDelegatedRole = true
+				// add key if keyID is not already part of keyIDs for that role
+				if !slices.Contains(d.KeyIDs, key.ID()) {
+					signed.Delegations.Roles[i].KeyIDs = append(signed.Delegations.Roles[i].KeyIDs, key.ID())
+					signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
+					return nil
+				}
+				log.Info("Delegated role already has keyID", "role", role, "ID", key.ID())
+			}
+		}
+		if !isDelegatedRole {
+			return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
+		}
+	} else if signed.Delegations.SuccinctRoles != nil {
+		// add key if keyID is not already part of keyIDs for the SuccinctRoles role
+		if !slices.Contains(signed.Delegations.SuccinctRoles.KeyIDs, key.ID()) {
+			signed.Delegations.SuccinctRoles.KeyIDs = append(signed.Delegations.SuccinctRoles.KeyIDs, key.ID())
+			signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
+			return nil
+		}
+		log.Info("SuccinctRoles role already has keyID", "ID", key.ID())
+
+	}
+	signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
+	return nil
+}
+
+// RevokeKey revokes key from delegated role "role" and updates the delegations key store
+// keyID: Identifier of the key to be removed for "role".
+// role: Name of the role, for which a signing key is removed.
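+// Illustrative usage (a sketch; assumes a delegated role named "bins"
+// already exists in signed.Delegations, and oldKeyID is a hypothetical key ID):
+//
+//	if err := signed.RevokeKey(oldKeyID, "bins"); err != nil {
+//		// the key was not used by "bins", or the role doesn't exist
+//	}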
+func (signed *TargetsType) RevokeKey(keyID string, role string) error { + // check if Delegations are even present + if signed.Delegations == nil { + return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)} + } + // standard delegated roles + if signed.Delegations.Roles != nil { + // loop through all delegated roles + for i, d := range signed.Delegations.Roles { + // if role is found + if d.Name == role { + // check if keyID is present in keyIDs for that role + if !slices.Contains(d.KeyIDs, keyID) { + return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by %s", keyID, role)} + } + // remove keyID from role + filteredKeyIDs := []string{} + for _, k := range signed.Delegations.Roles[i].KeyIDs { + if k != keyID { + filteredKeyIDs = append(filteredKeyIDs, k) + } + } + // overwrite the old keyID slice for that role + signed.Delegations.Roles[i].KeyIDs = filteredKeyIDs + // check if keyID is used by other roles too + for _, r := range signed.Delegations.Roles { + if slices.Contains(r.KeyIDs, keyID) { + return nil + } + } + // delete the keyID from Keys if it's not used anywhere else + delete(signed.Delegations.Keys, keyID) + return nil + } + } + // we haven't found the delegated role + return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)} + } else if signed.Delegations.SuccinctRoles != nil { + // check if keyID is used by SuccinctRoles role + if !slices.Contains(signed.Delegations.SuccinctRoles.KeyIDs, keyID) { + return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by SuccinctRoles", keyID)} + } + // remove keyID from the SuccinctRoles role + filteredKeyIDs := []string{} + for _, k := range signed.Delegations.SuccinctRoles.KeyIDs { + if k != keyID { + filteredKeyIDs = append(filteredKeyIDs, k) + } + } + // overwrite the old keyID slice for SuccinctRoles role + signed.Delegations.SuccinctRoles.KeyIDs = filteredKeyIDs + + // delete the keyID from Keys since it can not be used anywhere else + delete(signed.Delegations.Keys, keyID) + return nil + } + return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)} +} + +// Equal checks whether one hash set equals another +func (source Hashes) Equal(expected Hashes) bool { + hashChecked := false + for typ, hash := range expected { + if h, ok := source[typ]; ok { + // hash type match found + hashChecked = true + if !hmac.Equal(h, hash) { + // hash values don't match + return false + } + } + } + return hashChecked +} + +// verifyLength verifies if the passed data has the corresponding length +func verifyLength(data []byte, length int64) error { + len, err := io.Copy(io.Discard, bytes.NewReader(data)) + if err != nil { + return err + } + if length != len { + return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("length verification failed - expected %d, got %d", length, len)} + } + return nil +} + +// verifyHashes verifies if the hash of the passed data corresponds to it +func verifyHashes(data []byte, hashes Hashes) error { + var hasher hash.Hash + for k, v := range hashes { + switch k { + case "sha256": + hasher = sha256.New() + case "sha512": + hasher = sha512.New() + default: + return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("hash verification failed - unknown hashing algorithm - %s", k)} + } + hasher.Write(data) + if hex.EncodeToString(v) != hex.EncodeToString(hasher.Sum(nil)) { + return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("hash verification failed - mismatch for algorithm %s", k)} + } + } + return nil +} + +// fromBytes return a *Metadata[T] object from bytes and verifies +// 
that the data corresponds to the caller struct type +func fromBytes[T Roles](data []byte) (*Metadata[T], error) { + meta := &Metadata[T]{} + // verify that the type we used to create the object is the same as the type of the metadata file + if err := checkType[T](data); err != nil { + return nil, err + } + // if all is okay, unmarshal meta to the desired Metadata[T] type + if err := json.Unmarshal(data, meta); err != nil { + return nil, err + } + // Make sure signature key IDs are unique + if err := checkUniqueSignatures(*meta); err != nil { + return nil, err + } + return meta, nil +} + +// checkUniqueSignatures verifies if the signature key IDs are unique for that metadata +func checkUniqueSignatures[T Roles](meta Metadata[T]) error { + signatures := []string{} + for _, sig := range meta.Signatures { + if slices.Contains(signatures, sig.KeyID) { + return &ErrValue{Msg: fmt.Sprintf("multiple signatures found for key ID %s", sig.KeyID)} + } + signatures = append(signatures, sig.KeyID) + } + return nil +} + +// checkType verifies if the generic type used to create the object is the same as the type of the metadata file in bytes +func checkType[T Roles](data []byte) error { + var m map[string]any + i := any(new(T)) + if err := json.Unmarshal(data, &m); err != nil { + return err + } + signedType := m["signed"].(map[string]any)["_type"].(string) + switch i.(type) { + case *RootType: + if ROOT != signedType { + return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", ROOT, signedType)} + } + case *SnapshotType: + if SNAPSHOT != signedType { + return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", SNAPSHOT, signedType)} + } + case *TimestampType: + if TIMESTAMP != signedType { + return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", TIMESTAMP, signedType)} + } + case *TargetsType: + if TARGETS != signedType { + return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", TARGETS, signedType)} + } + default: + return &ErrValue{Msg: fmt.Sprintf("unrecognized metadata type - %s", signedType)} + } + // all okay + return nil +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go new file mode 100644 index 0000000000..0726f31f88 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go @@ -0,0 +1,357 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package trustedmetadata
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/theupdateframework/go-tuf/v2/metadata"
+)
+
+// TrustedMetadata struct for storing trusted metadata
+type TrustedMetadata struct {
+	Root      *metadata.Metadata[metadata.RootType]
+	Snapshot  *metadata.Metadata[metadata.SnapshotType]
+	Timestamp *metadata.Metadata[metadata.TimestampType]
+	Targets   map[string]*metadata.Metadata[metadata.TargetsType]
+	RefTime   time.Time
+}
+
+// New creates a new TrustedMetadata instance which ensures that the
+// collection of metadata in it is valid and trusted through the whole
+// client update workflow. It provides easy ways to update the metadata
+// with the caller making decisions on what is updated
+func New(rootData []byte) (*TrustedMetadata, error) {
+	res := &TrustedMetadata{
+		Targets: map[string]*metadata.Metadata[metadata.TargetsType]{},
+		RefTime: time.Now().UTC(),
+	}
+	// load and validate the local root metadata
+	// valid initial trusted root metadata is required
+	err := res.loadTrustedRoot(rootData)
+	if err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+// UpdateRoot verifies and loads "rootData" as new root metadata.
+// Note that an expired intermediate root is considered valid: expiry is
+// only checked for the final root in UpdateTimestamp()
+func (trusted *TrustedMetadata) UpdateRoot(rootData []byte) (*metadata.Metadata[metadata.RootType], error) {
+	log := metadata.GetLogger()
+
+	if trusted.Timestamp != nil {
+		return nil, &metadata.ErrRuntime{Msg: "cannot update root after timestamp"}
+	}
+	log.Info("Updating root")
+	// generate root metadata
+	newRoot, err := metadata.Root().FromBytes(rootData)
+	if err != nil {
+		return nil, err
+	}
+	// check metadata type matches root
+	if newRoot.Signed.Type != metadata.ROOT {
+		return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.ROOT, newRoot.Signed.Type)}
+	}
+	// verify that new root is signed by trusted root
+	err = trusted.Root.VerifyDelegate(metadata.ROOT, newRoot)
+	if err != nil {
+		return nil, err
+	}
+	// verify version
+	if newRoot.Signed.Version != trusted.Root.Signed.Version+1 {
+		return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("bad version number, expected %d, got %d", trusted.Root.Signed.Version+1, newRoot.Signed.Version)}
+	}
+	// verify that new root is signed by itself
+	err = newRoot.VerifyDelegate(metadata.ROOT, newRoot)
+	if err != nil {
+		return nil, err
+	}
+	// save root if verified
+	trusted.Root = newRoot
+	log.Info("Updated root", "version", trusted.Root.Signed.Version)
+	return trusted.Root, nil
+}
+
+// UpdateTimestamp verifies and loads "timestampData" as new timestamp metadata.
+// Note that an intermediate timestamp is allowed to be expired. "TrustedMetadata"
+// will error in this case but the intermediate timestamp will be loaded.
+// This way a newer timestamp can still be loaded (and the intermediate
+// timestamp will be used for rollback protection). Expired timestamp will
+// prevent loading snapshot metadata.
+func (trusted *TrustedMetadata) UpdateTimestamp(timestampData []byte) (*metadata.Metadata[metadata.TimestampType], error) {
+	log := metadata.GetLogger()
+
+	if trusted.Snapshot != nil {
+		return nil, &metadata.ErrRuntime{Msg: "cannot update timestamp after snapshot"}
+	}
+	// client workflow 5.3.10: Make sure final root is not expired.
+	if trusted.Root.Signed.IsExpired(trusted.RefTime) {
+		// no need to check for 5.3.11 (fast forward attack recovery):
+		// timestamp/snapshot can not yet be loaded at this point
+		return nil, &metadata.ErrExpiredMetadata{Msg: "final root.json is expired"}
+	}
+	log.Info("Updating timestamp")
+	newTimestamp, err := metadata.Timestamp().FromBytes(timestampData)
+	if err != nil {
+		return nil, err
+	}
+	// check metadata type matches timestamp
+	if newTimestamp.Signed.Type != metadata.TIMESTAMP {
+		return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.TIMESTAMP, newTimestamp.Signed.Type)}
+	}
+	// verify that new timestamp is signed by trusted root
+	err = trusted.Root.VerifyDelegate(metadata.TIMESTAMP, newTimestamp)
+	if err != nil {
+		return nil, err
+	}
+	// if an existing trusted timestamp is updated,
+	// check for a rollback attack
+	if trusted.Timestamp != nil {
+		// prevent rolling back timestamp version
+		if newTimestamp.Signed.Version < trusted.Timestamp.Signed.Version {
+			return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("new timestamp version %d must be >= %d", newTimestamp.Signed.Version, trusted.Timestamp.Signed.Version)}
+		}
+		// keep using old timestamp if versions are equal
+		if newTimestamp.Signed.Version == trusted.Timestamp.Signed.Version {
+			log.Info("New timestamp version equals the old one", "new", newTimestamp.Signed.Version, "old", trusted.Timestamp.Signed.Version)
+			return nil, &metadata.ErrEqualVersionNumber{Msg: fmt.Sprintf("new timestamp version %d equals the old one %d", newTimestamp.Signed.Version, trusted.Timestamp.Signed.Version)}
+		}
+		// prevent rolling back snapshot version
+		snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
+		newSnapshotMeta := newTimestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
+		if newSnapshotMeta.Version < snapshotMeta.Version {
+			return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("new snapshot version %d must be >= %d", newSnapshotMeta.Version, snapshotMeta.Version)}
+		}
+	}
+	// expiry not checked to allow old timestamp to be used for rollback
+	// protection of new timestamp: expiry is checked in UpdateSnapshot()
+	// save timestamp if verified
+	trusted.Timestamp = newTimestamp
+	log.Info("Updated timestamp", "version", trusted.Timestamp.Signed.Version)
+
+	// timestamp is loaded: error if it is not valid _final_ timestamp
+	err = trusted.checkFinalTimestamp()
+	if err != nil {
+		return nil, err
+	}
+	// all okay
+	return trusted.Timestamp, nil
+}
+
+// checkFinalTimestamp verifies if trusted timestamp is not expired
+func (trusted *TrustedMetadata) checkFinalTimestamp() error {
+	if trusted.Timestamp.Signed.IsExpired(trusted.RefTime) {
+		return &metadata.ErrExpiredMetadata{Msg: "timestamp.json is expired"}
+	}
+	return nil
+}
+
+// UpdateSnapshot verifies and loads "snapshotData" as new snapshot metadata.
+// Note that an intermediate snapshot is allowed to be expired, and its version
+// is allowed to not match the timestamp meta version: TrustedMetadata
+// will error in case of expired metadata or a bad version, but the
+// intermediate snapshot will still be loaded. This way a newer snapshot can still
+// be loaded (and the intermediate snapshot will be used for rollback protection).
+// Expired snapshot or snapshot that does not match timestamp meta version will
+// prevent loading targets.
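+//
+// Illustrative call order for a full update cycle (a sketch; the byte
+// slices are hypothetical inputs and error handling is elided):
+//
+//	trusted, _ := trustedmetadata.New(rootBytes)
+//	_, _ = trusted.UpdateTimestamp(timestampBytes)
+//	_, _ = trusted.UpdateSnapshot(snapshotBytes, false)
+//	_, _ = trusted.UpdateTargets(targetsBytes)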
+func (trusted *TrustedMetadata) UpdateSnapshot(snapshotData []byte, isTrusted bool) (*metadata.Metadata[metadata.SnapshotType], error) {
+	log := metadata.GetLogger()
+
+	if trusted.Timestamp == nil {
+		return nil, &metadata.ErrRuntime{Msg: "cannot update snapshot before timestamp"}
+	}
+	if trusted.Targets[metadata.TARGETS] != nil {
+		return nil, &metadata.ErrRuntime{Msg: "cannot update snapshot after targets"}
+	}
+	log.Info("Updating snapshot")
+
+	// snapshot cannot be loaded if final timestamp is expired
+	err := trusted.checkFinalTimestamp()
+	if err != nil {
+		return nil, err
+	}
+	snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
+	// verify non-trusted data against the hashes in timestamp, if any.
+	// trusted snapshot data has already been verified once.
+	if !isTrusted {
+		err = snapshotMeta.VerifyLengthHashes(snapshotData)
+		if err != nil {
+			return nil, err
+		}
+	}
+	newSnapshot, err := metadata.Snapshot().FromBytes(snapshotData)
+	if err != nil {
+		return nil, err
+	}
+	// check metadata type matches snapshot
+	if newSnapshot.Signed.Type != metadata.SNAPSHOT {
+		return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.SNAPSHOT, newSnapshot.Signed.Type)}
+	}
+	// verify that new snapshot is signed by trusted root
+	err = trusted.Root.VerifyDelegate(metadata.SNAPSHOT, newSnapshot)
+	if err != nil {
+		return nil, err
+	}
+
+	// version not checked against meta version to allow old snapshot to be
+	// used in rollback protection: it is checked when targets is updated
+
+	// if an existing trusted snapshot is updated, check for rollback attack
+	if trusted.Snapshot != nil {
+		for name, info := range trusted.Snapshot.Signed.Meta {
+			newFileInfo, ok := newSnapshot.Signed.Meta[name]
+			// prevent removal of any metadata in meta
+			if !ok {
+				return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("new snapshot is missing info for %s", name)}
+			}
+			// prevent rollback of any metadata versions
+			if newFileInfo.Version < info.Version {
+				return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %s version %d, got %d", name, info.Version, newFileInfo.Version)}
+			}
+		}
+	}
+
+	// expiry not checked to allow old snapshot to be used for rollback
+	// protection of new snapshot: it is checked when targets is updated
+	trusted.Snapshot = newSnapshot
+	log.Info("Updated snapshot", "version", trusted.Snapshot.Signed.Version)
+
+	// snapshot is loaded, but we error if it's not valid _final_ snapshot
+	err = trusted.checkFinalSnapshot()
+	if err != nil {
+		return nil, err
+	}
+	// all okay
+	return trusted.Snapshot, nil
+}
+
+// checkFinalSnapshot verifies if it's not expired and snapshot version matches timestamp meta version
+func (trusted *TrustedMetadata) checkFinalSnapshot() error {
+	if trusted.Snapshot.Signed.IsExpired(trusted.RefTime) {
+		return &metadata.ErrExpiredMetadata{Msg: "snapshot.json is expired"}
+	}
+	snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
+	if trusted.Snapshot.Signed.Version != snapshotMeta.Version {
+		return &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %d, got %d", snapshotMeta.Version, trusted.Snapshot.Signed.Version)}
+	}
+	return nil
+}
+
+// UpdateTargets verifies and loads "targetsData" as new top-level targets metadata.
+func (trusted *TrustedMetadata) UpdateTargets(targetsData []byte) (*metadata.Metadata[metadata.TargetsType], error) {
+	return trusted.UpdateDelegatedTargets(targetsData, metadata.TARGETS, metadata.ROOT)
+}
+
+// UpdateDelegatedTargets verifies and loads "targetsData" as new metadata for target "roleName"
+func (trusted *TrustedMetadata) UpdateDelegatedTargets(targetsData []byte, roleName, delegatorName string) (*metadata.Metadata[metadata.TargetsType], error) {
+	log := metadata.GetLogger()
+
+	var ok bool
+	if trusted.Snapshot == nil {
+		return nil, &metadata.ErrRuntime{Msg: "cannot load targets before snapshot"}
+	}
+	// targets cannot be loaded if final snapshot is expired or its version
+	// does not match meta version in timestamp
+	err := trusted.checkFinalSnapshot()
+	if err != nil {
+		return nil, err
+	}
+	// check if delegator metadata is present
+	if delegatorName == metadata.ROOT {
+		ok = trusted.Root != nil
+	} else {
+		_, ok = trusted.Targets[delegatorName]
+	}
+	if !ok {
+		return nil, &metadata.ErrRuntime{Msg: "cannot load targets before delegator"}
+	}
+	log.Info("Updating delegated role", "role", roleName, "delegator", delegatorName)
+	// Verify against the hashes in snapshot, if any
+	meta, ok := trusted.Snapshot.Signed.Meta[fmt.Sprintf("%s.json", roleName)]
+	if !ok {
+		return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("snapshot does not contain information for %s", roleName)}
+	}
+	err = meta.VerifyLengthHashes(targetsData)
+	if err != nil {
+		return nil, err
+	}
+	newDelegate, err := metadata.Targets().FromBytes(targetsData)
+	if err != nil {
+		return nil, err
+	}
+	// check metadata type matches targets
+	if newDelegate.Signed.Type != metadata.TARGETS {
+		return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.TARGETS, newDelegate.Signed.Type)}
+	}
+	// get delegator metadata and verify the new delegatee
+	if delegatorName == metadata.ROOT {
+		err = trusted.Root.VerifyDelegate(roleName, newDelegate)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		err = trusted.Targets[delegatorName].VerifyDelegate(roleName, newDelegate)
+		if err != nil {
+			return nil, err
+		}
+	}
+	// check versions
+	if newDelegate.Signed.Version != meta.Version {
+		return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %s version %d, got %d", roleName, meta.Version, newDelegate.Signed.Version)}
+	}
+	// check expiration
+	if newDelegate.Signed.IsExpired(trusted.RefTime) {
+		return nil, &metadata.ErrExpiredMetadata{Msg: fmt.Sprintf("new %s is expired", roleName)}
+	}
+	trusted.Targets[roleName] = newDelegate
+	log.Info("Updated role", "role", roleName, "version", trusted.Targets[roleName].Signed.Version)
+	return trusted.Targets[roleName], nil
+}
+
+// loadTrustedRoot verifies and loads "rootData" as trusted root metadata.
+// Note that an expired initial root is considered valid: expiry is
+// only checked for the final root in "UpdateTimestamp()".
+func (trusted *TrustedMetadata) loadTrustedRoot(rootData []byte) error { + log := metadata.GetLogger() + + // generate root metadata + newRoot, err := metadata.Root().FromBytes(rootData) + if err != nil { + return err + } + // check metadata type matches root + if newRoot.Signed.Type != metadata.ROOT { + return &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.ROOT, newRoot.Signed.Type)} + } + // verify root by itself + err = newRoot.VerifyDelegate(metadata.ROOT, newRoot) + if err != nil { + return err + } + // save root if verified + trusted.Root = newRoot + log.Info("Loaded trusted root", "version", trusted.Root.Signed.Version) + return nil +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go new file mode 100644 index 0000000000..0bd86ee8f2 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go @@ -0,0 +1,179 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "encoding/json" + "time" +) + +// Generic type constraint +type Roles interface { + RootType | SnapshotType | TimestampType | TargetsType +} + +// Define version of the TUF specification +const ( + SPECIFICATION_VERSION = "1.0.31" +) + +// Define top level role names +const ( + ROOT = "root" + SNAPSHOT = "snapshot" + TARGETS = "targets" + TIMESTAMP = "timestamp" +) + +var TOP_LEVEL_ROLE_NAMES = [...]string{ROOT, TIMESTAMP, SNAPSHOT, TARGETS} + +// Metadata[T Roles] represents a TUF metadata. +// Provides methods to read and write to and +// from file and bytes, also to create, verify and clear metadata signatures. 
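+//
+// For example (illustrative), root metadata is represented as
+// Metadata[RootType] and round-trips through the custom (un)marshalers
+// defined in marshal.go:
+//
+//	var m Metadata[RootType]
+//	_ = json.Unmarshal(data, &m) // m.UnrecognizedFields keeps unknown keys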
+type Metadata[T Roles] struct {
+	Signed             T              `json:"signed"`
+	Signatures         []Signature    `json:"signatures"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+// Signature represents the Signature part of a TUF metadata
+type Signature struct {
+	KeyID              string         `json:"keyid"`
+	Signature          HexBytes       `json:"sig"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+// RootType represents the Signed portion of a root metadata
+type RootType struct {
+	Type               string           `json:"_type"`
+	SpecVersion        string           `json:"spec_version"`
+	ConsistentSnapshot bool             `json:"consistent_snapshot"`
+	Version            int64            `json:"version"`
+	Expires            time.Time        `json:"expires"`
+	Keys               map[string]*Key  `json:"keys"`
+	Roles              map[string]*Role `json:"roles"`
+	UnrecognizedFields map[string]any   `json:"-"`
+}
+
+// SnapshotType represents the Signed portion of a snapshot metadata
+type SnapshotType struct {
+	Type               string                `json:"_type"`
+	SpecVersion        string                `json:"spec_version"`
+	Version            int64                 `json:"version"`
+	Expires            time.Time             `json:"expires"`
+	Meta               map[string]*MetaFiles `json:"meta"`
+	UnrecognizedFields map[string]any        `json:"-"`
+}
+
+// TargetsType represents the Signed portion of a targets metadata
+type TargetsType struct {
+	Type               string                  `json:"_type"`
+	SpecVersion        string                  `json:"spec_version"`
+	Version            int64                   `json:"version"`
+	Expires            time.Time               `json:"expires"`
+	Targets            map[string]*TargetFiles `json:"targets"`
+	Delegations        *Delegations            `json:"delegations,omitempty"`
+	UnrecognizedFields map[string]any          `json:"-"`
+}
+
+// TimestampType represents the Signed portion of a timestamp metadata
+type TimestampType struct {
+	Type               string                `json:"_type"`
+	SpecVersion        string                `json:"spec_version"`
+	Version            int64                 `json:"version"`
+	Expires            time.Time             `json:"expires"`
+	Meta               map[string]*MetaFiles `json:"meta"`
+	UnrecognizedFields map[string]any        `json:"-"`
+}
+
+// Key represents a key in TUF
+type Key struct {
+	Type               string         `json:"keytype"`
+	Scheme             string         `json:"scheme"`
+	Value              KeyVal         `json:"keyval"`
+	id                 string         `json:"-"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+type KeyVal struct {
+	PublicKey          string         `json:"public"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+// Role represents one of the top-level roles in TUF
+type Role struct {
+	KeyIDs             []string       `json:"keyids"`
+	Threshold          int            `json:"threshold"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+type HexBytes []byte
+
+type Hashes map[string]HexBytes
+
+// MetaFiles represents the value portion of METAFILES in TUF (used in Snapshot and Timestamp metadata). Used to store information about a particular meta file.
+type MetaFiles struct {
+	Length             int64          `json:"length,omitempty"`
+	Hashes             Hashes         `json:"hashes,omitempty"`
+	Version            int64          `json:"version"`
+	UnrecognizedFields map[string]any `json:"-"`
+}
+
+// TargetFiles represents the value portion of TARGETS in TUF (used in Targets metadata). Used to store information about a particular target file.
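+// A minimal entry serializes roughly as (illustrative values):
+//
+//	{"length": 1024, "hashes": {"sha256": "<hex digest>"}}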
+type TargetFiles struct { + Length int64 `json:"length"` + Hashes Hashes `json:"hashes"` + Custom *json.RawMessage `json:"custom,omitempty"` + Path string `json:"-"` + UnrecognizedFields map[string]any `json:"-"` +} + +// Delegations is an optional object which represents delegation roles and their corresponding keys +type Delegations struct { + Keys map[string]*Key `json:"keys"` + Roles []DelegatedRole `json:"roles,omitempty"` + SuccinctRoles *SuccinctRoles `json:"succinct_roles,omitempty"` + UnrecognizedFields map[string]any `json:"-"` +} + +// DelegatedRole represents a delegated role in TUF +type DelegatedRole struct { + Name string `json:"name"` + KeyIDs []string `json:"keyids"` + Threshold int `json:"threshold"` + Terminating bool `json:"terminating"` + PathHashPrefixes []string `json:"path_hash_prefixes,omitempty"` + Paths []string `json:"paths,omitempty"` + UnrecognizedFields map[string]any `json:"-"` +} + +// SuccinctRoles represents a delegation graph that covers all targets, +// distributing them uniformly over the delegated roles (i.e. bins) in the graph. +type SuccinctRoles struct { + KeyIDs []string `json:"keyids"` + Threshold int `json:"threshold"` + BitLength int `json:"bit_length"` + NamePrefix string `json:"name_prefix"` + UnrecognizedFields map[string]any `json:"-"` +} + +// RoleResult represents the name and terminating status of a delegated role that is responsible for targetFilepath +type RoleResult struct { + Name string + Terminating bool +} diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go new file mode 100644 index 0000000000..7194365012 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go @@ -0,0 +1,711 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package updater + +import ( + "encoding/hex" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + "github.com/theupdateframework/go-tuf/v2/metadata" + "github.com/theupdateframework/go-tuf/v2/metadata/config" + "github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata" +) + +// Client update workflow implementation +// +// The "Updater" provides an implementation of the TUF client workflow (ref. https://theupdateframework.github.io/specification/latest/#detailed-client-workflow). +// "Updater" provides an API to query available targets and to download them in a +// secure manner: All downloaded files are verified by signed metadata. +// High-level description of "Updater" functionality: +// - Initializing an "Updater" loads and validates the trusted local root +// metadata: This root metadata is used as the source of trust for all other +// metadata. 
+// - Refresh() can optionally be called to update and load all top-level +// metadata as described in the specification, using both locally cached +// metadata and metadata downloaded from the remote repository. If refresh is +// not done explicitly, it will happen automatically during the first target +// info lookup. +// - Updater can be used to download targets. For each target: +// - GetTargetInfo() is first used to find information about a +// specific target. This will load new targets metadata as needed (from +// local cache or remote repository). +// - FindCachedTarget() can optionally be used to check if a +// target file is already locally cached. +// - DownloadTarget() downloads a target file and ensures it is +// verified correct by the metadata. +type Updater struct { + trusted *trustedmetadata.TrustedMetadata + cfg *config.UpdaterConfig +} + +type roleParentTuple struct { + Role string + Parent string +} + +// New creates a new Updater instance and loads trusted root metadata +func New(config *config.UpdaterConfig) (*Updater, error) { + // make sure the trusted root metadata and remote URL were provided + if len(config.LocalTrustedRoot) == 0 || len(config.RemoteMetadataURL) == 0 { + return nil, fmt.Errorf("no initial trusted root metadata or remote URL provided") + } + // create a new trusted metadata instance using the trusted root.json + trustedMetadataSet, err := trustedmetadata.New(config.LocalTrustedRoot) + if err != nil { + return nil, err + } + // create an updater instance + updater := &Updater{ + cfg: config, + trusted: trustedMetadataSet, // save trusted metadata set + } + // ensure paths exist, doesn't do anything if caching is disabled + err = updater.cfg.EnsurePathsExist() + if err != nil { + return nil, err + } + // persist the initial root metadata to the local metadata folder + err = updater.persistMetadata(metadata.ROOT, updater.cfg.LocalTrustedRoot) + if err != nil { + return nil, err + } + // all okay, return the updater instance + return updater, nil +} + +// Refresh loads and possibly refreshes top-level metadata. +// Downloads, verifies, and loads metadata for the top-level roles in the +// specified order (root -> timestamp -> snapshot -> targets) implementing +// all the checks required in the TUF client workflow. +// A Refresh() can be done only once during the lifetime of an Updater. +// If Refresh() has not been explicitly called before the first +// GetTargetInfo() call, it will be done implicitly at that time. +// The metadata for delegated roles is not updated by Refresh(): +// that happens on demand during GetTargetInfo(). However, if the +// repository uses consistent snapshots (ref. https://theupdateframework.github.io/specification/latest/#consistent-snapshots), +// then all metadata downloaded by the Updater will use the same consistent repository state. +// +// If UnsafeLocalMode is set, no network interaction is performed, only +// the cached files on disk are used. If the cached data is not complete, +// this call will fail. +func (update *Updater) Refresh() error { + if update.cfg.UnsafeLocalMode { + return update.unsafeLocalRefresh() + } + return update.onlineRefresh() +} + +// onlineRefresh implements the TUF client workflow as described for +// the Refresh function. 
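+//
+// A hedged usage sketch of that workflow (the URL and directory are
+// hypothetical, and the remaining UpdaterConfig fields, e.g. the Fetcher,
+// are assumed to be populated, typically via the config package):
+//
+//	cfg := &config.UpdaterConfig{
+//		LocalTrustedRoot:  rootBytes,
+//		RemoteMetadataURL: "https://example.com/metadata",
+//		LocalMetadataDir:  "/tmp/tuf/metadata",
+//	}
+//	up, err := updater.New(cfg)
+//	if err == nil {
+//		err = up.Refresh() // root -> timestamp -> snapshot -> targets
+//	}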
+func (update *Updater) onlineRefresh() error {
+	err := update.loadRoot()
+	if err != nil {
+		return err
+	}
+	err = update.loadTimestamp()
+	if err != nil {
+		return err
+	}
+	err = update.loadSnapshot()
+	if err != nil {
+		return err
+	}
+	_, err = update.loadTargets(metadata.TARGETS, metadata.ROOT)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// unsafeLocalRefresh tries to load the persisted metadata already cached
+// on disk. Note that this is an unsafe function, and deviates from the
+// TUF specification sections 5.3 to 5.7 (update phases).
+// The metadata on disk is verified against the provided root, though,
+// and expiration dates are verified.
+func (update *Updater) unsafeLocalRefresh() error {
+	// Root is already loaded
+	// load timestamp
+	var p = filepath.Join(update.cfg.LocalMetadataDir, metadata.TIMESTAMP)
+	data, err := update.loadLocalMetadata(p)
+	if err != nil {
+		return err
+	}
+	_, err = update.trusted.UpdateTimestamp(data)
+	if err != nil {
+		return err
+	}
+
+	// load snapshot
+	p = filepath.Join(update.cfg.LocalMetadataDir, metadata.SNAPSHOT)
+	data, err = update.loadLocalMetadata(p)
+	if err != nil {
+		return err
+	}
+	_, err = update.trusted.UpdateSnapshot(data, false)
+	if err != nil {
+		return err
+	}
+
+	// load targets
+	p = filepath.Join(update.cfg.LocalMetadataDir, metadata.TARGETS)
+	data, err = update.loadLocalMetadata(p)
+	if err != nil {
+		return err
+	}
+	// verify and load the new target metadata
+	_, err = update.trusted.UpdateDelegatedTargets(data, metadata.TARGETS, metadata.ROOT)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// GetTargetInfo returns a metadata.TargetFiles instance with information
+// for targetPath. The return value can be used as an argument to
+// DownloadTarget() and FindCachedTarget().
+// If Refresh() has not been called before calling
+// GetTargetInfo(), the refresh will be done implicitly.
+// As a side-effect this method downloads all the additional (delegated
+// targets) metadata it needs to return the target information.
+func (update *Updater) GetTargetInfo(targetPath string) (*metadata.TargetFiles, error) {
+	// do a Refresh() in case there's no trusted targets.json yet
+	if update.trusted.Targets[metadata.TARGETS] == nil {
+		err := update.Refresh()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return update.preOrderDepthFirstWalk(targetPath)
+}
+
+// DownloadTarget downloads the target file specified by targetFile
+func (update *Updater) DownloadTarget(targetFile *metadata.TargetFiles, filePath, targetBaseURL string) (string, []byte, error) {
+	log := metadata.GetLogger()
+
+	var err error
+	if filePath == "" {
+		filePath, err = update.generateTargetFilePath(targetFile)
+		if err != nil {
+			return "", nil, err
+		}
+	}
+	if targetBaseURL == "" {
+		if update.cfg.RemoteTargetsURL == "" {
+			return "", nil, &metadata.ErrValue{Msg: "targetBaseURL must be set in either DownloadTarget() or the Updater struct"}
+		}
+		targetBaseURL = ensureTrailingSlash(update.cfg.RemoteTargetsURL)
+	} else {
+		targetBaseURL = ensureTrailingSlash(targetBaseURL)
+	}
+
+	targetFilePath := targetFile.Path
+	targetRemotePath := targetFilePath
+	consistentSnapshot := update.trusted.Root.Signed.ConsistentSnapshot
+	if consistentSnapshot && update.cfg.PrefixTargetsWithHash {
+		hashes := ""
+		// get first hex value of hashes
+		for _, v := range targetFile.Hashes {
+			hashes = hex.EncodeToString(v)
+			break
+		}
+		baseName := filepath.Base(targetFilePath)
+		dirName, ok := strings.CutSuffix(targetFilePath, "/"+baseName)
+		if !ok {
+			// <hash>.<baseName>
+			targetRemotePath = fmt.Sprintf("%s.%s", hashes, baseName)
+		} else {
+			// <dirName>/<hash>.<baseName>
+			targetRemotePath = fmt.Sprintf("%s/%s.%s", dirName, hashes, baseName)
+		}
+	}
+	fullURL := fmt.Sprintf("%s%s", targetBaseURL, targetRemotePath)
+	data, err := update.cfg.Fetcher.DownloadFile(fullURL, targetFile.Length, time.Second*15)
+	if err != nil {
+		return "", nil, err
+	}
+	err = targetFile.VerifyLengthHashes(data)
+	if err != nil {
+		return "", nil, err
+	}
+
+	// do not persist the target file if cache is disabled
+	if !update.cfg.DisableLocalCache {
+		err = os.WriteFile(filePath, data, 0644)
+		if err != nil {
+			return "", nil, err
+		}
+	}
+	log.Info("Downloaded target", "path", targetFile.Path)
+	return filePath, data, nil
+}
+
+// FindCachedTarget checks whether a local file is an up-to-date target
+func (update *Updater) FindCachedTarget(targetFile *metadata.TargetFiles, filePath string) (string, []byte, error) {
+	var err error
+	targetFilePath := ""
+	// do not look for cached target file if cache is disabled
+	if update.cfg.DisableLocalCache {
+		return "", nil, nil
+	}
+	// get its path if not provided
+	if filePath == "" {
+		targetFilePath, err = update.generateTargetFilePath(targetFile)
+		if err != nil {
+			return "", nil, err
+		}
+	} else {
+		targetFilePath = filePath
+	}
+	// get file content
+	data, err := readFile(targetFilePath)
+	if err != nil {
+		// do not want to return err, instead we say that there's no cached target available
+		return "", nil, nil
+	}
+	// verify if the length and hashes of this target file match the expected values
+	err = targetFile.VerifyLengthHashes(data)
+	if err != nil {
+		// do not want to return err, instead we say that there's no cached target available
+		return "", nil, nil
+	}
+	// if all okay, return its path
+	return targetFilePath, data, nil
+}
+
+// loadTimestamp loads local and remote timestamp metadata
+func (update *Updater) loadTimestamp() error {
+	log := metadata.GetLogger()
+	// try to read local timestamp
+	data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, metadata.TIMESTAMP))
+	if err != nil {
+		// this means there's no existing local timestamp so we should proceed downloading it without the need to UpdateTimestamp
+		log.Info("Local timestamp does not exist")
+	} else {
+		// local timestamp exists, let's try to verify it and load it to the trusted metadata set
+		_, err := update.trusted.UpdateTimestamp(data)
+		if err != nil {
+			if errors.Is(err, &metadata.ErrRepository{}) {
+				// local timestamp is not valid, proceed downloading from remote; note that this error type includes several other subset errors
+				log.Info("Local timestamp is not valid")
+			} else {
+				// another error
+				return err
+			}
+		}
+		log.Info("Local timestamp is valid")
+		// all okay, local timestamp exists and it is valid, nevertheless proceed with downloading from remote
+	}
+	// load from remote (whether local load succeeded or not)
+	data, err = update.downloadMetadata(metadata.TIMESTAMP, update.cfg.TimestampMaxLength, "")
+	if err != nil {
+		return err
+	}
+	// try to verify and load the newly downloaded timestamp
+	_, err = update.trusted.UpdateTimestamp(data)
+	if err != nil {
+		if errors.Is(err, &metadata.ErrEqualVersionNumber{}) {
+			// if the new timestamp version is the same as current, discard the
+			// new timestamp; this is normal and it shouldn't raise any error
+			return nil
+		} else {
+			// another error
+			return err
+		}
+	}
+	// proceed with persisting the new timestamp
+	err = update.persistMetadata(metadata.TIMESTAMP, data)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// loadSnapshot loads local (and if needed remote) snapshot metadata
+func (update *Updater) loadSnapshot() error {
+	log := metadata.GetLogger()
+	// try to read local snapshot
+	data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, metadata.SNAPSHOT))
+	if err != nil {
+		// this means there's no existing local snapshot so we should proceed downloading it without the need to UpdateSnapshot
+		log.Info("Local snapshot does not exist")
+	} else {
+		// successfully read a local snapshot metadata, so let's try to verify and load it to the trusted metadata set
+		_, err = update.trusted.UpdateSnapshot(data, true)
+		if err != nil {
+			// this means snapshot verification/loading failed
+			if errors.Is(err, &metadata.ErrRepository{}) {
+				// local snapshot is not valid, proceed downloading from remote; note that this error type includes several other subset errors
+				log.Info("Local snapshot is not valid")
+			} else {
+				// another error
+				return err
+			}
+		} else {
+			// this means snapshot verification/loading succeeded
+			log.Info("Local snapshot is valid: not downloading new one")
+			return nil
+		}
+	}
+	// local snapshot does not exist or is invalid, update from remote
+	log.Info("Failed to load local snapshot")
+	if update.trusted.Timestamp == nil {
+		return fmt.Errorf("trusted timestamp not set")
+	}
+	// extract the snapshot meta from the trusted timestamp metadata
+	snapshotMeta := update.trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
+	// extract the length of the snapshot metadata to be downloaded
+	length := snapshotMeta.Length
+	if length == 0 {
+		length = update.cfg.SnapshotMaxLength
+	}
+	// extract which snapshot version should be downloaded in case of consistent snapshots
+	version := ""
+	if update.trusted.Root.Signed.ConsistentSnapshot {
+		version = strconv.FormatInt(snapshotMeta.Version, 10)
+	}
+	// download snapshot metadata
+	data, err = update.downloadMetadata(metadata.SNAPSHOT, length, version)
+	if err != nil {
+		return err
+	}
+	// verify and load the new snapshot
+	_, err = update.trusted.UpdateSnapshot(data, false)
+	if err != nil {
+		return err
+	}
+	// persist the new snapshot
+	err = update.persistMetadata(metadata.SNAPSHOT, data)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// loadTargets loads local (and if needed remote) metadata for roleName
+func (update *Updater) loadTargets(roleName, parentName string) (*metadata.Metadata[metadata.TargetsType], error) {
+	log := metadata.GetLogger()
+	// avoid loading "roleName" more than once during "GetTargetInfo"
+	role, ok := update.trusted.Targets[roleName]
+	if ok {
+		return role, nil
+	}
+	// try to read local targets
+	data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, roleName))
+	if err != nil {
+		// this means there's no existing local target file so we should proceed downloading it without the need to UpdateDelegatedTargets
+		log.Info("Local role does not exist", "role", roleName)
+	} else {
+		// successfully read a local targets metadata, so let's try to verify and load it to the trusted metadata set
+		delegatedTargets, err := update.trusted.UpdateDelegatedTargets(data, roleName, parentName)
+		if err != nil {
+			// this means targets verification/loading failed
+			if errors.Is(err, &metadata.ErrRepository{}) {
+				// local target file is not valid, proceed downloading from remote; note that this error type includes several other subset errors
+				log.Info("Local role is not valid", "role", roleName)
+			} else {
+				// another error
+				return nil, err
+			}
+		} else {
+			// this means targets verification/loading succeeded
+			log.Info("Local role is valid: not downloading new one", "role", roleName)
+			return delegatedTargets, nil
+		}
+	}
+	// local "roleName" does not exist or is invalid, update from remote
+	log.Info("Failed to load local role", "role", roleName)
+	if update.trusted.Snapshot == nil {
+		return nil, fmt.Errorf("trusted snapshot not set")
+	}
+	// extract the targets meta from the trusted snapshot metadata
+	metaInfo := update.trusted.Snapshot.Signed.Meta[fmt.Sprintf("%s.json", roleName)]
+	// extract the length of the target metadata to be downloaded
+	length := metaInfo.Length
+	if length == 0 {
+		length = update.cfg.TargetsMaxLength
+	}
+	// extract which target metadata version should be downloaded in case of consistent snapshots
+	version := ""
+	if update.trusted.Root.Signed.ConsistentSnapshot {
+		version = strconv.FormatInt(metaInfo.Version, 10)
+	}
+	// download targets metadata
+	data, err = update.downloadMetadata(roleName, length, version)
+	if err != nil {
+		return nil, err
+	}
+	// verify and load the new target metadata
+	delegatedTargets, err := update.trusted.UpdateDelegatedTargets(data, roleName, parentName)
+	if err != nil {
+		return nil, err
+	}
+	// persist the new target metadata
+	err = update.persistMetadata(roleName, data)
+	if err != nil {
+		return nil, err
+	}
+	return delegatedTargets, nil
+}
+
+// loadRoot loads remote root metadata. It sequentially loads and
+// persists on local disk every newer root metadata version
+// available on the remote
+func (update *Updater) loadRoot() error {
+	// calculate boundaries
+	lowerBound := update.trusted.Root.Signed.Version + 1
+	upperBound := lowerBound + update.cfg.MaxRootRotations
+
+	// loop until we find the latest available version of root (download -> verify -> load -> persist)
+	for nextVersion := lowerBound; nextVersion < upperBound; nextVersion++ {
+		data, err := update.downloadMetadata(metadata.ROOT, update.cfg.RootMaxLength, strconv.FormatInt(nextVersion, 10))
+		if err != nil {
+			// downloading the root metadata failed for some reason
+			var tmpErr *metadata.ErrDownloadHTTP
+			if errors.As(err, &tmpErr) {
+				if tmpErr.StatusCode != http.StatusNotFound && tmpErr.StatusCode != http.StatusForbidden {
+					// unexpected HTTP status code
+					return err
+				}
+				// 404/403 means current root is newest available, so we can stop the loop and move forward
+				break
+			}
+			// some other error occurred
+			return err
+		} else {
+			// downloading root metadata succeeded, so let's try to verify and load it
+			_, err = update.trusted.UpdateRoot(data)
+			if err != nil {
+				return err
+			}
+			// persist root metadata to disk
+			err = update.persistMetadata(metadata.ROOT, data)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// preOrderDepthFirstWalk interrogates the tree of target delegations
+// in order of appearance (which implicitly orders trustworthiness),
+// and returns the matching target found in the most trusted role.
+func (update *Updater) preOrderDepthFirstWalk(targetFilePath string) (*metadata.TargetFiles, error) {
+	log := metadata.GetLogger()
+	// list of delegations to be interrogated. 
A (role, parent role) pair + // is needed to load and verify the delegated targets metadata + delegationsToVisit := []roleParentTuple{{ + Role: metadata.TARGETS, + Parent: metadata.ROOT, + }} + visitedRoleNames := map[string]bool{} + // pre-order depth-first traversal of the graph of target delegations + for len(visitedRoleNames) <= update.cfg.MaxDelegations && len(delegationsToVisit) > 0 { + // pop the role name from the top of the stack + delegation := delegationsToVisit[len(delegationsToVisit)-1] + delegationsToVisit = delegationsToVisit[:len(delegationsToVisit)-1] + // skip any visited current role to prevent cycles + _, ok := visitedRoleNames[delegation.Role] + if ok { + log.Info("Skipping visited current role", "role", delegation.Role) + continue + } + // the metadata for delegation.Role must be downloaded/updated before + // its targets, delegations, and child roles can be inspected + targets, err := update.loadTargets(delegation.Role, delegation.Parent) + if err != nil { + return nil, err + } + target, ok := targets.Signed.Targets[targetFilePath] + if ok { + log.Info("Found target in current role", "role", delegation.Role) + return target, nil + } + // after pre-order check, add current role to set of visited roles + visitedRoleNames[delegation.Role] = true + if targets.Signed.Delegations != nil { + var childRolesToVisit []roleParentTuple + // note that this may be a slow operation if there are many + // delegated roles + roles := targets.Signed.Delegations.GetRolesForTarget(targetFilePath) + for _, rolesForTarget := range roles { + log.Info("Adding child role", "role", rolesForTarget.Name) + childRolesToVisit = append(childRolesToVisit, roleParentTuple{Role: rolesForTarget.Name, Parent: delegation.Role}) + if rolesForTarget.Terminating { + log.Info("Not backtracking to other roles") + delegationsToVisit = []roleParentTuple{} + break + } + } + // push childRolesToVisit in reverse order of appearance + // onto delegationsToVisit. Roles are popped from the end of + // the list + reverseSlice(childRolesToVisit) + delegationsToVisit = append(delegationsToVisit, childRolesToVisit...) 
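+			// e.g. if GetRolesForTarget returned roles A, B, C, the stack now
+			// ends with C, B, A, so A (listed first, hence most trusted) is
+			// popped and visited first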
+		}
+	}
+	if len(delegationsToVisit) > 0 {
+		log.Info("Too many roles left to visit for max allowed delegations",
+			"roles-left", len(delegationsToVisit),
+			"allowed-delegations", update.cfg.MaxDelegations)
+	}
+	// if this point is reached then the target was not found, return an error
+	return nil, fmt.Errorf("target %s not found", targetFilePath)
+}
+
+// persistMetadata writes metadata to disk atomically to avoid data loss
+func (update *Updater) persistMetadata(roleName string, data []byte) error {
+	log := metadata.GetLogger()
+	// do not persist the metadata if we have disabled local caching
+	if update.cfg.DisableLocalCache {
+		return nil
+	}
+	// caching enabled, proceed with persisting the metadata locally
+	fileName := filepath.Join(update.cfg.LocalMetadataDir, fmt.Sprintf("%s.json", url.QueryEscape(roleName)))
+	// create a temporary file
+	file, err := os.CreateTemp(update.cfg.LocalMetadataDir, "tuf_tmp")
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+	// write the data content to the temporary file
+	err = os.WriteFile(file.Name(), data, 0644)
+	if err != nil {
+		// delete the temporary file if there was an error while writing
+		errRemove := os.Remove(file.Name())
+		if errRemove != nil {
+			log.Info("Failed to delete temporary file", "name", file.Name())
+		}
+		return err
+	}
+
+	// can't move/rename an open file on windows, so close it first
+	err = file.Close()
+	if err != nil {
+		return err
+	}
+	// if all okay, rename the temporary file to the desired one
+	err = os.Rename(file.Name(), fileName)
+	if err != nil {
+		return err
+	}
+	read, err := os.ReadFile(fileName)
+	if err != nil {
+		return err
+	}
+	if string(read) != string(data) {
+		return fmt.Errorf("failed to persist metadata")
+	}
+	return nil
+}
+
+// downloadMetadata downloads a metadata file and returns it as bytes
+func (update *Updater) downloadMetadata(roleName string, length int64, version string) ([]byte, error) {
+	urlPath := ensureTrailingSlash(update.cfg.RemoteMetadataURL)
+	// build urlPath
+	if version == "" {
+		urlPath = fmt.Sprintf("%s%s.json", urlPath, url.QueryEscape(roleName))
+	} else {
+		urlPath = fmt.Sprintf("%s%s.%s.json", urlPath, version, url.QueryEscape(roleName))
+	}
+	return update.cfg.Fetcher.DownloadFile(urlPath, length, time.Second*15)
+}
+
+// generateTargetFilePath generates a local file path from TargetFiles
+func (update *Updater) generateTargetFilePath(tf *metadata.TargetFiles) (string, error) {
+	// LocalTargetsDir can be omitted if caching is disabled
+	if update.cfg.LocalTargetsDir == "" && !update.cfg.DisableLocalCache {
+		return "", &metadata.ErrValue{Msg: "LocalTargetsDir must be set if filepath is not given"}
+	}
+	// Use URL encoded target path as filename
+	return filepath.Join(update.cfg.LocalTargetsDir, url.QueryEscape(tf.Path)), nil
+}
+
+// loadLocalMetadata reads a local .json file and returns its bytes
+func (update *Updater) loadLocalMetadata(roleName string) ([]byte, error) {
+	return readFile(fmt.Sprintf("%s.json", roleName))
+}
+
+// GetTopLevelTargets returns the top-level target files
+func (update *Updater) GetTopLevelTargets() map[string]*metadata.TargetFiles {
+	return update.trusted.Targets[metadata.TARGETS].Signed.Targets
+}
+
+// GetTrustedMetadataSet returns the trusted metadata set
+func (update *Updater) GetTrustedMetadataSet() trustedmetadata.TrustedMetadata {
+	return *update.trusted
+}
+
+// UnsafeSetRefTime sets the reference time that the updater uses.
+// This should only be done in tests.
+// This is useful when testing time-related behavior in go-tuf.
+func (update *Updater) UnsafeSetRefTime(t time.Time) {
+	update.trusted.RefTime = t
+}
+
+// IsWindowsPath reports whether path looks like an absolute Windows path,
+// e.g. C:\
+func IsWindowsPath(path string) bool {
+	match, _ := regexp.MatchString(`^[a-zA-Z]:\\`, path)
+	return match
+}
+
+// ensureTrailingSlash ensures url ends with a slash
+func ensureTrailingSlash(url string) string {
+	if IsWindowsPath(url) {
+		slash := string(filepath.Separator)
+		if strings.HasSuffix(url, slash) {
+			return url
+		}
+		return url + slash
+	}
+	if strings.HasSuffix(url, "/") {
+		return url
+	}
+	return url + "/"
+}
+
+// reverseSlice reverses the elements of a slice in place
+func reverseSlice[S ~[]E, E any](s S) {
+	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
+		s[i], s[j] = s[j], s[i]
+	}
+}
+
+// readFile reads the content of a file and returns its bytes
+func readFile(name string) ([]byte, error) {
+	in, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer in.Close()
+	data, err := io.ReadAll(in)
+	if err != nil {
+		return nil, err
+	}
+	return data, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/.golangci.yml b/vendor/github.com/xanzy/go-gitlab/.golangci.yml
index 2d74c0df54..7c05feeefc 100644
--- a/vendor/github.com/xanzy/go-gitlab/.golangci.yml
+++ b/vendor/github.com/xanzy/go-gitlab/.golangci.yml
@@ -10,7 +10,8 @@ run:
 
 # Output configuration options
 output:
-  format: line-number
+  formats:
+    - format: line-number
 
 # All available settings of specific linters
 linters-settings:
diff --git a/vendor/github.com/xanzy/go-gitlab/README.md b/vendor/github.com/xanzy/go-gitlab/README.md
index 9ed7d40a1e..fa5a049a3b 100644
--- a/vendor/github.com/xanzy/go-gitlab/README.md
+++ b/vendor/github.com/xanzy/go-gitlab/README.md
@@ -160,11 +160,11 @@ func main() {
 
 	// Create new project
 	p := &gitlab.CreateProjectOptions{
-		Name:                 gitlab.Ptr("My Project"),
-		Description:          gitlab.Ptr("Just a test project to play with"),
-		MergeRequestsEnabled: gitlab.Ptr(true),
-		SnippetsEnabled:      gitlab.Ptr(true),
-		Visibility:           gitlab.Ptr(gitlab.PublicVisibility),
+		Name:                     gitlab.Ptr("My Project"),
+		Description:              gitlab.Ptr("Just a test project to play with"),
+		MergeRequestsAccessLevel: gitlab.Ptr(gitlab.EnabledAccessControl),
+		SnippetsAccessLevel:      gitlab.Ptr(gitlab.EnabledAccessControl),
+		Visibility:               gitlab.Ptr(gitlab.PublicVisibility),
 	}
 	project, _, err := git.Projects.CreateProject(p)
 	if err != nil {
diff --git a/vendor/github.com/xanzy/go-gitlab/audit_events.go b/vendor/github.com/xanzy/go-gitlab/audit_events.go
index 95227c4765..de312e5606 100644
--- a/vendor/github.com/xanzy/go-gitlab/audit_events.go
+++ b/vendor/github.com/xanzy/go-gitlab/audit_events.go
@@ -16,6 +16,7 @@ type AuditEvent struct {
 	EntityType string            `json:"entity_type"`
 	Details    AuditEventDetails `json:"details"`
 	CreatedAt  *time.Time        `json:"created_at"`
+	EventType  string            `json:"event_type"`
 }
 
 // AuditEventDetails represents the details portion of an audit event for
@@ -33,6 +34,8 @@ type AuditEventDetails struct {
 	Remove        string      `json:"remove"`
 	CustomMessage string      `json:"custom_message"`
 	AuthorName    string      `json:"author_name"`
+	AuthorEmail   string      `json:"author_email"`
+	AuthorClass   string      `json:"author_class"`
 	TargetID      interface{} `json:"target_id"`
 	TargetType    string      `json:"target_type"`
 	TargetDetails string      `json:"target_details"`
diff --git a/vendor/github.com/xanzy/go-gitlab/commits.go b/vendor/github.com/xanzy/go-gitlab/commits.go
index b87dd9f107..c1a9ef3d60 100644
--- a/vendor/github.com/xanzy/go-gitlab/commits.go
+++ 
b/vendor/github.com/xanzy/go-gitlab/commits.go @@ -35,24 +35,25 @@ type CommitsService struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/commits.html type Commit struct { - ID string `json:"id"` - ShortID string `json:"short_id"` - Title string `json:"title"` - AuthorName string `json:"author_name"` - AuthorEmail string `json:"author_email"` - AuthoredDate *time.Time `json:"authored_date"` - CommitterName string `json:"committer_name"` - CommitterEmail string `json:"committer_email"` - CommittedDate *time.Time `json:"committed_date"` - CreatedAt *time.Time `json:"created_at"` - Message string `json:"message"` - ParentIDs []string `json:"parent_ids"` - Stats *CommitStats `json:"stats"` - Status *BuildStateValue `json:"status"` - LastPipeline *PipelineInfo `json:"last_pipeline"` - ProjectID int `json:"project_id"` - Trailers map[string]string `json:"trailers"` - WebURL string `json:"web_url"` + ID string `json:"id"` + ShortID string `json:"short_id"` + Title string `json:"title"` + AuthorName string `json:"author_name"` + AuthorEmail string `json:"author_email"` + AuthoredDate *time.Time `json:"authored_date"` + CommitterName string `json:"committer_name"` + CommitterEmail string `json:"committer_email"` + CommittedDate *time.Time `json:"committed_date"` + CreatedAt *time.Time `json:"created_at"` + Message string `json:"message"` + ParentIDs []string `json:"parent_ids"` + Stats *CommitStats `json:"stats"` + Status *BuildStateValue `json:"status"` + LastPipeline *PipelineInfo `json:"last_pipeline"` + ProjectID int `json:"project_id"` + Trailers map[string]string `json:"trailers"` + ExtendedTrailers map[string]string `json:"extended_trailers"` + WebURL string `json:"web_url"` } // CommitStats represents the number of added and deleted files in a commit. @@ -151,11 +152,19 @@ func (s *CommitsService) GetCommitRefs(pid interface{}, sha string, opt *GetComm return cs, resp, nil } +// GetCommitOptions represents the available GetCommit() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/commits.html#get-a-single-commit +type GetCommitOptions struct { + Stats *bool `url:"stats,omitempty" json:"stats,omitempty"` +} + // GetCommit gets a specific commit identified by the commit hash or name of a // branch or tag. 
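+// A hedged usage sketch of the new signature ("git" is a *gitlab.Client, and
+// the project path and SHA are hypothetical):
+//
+//	commit, _, err := git.Commits.GetCommit("group/project", "6104942438c1",
+//		&gitlab.GetCommitOptions{Stats: gitlab.Ptr(true)})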
// // GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#get-a-single-commit -func (s *CommitsService) GetCommit(pid interface{}, sha string, options ...RequestOptionFunc) (*Commit, *Response, error) { +func (s *CommitsService) GetCommit(pid interface{}, sha string, opt *GetCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) { project, err := parseID(pid) if err != nil { return nil, nil, err @@ -165,7 +174,7 @@ func (s *CommitsService) GetCommit(pid interface{}, sha string, options ...Reque } u := fmt.Sprintf("projects/%s/repository/commits/%s", PathEscape(project), url.PathEscape(sha)) - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/xanzy/go-gitlab/container_registry.go b/vendor/github.com/xanzy/go-gitlab/container_registry.go index eaf0e34628..bec477df65 100644 --- a/vendor/github.com/xanzy/go-gitlab/container_registry.go +++ b/vendor/github.com/xanzy/go-gitlab/container_registry.go @@ -41,6 +41,7 @@ type RegistryRepository struct { Location string `json:"location"` CreatedAt *time.Time `json:"created_at"` CleanupPolicyStartedAt *time.Time `json:"cleanup_policy_started_at"` + Status *ContainerRegistryStatus `json:"status"` TagsCount int `json:"tags_count"` Tags []*RegistryRepositoryTag `json:"tags"` } diff --git a/vendor/github.com/xanzy/go-gitlab/deployments.go b/vendor/github.com/xanzy/go-gitlab/deployments.go index 3ce8ec40fc..05301acfc8 100644 --- a/vendor/github.com/xanzy/go-gitlab/deployments.go +++ b/vendor/github.com/xanzy/go-gitlab/deployments.go @@ -88,7 +88,8 @@ type ListProjectDeploymentsOptions struct { // ListProjectDeployments gets a list of deployments in a project. // -// GitLab API docs: https://docs.gitlab.com/ee/api/deployments.html#list-project-deployments +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deployments.html#list-project-deployments func (s *DeploymentsService) ListProjectDeployments(pid interface{}, opts *ListProjectDeploymentsOptions, options ...RequestOptionFunc) ([]*Deployment, *Response, error) { project, err := parseID(pid) if err != nil { @@ -112,7 +113,8 @@ func (s *DeploymentsService) ListProjectDeployments(pid interface{}, opts *ListP // GetProjectDeployment get a deployment for a project. // -// GitLab API docs: https://docs.gitlab.com/ee/api/deployments.html#get-a-specific-deployment +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deployments.html#get-a-specific-deployment func (s *DeploymentsService) GetProjectDeployment(pid interface{}, deployment int, options ...RequestOptionFunc) (*Deployment, *Response, error) { project, err := parseID(pid) if err != nil { @@ -137,7 +139,8 @@ func (s *DeploymentsService) GetProjectDeployment(pid interface{}, deployment in // CreateProjectDeploymentOptions represents the available // CreateProjectDeployment() options. // -// GitLab API docs: https://docs.gitlab.com/ee/api/deployments.html#create-a-deployment +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deployments.html#create-a-deployment type CreateProjectDeploymentOptions struct { Environment *string `url:"environment,omitempty" json:"environment,omitempty"` Ref *string `url:"ref,omitempty" json:"ref,omitempty"` @@ -148,7 +151,8 @@ type CreateProjectDeploymentOptions struct { // CreateProjectDeployment creates a project deployment. 
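+// A hedged usage sketch (IDs and values are hypothetical; "git" is a
+// *gitlab.Client):
+//
+//	d, _, err := git.Deployments.CreateProjectDeployment(1,
+//		&gitlab.CreateProjectDeploymentOptions{
+//			Environment: gitlab.Ptr("production"),
+//			Ref:         gitlab.Ptr("main"),
+//		})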
 //
-// GitLab API docs: https://docs.gitlab.com/ee/api/deployments.html#create-a-deployment
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#create-a-deployment
 func (s *DeploymentsService) CreateProjectDeployment(pid interface{}, opt *CreateProjectDeploymentOptions, options ...RequestOptionFunc) (*Deployment, *Response, error) {
 	project, err := parseID(pid)
 	if err != nil {
@@ -173,14 +177,16 @@ func (s *DeploymentsService) CreateProjectDeployment(pid interface{}, opt *Creat
 // UpdateProjectDeploymentOptions represents the available
 // UpdateProjectDeployment() options.
 //
-// GitLab API docs: https://docs.gitlab.com/ee/api/deployments.html#update-a-deployment
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#update-a-deployment
 type UpdateProjectDeploymentOptions struct {
 	Status *DeploymentStatusValue `url:"status,omitempty" json:"status,omitempty"`
 }
 
 // UpdateProjectDeployment updates a project deployment.
 //
-// GitLab API docs: https://docs.gitlab.com/ee/api/deployments.html#update-a-deployment
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#update-a-deployment
 func (s *DeploymentsService) UpdateProjectDeployment(pid interface{}, deployment int, opt *UpdateProjectDeploymentOptions, options ...RequestOptionFunc) (*Deployment, *Response, error) {
 	project, err := parseID(pid)
 	if err != nil {
@@ -202,9 +208,42 @@ func (s *DeploymentsService) UpdateProjectDeployment(pid interface{}, deployment
 	return d, resp, nil
 }
 
+// ApproveOrRejectProjectDeploymentOptions represents the available
+// ApproveOrRejectProjectDeployment() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#approve-or-reject-a-blocked-deployment
+type ApproveOrRejectProjectDeploymentOptions struct {
+	Status        *DeploymentApprovalStatus `url:"status,omitempty" json:"status,omitempty"`
+	Comment       *string                   `url:"comment,omitempty" json:"comment,omitempty"`
+	RepresentedAs *string                   `url:"represented_as,omitempty" json:"represented_as,omitempty"`
+}
+
+// ApproveOrRejectProjectDeployment approves or rejects a blocked deployment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#approve-or-reject-a-blocked-deployment
+func (s *DeploymentsService) ApproveOrRejectProjectDeployment(pid interface{}, deployment int,
+	opt *ApproveOrRejectProjectDeploymentOptions, options ...RequestOptionFunc,
+) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/deployments/%d/approval", PathEscape(project), deployment)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
 // DeleteProjectDeployment delete a project deployment.
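+// A hedged usage sketch, including the new approval endpoint above (IDs and
+// values are hypothetical):
+//
+//	_, err := git.Deployments.ApproveOrRejectProjectDeployment(1, 42,
+//		&gitlab.ApproveOrRejectProjectDeploymentOptions{
+//			Status:  gitlab.Ptr(gitlab.DeploymentApprovalStatus("approved")),
+//			Comment: gitlab.Ptr("LGTM"),
+//		})
+//	_, err = git.Deployments.DeleteProjectDeployment(1, 42)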
 //
-// GitLab API docs: https://docs.gitlab.com/ee/api/deployments.html#delete-a-specific-deployment
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#delete-a-specific-deployment
 func (s *DeploymentsService) DeleteProjectDeployment(pid interface{}, deployment int, options ...RequestOptionFunc) (*Response, error) {
 	project, err := parseID(pid)
 	if err != nil {
diff --git a/vendor/github.com/xanzy/go-gitlab/dora_metrics.go b/vendor/github.com/xanzy/go-gitlab/dora_metrics.go
new file mode 100644
index 0000000000..a2ad418eb1
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/dora_metrics.go
@@ -0,0 +1,110 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// DORAMetricsService handles communication with the DORA metrics related methods
+// of the GitLab API.
+//
+// Gitlab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
+type DORAMetricsService struct {
+	client *Client
+}
+
+// DORAMetric represents a single DORA metric data point.
+//
+// Gitlab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
+type DORAMetric struct {
+	Date  string  `json:"date"`
+	Value float64 `json:"value"`
+}
+
+// String gets a string representation of a DORAMetric data point.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
+func (m DORAMetric) String() string {
+	return Stringify(m)
+}
+
+// GetDORAMetricsOptions represents the request body options for getting
+// DORA metrics.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
+type GetDORAMetricsOptions struct {
+	Metric           *DORAMetricType     `url:"metric,omitempty" json:"metric,omitempty"`
+	EndDate          *ISOTime            `url:"end_date,omitempty" json:"end_date,omitempty"`
+	EnvironmentTiers *[]string           `url:"environment_tiers,comma,omitempty" json:"environment_tiers,omitempty"`
+	Interval         *DORAMetricInterval `url:"interval,omitempty" json:"interval,omitempty"`
+	StartDate        *ISOTime            `url:"start_date,omitempty" json:"start_date,omitempty"`
+
+	// Deprecated: use EnvironmentTiers instead
+	EnvironmentTier *string `url:"environment_tier,omitempty" json:"environment_tier,omitempty"`
+}
+
+// GetProjectDORAMetrics gets the DORA metrics for a project.
+//
+// GitLab API Docs:
+// https://docs.gitlab.com/ee/api/dora/metrics.html#get-project-level-dora-metrics
+func (s *DORAMetricsService) GetProjectDORAMetrics(pid interface{}, opt GetDORAMetricsOptions, options ...RequestOptionFunc) ([]DORAMetric, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/dora/metrics", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var metrics []DORAMetric
+	resp, err := s.client.Do(req, &metrics)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return metrics, resp, err
+}
+
+// GetGroupDORAMetrics gets the DORA metrics for a group.
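+//
+// A hedged usage sketch (the group ID is hypothetical, and the service is
+// assumed to be wired on the client as git.DORAMetrics):
+//
+//	metrics, _, err := git.DORAMetrics.GetGroupDORAMetrics(5,
+//		gitlab.GetDORAMetricsOptions{
+//			Metric: gitlab.Ptr(gitlab.DORAMetricType("deployment_frequency")),
+//		})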
+// +// GitLab API Docs: +// https://docs.gitlab.com/ee/api/dora/metrics.html#get-group-level-dora-metrics +func (s *DORAMetricsService) GetGroupDORAMetrics(gid interface{}, opt GetDORAMetricsOptions, options ...RequestOptionFunc) ([]DORAMetric, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/dora/metrics", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var metrics []DORAMetric + resp, err := s.client.Do(req, &metrics) + if err != nil { + return nil, resp, err + } + + return metrics, resp, err +} diff --git a/vendor/github.com/xanzy/go-gitlab/draft_notes.go b/vendor/github.com/xanzy/go-gitlab/draft_notes.go new file mode 100644 index 0000000000..376e4d0c86 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/draft_notes.go @@ -0,0 +1,233 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" +) + +type DraftNote struct { + ID int `json:"id"` + AuthorID int `json:"author_id"` + MergeRequestID int `json:"merge_request_id"` + ResolveDiscussion bool `json:"resolve_discussion"` + DiscussionID string `json:"discussion_id"` + Note string `json:"note"` + CommitID string `json:"commit_id"` + LineCode string `json:"line_code"` + Position *NotePosition `json:"position"` +} + +// DraftNotesService handles communication with the draft notes related methods +// of the GitLab API. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/draft_notes.html#list-all-merge-request-draft-notes +type DraftNotesService struct { + client *Client +} + +// ListDraftNotesOptions represents the available ListDraftNotes() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/draft_notes.html#list-all-merge-request-draft-notes +type ListDraftNotesOptions struct { + ListOptions + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` +} + +// ListDraftNotes gets a list of all draft notes for a merge request. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/draft_notes.html#list-all-merge-request-draft-notes +func (s *DraftNotesService) ListDraftNotes(pid interface{}, mergeRequest int, opt *ListDraftNotesOptions, options ...RequestOptionFunc) ([]*DraftNote, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var n []*DraftNote + resp, err := s.client.Do(req, &n) + if err != nil { + return nil, resp, err + } + + return n, resp, nil +} + +// GetDraftNote gets a single draft note for a merge request. 
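+//
+// A hedged usage sketch (project, MR, and note IDs are hypothetical, and the
+// service is assumed to be wired on the client as git.DraftNotes):
+//
+//	note, _, err := git.DraftNotes.GetDraftNote("group/project", 7, 3)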
+// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/draft_notes.html#get-a-single-draft-note +func (s *DraftNotesService) GetDraftNote(pid interface{}, mergeRequest int, note int, options ...RequestOptionFunc) (*DraftNote, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d", PathEscape(project), mergeRequest, note) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + n := new(DraftNote) + resp, err := s.client.Do(req, &n) + if err != nil { + return nil, resp, err + } + + return n, resp, nil +} + +// CreateDraftNoteOptions represents the available CreateDraftNote() +// options. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/draft_notes.html#create-a-draft-note +type CreateDraftNoteOptions struct { + Note *string `url:"note" json:"note"` + CommitID *string `url:"commit_id,omitempty" json:"commit_id,omitempty"` + InReplyToDiscussionID *string `url:"in_reply_to_discussion_id,omitempty" json:"in_reply_to_discussion_id,omitempty"` + ResolveDiscussion *bool `url:"resolve_discussion,omitempty" json:"resolve_discussion,omitempty"` + Position *PositionOptions `url:"position,omitempty" json:"position,omitempty"` +} + +// CreateDraftNote creates a draft note for a merge request. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/draft_notes.html#create-a-draft-note +func (s *DraftNotesService) CreateDraftNote(pid interface{}, mergeRequest int, opt *CreateDraftNoteOptions, options ...RequestOptionFunc) (*DraftNote, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + n := new(DraftNote) + resp, err := s.client.Do(req, &n) + if err != nil { + return nil, resp, err + } + + return n, resp, nil +} + +// UpdateDraftNoteOptions represents the available UpdateDraftNote() +// options. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/draft_notes.html#create-a-draft-note +type UpdateDraftNoteOptions struct { + Note *string `url:"note,omitempty" json:"note,omitempty"` + Position *PositionOptions `url:"position,omitempty" json:"position,omitempty"` +} + +// UpdateDraftNote updates a draft note for a merge request. +// +// Gitlab API docs: https://docs.gitlab.com/ee/api/draft_notes.html#create-a-draft-note +func (s *DraftNotesService) UpdateDraftNote(pid interface{}, mergeRequest int, note int, opt *UpdateDraftNoteOptions, options ...RequestOptionFunc) (*DraftNote, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d", PathEscape(project), mergeRequest, note) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + n := new(DraftNote) + resp, err := s.client.Do(req, &n) + if err != nil { + return nil, resp, err + } + + return n, resp, nil +} + +// DeleteDraftNote deletes a single draft note for a merge request. 
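+//
+// A hedged usage sketch (IDs are hypothetical):
+//
+//	_, err := git.DraftNotes.DeleteDraftNote("group/project", 7, 3)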
+// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/draft_notes.html#delete-a-draft-note +func (s *DraftNotesService) DeleteDraftNote(pid interface{}, mergeRequest int, note int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d", PathEscape(project), mergeRequest, note) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// PublishDraftNote publishes a single draft note for a merge request. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/draft_notes.html#publish-a-draft-note +func (s *DraftNotesService) PublishDraftNote(pid interface{}, mergeRequest int, note int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d/publish", PathEscape(project), mergeRequest, note) + + req, err := s.client.NewRequest(http.MethodPut, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// PublishAllDraftNotes publishes all draft notes for a merge request that belong to the user. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/draft_notes.html#publish-a-draft-note +func (s *DraftNotesService) PublishAllDraftNotes(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/bulk_publish", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/epics.go b/vendor/github.com/xanzy/go-gitlab/epics.go index cd553ead57..684ffb3343 100644 --- a/vendor/github.com/xanzy/go-gitlab/epics.go +++ b/vendor/github.com/xanzy/go-gitlab/epics.go @@ -175,12 +175,16 @@ func (s *EpicsService) GetEpicLinks(gid interface{}, epic int, options ...Reques // GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#new-epic type CreateEpicOptions struct { Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Color *string `url:"color,omitempty" json:"color,omitempty"` + Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` + CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` StartDateIsFixed *bool `url:"start_date_is_fixed,omitempty" json:"start_date_is_fixed,omitempty"` StartDateFixed *ISOTime `url:"start_date_fixed,omitempty" json:"start_date_fixed,omitempty"` DueDateIsFixed *bool `url:"due_date_is_fixed,omitempty" json:"due_date_is_fixed,omitempty"` DueDateFixed *ISOTime `url:"due_date_fixed,omitempty" json:"due_date_fixed,omitempty"` + ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` } // CreateEpic creates a new group epic. 
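+// A hedged usage sketch using the newly added option fields (the group ID
+// and values are hypothetical):
+//
+//	epic, _, err := git.Epics.CreateEpic(5, &gitlab.CreateEpicOptions{
+//		Title:        gitlab.Ptr("Next release"),
+//		Color:        gitlab.Ptr("#1068bf"),
+//		Confidential: gitlab.Ptr(true),
+//	})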
@@ -211,15 +215,20 @@ func (s *EpicsService) CreateEpic(gid interface{}, opt *CreateEpicOptions, optio // // GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#update-epic type UpdateEpicOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` + AddLabels *LabelOptions `url:"add_labels,omitempty" json:"add_labels,omitempty"` Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` Description *string `url:"description,omitempty" json:"description,omitempty"` + DueDateFixed *ISOTime `url:"due_date_fixed,omitempty" json:"due_date_fixed,omitempty"` + DueDateIsFixed *bool `url:"due_date_is_fixed,omitempty" json:"due_date_is_fixed,omitempty"` Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - StartDateIsFixed *bool `url:"start_date_is_fixed,omitempty" json:"start_date_is_fixed,omitempty"` + ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` + RemoveLabels *LabelOptions `url:"remove_labels,omitempty" json:"remove_labels,omitempty"` StartDateFixed *ISOTime `url:"start_date_fixed,omitempty" json:"start_date_fixed,omitempty"` - DueDateIsFixed *bool `url:"due_date_is_fixed,omitempty" json:"due_date_is_fixed,omitempty"` - DueDateFixed *ISOTime `url:"due_date_fixed,omitempty" json:"due_date_fixed,omitempty"` + StartDateIsFixed *bool `url:"start_date_is_fixed,omitempty" json:"start_date_is_fixed,omitempty"` StateEvent *string `url:"state_event,omitempty" json:"state_event,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + UpdatedAt *time.Time `url:"updated_at,omitempty" json:"updated_at,omitempty"` + Color *string `url:"color,omitempty" json:"color,omitempty"` } // UpdateEpic updates an existing group epic. This function is also used diff --git a/vendor/github.com/xanzy/go-gitlab/event_parsing.go b/vendor/github.com/xanzy/go-gitlab/event_parsing.go index 3943abadb1..0f474211d3 100644 --- a/vendor/github.com/xanzy/go-gitlab/event_parsing.go +++ b/vendor/github.com/xanzy/go-gitlab/event_parsing.go @@ -27,24 +27,25 @@ type EventType string // List of available event types. 
const ( - EventConfidentialIssue EventType = "Confidential Issue Hook" - EventConfidentialNote EventType = "Confidential Note Hook" - EventTypeBuild EventType = "Build Hook" - EventTypeDeployment EventType = "Deployment Hook" - EventTypeFeatureFlag EventType = "Feature Flag Hook" - EventTypeIssue EventType = "Issue Hook" - EventTypeJob EventType = "Job Hook" - EventTypeMember EventType = "Member Hook" - EventTypeMergeRequest EventType = "Merge Request Hook" - EventTypeNote EventType = "Note Hook" - EventTypePipeline EventType = "Pipeline Hook" - EventTypePush EventType = "Push Hook" - EventTypeRelease EventType = "Release Hook" - EventTypeServiceHook EventType = "Service Hook" - EventTypeSubGroup EventType = "Subgroup Hook" - EventTypeSystemHook EventType = "System Hook" - EventTypeTagPush EventType = "Tag Push Hook" - EventTypeWikiPage EventType = "Wiki Page Hook" + EventConfidentialIssue EventType = "Confidential Issue Hook" + EventConfidentialNote EventType = "Confidential Note Hook" + EventTypeBuild EventType = "Build Hook" + EventTypeDeployment EventType = "Deployment Hook" + EventTypeFeatureFlag EventType = "Feature Flag Hook" + EventTypeIssue EventType = "Issue Hook" + EventTypeJob EventType = "Job Hook" + EventTypeMember EventType = "Member Hook" + EventTypeMergeRequest EventType = "Merge Request Hook" + EventTypeNote EventType = "Note Hook" + EventTypePipeline EventType = "Pipeline Hook" + EventTypePush EventType = "Push Hook" + EventTypeRelease EventType = "Release Hook" + EventTypeResourceAccessToken EventType = "Resource Access Token Hook" + EventTypeServiceHook EventType = "Service Hook" + EventTypeSubGroup EventType = "Subgroup Hook" + EventTypeSystemHook EventType = "System Hook" + EventTypeTagPush EventType = "Tag Push Hook" + EventTypeWikiPage EventType = "Wiki Page Hook" ) const ( @@ -71,6 +72,13 @@ type serviceEvent struct { ObjectKind string `json:"object_kind"` } +const eventTokenHeader = "X-Gitlab-Token" + +// HookEventToken returns the token for the given request. +func HookEventToken(r *http.Request) string { + return r.Header.Get(eventTokenHeader) +} + const eventTypeHeader = "X-Gitlab-Event" // HookEventType returns the event type for the given request. 
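+// A hedged sketch of a webhook handler combining the helpers above (the
+// secret value is hypothetical; a constant-time comparison may be preferable):
+//
+//	func hookHandler(w http.ResponseWriter, r *http.Request) {
+//		if gitlab.HookEventToken(r) != "my-webhook-secret" {
+//			http.Error(w, "unauthorized", http.StatusUnauthorized)
+//			return
+//		}
+//		payload, err := io.ReadAll(r.Body)
+//		if err != nil {
+//			http.Error(w, "bad request", http.StatusBadRequest)
+//			return
+//		}
+//		event, err := gitlab.ParseWebhook(gitlab.HookEventType(r), payload)
+//		if err != nil {
+//			http.Error(w, "unsupported event", http.StatusBadRequest)
+//			return
+//		}
+//		switch event.(type) {
+//		case *gitlab.PushEvent:
+//			// handle push
+//		}
+//	}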
@@ -252,6 +260,24 @@ func ParseWebhook(eventType EventType, payload []byte) (event interface{}, err e event = &PushEvent{} case EventTypeRelease: event = &ReleaseEvent{} + case EventTypeResourceAccessToken: + data := map[string]interface{}{} + err := json.Unmarshal(payload, &data) + if err != nil { + return nil, err + } + + _, groupEvent := data["group"] + _, projectEvent := data["project"] + + switch { + case groupEvent: + event = &GroupResourceAccessTokenEvent{} + case projectEvent: + event = &ProjectResourceAccessTokenEvent{} + default: + return nil, fmt.Errorf("unexpected resource access token payload") + } case EventTypeServiceHook: service := &serviceEvent{} err := json.Unmarshal(payload, service) diff --git a/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go b/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go index fa1fe5c7d9..c4a8e4aeb9 100644 --- a/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go +++ b/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go @@ -102,21 +102,22 @@ type CommitCommentEvent struct { } `json:"project"` Repository *Repository `json:"repository"` ObjectAttributes struct { - ID int `json:"id"` - Note string `json:"note"` - NoteableType string `json:"noteable_type"` - AuthorID int `json:"author_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - ProjectID int `json:"project_id"` - Attachment string `json:"attachment"` - LineCode string `json:"line_code"` - CommitID string `json:"commit_id"` - NoteableID int `json:"noteable_id"` - System bool `json:"system"` - StDiff *Diff `json:"st_diff"` - Description string `json:"description"` - URL string `json:"url"` + ID int `json:"id"` + Note string `json:"note"` + NoteableType string `json:"noteable_type"` + AuthorID int `json:"author_id"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + ProjectID int `json:"project_id"` + Attachment string `json:"attachment"` + LineCode string `json:"line_code"` + CommitID string `json:"commit_id"` + NoteableID int `json:"noteable_id"` + System bool `json:"system"` + StDiff *Diff `json:"st_diff"` + Description string `json:"description"` + Action CommentEventAction `json:"action"` + URL string `json:"url"` } `json:"object_attributes"` Commit *struct { ID string `json:"id"` @@ -131,7 +132,7 @@ type CommitCommentEvent struct { } `json:"commit"` } -// DeploymentEvent represents a deployment event +// DeploymentEvent represents a deployment event. // // GitLab API docs: // https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#deployment-events @@ -171,7 +172,7 @@ type DeploymentEvent struct { CommitTitle string `json:"commit_title"` } -// FeatureFlagEvent represents a feature flag event +// FeatureFlagEvent represents a feature flag event. // // GitLab API docs: // https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#feature-flag-events @@ -205,6 +206,28 @@ type FeatureFlagEvent struct { } `json:"object_attributes"` } +// GroupResourceAccessTokenEvent represents a resource access token event for a +// group. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#project-and-group-access-token-events +type GroupResourceAccessTokenEvent struct { + EventName string `json:"event_name"` + ObjectKind string `json:"object_kind"` + Group struct { + GroupID int `json:"group_id"` + GroupName string `json:"group_name"` + GroupPath string `json:"group_path"` + } `json:"group"` + ObjectAttributes struct { + ID int `json:"id"` + UserID int `json:"user_id"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + ExpiresAt *ISOTime `json:"expires_at"` + } `json:"object_attributes"` +} + // IssueCommentEvent represents a comment on an issue event. // // GitLab API docs: @@ -232,22 +255,23 @@ type IssueCommentEvent struct { } `json:"project"` Repository *Repository `json:"repository"` ObjectAttributes struct { - ID int `json:"id"` - Note string `json:"note"` - NoteableType string `json:"noteable_type"` - AuthorID int `json:"author_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - ProjectID int `json:"project_id"` - Attachment string `json:"attachment"` - LineCode string `json:"line_code"` - CommitID string `json:"commit_id"` - DiscussionID string `json:"discussion_id"` - NoteableID int `json:"noteable_id"` - System bool `json:"system"` - StDiff []*Diff `json:"st_diff"` - Description string `json:"description"` - URL string `json:"url"` + ID int `json:"id"` + Note string `json:"note"` + NoteableType string `json:"noteable_type"` + AuthorID int `json:"author_id"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + ProjectID int `json:"project_id"` + Attachment string `json:"attachment"` + LineCode string `json:"line_code"` + CommitID string `json:"commit_id"` + DiscussionID string `json:"discussion_id"` + NoteableID int `json:"noteable_id"` + System bool `json:"system"` + StDiff []*Diff `json:"st_diff"` + Description string `json:"description"` + Action CommentEventAction `json:"action"` + URL string `json:"url"` } `json:"object_attributes"` Issue struct { ID int `json:"id"` @@ -491,30 +515,31 @@ type MergeCommentEvent struct { Visibility VisibilityValue `json:"visibility"` } `json:"project"` ObjectAttributes struct { - Attachment string `json:"attachment"` - AuthorID int `json:"author_id"` - ChangePosition *NotePosition `json:"change_position"` - CommitID string `json:"commit_id"` - CreatedAt string `json:"created_at"` - DiscussionID string `json:"discussion_id"` - ID int `json:"id"` - LineCode string `json:"line_code"` - Note string `json:"note"` - NoteableID int `json:"noteable_id"` - NoteableType string `json:"noteable_type"` - OriginalPosition *NotePosition `json:"original_position"` - Position *NotePosition `json:"position"` - ProjectID int `json:"project_id"` - ResolvedAt string `json:"resolved_at"` - ResolvedByID int `json:"resolved_by_id"` - ResolvedByPush bool `json:"resolved_by_push"` - StDiff *Diff `json:"st_diff"` - System bool `json:"system"` - Type string `json:"type"` - UpdatedAt string `json:"updated_at"` - UpdatedByID string `json:"updated_by_id"` - Description string `json:"description"` - URL string `json:"url"` + Attachment string `json:"attachment"` + AuthorID int `json:"author_id"` + ChangePosition *NotePosition `json:"change_position"` + CommitID string `json:"commit_id"` + CreatedAt string `json:"created_at"` + DiscussionID string `json:"discussion_id"` + ID int `json:"id"` + LineCode string `json:"line_code"` + Note string `json:"note"` + NoteableID int 
`json:"noteable_id"` + NoteableType string `json:"noteable_type"` + OriginalPosition *NotePosition `json:"original_position"` + Position *NotePosition `json:"position"` + ProjectID int `json:"project_id"` + ResolvedAt string `json:"resolved_at"` + ResolvedByID int `json:"resolved_by_id"` + ResolvedByPush bool `json:"resolved_by_push"` + StDiff *Diff `json:"st_diff"` + System bool `json:"system"` + Type string `json:"type"` + UpdatedAt string `json:"updated_at"` + UpdatedByID int `json:"updated_by_id"` + Description string `json:"description"` + Action CommentEventAction `json:"action"` + URL string `json:"url"` } `json:"object_attributes"` Repository *Repository `json:"repository"` MergeRequest struct { @@ -737,7 +762,8 @@ type MergeEvent struct { Reviewers []*EventUser `json:"reviewers"` } -// EventUser represents a user record in an event and is used as an even initiator or a merge assignee. +// EventUser represents a user record in an event and is used as an event +// initiator or a merge assignee. type EventUser struct { ID int `json:"id"` Name string `json:"name"` @@ -753,7 +779,8 @@ type MergeParams struct { // UnmarshalJSON decodes the merge parameters // -// This allows support of ForceRemoveSourceBranch for both type bool (>11.9) and string (<11.9) +// This allows support of ForceRemoveSourceBranch for both type +// bool (>11.9) and string (<11.9) func (p *MergeParams) UnmarshalJSON(b []byte) error { type Alias MergeParams raw := struct { @@ -899,6 +926,41 @@ type PipelineEvent struct { } `json:"builds"` } +// ProjectResourceAccessTokenEvent represents a resource access token event for +// a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#project-and-group-access-token-events +type ProjectResourceAccessTokenEvent struct { + EventName string `json:"event_name"` + ObjectKind string `json:"object_kind"` + Project struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + VisibilityLevel int `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + CIConfigPath string `json:"ci_config_path"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + } `json:"project"` + ObjectAttributes struct { + ID int `json:"id"` + UserID int `json:"user_id"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + ExpiresAt *ISOTime `json:"expires_at"` + } `json:"object_attributes"` +} + // PushEvent represents a push event.
// // GitLab API docs: @@ -1037,21 +1099,22 @@ type SnippetCommentEvent struct { } `json:"project"` Repository *Repository `json:"repository"` ObjectAttributes struct { - ID int `json:"id"` - Note string `json:"note"` - NoteableType string `json:"noteable_type"` - AuthorID int `json:"author_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - ProjectID int `json:"project_id"` - Attachment string `json:"attachment"` - LineCode string `json:"line_code"` - CommitID string `json:"commit_id"` - NoteableID int `json:"noteable_id"` - System bool `json:"system"` - StDiff *Diff `json:"st_diff"` - Description string `json:"description"` - URL string `json:"url"` + ID int `json:"id"` + Note string `json:"note"` + NoteableType string `json:"noteable_type"` + AuthorID int `json:"author_id"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + ProjectID int `json:"project_id"` + Attachment string `json:"attachment"` + LineCode string `json:"line_code"` + CommitID string `json:"commit_id"` + NoteableID int `json:"noteable_id"` + System bool `json:"system"` + StDiff *Diff `json:"st_diff"` + Description string `json:"description"` + Action CommentEventAction `json:"action"` + URL string `json:"url"` } `json:"object_attributes"` Snippet *struct { ID int `json:"id"` diff --git a/vendor/github.com/xanzy/go-gitlab/external_status_checks.go b/vendor/github.com/xanzy/go-gitlab/external_status_checks.go index dc8e95b829..c6a3f7b285 100644 --- a/vendor/github.com/xanzy/go-gitlab/external_status_checks.go +++ b/vendor/github.com/xanzy/go-gitlab/external_status_checks.go @@ -197,3 +197,22 @@ func (s *ExternalStatusChecksService) UpdateExternalStatusCheck(pid interface{}, return s.client.Do(req, nil) } + +// RetryFailedStatusCheckForAMergeRequest retries a failed external status check for a merge request. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/status_checks.html#retry-failed-status-check-for-a-merge-request +func (s *ExternalStatusChecksService) RetryFailedStatusCheckForAMergeRequest(pid interface{}, mergeRequest int, externalStatusCheck int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/status_checks/%d/retry", PathEscape(project), mergeRequest, externalStatusCheck) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/gitlab.go b/vendor/github.com/xanzy/go-gitlab/gitlab.go index c773ef5cb5..19ed3eadb9 100644 --- a/vendor/github.com/xanzy/go-gitlab/gitlab.go +++ b/vendor/github.com/xanzy/go-gitlab/gitlab.go @@ -21,8 +21,10 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" + "math" "math/rand" "mime/multipart" "net/http" @@ -65,6 +67,8 @@ const ( PrivateToken ) +var ErrNotFound = errors.New("404 Not Found") + // A Client manages communication with the GitLab API. type Client struct { // HTTP client used to communicate with the API.
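For reviewers: the ErrNotFound sentinel above is what CheckResponse now returns for plain 404s (see the hunk further down in this file), so callers can use errors.Is instead of inspecting response status codes. A minimal sketch; the helper name is illustrative:

package example

import (
	"errors"

	gitlab "github.com/xanzy/go-gitlab"
)

// groupExists distinguishes a missing group from transport or auth
// failures via the new sentinel error.
func groupExists(client *gitlab.Client, gid interface{}) (bool, error) {
	_, _, err := client.Groups.GetGroup(gid, nil)
	if errors.Is(err, gitlab.ErrNotFound) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}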
@@ -124,6 +128,8 @@ type Client struct { Deployments *DeploymentsService Discussions *DiscussionsService DockerfileTemplate *DockerfileTemplatesService + DORAMetrics *DORAMetricsService + DraftNotes *DraftNotesService Environments *EnvironmentsService EpicIssues *EpicIssuesService Epics *EpicsService @@ -151,6 +157,7 @@ type Client struct { GroupVariables *GroupVariablesService GroupWikis *GroupWikisService Groups *GroupsService + Import *ImportService InstanceCluster *InstanceClustersService InstanceVariables *InstanceVariablesService Invites *InvitesService @@ -204,6 +211,7 @@ type Client struct { Repositories *RepositoriesService RepositoryFiles *RepositoryFilesService RepositorySubmodules *RepositorySubmodulesService + ResourceGroup *ResourceGroupService ResourceIterationEvents *ResourceIterationEventsService ResourceLabelEvents *ResourceLabelEventsService ResourceMilestoneEvents *ResourceMilestoneEventsService @@ -229,15 +237,16 @@ type Client struct { // ListOptions specifies the optional parameters to various List methods that // support pagination. type ListOptions struct { - // For offset-based paginated result sets, page of results to retrieve. - Page int `url:"page,omitempty" json:"page,omitempty"` + // For keyset-based paginated result sets, the value must be `"keyset"` + Pagination string `url:"pagination,omitempty" json:"pagination,omitempty"` // For offset-based and keyset-based paginated result sets, the number of results to include per page. PerPage int `url:"per_page,omitempty" json:"per_page,omitempty"` - + // For offset-based paginated result sets, page of results to retrieve. + Page int `url:"page,omitempty" json:"page,omitempty"` + // For keyset-based paginated result sets, tree record ID at which to fetch the next page. + PageToken string `url:"page_token,omitempty" json:"page_token,omitempty"` // For keyset-based paginated result sets, name of the column by which to order OrderBy string `url:"order_by,omitempty" json:"order_by,omitempty"` - // For keyset-based paginated result sets, the value must be `"keyset"` - Pagination string `url:"pagination,omitempty" json:"pagination,omitempty"` // For keyset-based paginated result sets, sort order (`"asc"`` or `"desc"`) Sort string `url:"sort,omitempty" json:"sort,omitempty"` } @@ -357,6 +366,8 @@ func newClient(options ...ClientOptionFunc) (*Client, error) { c.Deployments = &DeploymentsService{client: c} c.Discussions = &DiscussionsService{client: c} c.DockerfileTemplate = &DockerfileTemplatesService{client: c} + c.DORAMetrics = &DORAMetricsService{client: c} + c.DraftNotes = &DraftNotesService{client: c} c.Environments = &EnvironmentsService{client: c} c.EpicIssues = &EpicIssuesService{client: c} c.Epics = &EpicsService{client: c} @@ -384,6 +395,7 @@ func newClient(options ...ClientOptionFunc) (*Client, error) { c.GroupVariables = &GroupVariablesService{client: c} c.GroupWikis = &GroupWikisService{client: c} c.Groups = &GroupsService{client: c} + c.Import = &ImportService{client: c} c.InstanceCluster = &InstanceClustersService{client: c} c.InstanceVariables = &InstanceVariablesService{client: c} c.Invites = &InvitesService{client: c} @@ -437,6 +449,7 @@ func newClient(options ...ClientOptionFunc) (*Client, error) { c.Repositories = &RepositoriesService{client: c} c.RepositoryFiles = &RepositoryFilesService{client: c} c.RepositorySubmodules = &RepositorySubmodulesService{client: c} + c.ResourceGroup = &ResourceGroupService{client: c} c.ResourceIterationEvents = &ResourceIterationEventsService{client: c} 
c.ResourceLabelEvents = &ResourceLabelEventsService{client: c} c.ResourceMilestoneEvents = &ResourceMilestoneEventsService{client: c} @@ -513,6 +526,11 @@ func rateLimitBackoff(min, max time.Duration, attemptNum int, resp *http.Respons min = wait } } + } else { + // In case the RateLimit-Reset header is not set, back off an additional + // 100% exponentially. With the default milliseconds being set to 100 for + // `min`, this makes the 5th retry wait 3.2 seconds (3,200 ms) by default. + min = time.Duration(float64(min) * math.Pow(2, float64(attemptNum))) } } @@ -951,8 +969,13 @@ type ErrorResponse struct { func (e *ErrorResponse) Error() string { path, _ := url.QueryUnescape(e.Response.Request.URL.Path) - u := fmt.Sprintf("%s://%s%s", e.Response.Request.URL.Scheme, e.Response.Request.URL.Host, path) - return fmt.Sprintf("%s %s: %d %s", e.Response.Request.Method, u, e.Response.StatusCode, e.Message) + url := fmt.Sprintf("%s://%s%s", e.Response.Request.URL.Scheme, e.Response.Request.URL.Host, path) + + if e.Message == "" { + return fmt.Sprintf("%s %s: %d", e.Response.Request.Method, url, e.Response.StatusCode) + } else { + return fmt.Sprintf("%s %s: %d %s", e.Response.Request.Method, url, e.Response.StatusCode, e.Message) + } } // CheckResponse checks the API response for errors, and returns them if present. @@ -960,11 +983,14 @@ func CheckResponse(r *http.Response) error { switch r.StatusCode { case 200, 201, 202, 204, 304: return nil + case 404: + return ErrNotFound } errorResponse := &ErrorResponse{Response: r} + data, err := io.ReadAll(r.Body) - if err == nil && data != nil { + if err == nil && strings.TrimSpace(string(data)) != "" { errorResponse.Body = data var raw interface{} diff --git a/vendor/github.com/xanzy/go-gitlab/group_hooks.go b/vendor/github.com/xanzy/go-gitlab/group_hooks.go index ccfed1266b..414a8d0864 100644 --- a/vendor/github.com/xanzy/go-gitlab/group_hooks.go +++ b/vendor/github.com/xanzy/go-gitlab/group_hooks.go @@ -26,26 +26,30 @@ import ( // // GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-group-hooks type GroupHook struct { - ID int `json:"id"` - URL string `json:"url"` - GroupID int `json:"group_id"` - PushEvents bool `json:"push_events"` - PushEventsBranchFilter string `json:"push_events_branch_filter"` - IssuesEvents bool `json:"issues_events"` - ConfidentialIssuesEvents bool `json:"confidential_issues_events"` - ConfidentialNoteEvents bool `json:"confidential_note_events"` - MergeRequestsEvents bool `json:"merge_requests_events"` - TagPushEvents bool `json:"tag_push_events"` - NoteEvents bool `json:"note_events"` - JobEvents bool `json:"job_events"` - PipelineEvents bool `json:"pipeline_events"` - WikiPageEvents bool `json:"wiki_page_events"` - DeploymentEvents bool `json:"deployment_events"` - ReleasesEvents bool `json:"releases_events"` - SubGroupEvents bool `json:"subgroup_events"` - EnableSSLVerification bool `json:"enable_ssl_verification"` - AlertStatus string `json:"alert_status"` - CreatedAt *time.Time `json:"created_at"` + ID int `json:"id"` + URL string `json:"url"` + GroupID int `json:"group_id"` + PushEvents bool `json:"push_events"` + PushEventsBranchFilter string `json:"push_events_branch_filter"` + IssuesEvents bool `json:"issues_events"` + ConfidentialIssuesEvents bool `json:"confidential_issues_events"` + ConfidentialNoteEvents bool `json:"confidential_note_events"` + MergeRequestsEvents bool `json:"merge_requests_events"` + TagPushEvents bool `json:"tag_push_events"` + NoteEvents bool `json:"note_events"` + JobEvents bool 
`json:"job_events"` + PipelineEvents bool `json:"pipeline_events"` + WikiPageEvents bool `json:"wiki_page_events"` + DeploymentEvents bool `json:"deployment_events"` + ReleasesEvents bool `json:"releases_events"` + SubGroupEvents bool `json:"subgroup_events"` + MemberEvents bool `json:"member_events"` + EnableSSLVerification bool `json:"enable_ssl_verification"` + AlertStatus string `json:"alert_status"` + CreatedAt *time.Time `json:"created_at"` + CustomWebhookTemplate string `json:"custom_webhook_template"` + ResourceAccessTokenEvents bool `json:"resource_access_token_events"` + CustomHeaders []*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` } // ListGroupHooksOptions represents the available ListGroupHooks() options. @@ -105,23 +109,27 @@ func (s *GroupsService) GetGroupHook(pid interface{}, hook int, options ...Reque // // GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#add-group-hook type AddGroupHookOptions struct { - URL *string `url:"url,omitempty" json:"url,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` - IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` - ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` - ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` - NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` - JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` - PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` - WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` - DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` - ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` - SubGroupEvents *bool `url:"subgroup_events,omitempty" json:"subgroup_events,omitempty"` - EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` - Token *string `url:"token,omitempty" json:"token,omitempty"` + URL *string `url:"url,omitempty" json:"url,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" 
json:"pipeline_events,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` + ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` + SubGroupEvents *bool `url:"subgroup_events,omitempty" json:"subgroup_events,omitempty"` + MemberEvents *bool `url:"member_events,omitempty" json:"member_events,omitempty"` + EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` + Token *string `url:"token,omitempty" json:"token,omitempty"` + ResourceAccessTokenEvents *bool `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"` + CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"` + CustomHeaders *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` } // AddGroupHook create a new group scoped webhook. @@ -153,23 +161,27 @@ func (s *GroupsService) AddGroupHook(gid interface{}, opt *AddGroupHookOptions, // GitLab API docs: // https://docs.gitlab.com/ee/api/groups.html#edit-group-hook type EditGroupHookOptions struct { - URL *string `url:"url,omitempty" json:"url,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` - IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` - ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` - ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` - NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` - JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` - PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` - WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` - DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` - ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` - SubGroupEvents *bool `url:"subgroup_events,omitempty" json:"subgroup_events,omitempty"` - EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` - Token *string `url:"token,omitempty" json:"token,omitempty"` + URL *string `url:"url,omitempty" json:"url,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + TagPushEvents *bool 
`url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` + ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` + SubGroupEvents *bool `url:"subgroup_events,omitempty" json:"subgroup_events,omitempty"` + MemberEvents *bool `url:"member_events,omitempty" json:"member_events,omitempty"` + EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` + Token *string `url:"token,omitempty" json:"token,omitempty"` + ResourceAccessTokenEvents *bool `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"` + CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"` + CustomHeaders *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` } // EditGroupHook edits a hook for a specified group. @@ -216,3 +228,41 @@ func (s *GroupsService) DeleteGroupHook(pid interface{}, hook int, options ...Re return s.client.Do(req, nil) } + +// SetGroupCustomHeader creates or updates a group custom webhook header. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#set-a-custom-header +func (s *GroupsService) SetGroupCustomHeader(gid interface{}, hook int, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/hooks/%d/custom_headers/%s", PathEscape(group), hook, key) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteGroupCustomHeader deletes a group custom webhook header. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#delete-a-custom-header +func (s *GroupsService) DeleteGroupCustomHeader(gid interface{}, hook int, key string, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/hooks/%d/custom_headers/%s", PathEscape(group), hook, key) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/group_members.go b/vendor/github.com/xanzy/go-gitlab/group_members.go index c508adbfea..cdf225c3d8 100644 --- a/vendor/github.com/xanzy/go-gitlab/group_members.go +++ b/vendor/github.com/xanzy/go-gitlab/group_members.go @@ -56,6 +56,7 @@ type GroupMember struct { AccessLevel AccessLevelValue `json:"access_level"` Email string `json:"email,omitempty"` GroupSAMLIdentity *GroupMemberSAMLIdentity `json:"group_saml_identity"` + MemberRole *MemberRole `json:"member_role"` } // ListGroupMembersOptions represents the available ListGroupMembers() and @@ -126,9 +127,10 @@ func (s *GroupsService) ListAllGroupMembers(gid interface{}, opt *ListGroupMembe // GitLab API docs: // https://docs.gitlab.com/ee/api/members.html#add-a-member-to-a-group-or-project type AddGroupMemberOptions struct { - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at"` + UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at"` + MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` } // GetGroupMember gets a member of a group. @@ -156,6 +158,32 @@ func (s *GroupMembersService) GetGroupMember(gid interface{}, user int, options return gm, resp, nil } +// GetInheritedGroupMember gets a member of a group or project, including +// inherited and invited members. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/members.html#get-a-member-of-a-group-or-project-including-inherited-and-invited-members +func (s *GroupMembersService) GetInheritedGroupMember(gid interface{}, user int, options ...RequestOptionFunc) (*GroupMember, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/members/all/%d", PathEscape(group), user) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + gm := new(GroupMember) + resp, err := s.client.Do(req, gm) + if err != nil { + return nil, resp, err + } + + return gm, resp, err +} + // BillableGroupMember represents a GitLab billable group member.
// // GitLab API docs: https://docs.gitlab.com/ee/api/members.html#list-all-billable-members-of-a-group @@ -305,8 +333,9 @@ func (s *GroupMembersService) DeleteShareWithGroup(gid interface{}, groupID int, // GitLab API docs: // https://docs.gitlab.com/ee/api/members.html#edit-a-member-of-a-group-or-project type EditGroupMemberOptions struct { - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at,omitempty"` + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at,omitempty"` + MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` } // EditGroupMember updates a member of a group. diff --git a/vendor/github.com/xanzy/go-gitlab/group_milestones.go b/vendor/github.com/xanzy/go-gitlab/group_milestones.go index 179d05948d..f3089b2152 100644 --- a/vendor/github.com/xanzy/go-gitlab/group_milestones.go +++ b/vendor/github.com/xanzy/go-gitlab/group_milestones.go @@ -58,11 +58,19 @@ func (m GroupMilestone) String() string { // https://docs.gitlab.com/ee/api/group_milestones.html#list-group-milestones type ListGroupMilestonesOptions struct { ListOptions - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` - State *string `url:"state,omitempty" json:"state,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - IncludeParentMilestones *bool `url:"include_parent_milestones,omitempty" json:"include_parent_milestones,omitempty"` + IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + State *string `url:"state,omitempty" json:"state,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + SearchTitle *string `url:"search_title,omitempty" json:"search_title,omitempty"` + IncludeParentMilestones *bool `url:"include_parent_milestones,omitempty" json:"include_parent_milestones,omitempty"` + IncludeAncestors *bool `url:"include_ancestors,omitempty" json:"include_ancestors,omitempty"` + IncludeDescendents *bool `url:"include_descendents,omitempty" json:"include_descendents,omitempty"` + UpdatedBefore *ISOTime `url:"updated_before,omitempty" json:"updated_before,omitempty"` + UpdatedAfter *ISOTime `url:"updated_after,omitempty" json:"updated_after,omitempty"` + ContainingDate *ISOTime `url:"containing_date,omitempty" json:"containing_date,omitempty"` + StartDate *ISOTime `url:"start_date,omitempty" json:"start_date,omitempty"` + EndDate *ISOTime `url:"end_date,omitempty" json:"end_date,omitempty"` } // ListGroupMilestones returns a list of group milestones. 
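For reviewers: ListGroupMilestonesOptions gains several time-based filters above (UpdatedBefore/UpdatedAfter, ContainingDate, StartDate, EndDate) plus ancestor/descendant scoping. A minimal usage sketch, assuming an arbitrary group ID; the 30-day window is illustrative:

package example

import (
	"time"

	gitlab "github.com/xanzy/go-gitlab"
)

// recentMilestones lists milestones updated in the last 30 days,
// including those inherited from ancestor groups.
func recentMilestones(client *gitlab.Client, gid interface{}) ([]*gitlab.GroupMilestone, error) {
	after := gitlab.ISOTime(time.Now().AddDate(0, 0, -30))
	includeAncestors := true
	milestones, _, err := client.GroupMilestones.ListGroupMilestones(gid, &gitlab.ListGroupMilestonesOptions{
		UpdatedAfter:     &after,
		IncludeAncestors: &includeAncestors,
	})
	return milestones, err
}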
diff --git a/vendor/github.com/xanzy/go-gitlab/group_protected_environments.go b/vendor/github.com/xanzy/go-gitlab/group_protected_environments.go index 53bee7079f..addc383fb1 100644 --- a/vendor/github.com/xanzy/go-gitlab/group_protected_environments.go +++ b/vendor/github.com/xanzy/go-gitlab/group_protected_environments.go @@ -52,6 +52,7 @@ type GroupEnvironmentAccessDescription struct { AccessLevelDescription string `json:"access_level_description"` UserID int `json:"user_id"` GroupID int `json:"group_id"` + GroupInheritanceType int `json:"group_inheritance_type"` } // GroupEnvironmentApprovalRule represents the approval rules for a group-level @@ -146,9 +147,10 @@ type ProtectGroupEnvironmentOptions struct { // GitLab API docs: // https://docs.gitlab.com/ee/api/group_protected_environments.html#protect-a-single-environment type GroupEnvironmentAccessOptions struct { - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` } // GroupEnvironmentApprovalRuleOptions represents the approval rules for a @@ -208,11 +210,12 @@ type UpdateGroupProtectedEnvironmentOptions struct { // GitLab API docs: // https://docs.gitlab.com/ee/api/group_protected_environments.html#update-a-protected-environment type UpdateGroupEnvironmentAccessOptions struct { - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - ID *int `url:"id,omitempty" json:"id,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + ID *int `url:"id,omitempty" json:"id,omitempty"` + UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` + Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` } // UpdateGroupEnvironmentApprovalRuleOptions represents the updates to the diff --git a/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go b/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go index 4acb0b1805..1360057a25 100644 --- a/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go +++ b/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go @@ -31,18 +31,60 @@ type GroupServiceAccount struct { UserName string `json:"username"` } -// CreateServiceAccount create a new service account user for a group. +// ListServiceAccountsOptions represents the available ListServiceAccounts() options. 
// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#create-service-account-user -func (s *GroupsService) CreateServiceAccount(gid interface{}, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) { +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#list-service-account-users +type ListServiceAccountsOptions struct { + ListOptions + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` +} + +// ListServiceAccounts gets a list of service accounts. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#list-service-account-users +func (s *GroupsService) ListServiceAccounts(gid interface{}, opt *ListServiceAccountsOptions, options ...RequestOptionFunc) ([]*GroupServiceAccount, *Response, error) { group, err := parseID(gid) if err != nil { return nil, nil, err } u := fmt.Sprintf("groups/%s/service_accounts", PathEscape(group)) - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var sa []*GroupServiceAccount + resp, err := s.client.Do(req, &sa) + if err != nil { + return nil, resp, err + } + + return sa, resp, nil +} + +// CreateServiceAccountOptions represents the available CreateServiceAccount() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-service-account-user +type CreateServiceAccountOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` +} + +// CreateServiceAccount creates a service account user. +// +// This API endpoint works on top-level groups only. It does not work on subgroups. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#create-service-account-user +func (s *GroupsService) CreateServiceAccount(gid interface{}, opt *CreateServiceAccountOptions, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/service_accounts", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) if err != nil { return nil, nil, err } @@ -60,17 +102,18 @@ func (s *GroupsService) CreateServiceAccount(gid interface{}, options ...Request // CreateServiceAccountPersonalAccessToken() options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#create-personal-access-token-for-service-account-user +// https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-personal-access-token-for-a-service-account-user type CreateServiceAccountPersonalAccessTokenOptions struct { - Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` + Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` } // CreateServiceAccountPersonalAccessToken add a new Personal Access Token for a // service account user for a group.
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#create-personal-access-token-for-service-account-user +// https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-personal-access-token-for-a-service-account-user func (s *GroupsService) CreateServiceAccountPersonalAccessToken(gid interface{}, serviceAccount int, opt *CreateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { group, err := parseID(gid) if err != nil { @@ -116,3 +159,23 @@ func (s *GroupsService) RotateServiceAccountPersonalAccessToken(gid interface{}, return pat, resp, nil } + +// DeleteServiceAccount deletes a service account user. +// +// This API endpoint works on top-level groups only. It does not work on subgroups. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#delete-a-service-account-user +func (s *GroupsService) DeleteServiceAccount(gid interface{}, serviceAccount int, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/service_accounts/%d", PathEscape(group), serviceAccount) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/group_variables.go b/vendor/github.com/xanzy/go-gitlab/group_variables.go index ead4d0a994..69fe44592d 100644 --- a/vendor/github.com/xanzy/go-gitlab/group_variables.go +++ b/vendor/github.com/xanzy/go-gitlab/group_variables.go @@ -43,6 +43,7 @@ type GroupVariable struct { Masked bool `json:"masked"` Raw bool `json:"raw"` EnvironmentScope string `json:"environment_scope"` + Description string `json:"description"` } func (v GroupVariable) String() string { @@ -81,18 +82,27 @@ func (s *GroupVariablesService) ListVariables(gid interface{}, opt *ListGroupVar return vs, resp, nil } +// GetGroupVariableOptions represents the available GetVariable() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_level_variables.html#show-variable-details +type GetGroupVariableOptions struct { + Filter *VariableFilter `url:"filter,omitempty" json:"filter,omitempty"` +} + // GetVariable gets a variable.
// // GitLab API docs: // https://docs.gitlab.com/ee/api/group_level_variables.html#show-variable-details -func (s *GroupVariablesService) GetVariable(gid interface{}, key string, options ...RequestOptionFunc) (*GroupVariable, *Response, error) { +func (s *GroupVariablesService) GetVariable(gid interface{}, key string, opt *GetGroupVariableOptions, options ...RequestOptionFunc) (*GroupVariable, *Response, error) { group, err := parseID(gid) if err != nil { return nil, nil, err } u := fmt.Sprintf("groups/%s/variables/%s", PathEscape(group), url.PathEscape(key)) - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) if err != nil { return nil, nil, err } @@ -114,11 +124,12 @@ func (s *GroupVariablesService) GetVariable(gid interface{}, key string, options type CreateGroupVariableOptions struct { Key *string `url:"key,omitempty" json:"key,omitempty"` Value *string `url:"value,omitempty" json:"value,omitempty"` - VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` - Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` + Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` - EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } // CreateVariable creates a new group variable. 
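For reviewers: note the breaking signature change above; GroupVariablesService.GetVariable now takes a GetGroupVariableOptions, whose Filter selects among variables that share a key but differ in environment scope. A minimal sketch, with the "production" scope made up for illustration:

package example

import gitlab "github.com/xanzy/go-gitlab"

// productionVariable fetches the copy of a group CI variable scoped to
// the "production" environment using the new filter option.
func productionVariable(client *gitlab.Client, gid interface{}, key string) (*gitlab.GroupVariable, error) {
	v, _, err := client.GroupVariables.GetVariable(gid, key, &gitlab.GetGroupVariableOptions{
		Filter: &gitlab.VariableFilter{EnvironmentScope: "production"},
	})
	return v, err
}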
@@ -153,11 +164,12 @@ func (s *GroupVariablesService) CreateVariable(gid interface{}, opt *CreateGroup // https://docs.gitlab.com/ee/api/group_level_variables.html#update-variable type UpdateGroupVariableOptions struct { Value *string `url:"value,omitempty" json:"value,omitempty"` - VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` - Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` + Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` - EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } // UpdateVariable updates the position of an existing diff --git a/vendor/github.com/xanzy/go-gitlab/groups.go b/vendor/github.com/xanzy/go-gitlab/groups.go index fcbd68de1f..34f0cab662 100644 --- a/vendor/github.com/xanzy/go-gitlab/groups.go +++ b/vendor/github.com/xanzy/go-gitlab/groups.go @@ -39,37 +39,42 @@ type GroupsService struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/groups.html type Group struct { - ID int `json:"id"` - Name string `json:"name"` - Path string `json:"path"` - Description string `json:"description"` - MembershipLock bool `json:"membership_lock"` - Visibility VisibilityValue `json:"visibility"` - LFSEnabled bool `json:"lfs_enabled"` - DefaultBranchProtection int `json:"default_branch_protection"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - RequestAccessEnabled bool `json:"request_access_enabled"` - RepositoryStorage string `json:"repository_storage"` - FullName string `json:"full_name"` - FullPath string `json:"full_path"` - FileTemplateProjectID int `json:"file_template_project_id"` - ParentID int `json:"parent_id"` - Projects []*Project `json:"projects"` - Statistics *Statistics `json:"statistics"` - CustomAttributes []*CustomAttribute `json:"custom_attributes"` - ShareWithGroupLock bool `json:"share_with_group_lock"` - RequireTwoFactorAuth bool `json:"require_two_factor_authentication"` - TwoFactorGracePeriod int `json:"two_factor_grace_period"` - ProjectCreationLevel ProjectCreationLevelValue `json:"project_creation_level"` - AutoDevopsEnabled bool `json:"auto_devops_enabled"` - SubGroupCreationLevel SubGroupCreationLevelValue `json:"subgroup_creation_level"` - EmailsDisabled bool `json:"emails_disabled"` - MentionsDisabled bool `json:"mentions_disabled"` - RunnersToken string `json:"runners_token"` - SharedProjects []*Project `json:"shared_projects"` - SharedRunnersSetting SharedRunnersSettingValue `json:"shared_runners_setting"` - SharedWithGroups []struct { + ID int `json:"id"` + Name string `json:"name"` + Path string `json:"path"` + Description string `json:"description"` + MembershipLock bool `json:"membership_lock"` + Visibility VisibilityValue `json:"visibility"` + LFSEnabled bool `json:"lfs_enabled"` + DefaultBranchProtectionDefaults struct { + AllowedToPush []*GroupAccessLevel `json:"allowed_to_push"` + AllowForcePush bool `json:"allow_force_push"` + AllowedToMerge []*GroupAccessLevel `json:"allowed_to_merge"` + DeveloperCanInitialPush bool `json:"developer_can_initial_push"` + } 
`json:"default_branch_protection_defaults"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` + RequestAccessEnabled bool `json:"request_access_enabled"` + RepositoryStorage string `json:"repository_storage"` + FullName string `json:"full_name"` + FullPath string `json:"full_path"` + FileTemplateProjectID int `json:"file_template_project_id"` + ParentID int `json:"parent_id"` + Projects []*Project `json:"projects"` + Statistics *Statistics `json:"statistics"` + CustomAttributes []*CustomAttribute `json:"custom_attributes"` + ShareWithGroupLock bool `json:"share_with_group_lock"` + RequireTwoFactorAuth bool `json:"require_two_factor_authentication"` + TwoFactorGracePeriod int `json:"two_factor_grace_period"` + ProjectCreationLevel ProjectCreationLevelValue `json:"project_creation_level"` + AutoDevopsEnabled bool `json:"auto_devops_enabled"` + SubGroupCreationLevel SubGroupCreationLevelValue `json:"subgroup_creation_level"` + EmailsEnabled bool `json:"emails_enabled"` + MentionsDisabled bool `json:"mentions_disabled"` + RunnersToken string `json:"runners_token"` + SharedProjects []*Project `json:"shared_projects"` + SharedRunnersSetting SharedRunnersSettingValue `json:"shared_runners_setting"` + SharedWithGroups []struct { GroupID int `json:"group_id"` GroupName string `json:"group_name"` GroupFullPath string `json:"group_full_path"` @@ -87,6 +92,20 @@ type Group struct { CreatedAt *time.Time `json:"created_at"` IPRestrictionRanges string `json:"ip_restriction_ranges"` WikiAccessLevel AccessControlValue `json:"wiki_access_level"` + + // Deprecated: Use EmailsEnabled instead + EmailsDisabled bool `json:"emails_disabled"` + + // Deprecated: Use DefaultBranchProtectionDefaults instead + DefaultBranchProtection int `json:"default_branch_protection"` +} + +// GroupAccessLevel represents default branch protection defaults access levels. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults +type GroupAccessLevel struct { + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` } // GroupAvatar represents a GitLab group avatar. @@ -120,8 +139,9 @@ type LDAPGroupLink struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#saml-group-links type SAMLGroupLink struct { - Name string `json:"name"` - AccessLevel AccessLevelValue `json:"access_level"` + Name string `json:"name"` + AccessLevel AccessLevelValue `json:"access_level"` + MemberRoleID int `json:"member_role_id,omitempty"` } // ListGroupsOptions represents the available ListGroups() options. 
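For reviewers: code reading Group.DefaultBranchProtection should migrate to the DefaultBranchProtectionDefaults struct introduced above, since the integer field is now deprecated. A minimal read-side sketch; the helper name is illustrative:

package example

import gitlab "github.com/xanzy/go-gitlab"

// forcePushAllowed reports whether a group's default branch settings
// permit force pushes, reading the new defaults struct rather than the
// deprecated DefaultBranchProtection integer.
func forcePushAllowed(client *gitlab.Client, gid interface{}) (bool, error) {
	group, _, err := client.Groups.GetGroup(gid, nil)
	if err != nil {
		return false, err
	}
	return group.DefaultBranchProtectionDefaults.AllowForcePush, nil
}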
@@ -335,28 +355,46 @@ func (s *GroupsService) DownloadAvatar(gid interface{}, options ...RequestOption // // GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#new-group type CreateGroupOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Path *string `url:"path,omitempty" json:"path,omitempty"` - Avatar *GroupAvatar `url:"-" json:"-"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - MembershipLock *bool `url:"membership_lock,omitempty" json:"membership_lock,omitempty"` - Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` - ShareWithGroupLock *bool `url:"share_with_group_lock,omitempty" json:"share_with_group_lock,omitempty"` - RequireTwoFactorAuth *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` - TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` - ProjectCreationLevel *ProjectCreationLevelValue `url:"project_creation_level,omitempty" json:"project_creation_level,omitempty"` - AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` - SubGroupCreationLevel *SubGroupCreationLevelValue `url:"subgroup_creation_level,omitempty" json:"subgroup_creation_level,omitempty"` - EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"` - MentionsDisabled *bool `url:"mentions_disabled,omitempty" json:"mentions_disabled,omitempty"` - LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` - DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection"` - RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` - ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` - SharedRunnersMinutesLimit *int `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"` - ExtraSharedRunnersMinutesLimit *int `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"` - IPRestrictionRanges *string `url:"ip_restriction_ranges,omitempty" json:"ip_restriction_ranges,omitempty"` - WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + Path *string `url:"path,omitempty" json:"path,omitempty"` + Avatar *GroupAvatar `url:"-" json:"-"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + MembershipLock *bool `url:"membership_lock,omitempty" json:"membership_lock,omitempty"` + Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` + ShareWithGroupLock *bool `url:"share_with_group_lock,omitempty" json:"share_with_group_lock,omitempty"` + RequireTwoFactorAuth *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` + TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` + ProjectCreationLevel *ProjectCreationLevelValue `url:"project_creation_level,omitempty" json:"project_creation_level,omitempty"` + AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` + SubGroupCreationLevel *SubGroupCreationLevelValue `url:"subgroup_creation_level,omitempty" json:"subgroup_creation_level,omitempty"` + EmailsEnabled *bool 
`url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"` + MentionsDisabled *bool `url:"mentions_disabled,omitempty" json:"mentions_disabled,omitempty"` + LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` + DefaultBranchProtectionDefaults *DefaultBranchProtectionDefaultsOptions `url:"default_branch_protection_defaults,omitempty" json:"default_branch_protection_defaults,omitempty"` + RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` + ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` + SharedRunnersMinutesLimit *int `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"` + ExtraSharedRunnersMinutesLimit *int `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"` + IPRestrictionRanges *string `url:"ip_restriction_ranges,omitempty" json:"ip_restriction_ranges,omitempty"` + WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` + + // Deprecated: Use EmailsEnabled instead + EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"` + + // Deprecated: Use DefaultBranchProtectionDefaults instead + DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` +} + +// DefaultBranchProtectionDefaultsOptions represents the available options for +// using default_branch_protection_defaults in CreateGroup() or UpdateGroup() +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults +type DefaultBranchProtectionDefaultsOptions struct { + AllowedToPush *[]*GroupAccessLevel `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"` + AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"` + AllowedToMerge *[]*GroupAccessLevel `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"` + DeveloperCanInitialPush *bool `url:"developer_can_initial_push,omitempty" json:"developer_can_initial_push,omitempty"` } // CreateGroup creates a new project group.
Available only for users who can @@ -461,31 +499,37 @@ func (s *GroupsService) TransferSubGroup(gid interface{}, opt *TransferSubGroupO // // GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#update-group type UpdateGroupOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Path *string `url:"path,omitempty" json:"path,omitempty"` - Avatar *GroupAvatar `url:"-" json:"avatar,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - MembershipLock *bool `url:"membership_lock,omitempty" json:"membership_lock,omitempty"` - Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` - ShareWithGroupLock *bool `url:"share_with_group_lock,omitempty" json:"share_with_group_lock,omitempty"` - RequireTwoFactorAuth *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` - TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` - ProjectCreationLevel *ProjectCreationLevelValue `url:"project_creation_level,omitempty" json:"project_creation_level,omitempty"` - AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` - SubGroupCreationLevel *SubGroupCreationLevelValue `url:"subgroup_creation_level,omitempty" json:"subgroup_creation_level,omitempty"` - EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"` - MentionsDisabled *bool `url:"mentions_disabled,omitempty" json:"mentions_disabled,omitempty"` - LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` - RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` - DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` - FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"` - SharedRunnersMinutesLimit *int `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"` - ExtraSharedRunnersMinutesLimit *int `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"` - PreventForkingOutsideGroup *bool `url:"prevent_forking_outside_group,omitempty" json:"prevent_forking_outside_group,omitempty"` - SharedRunnersSetting *SharedRunnersSettingValue `url:"shared_runners_setting,omitempty" json:"shared_runners_setting,omitempty"` - PreventSharingGroupsOutsideHierarchy *bool `url:"prevent_sharing_groups_outside_hierarchy,omitempty" json:"prevent_sharing_groups_outside_hierarchy,omitempty"` - IPRestrictionRanges *string `url:"ip_restriction_ranges,omitempty" json:"ip_restriction_ranges,omitempty"` - WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + Path *string `url:"path,omitempty" json:"path,omitempty"` + Avatar *GroupAvatar `url:"-" json:"avatar,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + MembershipLock *bool `url:"membership_lock,omitempty" json:"membership_lock,omitempty"` + Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` + ShareWithGroupLock *bool `url:"share_with_group_lock,omitempty" json:"share_with_group_lock,omitempty"` + RequireTwoFactorAuth *bool `url:"require_two_factor_authentication,omitempty" 
json:"require_two_factor_authentication,omitempty"` + TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` + ProjectCreationLevel *ProjectCreationLevelValue `url:"project_creation_level,omitempty" json:"project_creation_level,omitempty"` + AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` + SubGroupCreationLevel *SubGroupCreationLevelValue `url:"subgroup_creation_level,omitempty" json:"subgroup_creation_level,omitempty"` + EmailsEnabled *bool `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"` + MentionsDisabled *bool `url:"mentions_disabled,omitempty" json:"mentions_disabled,omitempty"` + LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` + RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` + DefaultBranchProtectionDefaults *DefaultBranchProtectionDefaultsOptions `url:"default_branch_protection_defaults,omitempty" json:"default_branch_protection_defaults,omitempty"` + FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"` + SharedRunnersMinutesLimit *int `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"` + ExtraSharedRunnersMinutesLimit *int `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"` + PreventForkingOutsideGroup *bool `url:"prevent_forking_outside_group,omitempty" json:"prevent_forking_outside_group,omitempty"` + SharedRunnersSetting *SharedRunnersSettingValue `url:"shared_runners_setting,omitempty" json:"shared_runners_setting,omitempty"` + PreventSharingGroupsOutsideHierarchy *bool `url:"prevent_sharing_groups_outside_hierarchy,omitempty" json:"prevent_sharing_groups_outside_hierarchy,omitempty"` + IPRestrictionRanges *string `url:"ip_restriction_ranges,omitempty" json:"ip_restriction_ranges,omitempty"` + WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` + + // Deprecated: Use EmailsEnabled instead + EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"` + + // Deprecated: Use DefaultBranchProtectionDefaults instead + DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` } // UpdateGroup updates an existing group; only available to group owners and @@ -560,17 +604,25 @@ func (s *GroupsService) UploadAvatar(gid interface{}, avatar io.Reader, filename return g, resp, nil } +// DeleteGroupOptions represents the available DeleteGroup() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#update-group +type DeleteGroupOptions struct { + PermanentlyRemove *bool `url:"permanently_remove,omitempty" json:"permanently_remove,omitempty"` + FullPath *string `url:"full_path,omitempty" json:"full_path,omitempty"` +} + // DeleteGroup removes group with all projects inside. 
// // GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#remove-group -func (s *GroupsService) DeleteGroup(gid interface{}, options ...RequestOptionFunc) (*Response, error) { +func (s *GroupsService) DeleteGroup(gid interface{}, opt *DeleteGroupOptions, options ...RequestOptionFunc) (*Response, error) { group, err := parseID(gid) if err != nil { return nil, err } u := fmt.Sprintf("groups/%s", PathEscape(group)) - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) if err != nil { return nil, err } @@ -863,6 +915,7 @@ func (s *GroupsService) GetGroupSAMLLink(gid interface{}, samlGroupName string, type AddGroupSAMLLinkOptions struct { SAMLGroupName *string `url:"saml_group_name,omitempty" json:"saml_group_name,omitempty"` AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` } // AddGroupSAMLLink creates a new group SAML link. Available only for users who @@ -982,7 +1035,9 @@ type GroupPushRules struct { FileNameRegex string `json:"file_name_regex"` MaxFileSize int `json:"max_file_size"` CommitCommitterCheck bool `json:"commit_committer_check"` + CommitCommitterNameCheck bool `json:"commit_committer_name_check"` RejectUnsignedCommits bool `json:"reject_unsigned_commits"` + RejectNonDCOCommits bool `json:"reject_non_dco_commits"` } // GetGroupPushRules gets the push rules of a group. @@ -1019,6 +1074,7 @@ type AddGroupPushRuleOptions struct { AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"` BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"` CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"` + CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"` CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"` CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` @@ -1027,6 +1083,7 @@ type AddGroupPushRuleOptions struct { MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` + RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` } // AddGroupPushRule adds push rules to the specified group. 
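Taken together, the groups.go hunks above change two call sites for API consumers: CreateGroup/UpdateGroup gain the structured DefaultBranchProtectionDefaults option that supersedes the deprecated DefaultBranchProtection integer, and DeleteGroup now takes a *DeleteGroupOptions parameter. A minimal migration sketch; the token, group ID, and path are placeholders, and gitlab.Ptr is the generic pointer helper shipped with recent go-gitlab releases:

```go
package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// Placeholder token; use a real PAT with API scope.
	git, err := gitlab.NewClient("glpat-placeholder")
	if err != nil {
		log.Fatal(err)
	}

	// Prefer the structured defaults over the deprecated
	// DefaultBranchProtection integer.
	_, _, err = git.Groups.UpdateGroup(42, &gitlab.UpdateGroupOptions{
		DefaultBranchProtectionDefaults: &gitlab.DefaultBranchProtectionDefaultsOptions{
			AllowForcePush:          gitlab.Ptr(false),
			DeveloperCanInitialPush: gitlab.Ptr(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// DeleteGroup now requires an options argument; nil keeps the old
	// single-request behaviour, while both fields set together
	// permanently remove a group that is already pending deletion.
	_, err = git.Groups.DeleteGroup(42, &gitlab.DeleteGroupOptions{
		PermanentlyRemove: gitlab.Ptr(true),
		FullPath:          gitlab.Ptr("my-group"), // must match the group's full path
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

Since nil is accepted for the new parameter, existing DeleteGroup callers only need to add the extra argument.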
@@ -1063,6 +1120,7 @@ type EditGroupPushRuleOptions struct { AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"` BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"` CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"` + CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"` CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"` CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` @@ -1071,6 +1129,7 @@ type EditGroupPushRuleOptions struct { MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` + RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` } // EditGroupPushRule edits a push rule for a specified group. diff --git a/vendor/github.com/xanzy/go-gitlab/import.go b/vendor/github.com/xanzy/go-gitlab/import.go new file mode 100644 index 0000000000..a8164a70c6 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/import.go @@ -0,0 +1,266 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "net/http" +) + +// ImportService handles communication with the import +// related methods of the GitLab API. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html +type ImportService struct { + client *Client +} + +// GitHubImport represents the response from an import from GitHub. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-github +type GitHubImport struct { + ID int `json:"id"` + Name string `json:"name"` + FullPath string `json:"full_path"` + FullName string `json:"full_name"` + RefsUrl string `json:"refs_url"` + ImportSource string `json:"import_source"` + ImportStatus string `json:"import_status"` + HumanImportStatusName string `json:"human_import_status_name"` + ProviderLink string `json:"provider_link"` + RelationType string `json:"relation_type"` + ImportWarning string `json:"import_warning"` +} + +func (s GitHubImport) String() string { + return Stringify(s) +} + +// ImportRepositoryFromGitHubOptions represents the available +// ImportRepositoryFromGitHub() options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-github +type ImportRepositoryFromGitHubOptions struct { + PersonalAccessToken *string `url:"personal_access_token,omitempty" json:"personal_access_token,omitempty"` + RepoID *int `url:"repo_id,omitempty" json:"repo_id,omitempty"` + NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` + TargetNamespace *string `url:"target_namespace,omitempty" json:"target_namespace,omitempty"` + GitHubHostname *string `url:"github_hostname,omitempty" json:"github_hostname,omitempty"` + OptionalStages struct { + SingleEndpointNotesImport *bool `url:"single_endpoint_notes_import,omitempty" json:"single_endpoint_notes_import,omitempty"` + AttachmentsImport *bool `url:"attachments_import,omitempty" json:"attachments_import,omitempty"` + CollaboratorsImport *bool `url:"collaborators_import,omitempty" json:"collaborators_import,omitempty"` + } `url:"optional_stages,omitempty" json:"optional_stages,omitempty"` + TimeoutStrategy *string `url:"timeout_strategy,omitempty" json:"timeout_strategy,omitempty"` +} + +// Import a repository from GitHub. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-github +func (s *ImportService) ImportRepositoryFromGitHub(opt *ImportRepositoryFromGitHubOptions, options ...RequestOptionFunc) (*GitHubImport, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "import/github", opt, options) + if err != nil { + return nil, nil, err + } + + gi := new(GitHubImport) + resp, err := s.client.Do(req, gi) + if err != nil { + return nil, resp, err + } + + return gi, resp, nil +} + +// CancelledGitHubImport represents the response when canceling +// an import from GitHub. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#cancel-github-project-import +type CancelledGitHubImport struct { + ID int `json:"id"` + Name string `json:"name"` + FullPath string `json:"full_path"` + FullName string `json:"full_name"` + ImportSource string `json:"import_source"` + ImportStatus string `json:"import_status"` + HumanImportStatusName string `json:"human_import_status_name"` + ProviderLink string `json:"provider_link"` +} + +func (s CancelledGitHubImport) String() string { + return Stringify(s) +} + +// CancelGitHubProjectImportOptions represents the available +// CancelGitHubProjectImport() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#cancel-github-project-import +type CancelGitHubProjectImportOptions struct { + ProjectID *int `url:"project_id,omitempty" json:"project_id,omitempty"` +} + +// Cancel an import of a repository from GitHub. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#cancel-github-project-import +func (s *ImportService) CancelGitHubProjectImport(opt *CancelGitHubProjectImportOptions, options ...RequestOptionFunc) (*CancelledGitHubImport, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "import/github/cancel", opt, options) + if err != nil { + return nil, nil, err + } + + cgi := new(CancelledGitHubImport) + resp, err := s.client.Do(req, cgi) + if err != nil { + return nil, resp, err + } + + return cgi, resp, nil +} + +// ImportGitHubGistsIntoGitLabSnippetsOptions represents the available +// ImportGitHubGistsIntoGitLabSnippets() options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-github-gists-into-gitlab-snippets +type ImportGitHubGistsIntoGitLabSnippetsOptions struct { + PersonalAccessToken *string `url:"personal_access_token,omitempty" json:"personal_access_token,omitempty"` +} + +// Import personal GitHub Gists into personal GitLab Snippets. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-github-gists-into-gitlab-snippets +func (s *ImportService) ImportGitHubGistsIntoGitLabSnippets(opt *ImportGitHubGistsIntoGitLabSnippetsOptions, options ...RequestOptionFunc) (*Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "import/github/gists", opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// BitbucketServerImport represents the response from an import from Bitbucket +// Server. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-server +type BitbucketServerImport struct { + ID int `json:"id"` + Name string `json:"name"` + FullPath string `json:"full_path"` + FullName string `json:"full_name"` + RefsUrl string `json:"refs_url"` +} + +func (s BitbucketServerImport) String() string { + return Stringify(s) +} + +// ImportRepositoryFromBitbucketServerOptions represents the available ImportRepositoryFromBitbucketServer() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-server +type ImportRepositoryFromBitbucketServerOptions struct { + BitbucketServerUrl *string `url:"bitbucket_server_url,omitempty" json:"bitbucket_server_url,omitempty"` + BitbucketServerUsername *string `url:"bitbucket_server_username,omitempty" json:"bitbucket_server_username,omitempty"` + PersonalAccessToken *string `url:"personal_access_token,omitempty" json:"personal_access_token,omitempty"` + BitbucketServerProject *string `url:"bitbucket_server_project,omitempty" json:"bitbucket_server_project,omitempty"` + BitbucketServerRepo *string `url:"bitbucket_server_repo,omitempty" json:"bitbucket_server_repo,omitempty"` + NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` + NewNamespace *string `url:"new_namespace,omitempty" json:"new_namespace,omitempty"` + TimeoutStrategy *string `url:"timeout_strategy,omitempty" json:"timeout_strategy,omitempty"` +} + +// Import a repository from Bitbucket Server. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-server +func (s *ImportService) ImportRepositoryFromBitbucketServer(opt *ImportRepositoryFromBitbucketServerOptions, options ...RequestOptionFunc) (*BitbucketServerImport, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "import/bitbucket_server", opt, options) + if err != nil { + return nil, nil, err + } + + bsi := new(BitbucketServerImport) + resp, err := s.client.Do(req, bsi) + if err != nil { + return nil, resp, err + } + + return bsi, resp, nil +} + +// BitbucketCloudImport represents the response from an import from Bitbucket +// Cloud. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-cloud +type BitbucketCloudImport struct { + ID int `json:"id"` + Name string `json:"name"` + FullPath string `json:"full_path"` + FullName string `json:"full_name"` + RefsUrl string `json:"refs_url"` + ImportSource string `json:"import_source"` + ImportStatus string `json:"import_status"` + HumanImportStatusName string `json:"human_import_status_name"` + ProviderLink string `json:"provider_link"` + RelationType string `json:"relation_type"` + ImportWarning string `json:"import_warning"` +} + +func (s BitbucketCloudImport) String() string { + return Stringify(s) +} + +// ImportRepositoryFromBitbucketCloudOptions represents the available +// ImportRepositoryFromBitbucketCloud() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-cloud +type ImportRepositoryFromBitbucketCloudOptions struct { + BitbucketUsername *string `url:"bitbucket_username,omitempty" json:"bitbucket_username,omitempty"` + BitbucketAppPassword *string `url:"bitbucket_app_password,omitempty" json:"bitbucket_app_password,omitempty"` + RepoPath *string `url:"repo_path,omitempty" json:"repo_path,omitempty"` + TargetNamespace *string `url:"target_namespace,omitempty" json:"target_namespace,omitempty"` + NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` +} + +// Import a repository from Bitbucket Cloud. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-cloud +func (s *ImportService) ImportRepositoryFromBitbucketCloud(opt *ImportRepositoryFromBitbucketCloudOptions, options ...RequestOptionFunc) (*BitbucketCloudImport, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "import/bitbucket", opt, options) + if err != nil { + return nil, nil, err + } + + bci := new(BitbucketCloudImport) + resp, err := s.client.Do(req, bci) + if err != nil { + return nil, resp, err + } + + return bci, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/instance_variables.go b/vendor/github.com/xanzy/go-gitlab/instance_variables.go index c376fd5cf3..58eef2b272 100644 --- a/vendor/github.com/xanzy/go-gitlab/instance_variables.go +++ b/vendor/github.com/xanzy/go-gitlab/instance_variables.go @@ -42,6 +42,7 @@ type InstanceVariable struct { Protected bool `json:"protected"` Masked bool `json:"masked"` Raw bool `json:"raw"` + Description string `json:"description"` } func (v InstanceVariable) String() string { @@ -105,10 +106,11 @@ func (s *InstanceVariablesService) GetVariable(key string, options ...RequestOpt type CreateInstanceVariableOptions struct { Key *string `url:"key,omitempty" json:"key,omitempty"` Value *string `url:"value,omitempty" json:"value,omitempty"` - VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` - Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` + Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } // CreateVariable creates a new instance level CI variable. 
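The new import.go above adds an ImportService for GitLab's import API. A sketch of the GitHub path, reusing the git client from the earlier sketch and assuming the service is wired up on the client as git.Import; the token and repository ID are placeholders:

```go
// Kick off a GitHub import, then cancel it again.
gi, _, err := git.Import.ImportRepositoryFromGitHub(&gitlab.ImportRepositoryFromGitHubOptions{
	PersonalAccessToken: gitlab.Ptr("ghp-placeholder"), // GitHub token (placeholder)
	RepoID:              gitlab.Ptr(12345),             // GitHub repository ID (placeholder)
	TargetNamespace:     gitlab.Ptr("my-group"),
})
if err != nil {
	log.Fatal(err)
}
log.Printf("import %d is %s", gi.ID, gi.ImportStatus)

_, _, err = git.Import.CancelGitHubProjectImport(&gitlab.CancelGitHubProjectImportOptions{
	ProjectID: gitlab.Ptr(gi.ID),
})
if err != nil {
	log.Fatal(err)
}
```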
@@ -139,10 +141,11 @@ func (s *InstanceVariablesService) CreateVariable(opt *CreateInstanceVariableOpt // https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#update-instance-variable type UpdateInstanceVariableOptions struct { Value *string `url:"value,omitempty" json:"value,omitempty"` - VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` - Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` + Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } // UpdateVariable updates the position of an existing diff --git a/vendor/github.com/xanzy/go-gitlab/job_token_scope.go b/vendor/github.com/xanzy/go-gitlab/job_token_scope.go index ff96ba6984..35525b76d8 100644 --- a/vendor/github.com/xanzy/go-gitlab/job_token_scope.go +++ b/vendor/github.com/xanzy/go-gitlab/job_token_scope.go @@ -184,3 +184,101 @@ func (j *JobTokenScopeService) RemoveProjectFromJobScopeAllowList(pid interface{ return j.client.Do(req, nil) } + +// JobTokenAllowlistItem represents a single job token allowlist item. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/project_job_token_scopes.html +type JobTokenAllowlistItem struct { + SourceProjectID int `json:"source_project_id"` + TargetGroupID int `json:"target_group_id"` +} + +// GetJobTokenAllowlistGroupsOptions represents the available +// GetJobTokenAllowlistGroups() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-allowlist-of-groups +type GetJobTokenAllowlistGroupsOptions struct { + ListOptions +} + +// GetJobTokenAllowlistGroups fetches the CI/CD job token allowlist groups +// (job token scopes) of a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-allowlist-of-groups +func (j *JobTokenScopeService) GetJobTokenAllowlistGroups(pid interface{}, opt *GetJobTokenAllowlistGroupsOptions, options ...RequestOptionFunc) ([]*Group, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf(`projects/%s/job_token_scope/groups_allowlist`, PathEscape(project)) + + req, err := j.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ps []*Group + resp, err := j.client.Do(req, &ps) + if err != nil { + return nil, resp, err + } + + return ps, resp, nil +} + +// AddGroupToJobTokenAllowlistOptions represents the available +// AddGroupToJobTokenAllowlist() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#add-a-group-to-a-cicd-job-token-allowlist +type AddGroupToJobTokenAllowlistOptions struct { + TargetGroupID *int `url:"target_group_id,omitempty" json:"target_group_id,omitempty"` +} + +// AddGroupToJobTokenAllowlist adds a new group to a project's job token +// inbound groups allow list. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#add-a-group-to-a-cicd-job-token-allowlist +func (j *JobTokenScopeService) AddGroupToJobTokenAllowlist(pid interface{}, opt *AddGroupToJobTokenAllowlistOptions, options ...RequestOptionFunc) (*JobTokenAllowlistItem, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf(`projects/%s/job_token_scope/groups_allowlist`, PathEscape(project)) + + req, err := j.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + jt := new(JobTokenAllowlistItem) + resp, err := j.client.Do(req, jt) + if err != nil { + return nil, resp, err + } + + return jt, resp, nil +} + +// RemoveGroupFromJobTokenAllowlist removes a group from a project's job +// token inbound groups allow list. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#remove-a-group-from-a-cicd-job-token-allowlist +func (j *JobTokenScopeService) RemoveGroupFromJobTokenAllowlist(pid interface{}, targetGroup int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf(`projects/%s/job_token_scope/groups_allowlist/%d`, PathEscape(project), targetGroup) + + req, err := j.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return j.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/jobs.go b/vendor/github.com/xanzy/go-gitlab/jobs.go index 9ad0066581..f25c020f12 100644 --- a/vendor/github.com/xanzy/go-gitlab/jobs.go +++ b/vendor/github.com/xanzy/go-gitlab/jobs.go @@ -516,9 +516,9 @@ type PlayJobOptions struct { // GitLab API docs: // https://docs.gitlab.com/ee/api/jobs.html#run-a-job type JobVariableOptions struct { - Key *string `url:"key,omitempty" json:"key,omitempty"` - Value *string `url:"value,omitempty" json:"value,omitempty"` - VariableType *string `url:"variable_type,omitempty" json:"variable_type,omitempty"` + Key *string `url:"key,omitempty" json:"key,omitempty"` + Value *string `url:"value,omitempty" json:"value,omitempty"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } // PlayJob triggers a manual action to start a job. 
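The groups allowlist additions to job_token_scope.go round-trip as follows; projectID and the target group ID are placeholders, and the service is assumed to hang off the client as git.JobTokenScope:

```go
projectID := 1234 // placeholder

// Page through the current groups allowlist.
groups, _, err := git.JobTokenScope.GetJobTokenAllowlistGroups(projectID,
	&gitlab.GetJobTokenAllowlistGroupsOptions{
		ListOptions: gitlab.ListOptions{PerPage: 50},
	})
if err != nil {
	log.Fatal(err)
}
for _, g := range groups {
	log.Printf("allowlisted group: %d (%s)", g.ID, g.FullPath)
}

// Allow group 56 (placeholder) to use this project's job token scope...
item, _, err := git.JobTokenScope.AddGroupToJobTokenAllowlist(projectID,
	&gitlab.AddGroupToJobTokenAllowlistOptions{TargetGroupID: gitlab.Ptr(56)})
if err != nil {
	log.Fatal(err)
}
log.Printf("added group %d", item.TargetGroupID)

// ...and remove it again.
if _, err := git.JobTokenScope.RemoveGroupFromJobTokenAllowlist(projectID, 56); err != nil {
	log.Fatal(err)
}
```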
diff --git a/vendor/github.com/xanzy/go-gitlab/member_roles.go b/vendor/github.com/xanzy/go-gitlab/member_roles.go index e3f1285f57..4d791a9137 100644 --- a/vendor/github.com/xanzy/go-gitlab/member_roles.go +++ b/vendor/github.com/xanzy/go-gitlab/member_roles.go @@ -17,17 +17,31 @@ type MemberRolesService struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/member_roles.html type MemberRole struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description,omitempty"` - GroupId int `json:"group_id"` - BaseAccessLevel AccessLevelValue `json:"base_access_level"` - AdminMergeRequests bool `json:"admin_merge_requests,omitempty"` - AdminVulnerability bool `json:"admin_vulnerability,omitempty"` - ReadCode bool `json:"read_code,omitempty"` - ReadDependency bool `json:"read_dependency,omitempty"` - ReadVulnerability bool `json:"read_vulnerability,omitempty"` - ManageProjectAccessToken bool `json:"manage_project_access_token,omitempty"` + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + GroupID int `json:"group_id"` + BaseAccessLevel AccessLevelValue `json:"base_access_level"` + AdminCICDVariables bool `json:"admin_cicd_variables,omitempty"` + AdminComplianceFramework bool `json:"admin_compliance_framework,omitempty"` + AdminGroupMembers bool `json:"admin_group_member,omitempty"` + AdminMergeRequests bool `json:"admin_merge_request,omitempty"` + AdminPushRules bool `json:"admin_push_rules,omitempty"` + AdminTerraformState bool `json:"admin_terraform_state,omitempty"` + AdminVulnerability bool `json:"admin_vulnerability,omitempty"` + AdminWebHook bool `json:"admin_web_hook,omitempty"` + ArchiveProject bool `json:"archive_project,omitempty"` + ManageDeployTokens bool `json:"manage_deploy_tokens,omitempty"` + ManageGroupAccesToken bool `json:"manage_group_access_tokens,omitempty"` + ManageMergeRequestSettings bool `json:"manage_merge_request_settings,omitempty"` + ManageProjectAccessToken bool `json:"manage_project_access_tokens,omitempty"` + ManageSecurityPolicyLink bool `json:"manage_security_policy_link,omitempty"` + ReadCode bool `json:"read_code,omitempty"` + ReadRunners bool `json:"read_runners,omitempty"` + ReadDependency bool `json:"read_dependency,omitempty"` + ReadVulnerability bool `json:"read_vulnerability,omitempty"` + RemoveGroup bool `json:"remove_group,omitempty"` + RemoveProject bool `json:"remove_project,omitempty"` } // ListMemberRoles gets a list of member roles for a specified group. 
@@ -60,14 +74,29 @@ func (s *MemberRolesService) ListMemberRoles(gid interface{}, options ...Request // GitLab API docs: // https://docs.gitlab.com/ee/api/member_roles.html#add-a-member-role-to-a-group type CreateMemberRoleOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - BaseAccessLevel *AccessLevelValue `url:"base_access_level,omitempty" json:"base_access_level,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - AdminMergeRequest *bool `url:"admin_merge_request,omitempty" json:"admin_merge_request,omitempty"` - AdminVulnerability *bool `url:"admin_vulnerability,omitempty" json:"admin_vulnerability,omitempty"` - ReadCode *bool `url:"read_code,omitempty" json:"read_code,omitempty"` - ReadDependency *bool `url:"read_dependency,omitempty" json:"read_dependency,omitempty"` - ReadVulnerability *bool `url:"read_vulnerability,omitempty" json:"read_vulnerability,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + BaseAccessLevel *AccessLevelValue `url:"base_access_level,omitempty" json:"base_access_level,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + AdminCICDVariables *bool `url:"admin_cicd_variables" json:"admin_cicd_variables,omitempty"` + AdminComplianceFramework *bool `url:"admin_compliance_framework" json:"admin_compliance_framework,omitempty"` + AdminGroupMembers *bool `url:"admin_group_member" json:"admin_group_member,omitempty"` + AdminMergeRequest *bool `url:"admin_merge_request,omitempty" json:"admin_merge_request,omitempty"` + AdminPushRules *bool `url:"admin_push_rules" json:"admin_push_rules,omitempty"` + AdminTerraformState *bool `url:"admin_terraform_state" json:"admin_terraform_state,omitempty"` + AdminVulnerability *bool `url:"admin_vulnerability,omitempty" json:"admin_vulnerability,omitempty"` + AdminWebHook *bool `url:"admin_web_hook" json:"admin_web_hook,omitempty"` + ArchiveProject *bool `url:"archive_project" json:"archive_project,omitempty"` + ManageDeployTokens *bool `url:"manage_deploy_tokens" json:"manage_deploy_tokens,omitempty"` + ManageGroupAccesToken *bool `url:"manage_group_access_tokens" json:"manage_group_access_tokens,omitempty"` + ManageMergeRequestSettings *bool `url:"manage_merge_request_settings" json:"manage_merge_request_settings,omitempty"` + ManageProjectAccessToken *bool `url:"manage_project_access_tokens" json:"manage_project_access_tokens,omitempty"` + ManageSecurityPolicyLink *bool `url:"manage_security_policy_link" json:"manage_security_policy_link,omitempty"` + ReadCode *bool `url:"read_code,omitempty" json:"read_code,omitempty"` + ReadRunners *bool `url:"read_runners" json:"read_runners,omitempty"` + ReadDependency *bool `url:"read_dependency,omitempty" json:"read_dependency,omitempty"` + ReadVulnerability *bool `url:"read_vulnerability,omitempty" json:"read_vulnerability,omitempty"` + RemoveGroup *bool `url:"remove_group" json:"remove_group,omitempty"` + RemoveProject *bool `url:"remove_project" json:"remove_project,omitempty"` } // CreateMemberRole creates a new member role for a specified group. 
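CreateMemberRoleOptions now carries the full set of custom-role permissions. A hedged sketch of creating a read-mostly security role; the group ID and role name are invented, and the service is assumed to be exposed on the client as git.MemberRoles:

```go
groupID := 42 // placeholder

role, _, err := git.MemberRoles.CreateMemberRole(groupID, &gitlab.CreateMemberRoleOptions{
	Name:               gitlab.Ptr("Security auditor"), // hypothetical role name
	BaseAccessLevel:    gitlab.Ptr(gitlab.DeveloperPermissions),
	Description:        gitlab.Ptr("Read-mostly role for the security team"),
	ReadCode:           gitlab.Ptr(true),
	ReadVulnerability:  gitlab.Ptr(true),
	AdminVulnerability: gitlab.Ptr(true),
})
if err != nil {
	log.Fatal(err)
}
log.Printf("created member role %d (%s)", role.ID, role.Name)
```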
diff --git a/vendor/github.com/xanzy/go-gitlab/merge_request_approvals.go b/vendor/github.com/xanzy/go-gitlab/merge_request_approvals.go index 0da0af3b4f..d2f1f81ff3 100644 --- a/vendor/github.com/xanzy/go-gitlab/merge_request_approvals.go +++ b/vendor/github.com/xanzy/go-gitlab/merge_request_approvals.go @@ -94,6 +94,7 @@ type MergeRequestApprovalRule struct { ID int `json:"id"` Name string `json:"name"` RuleType string `json:"rule_type"` + ReportType string `json:"report_type"` EligibleApprovers []*BasicUser `json:"eligible_approvers"` ApprovalsRequired int `json:"approvals_required"` SourceRule *ProjectApprovalRule `json:"source_rule"` @@ -180,6 +181,26 @@ func (s *MergeRequestApprovalsService) UnapproveMergeRequest(pid interface{}, mr return s.client.Do(req, nil) } +// ResetApprovalsOfMergeRequest clears all approvals of a merge request on GitLab. +// Available only for bot users based on project or group tokens. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#reset-approvals-of-a-merge-request +func (s *MergeRequestApprovalsService) ResetApprovalsOfMergeRequest(pid interface{}, mr int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/reset_approvals", PathEscape(project), mr) + + req, err := s.client.NewRequest(http.MethodPut, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + // ChangeMergeRequestApprovalConfigurationOptions represents the available // ChangeMergeRequestApprovalConfiguration() options. // diff --git a/vendor/github.com/xanzy/go-gitlab/merge_requests.go b/vendor/github.com/xanzy/go-gitlab/merge_requests.go index 24d333734a..a9e8d2e5c6 100644 --- a/vendor/github.com/xanzy/go-gitlab/merge_requests.go +++ b/vendor/github.com/xanzy/go-gitlab/merge_requests.go @@ -186,6 +186,7 @@ func (m MergeRequestDiffVersion) String() string { // https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-requests type ListMergeRequestsOptions struct { ListOptions + Approved *string `url:"approved,omitempty" json:"approved,omitempty"` State *string `url:"state,omitempty" json:"state,omitempty"` OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` Sort *string `url:"sort,omitempty" json:"sort,omitempty"` @@ -681,19 +682,20 @@ func (s *MergeRequestsService) GetIssuesClosedOnMerge(pid interface{}, mergeRequ // GitLab API docs: // https://docs.gitlab.com/ee/api/merge_requests.html#create-mr type CreateMergeRequestOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - SourceBranch *string `url:"source_branch,omitempty" json:"source_branch,omitempty"` - TargetBranch *string `url:"target_branch,omitempty" json:"target_branch,omitempty"` - Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - AssigneeIDs *[]int `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` - ReviewerIDs *[]int `url:"reviewer_ids,omitempty" json:"reviewer_ids,omitempty"` - TargetProjectID *int `url:"target_project_id,omitempty" json:"target_project_id,omitempty"` - MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` - RemoveSourceBranch *bool `url:"remove_source_branch,omitempty" json:"remove_source_branch,omitempty"` - Squash *bool `url:"squash,omitempty" 
json:"squash,omitempty"` - AllowCollaboration *bool `url:"allow_collaboration,omitempty" json:"allow_collaboration,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + SourceBranch *string `url:"source_branch,omitempty" json:"source_branch,omitempty"` + TargetBranch *string `url:"target_branch,omitempty" json:"target_branch,omitempty"` + Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` + AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + AssigneeIDs *[]int `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` + ReviewerIDs *[]int `url:"reviewer_ids,omitempty" json:"reviewer_ids,omitempty"` + TargetProjectID *int `url:"target_project_id,omitempty" json:"target_project_id,omitempty"` + MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` + RemoveSourceBranch *bool `url:"remove_source_branch,omitempty" json:"remove_source_branch,omitempty"` + Squash *bool `url:"squash,omitempty" json:"squash,omitempty"` + AllowCollaboration *bool `url:"allow_collaboration,omitempty" json:"allow_collaboration,omitempty"` + ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` } // CreateMergeRequest creates a new merge request. @@ -859,20 +861,29 @@ func (s *MergeRequestsService) CancelMergeWhenPipelineSucceeds(pid interface{}, return m, resp, nil } +// RebaseMergeRequestOptions represents the available RebaseMergeRequest() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#rebase-a-merge-request +type RebaseMergeRequestOptions struct { + SkipCI *bool `url:"skip_ci,omitempty" json:"skip_ci,omitempty"` +} + // RebaseMergeRequest automatically rebases the source_branch of the merge // request against its target_branch. If you don’t have permissions to push // to the merge request’s source branch, you’ll get a 403 Forbidden response. 
// // GitLab API docs: // https://docs.gitlab.com/ee/api/merge_requests.html#rebase-a-merge-request -func (s *MergeRequestsService) RebaseMergeRequest(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*Response, error) { +func (s *MergeRequestsService) RebaseMergeRequest(pid interface{}, mergeRequest int, opt *RebaseMergeRequestOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { return nil, err } u := fmt.Sprintf("projects/%s/merge_requests/%d/rebase", PathEscape(project), mergeRequest) - req, err := s.client.NewRequest(http.MethodPut, u, nil, options) + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { return nil, err } diff --git a/vendor/github.com/xanzy/go-gitlab/packages.go b/vendor/github.com/xanzy/go-gitlab/packages.go index fb7e2d1e2c..a6b252ed2e 100644 --- a/vendor/github.com/xanzy/go-gitlab/packages.go +++ b/vendor/github.com/xanzy/go-gitlab/packages.go @@ -89,14 +89,15 @@ func (s PackageTag) String() string { // // GitLab API docs: https://docs.gitlab.com/ee/api/packages.html type PackageFile struct { - ID int `json:"id"` - PackageID int `json:"package_id"` - CreatedAt *time.Time `json:"created_at"` - FileName string `json:"file_name"` - Size int `json:"size"` - FileMD5 string `json:"file_md5"` - FileSHA1 string `json:"file_sha1"` - Pipeline *[]Pipeline `json:"pipelines"` + ID int `json:"id"` + PackageID int `json:"package_id"` + CreatedAt *time.Time `json:"created_at"` + FileName string `json:"file_name"` + Size int `json:"size"` + FileMD5 string `json:"file_md5"` + FileSHA1 string `json:"file_sha1"` + FileSHA256 string `json:"file_sha256"` + Pipeline *[]Pipeline `json:"pipelines"` } func (s PackageFile) String() string { diff --git a/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go b/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go index ba32bd8fd6..14aee9ee05 100644 --- a/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go +++ b/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go @@ -57,7 +57,14 @@ func (p PersonalAccessToken) String() string { // https://docs.gitlab.com/ee/api/personal_access_tokens.html#list-personal-access-tokens type ListPersonalAccessTokensOptions struct { ListOptions - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` + CreatedAfter *ISOTime `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *ISOTime `url:"created_before,omitempty" json:"created_before,omitempty"` + LastUsedAfter *ISOTime `url:"last_used_after,omitempty" json:"last_used_after,omitempty"` + LastUsedBefore *ISOTime `url:"last_used_before,omitempty" json:"last_used_before,omitempty"` + Revoked *bool `url:"revoked,omitempty" json:"revoked,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + State *string `url:"state,omitempty" json:"state,omitempty"` + UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` } // ListPersonalAccessTokens gets a list of all personal access tokens. 
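Note the signature change above: RebaseMergeRequest gains a *RebaseMergeRequestOptions parameter. Continuing from the earlier client, a sketch with placeholder project ID and merge request IID:

```go
// Pass nil to keep the old behaviour, or skip CI for the rebase commit.
_, err = git.MergeRequests.RebaseMergeRequest(1234, 7, // placeholder project ID and MR IID
	&gitlab.RebaseMergeRequestOptions{SkipCI: gitlab.Ptr(true)})
if err != nil {
	log.Fatal(err)
}
```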
@@ -83,8 +90,8 @@ func (s *PersonalAccessTokensService) ListPersonalAccessTokens(opt *ListPersonal // // GitLab API docs: // https://docs.gitlab.com/ee/api/personal_access_tokens.html#using-a-personal-access-token-id -func (s *PersonalAccessTokensService) GetSinglePersonalAccessTokenByID(user int, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { - u := fmt.Sprintf("personal_access_tokens/%d", user) +func (s *PersonalAccessTokensService) GetSinglePersonalAccessTokenByID(token int, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { + u := fmt.Sprintf("personal_access_tokens/%d", token) req, err := s.client.NewRequest(http.MethodGet, u, nil, options) if err != nil { return nil, nil, err @@ -129,12 +136,17 @@ type RotatePersonalAccessTokenOptions struct { ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` } -// RotatePersonalAccessToken revokes a token and returns a new token that +// RotatePersonalAccessToken is a backwards-compat shim for RotatePersonalAccessTokenByID. +func (s *PersonalAccessTokensService) RotatePersonalAccessToken(token int, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { + return s.RotatePersonalAccessTokenByID(token, opt, options...) +} + +// RotatePersonalAccessTokenByID revokes a token and returns a new token that // expires in one week per default. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/personal_access_tokens.html#rotate-a-personal-access-token -func (s *PersonalAccessTokensService) RotatePersonalAccessToken(token int, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { +// https://docs.gitlab.com/ee/api/personal_access_tokens.html#use-a-personal-access-token-id +func (s *PersonalAccessTokensService) RotatePersonalAccessTokenByID(token int, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { u := fmt.Sprintf("personal_access_tokens/%d/rotate", token) req, err := s.client.NewRequest(http.MethodPost, u, opt, options) @@ -151,11 +163,38 @@ func (s *PersonalAccessTokensService) RotatePersonalAccessToken(token int, opt * return pat, resp, nil } -// RevokePersonalAccessToken revokes a personal access token. +// RotatePersonalAccessTokenSelf revokes the currently authenticated token +// and returns a new token that expires in one week per default. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/personal_access_tokens.html#revoke-a-personal-access-token +// https://docs.gitlab.com/ee/api/personal_access_tokens.html#use-a-request-header +func (s *PersonalAccessTokensService) RotatePersonalAccessTokenSelf(opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { + u := "personal_access_tokens/self/rotate" + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + pat := new(PersonalAccessToken) + resp, err := s.client.Do(req, pat) + if err != nil { + return nil, resp, err + } + + return pat, resp, nil +} + +// RevokePersonalAccessToken is a backwards-compat shim for RevokePersonalAccessTokenByID. func (s *PersonalAccessTokensService) RevokePersonalAccessToken(token int, options ...RequestOptionFunc) (*Response, error) { + return s.RevokePersonalAccessTokenByID(token, options...) +} + +// RevokePersonalAccessTokenByID revokes a personal access token by its ID. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/personal_access_tokens.html#using-a-personal-access-token-id-1 +func (s *PersonalAccessTokensService) RevokePersonalAccessTokenByID(token int, options ...RequestOptionFunc) (*Response, error) { u := fmt.Sprintf("personal_access_tokens/%d", token) req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) @@ -165,3 +204,19 @@ func (s *PersonalAccessTokensService) RevokePersonalAccessToken(token int, optio return s.client.Do(req, nil) } + +// RevokePersonalAccessTokenSelf revokes the currently authenticated +// personal access token. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/personal_access_tokens.html#using-a-request-header-1 +func (s *PersonalAccessTokensService) RevokePersonalAccessTokenSelf(options ...RequestOptionFunc) (*Response, error) { + u := "personal_access_tokens/self" + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/pipeline_schedules.go b/vendor/github.com/xanzy/go-gitlab/pipeline_schedules.go index d7c85df6d8..51477f21bd 100644 --- a/vendor/github.com/xanzy/go-gitlab/pipeline_schedules.go +++ b/vendor/github.com/xanzy/go-gitlab/pipeline_schedules.go @@ -56,6 +56,7 @@ type LastPipeline struct { SHA string `json:"sha"` Ref string `json:"ref"` Status string `json:"status"` + WebURL string `json:"web_url"` } // ListPipelineSchedulesOptions represents the available ListPipelineTriggers() options. @@ -293,9 +294,9 @@ func (s *PipelineSchedulesService) RunPipelineSchedule(pid interface{}, schedule // GitLab API docs: // https://docs.gitlab.com/ee/api/pipeline_schedules.html#create-a-new-pipeline-schedule type CreatePipelineScheduleVariableOptions struct { - Key *string `url:"key" json:"key"` - Value *string `url:"value" json:"value"` - VariableType *string `url:"variable_type,omitempty" json:"variable_type,omitempty"` + Key *string `url:"key" json:"key"` + Value *string `url:"value" json:"value"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } // CreatePipelineScheduleVariable creates a pipeline schedule variable. @@ -329,8 +330,8 @@ func (s *PipelineSchedulesService) CreatePipelineScheduleVariable(pid interface{ // GitLab API docs: // https://docs.gitlab.com/ee/api/pipeline_schedules.html#edit-a-pipeline-schedule-variable type EditPipelineScheduleVariableOptions struct { - Value *string `url:"value" json:"value"` - VariableType *string `url:"variable_type,omitempty" json:"variable_type,omitempty"` + Value *string `url:"value" json:"value"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } // EditPipelineScheduleVariable creates a pipeline schedule variable. diff --git a/vendor/github.com/xanzy/go-gitlab/pipelines.go b/vendor/github.com/xanzy/go-gitlab/pipelines.go index 6dee9dfe7c..3f2448447e 100644 --- a/vendor/github.com/xanzy/go-gitlab/pipelines.go +++ b/vendor/github.com/xanzy/go-gitlab/pipelines.go @@ -34,9 +34,9 @@ type PipelinesService struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html type PipelineVariable struct { - Key string `json:"key"` - Value string `json:"value"` - VariableType string `json:"variable_type"` + Key string `json:"key"` + Value string `json:"value"` + VariableType VariableTypeValue `json:"variable_type"` } // Pipeline represents a GitLab pipeline. 
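The new self-service token endpoints above let a client rotate or revoke the token it authenticates with, without first looking up the token ID. A sketch, assuming the standard time package is imported alongside the earlier client setup:

```go
// Rotate the current token; the default expiry is one week, overridden
// here to 30 days via an ISOTime value.
expires := gitlab.ISOTime(time.Now().AddDate(0, 0, 30))
pat, _, err := git.PersonalAccessTokens.RotatePersonalAccessTokenSelf(
	&gitlab.RotatePersonalAccessTokenOptions{ExpiresAt: &expires})
if err != nil {
	log.Fatal(err)
}
log.Printf("new token expires %s", pat.ExpiresAt)

// Later (with a client configured for the new token), revoke it
// without knowing its ID.
if _, err := git.PersonalAccessTokens.RevokePersonalAccessTokenSelf(); err != nil {
	log.Fatal(err)
}
```

Rotation revokes the old token, so subsequent requests must use the token returned in pat.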
@@ -309,9 +309,9 @@ type CreatePipelineOptions struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#create-a-new-pipeline type PipelineVariableOptions struct { - Key *string `url:"key,omitempty" json:"key,omitempty"` - Value *string `url:"value,omitempty" json:"value,omitempty"` - VariableType *string `url:"variable_type,omitempty" json:"variable_type,omitempty"` + Key *string `url:"key,omitempty" json:"key,omitempty"` + Value *string `url:"value,omitempty" json:"value,omitempty"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` } // CreatePipeline creates a new project pipeline. diff --git a/vendor/github.com/xanzy/go-gitlab/project_members.go b/vendor/github.com/xanzy/go-gitlab/project_members.go index 810fd36f9c..37d4b8a2e6 100644 --- a/vendor/github.com/xanzy/go-gitlab/project_members.go +++ b/vendor/github.com/xanzy/go-gitlab/project_members.go @@ -149,9 +149,10 @@ func (s *ProjectMembersService) GetInheritedProjectMember(pid interface{}, user // GitLab API docs: // https://docs.gitlab.com/ee/api/members.html#add-a-member-to-a-group-or-project type AddProjectMemberOptions struct { - UserID interface{} `url:"user_id,omitempty" json:"user_id,omitempty"` - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at"` + UserID interface{} `url:"user_id,omitempty" json:"user_id,omitempty"` + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at"` + MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` } // AddProjectMember adds a user to a project team. This is an idempotent @@ -187,8 +188,9 @@ func (s *ProjectMembersService) AddProjectMember(pid interface{}, opt *AddProjec // GitLab API docs: // https://docs.gitlab.com/ee/api/members.html#edit-a-member-of-a-group-or-project type EditProjectMemberOptions struct { - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at,omitempty"` + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at,omitempty"` + MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` } // EditProjectMember updates a project team member to a specified access level.. 
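Several option structs in these hunks (jobs, pipeline schedules, pipelines) switch VariableType from *string to the typed *VariableTypeValue. A sketch of creating a pipeline with a typed variable; the project ID and values are placeholders, and EnvVariableType is the existing go-gitlab constant for "env_var":

```go
pipeline, _, err := git.Pipelines.CreatePipeline(1234, &gitlab.CreatePipelineOptions{ // placeholder project ID
	Ref: gitlab.Ptr("main"),
	Variables: &[]*gitlab.PipelineVariableOptions{
		{
			Key:          gitlab.Ptr("DEPLOY_ENV"),
			Value:        gitlab.Ptr("staging"),
			VariableType: gitlab.Ptr(gitlab.EnvVariableType), // was a raw "env_var" string
		},
	},
})
if err != nil {
	log.Fatal(err)
}
log.Printf("started pipeline %d", pipeline.ID)
```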
diff --git a/vendor/github.com/xanzy/go-gitlab/projects.go b/vendor/github.com/xanzy/go-gitlab/projects.go index 2d46685f24..cc23f265d0 100644 --- a/vendor/github.com/xanzy/go-gitlab/projects.go +++ b/vendor/github.com/xanzy/go-gitlab/projects.go @@ -41,7 +41,6 @@ type Project struct { ID int `json:"id"` Description string `json:"description"` DefaultBranch string `json:"default_branch"` - Public bool `json:"public"` Visibility VisibilityValue `json:"visibility"` SSHURLToRepo string `json:"ssh_url_to_repo"` HTTPURLToRepo string `json:"http_url_to_repo"` @@ -87,6 +86,7 @@ type Project struct { OnlyAllowMergeIfPipelineSucceeds bool `json:"only_allow_merge_if_pipeline_succeeds"` OnlyAllowMergeIfAllDiscussionsAreResolved bool `json:"only_allow_merge_if_all_discussions_are_resolved"` RemoveSourceBranchAfterMerge bool `json:"remove_source_branch_after_merge"` + PreventMergeWithoutJiraIssue bool `json:"prevent_merge_without_jira_issue"` PrintingMergeRequestLinkEnabled bool `json:"printing_merge_request_link_enabled"` LFSEnabled bool `json:"lfs_enabled"` RepositoryStorage string `json:"repository_storage"` @@ -135,10 +135,12 @@ type Project struct { ImportError string `json:"import_error"` CIDefaultGitDepth int `json:"ci_default_git_depth"` CIForwardDeploymentEnabled bool `json:"ci_forward_deployment_enabled"` + CIForwardDeploymentRollbackAllowed bool `json:"ci_forward_deployment_rollback_allowed"` CISeperateCache bool `json:"ci_separated_caches"` CIJobTokenScopeEnabled bool `json:"ci_job_token_scope_enabled"` CIOptInJWT bool `json:"ci_opt_in_jwt"` CIAllowForkPipelinesToRunInParentProject bool `json:"ci_allow_fork_pipelines_to_run_in_parent_project"` + CIRestrictPipelineCancellationRole AccessControlValue `json:"ci_restrict_pipeline_cancellation_role"` PublicJobs bool `json:"public_jobs"` BuildTimeout int `json:"build_timeout"` AutoCancelPendingPipelines string `json:"auto_cancel_pending_pipelines"` @@ -166,6 +168,8 @@ type Project struct { SecurityAndComplianceAccessLevel AccessControlValue `json:"security_and_compliance_access_level"` MergeRequestDefaultTargetSelf bool `json:"mr_default_target_self"` ModelExperimentsAccessLevel AccessControlValue `json:"model_experiments_access_level"` + ModelRegistryAccessLevel AccessControlValue `json:"model_registry_access_level"` + PreReceiveSecretDetectionEnabled bool `json:"pre_receive_secret_detection_enabled"` // Deprecated: Use EmailsEnabled instead EmailsDisabled bool `json:"emails_disabled"` @@ -290,6 +294,7 @@ type Statistics struct { PackagesSize int64 `json:"packages_size"` SnippetsSize int64 `json:"snippets_size"` UploadsSize int64 `json:"uploads_size"` + ContainerRegistrySize int64 `json:"container_registry_size"` } func (s Project) String() string { @@ -304,6 +309,7 @@ type ProjectApprovalRule struct { ID int `json:"id"` Name string `json:"name"` RuleType string `json:"rule_type"` + ReportType string `json:"report_type"` EligibleApprovers []*BasicUser `json:"eligible_approvers"` ApprovalsRequired int `json:"approvals_required"` Users []*BasicUser `json:"users"` @@ -638,6 +644,7 @@ type CreateProjectOptions struct { Mirror *bool `url:"mirror,omitempty" json:"mirror,omitempty"` MirrorTriggerBuilds *bool `url:"mirror_trigger_builds,omitempty" json:"mirror_trigger_builds,omitempty"` ModelExperimentsAccessLevel *AccessControlValue `url:"model_experiments_access_level,omitempty" json:"model_experiments_access_level,omitempty"` + ModelRegistryAccessLevel *AccessControlValue `url:"model_registry_access_level,omitempty" 
json:"model_registry_access_level,omitempty"` Name *string `url:"name,omitempty" json:"name,omitempty"` NamespaceID *int `url:"namespace_id,omitempty" json:"namespace_id,omitempty"` OnlyAllowMergeIfAllDiscussionsAreResolved *bool `url:"only_allow_merge_if_all_discussions_are_resolved,omitempty" json:"only_allow_merge_if_all_discussions_are_resolved,omitempty"` @@ -826,6 +833,7 @@ func (s *ProjectsService) CreateProjectForUser(user int, opt *CreateProjectForUs // GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#edit-project type EditProjectOptions struct { AllowMergeOnSkippedPipeline *bool `url:"allow_merge_on_skipped_pipeline,omitempty" json:"allow_merge_on_skipped_pipeline,omitempty"` + AllowPipelineTriggerApproveDeployment *bool `url:"allow_pipeline_trigger_approve_deployment,omitempty" json:"allow_pipeline_trigger_approve_deployment,omitempty"` OnlyAllowMergeIfAllStatusChecksPassed *bool `url:"only_allow_merge_if_all_status_checks_passed,omitempty" json:"only_allow_merge_if_all_status_checks_passed,omitempty"` AnalyticsAccessLevel *AccessControlValue `url:"analytics_access_level,omitempty" json:"analytics_access_level,omitempty"` ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` @@ -841,7 +849,9 @@ type EditProjectOptions struct { CIConfigPath *string `url:"ci_config_path,omitempty" json:"ci_config_path,omitempty"` CIDefaultGitDepth *int `url:"ci_default_git_depth,omitempty" json:"ci_default_git_depth,omitempty"` CIForwardDeploymentEnabled *bool `url:"ci_forward_deployment_enabled,omitempty" json:"ci_forward_deployment_enabled,omitempty"` + CIForwardDeploymentRollbackAllowed *bool `url:"ci_forward_deployment_rollback_allowed,omitempty" json:"ci_forward_deployment_rollback_allowed,omitempty"` CISeperateCache *bool `url:"ci_separated_caches,omitempty" json:"ci_separated_caches,omitempty"` + CIRestrictPipelineCancellationRole *AccessControlValue `url:"ci_restrict_pipeline_cancellation_role,omitempty" json:"ci_restrict_pipeline_cancellation_role,omitempty"` ContainerExpirationPolicyAttributes *ContainerExpirationPolicyAttributes `url:"container_expiration_policy_attributes,omitempty" json:"container_expiration_policy_attributes,omitempty"` ContainerRegistryAccessLevel *AccessControlValue `url:"container_registry_access_level,omitempty" json:"container_registry_access_level,omitempty"` DefaultBranch *string `url:"default_branch,omitempty" json:"default_branch,omitempty"` @@ -869,6 +879,7 @@ type EditProjectOptions struct { MirrorTriggerBuilds *bool `url:"mirror_trigger_builds,omitempty" json:"mirror_trigger_builds,omitempty"` MirrorUserID *int `url:"mirror_user_id,omitempty" json:"mirror_user_id,omitempty"` ModelExperimentsAccessLevel *AccessControlValue `url:"model_experiments_access_level,omitempty" json:"model_experiments_access_level,omitempty"` + ModelRegistryAccessLevel *AccessControlValue `url:"model_registry_access_level,omitempty" json:"model_registry_access_level,omitempty"` Name *string `url:"name,omitempty" json:"name,omitempty"` OnlyAllowMergeIfAllDiscussionsAreResolved *bool `url:"only_allow_merge_if_all_discussions_are_resolved,omitempty" json:"only_allow_merge_if_all_discussions_are_resolved,omitempty"` OnlyAllowMergeIfPipelineSucceeds *bool `url:"only_allow_merge_if_pipeline_succeeds,omitempty" json:"only_allow_merge_if_pipeline_succeeds,omitempty"` @@ -884,6 +895,7 @@ type EditProjectOptions struct { InfrastructureAccessLevel *AccessControlValue `url:"infrastructure_access_level,omitempty" 
json:"infrastructure_access_level,omitempty"` MonitorAccessLevel *AccessControlValue `url:"monitor_access_level,omitempty" json:"monitor_access_level,omitempty"` RemoveSourceBranchAfterMerge *bool `url:"remove_source_branch_after_merge,omitempty" json:"remove_source_branch_after_merge,omitempty"` + PreventMergeWithoutJiraIssue *bool `url:"prevent_merge_without_jira_issue,omitempty" json:"prevent_merge_without_jira_issue,omitempty"` PrintingMergeRequestLinkEnabled *bool `url:"printing_merge_request_link_enabled,omitempty" json:"printing_merge_request_link_enabled,omitempty"` RepositoryAccessLevel *AccessControlValue `url:"repository_access_level,omitempty" json:"repository_access_level,omitempty"` RepositoryStorage *string `url:"repository_storage,omitempty" json:"repository_storage,omitempty"` @@ -1032,6 +1044,44 @@ func (s *ProjectsService) StarProject(pid interface{}, options ...RequestOptionF return p, resp, nil } +// ListProjectInvidedGroupOptions represents the available +// ListProjectsInvitedGroups() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#list-a-projects-invited-groups +type ListProjectInvidedGroupOptions struct { + ListOptions + Search *string `url:"search,omitempty" json:"search,omitempty"` + MinAccessLevel *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"` + Relation *[]string `url:"relation,omitempty" json:"relation,omitempty"` + WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` +} + +// ListProjectsInvitedGroups lists invited groups of a project +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#list-a-projects-invited-groups +func (s *ProjectsService) ListProjectsInvitedGroups(pid interface{}, opt *ListProjectInvidedGroupOptions, options ...RequestOptionFunc) ([]*ProjectGroup, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/invited_groups", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var pg []*ProjectGroup + resp, err := s.client.Do(req, &pg) + if err != nil { + return nil, resp, err + } + + return pg, resp, nil +} + // UnstarProject unstars a given project. // // GitLab API docs: @@ -1190,29 +1240,41 @@ type ProjectMember struct { AvatarURL string `json:"avatar_url"` } +// HookCustomHeader represents a project or group hook custom header +// Note: "Key" is returned from the Get operation, but "Value" is not +// The List operation doesn't return any headers at all for Projects, +// but does return headers for Groups +type HookCustomHeader struct { + Key string `json:"key"` + Value string `json:"value"` +} + // ProjectHook represents a project hook. 
// // GitLab API docs: // https://docs.gitlab.com/ee/api/projects.html#list-project-hooks type ProjectHook struct { - ID int `json:"id"` - URL string `json:"url"` - ConfidentialNoteEvents bool `json:"confidential_note_events"` - ProjectID int `json:"project_id"` - PushEvents bool `json:"push_events"` - PushEventsBranchFilter string `json:"push_events_branch_filter"` - IssuesEvents bool `json:"issues_events"` - ConfidentialIssuesEvents bool `json:"confidential_issues_events"` - MergeRequestsEvents bool `json:"merge_requests_events"` - TagPushEvents bool `json:"tag_push_events"` - NoteEvents bool `json:"note_events"` - JobEvents bool `json:"job_events"` - PipelineEvents bool `json:"pipeline_events"` - WikiPageEvents bool `json:"wiki_page_events"` - DeploymentEvents bool `json:"deployment_events"` - ReleasesEvents bool `json:"releases_events"` - EnableSSLVerification bool `json:"enable_ssl_verification"` - CreatedAt *time.Time `json:"created_at"` + ID int `json:"id"` + URL string `json:"url"` + ConfidentialNoteEvents bool `json:"confidential_note_events"` + ProjectID int `json:"project_id"` + PushEvents bool `json:"push_events"` + PushEventsBranchFilter string `json:"push_events_branch_filter"` + IssuesEvents bool `json:"issues_events"` + ConfidentialIssuesEvents bool `json:"confidential_issues_events"` + MergeRequestsEvents bool `json:"merge_requests_events"` + TagPushEvents bool `json:"tag_push_events"` + NoteEvents bool `json:"note_events"` + JobEvents bool `json:"job_events"` + PipelineEvents bool `json:"pipeline_events"` + WikiPageEvents bool `json:"wiki_page_events"` + DeploymentEvents bool `json:"deployment_events"` + ReleasesEvents bool `json:"releases_events"` + EnableSSLVerification bool `json:"enable_ssl_verification"` + CreatedAt *time.Time `json:"created_at"` + ResourceAccessTokenEvents bool `json:"resource_access_token_events"` + CustomWebhookTemplate string `json:"custom_webhook_template"` + CustomHeaders []*HookCustomHeader `json:"custom_headers"` } // ListProjectHooksOptions represents the available ListProjectHooks() options. 
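A minimal usage sketch, not part of the patch, of how downstream code could read the webhook fields this bump adds to ProjectHook; the client token and project ID 1234 are placeholder assumptions:

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// Placeholder token, assumed for the sketch.
	git, err := gitlab.NewClient("glpat-example")
	if err != nil {
		log.Fatal(err)
	}

	// List the hooks of an illustrative project and print the fields
	// introduced by this update.
	hooks, _, err := git.Projects.ListProjectHooks(1234, &gitlab.ListProjectHooksOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, h := range hooks {
		fmt.Println(h.URL, h.ResourceAccessTokenEvents, h.CustomWebhookTemplate)
		// Per the HookCustomHeader note above, List returns no custom headers
		// for projects, and Get returns only Key, so this is usually empty.
		for _, hdr := range h.CustomHeaders {
			fmt.Println("custom header:", hdr.Key)
		}
	}
}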
@@ -1275,22 +1337,25 @@ func (s *ProjectsService) GetProjectHook(pid interface{}, hook int, options ...R // GitLab API docs: // https://docs.gitlab.com/ee/api/projects.html#add-project-hook type AddProjectHookOptions struct { - ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` - ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` - DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` - EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` - IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` - JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` - PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` - ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` - Token *string `url:"token,omitempty" json:"token,omitempty"` - URL *string `url:"url,omitempty" json:"url,omitempty"` - WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` + EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` + ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + Token *string `url:"token,omitempty" json:"token,omitempty"` + URL *string `url:"url,omitempty" json:"url,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + ResourceAccessTokenEvents *bool `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"` + CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"` + CustomHeaders *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` } // AddProjectHook adds a hook to a 
specified project. @@ -1323,22 +1388,25 @@ func (s *ProjectsService) AddProjectHook(pid interface{}, opt *AddProjectHookOpt // GitLab API docs: // https://docs.gitlab.com/ee/api/projects.html#edit-project-hook type EditProjectHookOptions struct { - ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` - ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` - DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` - EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` - IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` - JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` - PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` - ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` - Token *string `url:"token,omitempty" json:"token,omitempty"` - URL *string `url:"url,omitempty" json:"url,omitempty"` - WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` + EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` + ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + Token *string `url:"token,omitempty" json:"token,omitempty"` + URL *string `url:"url,omitempty" json:"url,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + ResourceAccessTokenEvents *bool `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"` + CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"` + CustomHeaders *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` } // EditProjectHook 
edits a hook for a specified project. @@ -1386,6 +1454,80 @@ func (s *ProjectsService) DeleteProjectHook(pid interface{}, hook int, options . return s.client.Do(req, nil) } +// TriggerTestProjectHook triggers a test hook for a specified project. +// +// In GitLab 17.0 and later, this endpoint has a special rate limit. +// In GitLab 17.0 the rate was three requests per minute for each project hook. +// In GitLab 17.1 this was changed to five requests per minute for each project +// and authenticated user. +// +// To disable this limit on self-managed GitLab and GitLab Dedicated, +// an administrator can disable the feature flag named web_hook_test_api_endpoint_rate_limit. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#trigger-a-test-project-hook +func (s *ProjectsService) TriggerTestProjectHook(pid interface{}, hook int, event ProjectHookEvent, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/hooks/%d/test/%s", PathEscape(project), hook, string(event)) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// SetHookCustomHeaderOptions represents a project or group hook custom header. +// If the header isn't present, it will be created. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#set-a-custom-header +type SetHookCustomHeaderOptions struct { + Value *string `json:"value,omitempty"` +} + +// SetProjectCustomHeader creates or updates a project custom webhook header. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#set-a-custom-header
func (s *ProjectsService) SetProjectCustomHeader(pid interface{}, hook int, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/hooks/%d/custom_headers/%s", PathEscape(project), hook, key) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteProjectCustomHeader deletes a project custom webhook header. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#delete-a-custom-header +func (s *ProjectsService) DeleteProjectCustomHeader(pid interface{}, hook int, key string, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/hooks/%d/custom_headers/%s", PathEscape(project), hook, key) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + // ProjectForkRelation represents a project fork relationship. // // GitLab API docs: @@ -1449,6 +1591,7 @@ func (s *ProjectsService) DeleteProjectForkRelation(pid interface{}, options ...
type ProjectFile struct { Alt string `json:"alt"` URL string `json:"url"` + FullPath string `json:"full_path"` Markdown string `json:"markdown"` } @@ -1560,7 +1703,9 @@ type ProjectPushRules struct { FileNameRegex string `json:"file_name_regex"` MaxFileSize int `json:"max_file_size"` CommitCommitterCheck bool `json:"commit_committer_check"` + CommitCommitterNameCheck bool `json:"commit_committer_name_check"` RejectUnsignedCommits bool `json:"reject_unsigned_commits"` + RejectNonDCOCommits bool `json:"reject_non_dco_commits"` } // GetProjectPushRules gets the push rules of a project. @@ -1597,6 +1742,7 @@ type AddProjectPushRuleOptions struct { AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"` BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"` CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"` + CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"` CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"` CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` @@ -1605,6 +1751,7 @@ type AddProjectPushRuleOptions struct { MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` + RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` } // AddProjectPushRule adds a push rule to a specified project. @@ -1641,6 +1788,7 @@ type EditProjectPushRuleOptions struct { AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"` BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"` CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"` + CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"` CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"` CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` @@ -1649,6 +1797,7 @@ type EditProjectPushRuleOptions struct { MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` + RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` } // EditProjectPushRule edits a push rule for a specified project. 
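Before moving to the next file, a minimal sketch of exercising the custom-header endpoints and the test-hook trigger added above; the project ID 1234, hook ID 42, the X-Sketch-Header name, and the "push_events" event value are all assumptions, not part of the patch:

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-example") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Create or update a custom webhook header on hook 42 of project 1234.
	value := "sketch-value"
	_, err = git.Projects.SetProjectCustomHeader(1234, 42, "X-Sketch-Header",
		&gitlab.SetHookCustomHeaderOptions{Value: &value})
	if err != nil {
		log.Fatal(err)
	}

	// Fire a test delivery; subject to the rate limits described above.
	if _, err = git.Projects.TriggerTestProjectHook(1234, 42, "push_events"); err != nil {
		log.Fatal(err)
	}

	// Remove the header again.
	if _, err = git.Projects.DeleteProjectCustomHeader(1234, 42, "X-Sketch-Header"); err != nil {
		log.Fatal(err)
	}
}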
diff --git a/vendor/github.com/xanzy/go-gitlab/protected_environments.go b/vendor/github.com/xanzy/go-gitlab/protected_environments.go index 3dc7fbdfba..50ee31173a 100644 --- a/vendor/github.com/xanzy/go-gitlab/protected_environments.go +++ b/vendor/github.com/xanzy/go-gitlab/protected_environments.go @@ -52,6 +52,7 @@ type EnvironmentAccessDescription struct { AccessLevelDescription string `json:"access_level_description"` UserID int `json:"user_id"` GroupID int `json:"group_id"` + GroupInheritanceType int `json:"group_inheritance_type"` } // EnvironmentApprovalRule represents the approval rules for a protected @@ -146,9 +147,10 @@ type ProtectRepositoryEnvironmentsOptions struct { // GitLab API docs: // https://docs.gitlab.com/ee/api/protected_environments.html#protect-a-single-environment type EnvironmentAccessOptions struct { - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` } // EnvironmentApprovalRuleOptions represents the approval rules for a protected @@ -209,11 +211,12 @@ type UpdateProtectedEnvironmentsOptions struct { // GitLab API docs: // https://docs.gitlab.com/ee/api/protected_environments.html#update-a-protected-environment type UpdateEnvironmentAccessOptions struct { - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - ID *int `url:"id,omitempty" json:"id,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + ID *int `url:"id,omitempty" json:"id,omitempty"` + UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` + Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` } // UpdateEnvironmentApprovalRuleOptions represents the updates to the approval @@ -242,7 +245,7 @@ func (s *ProtectedEnvironmentsService) UpdateProtectedEnvironments(pid interface if err != nil { return nil, nil, err } - u := fmt.Sprintf("projects/%s/protected_environments/%s", PathEscape(project), environment) + u := fmt.Sprintf("projects/%s/protected_environments/%s", PathEscape(project), PathEscape(environment)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { diff --git a/vendor/github.com/xanzy/go-gitlab/releaselinks.go b/vendor/github.com/xanzy/go-gitlab/releaselinks.go index f7dd05f172..8cde15f512 100644 --- a/vendor/github.com/xanzy/go-gitlab/releaselinks.go +++ b/vendor/github.com/xanzy/go-gitlab/releaselinks.go @@ -101,10 +101,11 @@ func (s *ReleaseLinksService) GetReleaseLink(pid interface{}, tagName string, li // // GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#create-a-release-link type CreateReleaseLinkOptions struct { - Name *string `url:"name,omitempty" 
json:"name,omitempty"` - URL *string `url:"url,omitempty" json:"url,omitempty"` - FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"` - LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + URL *string `url:"url,omitempty" json:"url,omitempty"` + FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"` + DirectAssetPath *string `url:"direct_asset_path,omitempty" json:"direct_asset_path,omitempty"` + LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"` } // CreateReleaseLink creates a link. @@ -137,10 +138,11 @@ func (s *ReleaseLinksService) CreateReleaseLink(pid interface{}, tagName string, // // GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#update-a-release-link type UpdateReleaseLinkOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - URL *string `url:"url,omitempty" json:"url,omitempty"` - FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"` - LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + URL *string `url:"url,omitempty" json:"url,omitempty"` + FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"` + DirectAssetPath *string `url:"direct_asset_path,omitempty" json:"direct_asset_path,omitempty"` + LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"` } // UpdateReleaseLink updates an asset link. diff --git a/vendor/github.com/xanzy/go-gitlab/releases.go b/vendor/github.com/xanzy/go-gitlab/releases.go index 3d6a897728..97cbff7bb9 100644 --- a/vendor/github.com/xanzy/go-gitlab/releases.go +++ b/vendor/github.com/xanzy/go-gitlab/releases.go @@ -187,10 +187,11 @@ type ReleaseAssetsOptions struct { // GitLab API docs: // https://docs.gitlab.com/ee/api/releases/index.html#create-a-release type ReleaseAssetLinkOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - URL *string `url:"url,omitempty" json:"url,omitempty"` - FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"` - LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + URL *string `url:"url,omitempty" json:"url,omitempty"` + FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"` + DirectAssetPath *string `url:"direct_asset_path,omitempty" json:"direct_asset_path,omitempty"` + LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"` } // CreateRelease creates a release. diff --git a/vendor/github.com/xanzy/go-gitlab/repository_files.go b/vendor/github.com/xanzy/go-gitlab/repository_files.go index fd96fa4336..7ffaa93b56 100644 --- a/vendor/github.com/xanzy/go-gitlab/repository_files.go +++ b/vendor/github.com/xanzy/go-gitlab/repository_files.go @@ -174,7 +174,9 @@ func (b FileBlameRange) String() string { // GitLab API docs: // https://docs.gitlab.com/ee/api/repository_files.html#get-file-blame-from-repository type GetFileBlameOptions struct { - Ref *string `url:"ref,omitempty" json:"ref,omitempty"` + Ref *string `url:"ref,omitempty" json:"ref,omitempty"` + RangeStart *int `url:"range[start],omitempty" json:"range[start],omitempty"` + RangeEnd *int `url:"range[end],omitempty" json:"range[end],omitempty"` } // GetFileBlame allows you to receive blame information. 
Each blame range @@ -213,6 +215,7 @@ func (s *RepositoryFilesService) GetFileBlame(pid interface{}, file string, opt // https://docs.gitlab.com/ee/api/repository_files.html#get-raw-file-from-repository type GetRawFileOptions struct { Ref *string `url:"ref,omitempty" json:"ref,omitempty"` + LFS *bool `url:"lfs,omitempty" json:"lfs,omitempty"` } // GetRawFile allows you to receive the raw file in repository. diff --git a/vendor/github.com/xanzy/go-gitlab/resource_group.go b/vendor/github.com/xanzy/go-gitlab/resource_group.go new file mode 100644 index 0000000000..b11cd8be7a --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/resource_group.go @@ -0,0 +1,165 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// ResourceGroupService handles communication with the resource +// group related methods of the GitLab API. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_groups.html +type ResourceGroupService struct { + client *Client +} + +// ResourceGroup represents a GitLab Project Resource Group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_groups.html +type ResourceGroup struct { + ID int `json:"id"` + Key string `json:"key"` + ProcessMode string `json:"process_mode"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` +} + +// String gets a string representation of a ResourceGroup. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_groups.html +func (rg ResourceGroup) String() string { + return Stringify(rg) +} + +// GetAllResourceGroupsForAProject allows you to get all resource +// groups associated with a given project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_groups.html#get-all-resource-groups-for-a-project +func (s *ResourceGroupService) GetAllResourceGroupsForAProject(pid interface{}, options ...RequestOptionFunc) ([]*ResourceGroup, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/resource_groups", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var rgs []*ResourceGroup + resp, err := s.client.Do(req, &rgs) + if err != nil { + return nil, resp, err + } + + return rgs, resp, nil +} + +// GetASpecificResourceGroup allows you to get a specific +// resource group for a given project.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_groups.html#get-a-specific-resource-group +func (s *ResourceGroupService) GetASpecificResourceGroup(pid interface{}, key string, options ...RequestOptionFunc) (*ResourceGroup, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/resource_groups/%s", PathEscape(project), key) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + rg := new(ResourceGroup) + resp, err := s.client.Do(req, rg) + if err != nil { + return nil, resp, err + } + + return rg, resp, nil +} + +// ListUpcomingJobsForASpecificResourceGroup allows you to get all +// upcoming jobs for a specific resource group for a given project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_groups.html#list-upcoming-jobs-for-a-specific-resource-group +func (s *ResourceGroupService) ListUpcomingJobsForASpecificResourceGroup(pid interface{}, key string, options ...RequestOptionFunc) ([]*Job, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/resource_groups/%s/upcoming_jobs", PathEscape(project), key) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var js []*Job + resp, err := s.client.Do(req, &js) + if err != nil { + return nil, resp, err + } + + return js, resp, nil +} + +// EditAnExistingResourceGroupOptions represents the available +// EditAnExistingResourceGroup options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_groups.html#edit-an-existing-resource-group +type EditAnExistingResourceGroupOptions struct { + ProcessMode *ResourceGroupProcessMode `url:"process_mode,omitempty" json:"process_mode,omitempty"` +} + +// EditAnExistingResourceGroup allows you to edit a specific +// resource group for a given project +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_groups.html#edit-an-existing-resource-group +func (s *ResourceGroupService) EditAnExistingResourceGroup(pid interface{}, key string, opts *EditAnExistingResourceGroupOptions, options ...RequestOptionFunc) (*ResourceGroup, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/resource_groups/%s", PathEscape(project), key) + + req, err := s.client.NewRequest(http.MethodPut, u, opts, options) + if err != nil { + return nil, nil, err + } + + rg := new(ResourceGroup) + resp, err := s.client.Do(req, rg) + if err != nil { + return nil, resp, err + } + + return rg, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/resource_iteration_events.go b/vendor/github.com/xanzy/go-gitlab/resource_iteration_events.go index 7ad354a7b2..142cb9e6ee 100644 --- a/vendor/github.com/xanzy/go-gitlab/resource_iteration_events.go +++ b/vendor/github.com/xanzy/go-gitlab/resource_iteration_events.go @@ -50,7 +50,7 @@ type Iteration struct { ID int `json:"id"` IID int `json:"iid"` Sequence int `json:"sequence"` - GroupId int `json:"group_id"` + GroupID int `json:"group_id"` Title string `json:"title"` Description string `json:"description"` State int `json:"state"` diff --git a/vendor/github.com/xanzy/go-gitlab/services.go b/vendor/github.com/xanzy/go-gitlab/services.go index f3c2c8d652..fcaa71ecc4 100644 --- a/vendor/github.com/xanzy/go-gitlab/services.go +++ b/vendor/github.com/xanzy/go-gitlab/services.go @@ -27,14 
+27,14 @@ import ( // ServicesService handles communication with the services related methods of // the GitLab API. // -// GitLab API docs: https://docs.gitlab.com/ee/api/services.html +// GitLab API docs: https://docs.gitlab.com/ee/api/integrations.html type ServicesService struct { client *Client } // Service represents a GitLab service. // -// GitLab API docs: https://docs.gitlab.com/ee/api/services.html +// GitLab API docs: https://docs.gitlab.com/ee/api/integrations.html type Service struct { ID int `json:"id"` Title string `json:"title"` @@ -61,7 +61,7 @@ type Service struct { // ListServices gets a list of all active services. // -// GitLab API docs: https://docs.gitlab.com/ee/api/services.html#list-all-active-services +// GitLab API docs: https://docs.gitlab.com/ee/api/integrations.html#list-all-active-integrations func (s *ServicesService) ListServices(pid interface{}, options ...RequestOptionFunc) ([]*Service, *Response, error) { project, err := parseID(pid) if err != nil { @@ -86,7 +86,7 @@ func (s *ServicesService) ListServices(pid interface{}, options ...RequestOption // CustomIssueTrackerService represents Custom Issue Tracker service settings. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#custom-issue-tracker +// https://docs.gitlab.com/ee/api/integrations.html#custom-issue-tracker type CustomIssueTrackerService struct { Service Properties *CustomIssueTrackerServiceProperties `json:"properties"` @@ -95,7 +95,7 @@ type CustomIssueTrackerService struct { // CustomIssueTrackerServiceProperties represents Custom Issue Tracker specific properties. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#custom-issue-tracker +// https://docs.gitlab.com/ee/api/integrations.html#custom-issue-tracker type CustomIssueTrackerServiceProperties struct { ProjectURL string `json:"project_url,omitempty"` IssuesURL string `json:"issues_url,omitempty"` @@ -105,7 +105,7 @@ type CustomIssueTrackerServiceProperties struct { // GetCustomIssueTrackerService gets Custom Issue Tracker service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#get-custom-issue-tracker-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-custom-issue-tracker-settings func (s *ServicesService) GetCustomIssueTrackerService(pid interface{}, options ...RequestOptionFunc) (*CustomIssueTrackerService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -131,20 +131,17 @@ func (s *ServicesService) GetCustomIssueTrackerService(pid interface{}, options // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-custom-issue-tracker-service +// https://docs.gitlab.com/ee/api/integrations.html#set-up-a-custom-issue-tracker type SetCustomIssueTrackerServiceOptions struct { NewIssueURL *string `url:"new_issue_url,omitempty" json:"new_issue_url,omitempty"` IssuesURL *string `url:"issues_url,omitempty" json:"issues_url,omitempty"` ProjectURL *string `url:"project_url,omitempty" json:"project_url,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` } // SetCustomIssueTrackerService sets Custom Issue Tracker service for a project. 
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-custom-issue-tracker-service +// https://docs.gitlab.com/ee/api/integrations.html#set-up-a-custom-issue-tracker func (s *ServicesService) SetCustomIssueTrackerService(pid interface{}, opt *SetCustomIssueTrackerServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -163,7 +160,7 @@ func (s *ServicesService) SetCustomIssueTrackerService(pid interface{}, opt *Set // DeleteCustomIssueTrackerService deletes Custom Issue Tracker service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#delete-custom-issue-tracker-service +// https://docs.gitlab.com/ee/api/integrations.html#disable-a-custom-issue-tracker func (s *ServicesService) DeleteCustomIssueTrackerService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -182,7 +179,7 @@ func (s *ServicesService) DeleteCustomIssueTrackerService(pid interface{}, optio // DataDogService represents DataDog service settings. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#datadog +// https://docs.gitlab.com/ee/api/integrations.html#datadog type DataDogService struct { Service Properties *DataDogServiceProperties `json:"properties"` @@ -191,7 +188,7 @@ type DataDogService struct { // DataDogServiceProperties represents DataDog specific properties. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#datadog +// https://docs.gitlab.com/ee/api/integrations.html#datadog type DataDogServiceProperties struct { APIURL string `url:"api_url,omitempty" json:"api_url,omitempty"` DataDogEnv string `url:"datadog_env,omitempty" json:"datadog_env,omitempty"` @@ -204,7 +201,7 @@ type DataDogServiceProperties struct { // GetDataDogService gets DataDog service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#get-datadog-integration-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-datadog-settings func (s *ServicesService) GetDataDogService(pid interface{}, options ...RequestOptionFunc) (*DataDogService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -230,7 +227,7 @@ func (s *ServicesService) GetDataDogService(pid interface{}, options ...RequestO // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-datadog-integration +// https://docs.gitlab.com/ee/api/integrations.html#set-up-datadog type SetDataDogServiceOptions struct { APIKey *string `url:"api_key,omitempty" json:"api_key,omitempty"` APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` @@ -244,7 +241,7 @@ type SetDataDogServiceOptions struct { // SetDataDogService sets DataDog service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-datadog-integration +// https://docs.gitlab.com/ee/api/integrations.html#set-up-datadog func (s *ServicesService) SetDataDogService(pid interface{}, opt *SetDataDogServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -263,7 +260,7 @@ func (s *ServicesService) SetDataDogService(pid interface{}, opt *SetDataDogServ // DeleteDataDogService deletes the DataDog service settings for a project. 
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#disable-datadog-integration +// https://docs.gitlab.com/ee/api/integrations.html#disable-datadog func (s *ServicesService) DeleteDataDogService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -282,7 +279,7 @@ func (s *ServicesService) DeleteDataDogService(pid interface{}, options ...Reque // DiscordService represents Discord service settings. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#discord +// https://docs.gitlab.com/ee/api/integrations.html#discord-notifications type DiscordService struct { Service Properties *DiscordServiceProperties `json:"properties"` @@ -291,7 +288,7 @@ type DiscordService struct { // DiscordServiceProperties represents Discord specific properties. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#discord +// https://docs.gitlab.com/ee/api/integrations.html#discord-notifications type DiscordServiceProperties struct { BranchesToBeNotified string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` NotifyOnlyBrokenPipelines bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` @@ -300,7 +297,7 @@ type DiscordServiceProperties struct { // GetDiscordService gets Discord service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#get-discord-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-discord-notifications-settings func (s *ServicesService) GetDiscordService(pid interface{}, options ...RequestOptionFunc) (*DiscordService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -326,26 +323,41 @@ func (s *ServicesService) GetDiscordService(pid interface{}, options ...RequestO // options. 
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-discord-service +// https://docs.gitlab.com/ee/api/integrations.html#set-up-discord-notifications type SetDiscordServiceOptions struct { - WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty"` - BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` - ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` - ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` - IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` - NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` - PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` - WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty"` + BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + ConfidentialIssuesChannel *string `url:"confidential_issue_channel,omitempty" json:"confidential_issue_channel,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + ConfidentialNoteChannel *string `url:"confidential_note_channel,omitempty" json:"confidential_note_channel,omitempty"` + DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` + DeploymentChannel *string `url:"deployment_channel,omitempty" json:"deployment_channel,omitempty"` + GroupConfidentialMentionsEvents *bool `url:"group_confidential_mentions_events,omitempty" json:"group_confidential_mentions_events,omitempty"` + GroupConfidentialMentionsChannel *string `url:"group_confidential_mentions_channel,omitempty" json:"group_confidential_mentions_channel,omitempty"` + GroupMentionsEvents *bool `url:"group_mentions_events,omitempty" json:"group_mentions_events,omitempty"` + GroupMentionsChannel *string `url:"group_mentions_channel,omitempty" json:"group_mentions_channel,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + IssueChannel *string `url:"issue_channel,omitempty" json:"issue_channel,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + MergeRequestChannel *string `url:"merge_request_channel,omitempty" json:"merge_request_channel,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + NoteChannel *string `url:"note_channel,omitempty" json:"note_channel,omitempty"` + NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + PipelineChannel *string 
`url:"pipeline_channel,omitempty" json:"pipeline_channel,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + PushChannel *string `url:"push_channel,omitempty" json:"push_channel,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + TagPushChannel *string `url:"tag_push_channel,omitempty" json:"tag_push_channel,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + WikiPageChannel *string `url:"wiki_page_channel,omitempty" json:"wiki_page_channel,omitempty"` } // SetDiscordService sets Discord service for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-discord-service +// https://docs.gitlab.com/ee/api/integrations.html#set-up-discord-notifications func (s *ServicesService) SetDiscordService(pid interface{}, opt *SetDiscordServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -364,7 +376,7 @@ func (s *ServicesService) SetDiscordService(pid interface{}, opt *SetDiscordServ // DeleteDiscordService deletes Discord service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#delete-discord-service +// https://docs.gitlab.com/ee/api/integrations.html#disable-discord-notifications func (s *ServicesService) DeleteDiscordService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -383,7 +395,7 @@ func (s *ServicesService) DeleteDiscordService(pid interface{}, options ...Reque // DroneCIService represents Drone CI service settings. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#drone-ci +// https://docs.gitlab.com/ee/api/integrations.html#drone type DroneCIService struct { Service Properties *DroneCIServiceProperties `json:"properties"` @@ -392,9 +404,8 @@ type DroneCIService struct { // DroneCIServiceProperties represents Drone CI specific properties. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#drone-ci +// https://docs.gitlab.com/ee/api/integrations.html#drone type DroneCIServiceProperties struct { - Token string `json:"token"` DroneURL string `json:"drone_url"` EnableSSLVerification bool `json:"enable_ssl_verification"` } @@ -402,7 +413,7 @@ type DroneCIServiceProperties struct { // GetDroneCIService gets Drone CI service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#get-drone-ci-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-drone-settings func (s *ServicesService) GetDroneCIService(pid interface{}, options ...RequestOptionFunc) (*DroneCIService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -428,17 +439,20 @@ func (s *ServicesService) GetDroneCIService(pid interface{}, options ...RequestO // options. 
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-drone-ci-service +// https://docs.gitlab.com/ee/api/integrations.html#set-up-drone type SetDroneCIServiceOptions struct { Token *string `url:"token,omitempty" json:"token,omitempty"` DroneURL *string `url:"drone_url,omitempty" json:"drone_url,omitempty"` EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` } // SetDroneCIService sets Drone CI service for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-drone-ci-service +// https://docs.gitlab.com/ee/api/integrations.html#set-up-drone func (s *ServicesService) SetDroneCIService(pid interface{}, opt *SetDroneCIServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -457,7 +471,7 @@ func (s *ServicesService) SetDroneCIService(pid interface{}, opt *SetDroneCIServ // DeleteDroneCIService deletes Drone CI service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#delete-drone-ci-service +// https://docs.gitlab.com/ee/api/integrations.html#disable-drone func (s *ServicesService) DeleteDroneCIService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -524,7 +538,7 @@ func (s *ServicesService) GetEmailsOnPushService(pid interface{}, options ...Req // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#createedit-emails-on-push-integration +// https://docs.gitlab.com/ee/api/integrations.html#set-up-emails-on-push type SetEmailsOnPushServiceOptions struct { Recipients *string `url:"recipients,omitempty" json:"recipients,omitempty"` DisableDiffs *bool `url:"disable_diffs,omitempty" json:"disable_diffs,omitempty"` @@ -537,7 +551,7 @@ type SetEmailsOnPushServiceOptions struct { // SetEmailsOnPushService sets Emails on Push service for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#createedit-emails-on-push-integration +// https://docs.gitlab.com/ee/api/integrations.html#set-up-emails-on-push func (s *ServicesService) SetEmailsOnPushService(pid interface{}, opt *SetEmailsOnPushServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -556,7 +570,7 @@ func (s *ServicesService) SetEmailsOnPushService(pid interface{}, opt *SetEmails // DeleteEmailsOnPushService deletes Emails on Push service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#disable-emails-on-push-integration +// https://docs.gitlab.com/ee/api/integrations.html#disable-emails-on-push func (s *ServicesService) DeleteEmailsOnPushService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -575,7 +589,7 @@ func (s *ServicesService) DeleteEmailsOnPushService(pid interface{}, options ... // ExternalWikiService represents External Wiki service settings. 
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#external-wiki +// https://docs.gitlab.com/ee/api/integrations.html#external-wiki type ExternalWikiService struct { Service Properties *ExternalWikiServiceProperties `json:"properties"` @@ -584,7 +598,7 @@ type ExternalWikiService struct { // ExternalWikiServiceProperties represents External Wiki specific properties. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#external-wiki +// https://docs.gitlab.com/ee/api/integrations.html#external-wiki type ExternalWikiServiceProperties struct { ExternalWikiURL string `json:"external_wiki_url"` } @@ -592,7 +606,7 @@ type ExternalWikiServiceProperties struct { // GetExternalWikiService gets External Wiki service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#get-external-wiki-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-external-wiki-settings func (s *ServicesService) GetExternalWikiService(pid interface{}, options ...RequestOptionFunc) (*ExternalWikiService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -618,7 +632,7 @@ func (s *ServicesService) GetExternalWikiService(pid interface{}, options ...Req // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-external-wiki-service +// https://docs.gitlab.com/ee/api/integrations.html#set-up-an-external-wiki type SetExternalWikiServiceOptions struct { ExternalWikiURL *string `url:"external_wiki_url,omitempty" json:"external_wiki_url,omitempty"` } @@ -626,7 +640,7 @@ type SetExternalWikiServiceOptions struct { // SetExternalWikiService sets External Wiki service for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-external-wiki-service +// https://docs.gitlab.com/ee/api/integrations.html#set-up-an-external-wiki func (s *ServicesService) SetExternalWikiService(pid interface{}, opt *SetExternalWikiServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -645,7 +659,7 @@ func (s *ServicesService) SetExternalWikiService(pid interface{}, opt *SetExtern // DeleteExternalWikiService deletes External Wiki service for project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#delete-external-wiki-service +// https://docs.gitlab.com/ee/api/integrations.html#disable-an-external-wiki func (s *ServicesService) DeleteExternalWikiService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -664,7 +678,7 @@ func (s *ServicesService) DeleteExternalWikiService(pid interface{}, options ... // GithubService represents Github service settings. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#github-premium +// https://docs.gitlab.com/ee/api/integrations.html#github type GithubService struct { Service Properties *GithubServiceProperties `json:"properties"` @@ -673,7 +687,7 @@ type GithubService struct { // GithubServiceProperties represents Github specific properties. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#github-premium +// https://docs.gitlab.com/ee/api/integrations.html#github type GithubServiceProperties struct { RepositoryURL string `json:"repository_url"` StaticContext bool `json:"static_context"` @@ -682,7 +696,7 @@ type GithubServiceProperties struct { // GetGithubService gets Github service settings for a project. 
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#get-github-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-github-settings func (s *ServicesService) GetGithubService(pid interface{}, options ...RequestOptionFunc) (*GithubService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -708,7 +722,7 @@ func (s *ServicesService) GetGithubService(pid interface{}, options ...RequestOp // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-github-service +// https://docs.gitlab.com/ee/api/integrations.html#set-up-github type SetGithubServiceOptions struct { Token *string `url:"token,omitempty" json:"token,omitempty"` RepositoryURL *string `url:"repository_url,omitempty" json:"repository_url,omitempty"` @@ -718,7 +732,7 @@ type SetGithubServiceOptions struct { // SetGithubService sets Github service for a project // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-github-service +// https://docs.gitlab.com/ee/api/integrations.html#set-up-github func (s *ServicesService) SetGithubService(pid interface{}, opt *SetGithubServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -737,7 +751,7 @@ func (s *ServicesService) SetGithubService(pid interface{}, opt *SetGithubServic // DeleteGithubService deletes Github service for a project // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#delete-github-service +// https://docs.gitlab.com/ee/api/integrations.html#disable-github func (s *ServicesService) DeleteGithubService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -753,11 +767,137 @@ func (s *ServicesService) DeleteGithubService(pid interface{}, options ...Reques return s.client.Do(req, nil) } +// SlackApplication represents GitLab for slack application settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#gitlab-for-slack-app +type SlackApplication struct { + Service + Properties *SlackApplicationProperties `json:"properties"` +} + +// SlackApplicationProperties represents GitLab for slack application specific properties. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#gitlab-for-slack-app +type SlackApplicationProperties struct { + Channel string `json:"channel"` + NotifyOnlyBrokenPipelines bool `json:"notify_only_broken_pipelines"` + BranchesToBeNotified string `json:"branches_to_be_notified"` + AlertEvents bool `json:"alert_events"` + IssuesEvents bool `json:"issues_events"` + ConfidentialIssuesEvents bool `json:"confidential_issues_events"` + MergeRequestsEvents bool `json:"merge_requests_events"` + NoteEvents bool `json:"note_events"` + ConfidentialNoteEvents bool `json:"confidential_note_events"` + DeploymentEvents bool `json:"deployment_events"` + IncidentsEvents bool `json:"incidents_events"` + PipelineEvents bool `json:"pipeline_events"` + PushEvents bool `json:"push_events"` + TagPushEvents bool `json:"tag_push_events"` + VulnerabilityEvents bool `json:"vulnerability_events"` + WikiPageEvents bool `json:"wiki_page_events"` + + // Deprecated: This parameter has been replaced with BranchesToBeNotified. + NotifyOnlyDefaultBranch bool `json:"notify_only_default_branch"` +} + +// GetSlackApplication gets the GitLab for Slack app integration settings for a +// project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-gitlab-for-slack-app-settings +func (s *ServicesService) GetSlackApplication(pid interface{}, options ...RequestOptionFunc) (*SlackApplication, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/integrations/gitlab-slack-application", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(SlackApplication) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetSlackApplicationOptions represents the available SetSlackApplication() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-gitlab-for-slack-app +type SetSlackApplicationOptions struct { + Channel *string `url:"channel,omitempty" json:"channel,omitempty"` + NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` + BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` + AlertEvents *bool `url:"alert_events,omitempty" json:"alert_events,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` + IncidentsEvents *bool `url:"incidents_events,omitempty" json:"incidents_events,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + VulnerabilityEvents *bool `url:"vulnerability_events,omitempty" json:"vulnerability_events,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + + // Deprecated: This parameter has been replaced with BranchesToBeNotified. + NotifyOnlyDefaultBranch *bool `url:"notify_only_default_branch,omitempty" json:"notify_only_default_branch,omitempty"` +} + +// SetSlackApplication updates the GitLab for Slack app integration for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-gitlab-for-slack-app +func (s *ServicesService) SetSlackApplication(pid interface{}, opt *SetSlackApplicationOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/integrations/gitlab-slack-application", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DisableSlackApplication disables the GitLab for Slack app integration for a project.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#disable-gitlab-for-slack-app +func (s *ServicesService) DisableSlackApplication(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/integrations/gitlab-slack-application", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + // SetGitLabCIServiceOptions represents the available SetGitLabCIService() // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#edit-gitlab-ci-service +// https://docs.gitlab.com/ee/api/integrations.html#edit-gitlab-ci-service type SetGitLabCIServiceOptions struct { Token *string `url:"token,omitempty" json:"token,omitempty"` ProjectURL *string `url:"project_url,omitempty" json:"project_url,omitempty"` @@ -766,7 +906,7 @@ type SetGitLabCIServiceOptions struct { // SetGitLabCIService sets GitLab CI service for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#edit-gitlab-ci-service +// https://docs.gitlab.com/ee/api/integrations.html#edit-gitlab-ci-service func (s *ServicesService) SetGitLabCIService(pid interface{}, opt *SetGitLabCIServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -785,7 +925,7 @@ func (s *ServicesService) SetGitLabCIService(pid interface{}, opt *SetGitLabCISe // DeleteGitLabCIService deletes GitLab CI service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#delete-gitlab-ci-service +// https://docs.gitlab.com/ee/api/integrations.html#delete-gitlab-ci-service func (s *ServicesService) DeleteGitLabCIService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -805,7 +945,7 @@ func (s *ServicesService) DeleteGitLabCIService(pid interface{}, options ...Requ // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#edit-hipchat-service +// https://docs.gitlab.com/ee/api/integrations.html#edit-hipchat-service type SetHipChatServiceOptions struct { Token *string `url:"token,omitempty" json:"token,omitempty" ` Room *string `url:"room,omitempty" json:"room,omitempty"` @@ -814,7 +954,7 @@ type SetHipChatServiceOptions struct { // SetHipChatService sets HipChat service for a project // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#edit-hipchat-service +// https://docs.gitlab.com/ee/api/integrations.html#edit-hipchat-service func (s *ServicesService) SetHipChatService(pid interface{}, opt *SetHipChatServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -833,7 +973,7 @@ func (s *ServicesService) SetHipChatService(pid interface{}, opt *SetHipChatServ // DeleteHipChatService deletes HipChat service for project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#delete-hipchat-service +// https://docs.gitlab.com/ee/api/integrations.html#delete-hipchat-service func (s *ServicesService) DeleteHipChatService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -852,7 +992,7 @@ func (s *ServicesService) DeleteHipChatService(pid interface{}, options ...Reque // JenkinsCIService represents Jenkins CI service settings. 
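The hunks above give the vendored go-gitlab client full lifecycle control over the project-level GitLab for Slack app integration (GET/PUT/DELETE on projects/:id/integrations/gitlab-slack-application). A minimal usage sketch, illustrative only and not part of the patch: the token and "group/project" path are placeholders, and gitlab.String/gitlab.Bool are the library's existing pointer helpers.

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// Placeholder token and project path, for illustration only.
	git, err := gitlab.NewClient("glpat-example-token")
	if err != nil {
		log.Fatal(err)
	}

	// PUT projects/:id/integrations/gitlab-slack-application
	_, err = git.Services.SetSlackApplication("group/project", &gitlab.SetSlackApplicationOptions{
		Channel:                   gitlab.String("#ci"),
		NotifyOnlyBrokenPipelines: gitlab.Bool(true),
		PipelineEvents:            gitlab.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}

	// GET projects/:id/integrations/gitlab-slack-application
	app, _, err := git.Services.GetSlackApplication("group/project")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("notifying channel %q", app.Properties.Channel)

	// DELETE projects/:id/integrations/gitlab-slack-application
	if _, err := git.Services.DisableSlackApplication("group/project"); err != nil {
		log.Fatal(err)
	}
}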
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#jenkins-ci +// https://docs.gitlab.com/ee/api/integrations.html#jenkins type JenkinsCIService struct { Service Properties *JenkinsCIServiceProperties `json:"properties"` @@ -861,17 +1001,18 @@ type JenkinsCIService struct { // JenkinsCIServiceProperties represents Jenkins CI specific properties. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#jenkins-ci +// https://docs.gitlab.com/ee/api/integrations.html#jenkins type JenkinsCIServiceProperties struct { - URL string `json:"jenkins_url"` - ProjectName string `json:"project_name"` - Username string `json:"username"` + URL string `json:"jenkins_url"` + EnableSSLVerification bool `json:"enable_ssl_verification"` + ProjectName string `json:"project_name"` + Username string `json:"username"` } // GetJenkinsCIService gets Jenkins CI service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#get-jenkins-ci-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-jenkins-settings func (s *ServicesService) GetJenkinsCIService(pid interface{}, options ...RequestOptionFunc) (*JenkinsCIService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -897,21 +1038,22 @@ func (s *ServicesService) GetJenkinsCIService(pid interface{}, options ...Reques // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#jenkins-ci +// https://docs.gitlab.com/ee/api/integrations.html#jenkins type SetJenkinsCIServiceOptions struct { - URL *string `url:"jenkins_url,omitempty" json:"jenkins_url,omitempty"` - ProjectName *string `url:"project_name,omitempty" json:"project_name,omitempty"` - Username *string `url:"username,omitempty" json:"username,omitempty"` - Password *string `url:"password,omitempty" json:"password,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + URL *string `url:"jenkins_url,omitempty" json:"jenkins_url,omitempty"` + EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` + ProjectName *string `url:"project_name,omitempty" json:"project_name,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + Password *string `url:"password,omitempty" json:"password,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` } // SetJenkinsCIService sets Jenkins service for a project // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#create-edit-jenkins-ci-service +// https://docs.gitlab.com/ee/api/integrations.html#set-up-jenkins func (s *ServicesService) SetJenkinsCIService(pid interface{}, opt *SetJenkinsCIServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -930,7 +1072,7 @@ func (s *ServicesService) SetJenkinsCIService(pid interface{}, opt *SetJenkinsCI // DeleteJenkinsCIService deletes Jenkins CI service for project. 
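Besides the documentation-URL moves, the Jenkins hunks above thread a new enable_ssl_verification field through both the returned properties and the options struct. A hedged sketch of setting it when configuring the integration; the field and function names follow the diff, while the URL, project name, and credentials are placeholders.

package main

import gitlab "github.com/xanzy/go-gitlab"

// configureJenkins shows the new EnableSSLVerification option in context.
// All values are illustrative placeholders.
func configureJenkins(git *gitlab.Client, project string) error {
	_, err := git.Services.SetJenkinsCIService(project, &gitlab.SetJenkinsCIServiceOptions{
		URL:                   gitlab.String("https://jenkins.example.com"),
		ProjectName:           gitlab.String("my-pipeline"),
		Username:              gitlab.String("gitlab"),
		Password:              gitlab.String("secret"),
		EnableSSLVerification: gitlab.Bool(true), // verify Jenkins' TLS certificate
	})
	return err
}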
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#delete-jira-service +// https://docs.gitlab.com/ee/api/integrations.html#disable-jenkins func (s *ServicesService) DeleteJenkinsCIService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -949,7 +1091,7 @@ func (s *ServicesService) DeleteJenkinsCIService(pid interface{}, options ...Req // JiraService represents Jira service settings. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#jira +// https://docs.gitlab.com/ee/api/integrations.html#jira type JiraService struct { Service Properties *JiraServiceProperties `json:"properties"` @@ -958,14 +1100,27 @@ type JiraService struct { // JiraServiceProperties represents Jira specific properties. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#jira +// https://docs.gitlab.com/ee/api/integrations.html#jira type JiraServiceProperties struct { - URL string `json:"url"` - APIURL string `json:"api_url"` - ProjectKey string `json:"project_key" ` - Username string `json:"username" ` - Password string `json:"password" ` - JiraIssueTransitionID string `json:"jira_issue_transition_id"` + URL string `json:"url"` + APIURL string `json:"api_url"` + Username string `json:"username" ` + Password string `json:"password" ` + Active bool `json:"active"` + JiraAuthType int `json:"jira_auth_type"` + JiraIssuePrefix string `json:"jira_issue_prefix"` + JiraIssueRegex string `json:"jira_issue_regex"` + JiraIssueTransitionAutomatic bool `json:"jira_issue_transition_automatic"` + JiraIssueTransitionID string `json:"jira_issue_transition_id"` + CommitEvents bool `json:"commit_events"` + MergeRequestsEvents bool `json:"merge_requests_events"` + CommentOnEventEnabled bool `json:"comment_on_event_enabled"` + IssuesEnabled bool `json:"issues_enabled"` + ProjectKeys []string `json:"project_keys" ` + UseInheritedSettings bool `json:"use_inherited_settings"` + + // Deprecated: This parameter was removed in GitLab 17.0 + ProjectKey string `json:"project_key" ` } // UnmarshalJSON decodes the Jira Service Properties. @@ -1001,13 +1156,13 @@ func (p *JiraServiceProperties) UnmarshalJSON(b []byte) error { // GetJiraService gets Jira service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#get-jira-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-jira-service-settings func (s *ServicesService) GetJiraService(pid interface{}, options ...RequestOptionFunc) (*JiraService, *Response, error) { project, err := parseID(pid) if err != nil { return nil, nil, err } - u := fmt.Sprintf("projects/%s/services/jira", PathEscape(project)) + u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project)) req, err := s.client.NewRequest(http.MethodGet, u, nil, options) if err != nil { @@ -1027,30 +1182,39 @@ func (s *ServicesService) GetJiraService(pid interface{}, options ...RequestOpti // options. 
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#edit-jira-service +// https://docs.gitlab.com/ee/api/integrations.html#edit-jira-service type SetJiraServiceOptions struct { - URL *string `url:"url,omitempty" json:"url,omitempty"` - APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` - ProjectKey *string `url:"project_key,omitempty" json:"project_key,omitempty" ` - Username *string `url:"username,omitempty" json:"username,omitempty" ` - Password *string `url:"password,omitempty" json:"password,omitempty" ` - Active *bool `url:"active,omitempty" json:"active,omitempty"` - JiraIssueTransitionID *string `url:"jira_issue_transition_id,omitempty" json:"jira_issue_transition_id,omitempty"` - CommitEvents *bool `url:"commit_events,omitempty" json:"commit_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - CommentOnEventEnabled *bool `url:"comment_on_event_enabled,omitempty" json:"comment_on_event_enabled,omitempty"` + URL *string `url:"url,omitempty" json:"url,omitempty"` + APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty" ` + Password *string `url:"password,omitempty" json:"password,omitempty" ` + Active *bool `url:"active,omitempty" json:"active,omitempty"` + JiraAuthType *int `url:"jira_auth_type,omitempty" json:"jira_auth_type,omitempty"` + JiraIssuePrefix *string `url:"jira_issue_prefix,omitempty" json:"jira_issue_prefix,omitempty"` + JiraIssueRegex *string `url:"jira_issue_regex,omitempty" json:"jira_issue_regex,omitempty"` + JiraIssueTransitionAutomatic *bool `url:"jira_issue_transition_automatic,omitempty" json:"jira_issue_transition_automatic,omitempty"` + JiraIssueTransitionID *string `url:"jira_issue_transition_id,omitempty" json:"jira_issue_transition_id,omitempty"` + CommitEvents *bool `url:"commit_events,omitempty" json:"commit_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + CommentOnEventEnabled *bool `url:"comment_on_event_enabled,omitempty" json:"comment_on_event_enabled,omitempty"` + IssuesEnabled *bool `url:"issues_enabled,omitempty" json:"issues_enabled,omitempty"` + ProjectKeys *[]string `url:"project_keys,comma,omitempty" json:"project_keys,omitempty" ` + UseInheritedSettings *bool `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"` + + // Deprecated: This parameter was removed in GitLab 17.0 + ProjectKey *string `url:"project_key,omitempty" json:"project_key,omitempty" ` } // SetJiraService sets Jira service for a project // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#edit-jira-service +// https://docs.gitlab.com/ee/api/integrations.html#edit-jira-service func (s *ServicesService) SetJiraService(pid interface{}, opt *SetJiraServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { return nil, err } - u := fmt.Sprintf("projects/%s/services/jira", PathEscape(project)) + u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { @@ -1063,13 +1227,13 @@ func (s *ServicesService) SetJiraService(pid interface{}, opt *SetJiraServiceOpt // DeleteJiraService deletes Jira service for project. 
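Beyond the endpoint move from services/jira to integrations/jira, the options struct above gains jira_auth_type, jira_issue_prefix, jira_issue_regex, jira_issue_transition_automatic, issues_enabled, project_keys, and use_inherited_settings, while project_key is deprecated (removed in GitLab 17.0). A sketch with placeholder values; the auth-type semantics (0 = Basic auth, 1 = Jira personal access token) come from GitLab's integration docs, not from this diff.

package main

import gitlab "github.com/xanzy/go-gitlab"

// configureJira shows the widened SetJiraServiceOptions. All values are
// illustrative placeholders; ProjectKeys supersedes the deprecated ProjectKey.
func configureJira(git *gitlab.Client, project string) error {
	keys := []string{"OPS", "DEV"} // hypothetical Jira project keys
	_, err := git.Services.SetJiraService(project, &gitlab.SetJiraServiceOptions{
		URL:          gitlab.String("https://example.atlassian.net"),
		Username:     gitlab.String("jira-bot@example.com"),
		Password:     gitlab.String("api-token-or-password"),
		JiraAuthType: gitlab.Int(0), // assumed: 0 = Basic auth, 1 = personal access token
		ProjectKeys:  &keys,
		CommitEvents: gitlab.Bool(true),
	})
	return err
}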
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#delete-jira-service +// https://docs.gitlab.com/ee/api/integrations.html#delete-jira-service func (s *ServicesService) DeleteJiraService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { return nil, err } - u := fmt.Sprintf("projects/%s/services/jira", PathEscape(project)) + u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project)) req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) if err != nil { @@ -1082,7 +1246,7 @@ func (s *ServicesService) DeleteJiraService(pid interface{}, options ...RequestO // MattermostService represents Mattermost service settings. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#mattermost-notifications +// https://docs.gitlab.com/ee/api/integrations.html#mattermost-notifications type MattermostService struct { Service Properties *MattermostServiceProperties `json:"properties"` @@ -1091,7 +1255,7 @@ type MattermostService struct { // MattermostServiceProperties represents Mattermost specific properties. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#mattermost-notifications +// https://docs.gitlab.com/ee/api/integrations.html#mattermost-notifications type MattermostServiceProperties struct { WebHook string `json:"webhook"` Username string `json:"username"` @@ -1113,7 +1277,7 @@ type MattermostServiceProperties struct { // GetMattermostService gets Mattermost service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#get-slack-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-slack-service-settings func (s *ServicesService) GetMattermostService(pid interface{}, options ...RequestOptionFunc) (*MattermostService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -1139,7 +1303,7 @@ func (s *ServicesService) GetMattermostService(pid interface{}, options ...Reque // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-mattermost-notifications-service +// https://docs.gitlab.com/ee/api/integrations.html#createedit-mattermost-notifications-service type SetMattermostServiceOptions struct { WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty"` Username *string `url:"username,omitempty" json:"username,omitempty"` @@ -1260,7 +1424,7 @@ func (s *ServicesService) DeleteMattermostSlashCommandsService(pid interface{}, // SetMattermostService sets Mattermost service for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-mattermost-notifications-service +// https://docs.gitlab.com/ee/api/integrations.html#createedit-mattermost-notifications-service func (s *ServicesService) SetMattermostService(pid interface{}, opt *SetMattermostServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -1279,7 +1443,7 @@ func (s *ServicesService) SetMattermostService(pid interface{}, opt *SetMattermo // DeleteMattermostService deletes Mattermost service for project. 
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#delete-mattermost-notifications-service +// https://docs.gitlab.com/ee/api/integrations.html#delete-mattermost-notifications-service func (s *ServicesService) DeleteMattermostService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -1298,7 +1462,7 @@ func (s *ServicesService) DeleteMattermostService(pid interface{}, options ...Re // MicrosoftTeamsService represents Microsoft Teams service settings. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#microsoft-teams +// https://docs.gitlab.com/ee/api/integrations.html#microsoft-teams type MicrosoftTeamsService struct { Service Properties *MicrosoftTeamsServiceProperties `json:"properties"` @@ -1307,7 +1471,7 @@ type MicrosoftTeamsService struct { // MicrosoftTeamsServiceProperties represents Microsoft Teams specific properties. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#microsoft-teams +// https://docs.gitlab.com/ee/api/integrations.html#microsoft-teams type MicrosoftTeamsServiceProperties struct { WebHook string `json:"webhook"` NotifyOnlyBrokenPipelines BoolValue `json:"notify_only_broken_pipelines"` @@ -1325,7 +1489,7 @@ type MicrosoftTeamsServiceProperties struct { // GetMicrosoftTeamsService gets MicrosoftTeams service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#get-microsoft-teams-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-microsoft-teams-service-settings func (s *ServicesService) GetMicrosoftTeamsService(pid interface{}, options ...RequestOptionFunc) (*MicrosoftTeamsService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -1351,7 +1515,7 @@ func (s *ServicesService) GetMicrosoftTeamsService(pid interface{}, options ...R // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#create-edit-microsoft-teams-service +// https://docs.gitlab.com/ee/api/integrations.html#create-edit-microsoft-teams-service type SetMicrosoftTeamsServiceOptions struct { WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty"` NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` @@ -1370,7 +1534,7 @@ type SetMicrosoftTeamsServiceOptions struct { // SetMicrosoftTeamsService sets Microsoft Teams service for a project // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#create-edit-microsoft-teams-service +// https://docs.gitlab.com/ee/api/integrations.html#create-edit-microsoft-teams-service func (s *ServicesService) SetMicrosoftTeamsService(pid interface{}, opt *SetMicrosoftTeamsServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -1388,7 +1552,7 @@ func (s *ServicesService) SetMicrosoftTeamsService(pid interface{}, opt *SetMicr // DeleteMicrosoftTeamsService deletes Microsoft Teams service for project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#delete-microsoft-teams-service +// https://docs.gitlab.com/ee/api/integrations.html#delete-microsoft-teams-service func (s *ServicesService) DeleteMicrosoftTeamsService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -1407,7 +1571,7 @@ func (s *ServicesService) DeleteMicrosoftTeamsService(pid interface{}, options . 
// PipelinesEmailService represents Pipelines Email service settings. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#pipeline-emails +// https://docs.gitlab.com/ee/api/integrations.html#pipeline-emails type PipelinesEmailService struct { Service Properties *PipelinesEmailProperties `json:"properties"` @@ -1416,7 +1580,7 @@ type PipelinesEmailService struct { // PipelinesEmailProperties represents PipelinesEmail specific properties. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#pipeline-emails +// https://docs.gitlab.com/ee/api/integrations.html#pipeline-emails type PipelinesEmailProperties struct { Recipients string `json:"recipients"` NotifyOnlyBrokenPipelines BoolValue `json:"notify_only_broken_pipelines"` @@ -1427,7 +1591,7 @@ type PipelinesEmailProperties struct { // GetPipelinesEmailService gets Pipelines Email service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#get-pipeline-emails-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-pipeline-emails-service-settings func (s *ServicesService) GetPipelinesEmailService(pid interface{}, options ...RequestOptionFunc) (*PipelinesEmailService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -1453,7 +1617,7 @@ func (s *ServicesService) GetPipelinesEmailService(pid interface{}, options ...R // SetPipelinesEmailService() options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#pipeline-emails +// https://docs.gitlab.com/ee/api/integrations.html#pipeline-emails type SetPipelinesEmailServiceOptions struct { Recipients *string `url:"recipients,omitempty" json:"recipients,omitempty"` NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` @@ -1466,7 +1630,7 @@ type SetPipelinesEmailServiceOptions struct { // SetPipelinesEmailService sets Pipelines Email service for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#pipeline-emails +// https://docs.gitlab.com/ee/api/integrations.html#pipeline-emails func (s *ServicesService) SetPipelinesEmailService(pid interface{}, opt *SetPipelinesEmailServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -1485,7 +1649,7 @@ func (s *ServicesService) SetPipelinesEmailService(pid interface{}, opt *SetPipe // DeletePipelinesEmailService deletes Pipelines Email service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#delete-pipeline-emails-service +// https://docs.gitlab.com/ee/api/integrations.html#delete-pipeline-emails-service func (s *ServicesService) DeletePipelinesEmailService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -1504,7 +1668,7 @@ func (s *ServicesService) DeletePipelinesEmailService(pid interface{}, options . // PrometheusService represents Prometheus service settings. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#prometheus +// https://docs.gitlab.com/ee/api/integrations.html#prometheus type PrometheusService struct { Service Properties *PrometheusServiceProperties `json:"properties"` @@ -1513,7 +1677,7 @@ type PrometheusService struct { // PrometheusServiceProperties represents Prometheus specific properties. 
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#prometheus +// https://docs.gitlab.com/ee/api/integrations.html#prometheus type PrometheusServiceProperties struct { APIURL string `json:"api_url"` GoogleIAPAudienceClientID string `json:"google_iap_audience_client_id"` @@ -1523,7 +1687,7 @@ type PrometheusServiceProperties struct { // GetPrometheusService gets Prometheus service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#get-prometheus-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-prometheus-service-settings func (s *ServicesService) GetPrometheusService(pid interface{}, options ...RequestOptionFunc) (*PrometheusService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -1549,7 +1713,7 @@ func (s *ServicesService) GetPrometheusService(pid interface{}, options ...Reque // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-prometheus-service +// https://docs.gitlab.com/ee/api/integrations.html#createedit-prometheus-service type SetPrometheusServiceOptions struct { APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` GoogleIAPAudienceClientID *string `url:"google_iap_audience_client_id,omitempty" json:"google_iap_audience_client_id,omitempty"` @@ -1559,7 +1723,7 @@ type SetPrometheusServiceOptions struct { // SetPrometheusService sets Prometheus service for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-prometheus-service +// https://docs.gitlab.com/ee/api/integrations.html#createedit-prometheus-service func (s *ServicesService) SetPrometheusService(pid interface{}, opt *SetPrometheusServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -1578,7 +1742,7 @@ func (s *ServicesService) SetPrometheusService(pid interface{}, opt *SetPromethe // DeletePrometheusService deletes Prometheus service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#delete-prometheus-service +// https://docs.gitlab.com/ee/api/integrations.html#delete-prometheus-service func (s *ServicesService) DeletePrometheusService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -1597,7 +1761,7 @@ func (s *ServicesService) DeletePrometheusService(pid interface{}, options ...Re // SlackService represents Slack service settings. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#slack +// https://docs.gitlab.com/ee/api/integrations.html#slack type SlackService struct { Service Properties *SlackServiceProperties `json:"properties"` @@ -1606,7 +1770,7 @@ type SlackService struct { // SlackServiceProperties represents Slack specific properties. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#slack +// https://docs.gitlab.com/ee/api/integrations.html#slack type SlackServiceProperties struct { WebHook string `json:"webhook"` Username string `json:"username"` @@ -1631,7 +1795,7 @@ type SlackServiceProperties struct { // GetSlackService gets Slack service settings for a project. 
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#get-slack-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-slack-service-settings func (s *ServicesService) GetSlackService(pid interface{}, options ...RequestOptionFunc) (*SlackService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -1657,7 +1821,7 @@ func (s *ServicesService) GetSlackService(pid interface{}, options ...RequestOpt // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#edit-slack-service +// https://docs.gitlab.com/ee/api/integrations.html#edit-slack-service type SetSlackServiceOptions struct { WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty"` Username *string `url:"username,omitempty" json:"username,omitempty"` @@ -1692,7 +1856,7 @@ type SetSlackServiceOptions struct { // SetSlackService sets Slack service for a project // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#edit-slack-service +// https://docs.gitlab.com/ee/api/integrations.html#edit-slack-service func (s *ServicesService) SetSlackService(pid interface{}, opt *SetSlackServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -1711,7 +1875,7 @@ func (s *ServicesService) SetSlackService(pid interface{}, opt *SetSlackServiceO // DeleteSlackService deletes Slack service for project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#delete-slack-service +// https://docs.gitlab.com/ee/api/integrations.html#delete-slack-service func (s *ServicesService) DeleteSlackService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -1773,7 +1937,7 @@ func (s *ServicesService) GetSlackSlashCommandsService(pid interface{}, options // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-slack-slash-command-service +// https://docs.gitlab.com/ee/api/integrations.html#createedit-slack-slash-command-service type SetSlackSlashCommandsServiceOptions struct { Token *string `url:"token,omitempty" json:"token,omitempty"` } @@ -1781,7 +1945,7 @@ type SetSlackSlashCommandsServiceOptions struct { // SetSlackSlashCommandsService sets Slack slash commands service for a project // // GitLab API docs: -// https://docs.gitlab.com/13.12/ee/api/services.html#createedit-slack-slash-command-service +// https://docs.gitlab.com/13.12/ee/api/integrations.html#createedit-slack-slash-command-service func (s *ServicesService) SetSlackSlashCommandsService(pid interface{}, opt *SetSlackSlashCommandsServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -1800,7 +1964,7 @@ func (s *ServicesService) SetSlackSlashCommandsService(pid interface{}, opt *Set // DeleteSlackSlashCommandsService deletes Slack slash commands service for project. // // GitLab API docs: -// https://docs.gitlab.com/13.12/ee/api/services.html#delete-slack-slash-command-service +// https://docs.gitlab.com/13.12/ee/api/integrations.html#delete-slack-slash-command-service func (s *ServicesService) DeleteSlackSlashCommandsService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -1922,7 +2086,7 @@ func (s *ServicesService) DeleteTelegramService(pid interface{}, options ...Requ // YouTrackService represents YouTrack service settings. 
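The Slack notification and slash-command hunks above change only documentation URLs, so existing call sites keep compiling unchanged. For completeness, a minimal sketch of enabling slash commands through the unchanged API; the token is a placeholder.

package main

import gitlab "github.com/xanzy/go-gitlab"

// enableSlashCommands is an illustrative helper, not part of the patch.
func enableSlashCommands(git *gitlab.Client, project string) error {
	_, err := git.Services.SetSlackSlashCommandsService(project, &gitlab.SetSlackSlashCommandsServiceOptions{
		Token: gitlab.String("slash-command-token"), // placeholder
	})
	return err
}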
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#youtrack +// https://docs.gitlab.com/ee/api/integrations.html#youtrack type YouTrackService struct { Service Properties *YouTrackServiceProperties `json:"properties"` @@ -1931,7 +2095,7 @@ type YouTrackService struct { // YouTrackServiceProperties represents YouTrack specific properties. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#youtrack +// https://docs.gitlab.com/ee/api/integrations.html#youtrack type YouTrackServiceProperties struct { IssuesURL string `json:"issues_url"` ProjectURL string `json:"project_url"` @@ -1942,7 +2106,7 @@ type YouTrackServiceProperties struct { // GetYouTrackService gets YouTrack service settings for a project. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#get-youtrack-service-settings +// https://docs.gitlab.com/ee/api/integrations.html#get-youtrack-service-settings func (s *ServicesService) GetYouTrackService(pid interface{}, options ...RequestOptionFunc) (*YouTrackService, *Response, error) { project, err := parseID(pid) if err != nil { @@ -1968,7 +2132,7 @@ func (s *ServicesService) GetYouTrackService(pid interface{}, options ...Request // options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-youtrack-service +// https://docs.gitlab.com/ee/api/integrations.html#createedit-youtrack-service type SetYouTrackServiceOptions struct { IssuesURL *string `url:"issues_url,omitempty" json:"issues_url,omitempty"` ProjectURL *string `url:"project_url,omitempty" json:"project_url,omitempty"` @@ -1979,7 +2143,7 @@ type SetYouTrackServiceOptions struct { // SetYouTrackService sets YouTrack service for a project // // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#createedit-youtrack-service +// https://docs.gitlab.com/ee/api/integrations.html#createedit-youtrack-service func (s *ServicesService) SetYouTrackService(pid interface{}, opt *SetYouTrackServiceOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { @@ -1998,7 +2162,7 @@ func (s *ServicesService) SetYouTrackService(pid interface{}, opt *SetYouTrackSe // DeleteYouTrackService deletes YouTrack service settings for a project. 
// // GitLab API docs: -// https://docs.gitlab.com/ee/api/services.html#delete-youtrack-service +// https://docs.gitlab.com/ee/api/integrations.html#delete-youtrack-service func (s *ServicesService) DeleteYouTrackService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { diff --git a/vendor/github.com/xanzy/go-gitlab/settings.go b/vendor/github.com/xanzy/go-gitlab/settings.go index ac9056687b..f4d67a4f04 100644 --- a/vendor/github.com/xanzy/go-gitlab/settings.go +++ b/vendor/github.com/xanzy/go-gitlab/settings.go @@ -17,6 +17,7 @@ package gitlab import ( + "encoding/json" "net/http" "time" ) @@ -42,339 +43,408 @@ type SettingsService struct { // https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/lib/ee/api/helpers/settings_helpers.rb#L10 // https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/app/helpers/ee/application_settings_helper.rb#L20 type Settings struct { - ID int `json:"id"` - AbuseNotificationEmail string `json:"abuse_notification_email"` - AdminMode bool `json:"admin_mode"` - AfterSignOutPath string `json:"after_sign_out_path"` - AfterSignUpText string `json:"after_sign_up_text"` - AkismetAPIKey string `json:"akismet_api_key"` - AkismetEnabled bool `json:"akismet_enabled"` - AllowGroupOwnersToManageLDAP bool `json:"allow_group_owners_to_manage_ldap"` - AllowLocalRequestsFromSystemHooks bool `json:"allow_local_requests_from_system_hooks"` - AllowLocalRequestsFromWebHooksAndServices bool `json:"allow_local_requests_from_web_hooks_and_services"` - ArchiveBuildsInHumanReadable string `json:"archive_builds_in_human_readable"` - AssetProxyAllowlist []string `json:"asset_proxy_allowlist"` - AssetProxyEnabled bool `json:"asset_proxy_enabled"` - AssetProxyURL string `json:"asset_proxy_url"` - AssetProxySecretKey string `json:"asset_proxy_secret_key"` - AuthorizedKeysEnabled bool `json:"authorized_keys_enabled"` - AutoDevOpsDomain string `json:"auto_devops_domain"` - AutoDevOpsEnabled bool `json:"auto_devops_enabled"` - AutomaticPurchasedStorageAllocation bool `json:"automatic_purchased_storage_allocation"` - CanCreateGroup bool `json:"can_create_group"` - CheckNamespacePlan bool `json:"check_namespace_plan"` - CommitEmailHostname string `json:"commit_email_hostname"` - ContainerExpirationPoliciesEnableHistoricEntries bool `json:"container_expiration_policies_enable_historic_entries"` - ContainerRegistryCleanupTagsServiceMaxListSize int `json:"container_registry_cleanup_tags_service_max_list_size"` - ContainerRegistryDeleteTagsServiceTimeout int `json:"container_registry_delete_tags_service_timeout"` - ContainerRegistryExpirationPoliciesCaching bool `json:"container_registry_expiration_policies_caching"` - ContainerRegistryExpirationPoliciesWorkerCapacity int `json:"container_registry_expiration_policies_worker_capacity"` - ContainerRegistryImportCreatedBefore *time.Time `json:"container_registry_import_created_before"` - ContainerRegistryImportMaxRetries int `json:"container_registry_import_max_retries"` - ContainerRegistryImportMaxStepDuration int `json:"container_registry_import_max_step_duration"` - ContainerRegistryImportMaxTagsCount int `json:"container_registry_import_max_tags_count"` - ContainerRegistryImportStartMaxRetries int `json:"container_registry_import_start_max_retries"` - ContainerRegistryImportTargetPlan string `json:"container_registry_import_target_plan"` - ContainerRegistryTokenExpireDelay int `json:"container_registry_token_expire_delay"` - CreatedAt *time.Time `json:"created_at"` - 
CustomHTTPCloneURLRoot string `json:"custom_http_clone_url_root"` - DNSRebindingProtectionEnabled bool `json:"dns_rebinding_protection_enabled"` - DSAKeyRestriction int `json:"dsa_key_restriction"` - DeactivateDormantUsers bool `json:"deactivate_dormant_users"` - DefaultArtifactsExpireIn string `json:"default_artifacts_expire_in"` - DefaultBranchName string `json:"default_branch_name"` - DefaultBranchProtection int `json:"default_branch_protection"` - DefaultCiConfigPath string `json:"default_ci_config_path"` - DefaultGroupVisibility VisibilityValue `json:"default_group_visibility"` - DefaultProjectCreation int `json:"default_project_creation"` - DefaultProjectDeletionProtection bool `json:"default_project_deletion_protection"` - DefaultProjectVisibility VisibilityValue `json:"default_project_visibility"` - DefaultProjectsLimit int `json:"default_projects_limit"` - DefaultSnippetVisibility VisibilityValue `json:"default_snippet_visibility"` - DelayedGroupDeletion bool `json:"delayed_group_deletion"` - DelayedProjectDeletion bool `json:"delayed_project_deletion"` - DeleteInactiveProjects bool `json:"delete_inactive_projects"` - DeletionAdjournedPeriod int `json:"deletion_adjourned_period"` - DiffMaxFiles int `json:"diff_max_files"` - DiffMaxLines int `json:"diff_max_lines"` - DiffMaxPatchBytes int `json:"diff_max_patch_bytes"` - DisableFeedToken bool `json:"disable_feed_token"` - DisableOverridingApproversPerMergeRequest bool `json:"disable_overriding_approvers_per_merge_request"` - DisabledOauthSignInSources []string `json:"disabled_oauth_sign_in_sources"` - DomainAllowlist []string `json:"domain_allowlist"` - DomainDenylist []string `json:"domain_denylist"` - DomainDenylistEnabled bool `json:"domain_denylist_enabled"` - ECDSAKeyRestriction int `json:"ecdsa_key_restriction"` - ECDSASKKeyRestriction int `json:"ecdsa_sk_key_restriction"` - EKSAccessKeyID string `json:"eks_access_key_id"` - EKSAccountID string `json:"eks_account_id"` - EKSIntegrationEnabled bool `json:"eks_integration_enabled"` - EKSSecretAccessKey string `json:"eks_secret_access_key"` - Ed25519KeyRestriction int `json:"ed25519_key_restriction"` - Ed25519SKKeyRestriction int `json:"ed25519_sk_key_restriction"` - ElasticsearchAWS bool `json:"elasticsearch_aws"` - ElasticsearchAWSAccessKey string `json:"elasticsearch_aws_access_key"` - ElasticsearchAWSRegion string `json:"elasticsearch_aws_region"` - ElasticsearchAWSSecretAccessKey string `json:"elasticsearch_aws_secret_access_key"` - ElasticsearchAnalyzersKuromojiEnabled bool `json:"elasticsearch_analyzers_kuromoji_enabled"` - ElasticsearchAnalyzersKuromojiSearch bool `json:"elasticsearch_analyzers_kuromoji_search"` - ElasticsearchAnalyzersSmartCNEnabled bool `json:"elasticsearch_analyzers_smartcn_enabled"` - ElasticsearchAnalyzersSmartCNSearch bool `json:"elasticsearch_analyzers_smartcn_search"` - ElasticsearchClientRequestTimeout int `json:"elasticsearch_client_request_timeout"` - ElasticsearchIndexedFieldLengthLimit int `json:"elasticsearch_indexed_field_length_limit"` - ElasticsearchIndexedFileSizeLimitKB int `json:"elasticsearch_indexed_file_size_limit_kb"` - ElasticsearchIndexing bool `json:"elasticsearch_indexing"` - ElasticsearchLimitIndexing bool `json:"elasticsearch_limit_indexing"` - ElasticsearchMaxBulkConcurrency int `json:"elasticsearch_max_bulk_concurrency"` - ElasticsearchMaxBulkSizeMB int `json:"elasticsearch_max_bulk_size_mb"` - ElasticsearchNamespaceIDs []int `json:"elasticsearch_namespace_ids"` - ElasticsearchPassword string `json:"elasticsearch_password"` 
- ElasticsearchPauseIndexing bool `json:"elasticsearch_pause_indexing"` - ElasticsearchProjectIDs []int `json:"elasticsearch_project_ids"` - ElasticsearchReplicas int `json:"elasticsearch_replicas"` - ElasticsearchSearch bool `json:"elasticsearch_search"` - ElasticsearchShards int `json:"elasticsearch_shards"` - ElasticsearchURL []string `json:"elasticsearch_url"` - ElasticsearchUsername string `json:"elasticsearch_username"` - EmailAdditionalText string `json:"email_additional_text"` - EmailAuthorInBody bool `json:"email_author_in_body"` - EmailRestrictions string `json:"email_restrictions"` - EmailRestrictionsEnabled bool `json:"email_restrictions_enabled"` - EnabledGitAccessProtocol string `json:"enabled_git_access_protocol"` - EnforceNamespaceStorageLimit bool `json:"enforce_namespace_storage_limit"` - EnforcePATExpiration bool `json:"enforce_pat_expiration"` - EnforceSSHKeyExpiration bool `json:"enforce_ssh_key_expiration"` - EnforceTerms bool `json:"enforce_terms"` - ExternalAuthClientCert string `json:"external_auth_client_cert"` - ExternalAuthClientKey string `json:"external_auth_client_key"` - ExternalAuthClientKeyPass string `json:"external_auth_client_key_pass"` - ExternalAuthorizationServiceDefaultLabel string `json:"external_authorization_service_default_label"` - ExternalAuthorizationServiceEnabled bool `json:"external_authorization_service_enabled"` - ExternalAuthorizationServiceTimeout float64 `json:"external_authorization_service_timeout"` - ExternalAuthorizationServiceURL string `json:"external_authorization_service_url"` - ExternalPipelineValidationServiceTimeout int `json:"external_pipeline_validation_service_timeout"` - ExternalPipelineValidationServiceToken string `json:"external_pipeline_validation_service_token"` - ExternalPipelineValidationServiceURL string `json:"external_pipeline_validation_service_url"` - FileTemplateProjectID int `json:"file_template_project_id"` - FirstDayOfWeek int `json:"first_day_of_week"` - FlocEnabled bool `json:"floc_enabled"` - GeoNodeAllowedIPs string `json:"geo_node_allowed_ips"` - GeoStatusTimeout int `json:"geo_status_timeout"` - GitTwoFactorSessionExpiry int `json:"git_two_factor_session_expiry"` - GitalyTimeoutDefault int `json:"gitaly_timeout_default"` - GitalyTimeoutFast int `json:"gitaly_timeout_fast"` - GitalyTimeoutMedium int `json:"gitaly_timeout_medium"` - GitpodEnabled bool `json:"gitpod_enabled"` - GitpodURL string `json:"gitpod_url"` - GitRateLimitUsersAllowlist []string `json:"git_rate_limit_users_allowlist"` - GrafanaEnabled bool `json:"grafana_enabled"` - GrafanaURL string `json:"grafana_url"` - GravatarEnabled bool `json:"gravatar_enabled"` - GroupDownloadExportLimit int `json:"group_download_export_limit"` - GroupExportLimit int `json:"group_export_limit"` - GroupImportLimit int `json:"group_import_limit"` - GroupOwnersCanManageDefaultBranchProtection bool `json:"group_owners_can_manage_default_branch_protection"` - GroupRunnerTokenExpirationInterval int `json:"group_runner_token_expiration_interval"` - HTMLEmailsEnabled bool `json:"html_emails_enabled"` - HashedStorageEnabled bool `json:"hashed_storage_enabled"` - HelpPageDocumentationBaseURL string `json:"help_page_documentation_base_url"` - HelpPageHideCommercialContent bool `json:"help_page_hide_commercial_content"` - HelpPageSupportURL string `json:"help_page_support_url"` - HelpPageText string `json:"help_page_text"` - HelpText string `json:"help_text"` - HideThirdPartyOffers bool `json:"hide_third_party_offers"` - HomePageURL string `json:"home_page_url"` - 
HousekeepingBitmapsEnabled bool `json:"housekeeping_bitmaps_enabled"` - HousekeepingEnabled bool `json:"housekeeping_enabled"` - HousekeepingFullRepackPeriod int `json:"housekeeping_full_repack_period"` - HousekeepingGcPeriod int `json:"housekeeping_gc_period"` - HousekeepingIncrementalRepackPeriod int `json:"housekeeping_incremental_repack_period"` - HousekeepingOptimizeRepositoryPeriod int `json:"housekeeping_optimize_repository_period"` - ImportSources []string `json:"import_sources"` - InactiveProjectsDeleteAfterMonths int `json:"inactive_projects_delete_after_months"` - InactiveProjectsMinSizeMB int `json:"inactive_projects_min_size_mb"` - InactiveProjectsSendWarningEmailAfterMonths int `json:"inactive_projects_send_warning_email_after_months"` - InProductMarketingEmailsEnabled bool `json:"in_product_marketing_emails_enabled"` - InvisibleCaptchaEnabled bool `json:"invisible_captcha_enabled"` - IssuesCreateLimit int `json:"issues_create_limit"` - KeepLatestArtifact bool `json:"keep_latest_artifact"` - KrokiEnabled bool `json:"kroki_enabled"` - KrokiFormats map[string]bool `json:"kroki_formats"` - KrokiURL string `json:"kroki_url"` - LocalMarkdownVersion int `json:"local_markdown_version"` - LockMembershipsToLDAP bool `json:"lock_memberships_to_ldap"` - LoginRecaptchaProtectionEnabled bool `json:"login_recaptcha_protection_enabled"` - MailgunEventsEnabled bool `json:"mailgun_events_enabled"` - MailgunSigningKey string `json:"mailgun_signing_key"` - MaintenanceMode bool `json:"maintenance_mode"` - MaintenanceModeMessage string `json:"maintenance_mode_message"` - MaxArtifactsSize int `json:"max_artifacts_size"` - MaxAttachmentSize int `json:"max_attachment_size"` - MaxExportSize int `json:"max_export_size"` - MaxImportSize int `json:"max_import_size"` - MaxNumberOfRepositoryDownloads int `json:"max_number_of_repository_downloads"` - MaxNumberOfRepositoryDownloadsWithinTimePeriod int `json:"max_number_of_repository_downloads_within_time_period"` - MaxPagesSize int `json:"max_pages_size"` - MaxPersonalAccessTokenLifetime int `json:"max_personal_access_token_lifetime"` - MaxSSHKeyLifetime int `json:"max_ssh_key_lifetime"` - MaxYAMLDepth int `json:"max_yaml_depth"` - MaxYAMLSizeBytes int `json:"max_yaml_size_bytes"` - MetricsMethodCallThreshold int `json:"metrics_method_call_threshold"` - MinimumPasswordLength int `json:"minimum_password_length"` - MirrorAvailable bool `json:"mirror_available"` - MirrorCapacityThreshold int `json:"mirror_capacity_threshold"` - MirrorMaxCapacity int `json:"mirror_max_capacity"` - MirrorMaxDelay int `json:"mirror_max_delay"` - NPMPackageRequestsForwarding bool `json:"npm_package_requests_forwarding"` - NotesCreateLimit int `json:"notes_create_limit"` - NotifyOnUnknownSignIn bool `json:"notify_on_unknown_sign_in"` - OutboundLocalRequestsAllowlistRaw string `json:"outbound_local_requests_allowlist_raw"` - OutboundLocalRequestsWhitelist []string `json:"outbound_local_requests_whitelist"` - PackageRegistryCleanupPoliciesWorkerCapacity int `json:"package_registry_cleanup_policies_worker_capacity"` - PagesDomainVerificationEnabled bool `json:"pages_domain_verification_enabled"` - PasswordAuthenticationEnabledForGit bool `json:"password_authentication_enabled_for_git"` - PasswordAuthenticationEnabledForWeb bool `json:"password_authentication_enabled_for_web"` - PasswordNumberRequired bool `json:"password_number_required"` - PasswordSymbolRequired bool `json:"password_symbol_required"` - PasswordUppercaseRequired bool `json:"password_uppercase_required"` - 
PasswordLowercaseRequired bool `json:"password_lowercase_required"` - PerformanceBarAllowedGroupID int `json:"performance_bar_allowed_group_id"` - PerformanceBarAllowedGroupPath string `json:"performance_bar_allowed_group_path"` - PerformanceBarEnabled bool `json:"performance_bar_enabled"` - PersonalAccessTokenPrefix string `json:"personal_access_token_prefix"` - PipelineLimitPerProjectUserSha int `json:"pipeline_limit_per_project_user_sha"` - PlantumlEnabled bool `json:"plantuml_enabled"` - PlantumlURL string `json:"plantuml_url"` - PollingIntervalMultiplier float64 `json:"polling_interval_multiplier,string"` - PreventMergeRequestsAuthorApproval bool `json:"prevent_merge_request_author_approval"` - PreventMergeRequestsCommittersApproval bool `json:"prevent_merge_request_committers_approval"` - ProjectDownloadExportLimit int `json:"project_download_export_limit"` - ProjectExportEnabled bool `json:"project_export_enabled"` - ProjectExportLimit int `json:"project_export_limit"` - ProjectImportLimit int `json:"project_import_limit"` - ProjectRunnerTokenExpirationInterval int `json:"project_runner_token_expiration_interval"` - PrometheusMetricsEnabled bool `json:"prometheus_metrics_enabled"` - ProtectedCIVariables bool `json:"protected_ci_variables"` - PseudonymizerEnabled bool `json:"pseudonymizer_enabled"` - PushEventActivitiesLimit int `json:"push_event_activities_limit"` - PushEventHooksLimit int `json:"push_event_hooks_limit"` - PyPIPackageRequestsForwarding bool `json:"pypi_package_requests_forwarding"` - RSAKeyRestriction int `json:"rsa_key_restriction"` - RateLimitingResponseText string `json:"rate_limiting_response_text"` - RawBlobRequestLimit int `json:"raw_blob_request_limit"` - RecaptchaEnabled bool `json:"recaptcha_enabled"` - RecaptchaPrivateKey string `json:"recaptcha_private_key"` - RecaptchaSiteKey string `json:"recaptcha_site_key"` - ReceiveMaxInputSize int `json:"receive_max_input_size"` - RepositoryChecksEnabled bool `json:"repository_checks_enabled"` - RepositorySizeLimit int `json:"repository_size_limit"` - RepositoryStorages []string `json:"repository_storages"` - RepositoryStoragesWeighted map[string]int `json:"repository_storages_weighted"` - RequireAdminApprovalAfterUserSignup bool `json:"require_admin_approval_after_user_signup"` - RequireTwoFactorAuthentication bool `json:"require_two_factor_authentication"` - RestrictedVisibilityLevels []VisibilityValue `json:"restricted_visibility_levels"` - RunnerTokenExpirationInterval int `json:"runner_token_expiration_interval"` - SearchRateLimit int `json:"search_rate_limit"` - SearchRateLimitUnauthenticated int `json:"search_rate_limit_unauthenticated"` - SecretDetectionRevocationTokenTypesURL string `json:"secret_detection_revocation_token_types_url"` - SecretDetectionTokenRevocationEnabled bool `json:"secret_detection_token_revocation_enabled"` - SecretDetectionTokenRevocationToken string `json:"secret_detection_token_revocation_token"` - SecretDetectionTokenRevocationURL string `json:"secret_detection_token_revocation_url"` - SendUserConfirmationEmail bool `json:"send_user_confirmation_email"` - SentryClientsideDSN string `json:"sentry_clientside_dsn"` - SentryDSN string `json:"sentry_dsn"` - SentryEnabled bool `json:"sentry_enabled"` - SentryEnvironment string `json:"sentry_environment"` - SessionExpireDelay int `json:"session_expire_delay"` - SharedRunnersEnabled bool `json:"shared_runners_enabled"` - SharedRunnersMinutes int `json:"shared_runners_minutes"` - SharedRunnersText string `json:"shared_runners_text"` - 
-	SidekiqJobLimiterCompressionThresholdBytes int `json:"sidekiq_job_limiter_compression_threshold_bytes"`
-	SidekiqJobLimiterLimitBytes int `json:"sidekiq_job_limiter_limit_bytes"`
-	SidekiqJobLimiterMode string `json:"sidekiq_job_limiter_mode"`
-	SignInText string `json:"sign_in_text"`
-	SignupEnabled bool `json:"signup_enabled"`
-	SlackAppEnabled bool `json:"slack_app_enabled"`
-	SlackAppID string `json:"slack_app_id"`
-	SlackAppSecret string `json:"slack_app_secret"`
-	SlackAppSigningSecret string `json:"slack_app_signing_secret"`
-	SlackAppVerificationToken string `json:"slack_app_verification_token"`
-	SnippetSizeLimit int `json:"snippet_size_limit"`
-	SnowplowAppID string `json:"snowplow_app_id"`
-	SnowplowCollectorHostname string `json:"snowplow_collector_hostname"`
-	SnowplowCookieDomain string `json:"snowplow_cookie_domain"`
-	SnowplowEnabled bool `json:"snowplow_enabled"`
-	SourcegraphEnabled bool `json:"sourcegraph_enabled"`
-	SourcegraphPublicOnly bool `json:"sourcegraph_public_only"`
-	SourcegraphURL string `json:"sourcegraph_url"`
-	SpamCheckAPIKey string `json:"spam_check_api_key"`
-	SpamCheckEndpointEnabled bool `json:"spam_check_endpoint_enabled"`
-	SpamCheckEndpointURL string `json:"spam_check_endpoint_url"`
-	SuggestPipelineEnabled bool `json:"suggest_pipeline_enabled"`
-	TerminalMaxSessionTime int `json:"terminal_max_session_time"`
-	Terms string `json:"terms"`
-	ThrottleAuthenticatedAPIEnabled bool `json:"throttle_authenticated_api_enabled"`
-	ThrottleAuthenticatedAPIPeriodInSeconds int `json:"throttle_authenticated_api_period_in_seconds"`
-	ThrottleAuthenticatedAPIRequestsPerPeriod int `json:"throttle_authenticated_api_requests_per_period"`
-	ThrottleAuthenticatedDeprecatedAPIEnabled bool `json:"throttle_authenticated_deprecated_api_enabled"`
-	ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_authenticated_deprecated_api_period_in_seconds"`
-	ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_authenticated_deprecated_api_requests_per_period"`
-	ThrottleAuthenticatedFilesAPIEnabled bool `json:"throttle_authenticated_files_api_enabled"`
-	ThrottleAuthenticatedFilesAPIPeriodInSeconds int `json:"throttle_authenticated_files_api_period_in_seconds"`
-	ThrottleAuthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_authenticated_files_api_requests_per_period"`
-	ThrottleAuthenticatedGitLFSEnabled bool `json:"throttle_authenticated_git_lfs_enabled"`
-	ThrottleAuthenticatedGitLFSPeriodInSeconds int `json:"throttle_authenticated_git_lfs_period_in_seconds"`
-	ThrottleAuthenticatedGitLFSRequestsPerPeriod int `json:"throttle_authenticated_git_lfs_requests_per_period"`
-	ThrottleAuthenticatedPackagesAPIEnabled bool `json:"throttle_authenticated_packages_api_enabled"`
-	ThrottleAuthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_authenticated_packages_api_period_in_seconds"`
-	ThrottleAuthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_authenticated_packages_api_requests_per_period"`
-	ThrottleAuthenticatedWebEnabled bool `json:"throttle_authenticated_web_enabled"`
-	ThrottleAuthenticatedWebPeriodInSeconds int `json:"throttle_authenticated_web_period_in_seconds"`
-	ThrottleAuthenticatedWebRequestsPerPeriod int `json:"throttle_authenticated_web_requests_per_period"`
-	ThrottleIncidentManagementNotificationEnabled bool `json:"throttle_incident_management_notification_enabled"`
-	ThrottleIncidentManagementNotificationPerPeriod int `json:"throttle_incident_management_notification_per_period"`
-	ThrottleIncidentManagementNotificationPeriodInSeconds int `json:"throttle_incident_management_notification_period_in_seconds"`
-	ThrottleProtectedPathsEnabled bool `json:"throttle_protected_paths_enabled"`
-	ThrottleProtectedPathsPeriodInSeconds int `json:"throttle_protected_paths_period_in_seconds"`
-	ThrottleProtectedPathsRequestsPerPeriod int `json:"throttle_protected_paths_requests_per_period"`
-	ThrottleUnauthenticatedAPIEnabled bool `json:"throttle_unauthenticated_api_enabled"`
-	ThrottleUnauthenticatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_api_period_in_seconds"`
-	ThrottleUnauthenticatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_api_requests_per_period"`
-	ThrottleUnauthenticatedDeprecatedAPIEnabled bool `json:"throttle_unauthenticated_deprecated_api_enabled"`
-	ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_deprecated_api_period_in_seconds"`
-	ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_deprecated_api_requests_per_period"`
-	ThrottleUnauthenticatedFilesAPIEnabled bool `json:"throttle_unauthenticated_files_api_enabled"`
-	ThrottleUnauthenticatedFilesAPIPeriodInSeconds int `json:"throttle_unauthenticated_files_api_period_in_seconds"`
-	ThrottleUnauthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_files_api_requests_per_period"`
-	ThrottleUnauthenticatedGitLFSEnabled bool `json:"throttle_unauthenticated_git_lfs_enabled"`
-	ThrottleUnauthenticatedGitLFSPeriodInSeconds int `json:"throttle_unauthenticated_git_lfs_period_in_seconds"`
-	ThrottleUnauthenticatedGitLFSRequestsPerPeriod int `json:"throttle_unauthenticated_git_lfs_requests_per_period"`
-	ThrottleUnauthenticatedPackagesAPIEnabled bool `json:"throttle_unauthenticated_packages_api_enabled"`
-	ThrottleUnauthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_unauthenticated_packages_api_period_in_seconds"`
-	ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_packages_api_requests_per_period"`
-	ThrottleUnauthenticatedWebEnabled bool `json:"throttle_unauthenticated_web_enabled"`
-	ThrottleUnauthenticatedWebPeriodInSeconds int `json:"throttle_unauthenticated_web_period_in_seconds"`
-	ThrottleUnauthenticatedWebRequestsPerPeriod int `json:"throttle_unauthenticated_web_requests_per_period"`
-	TimeTrackingLimitToHours bool `json:"time_tracking_limit_to_hours"`
-	TwoFactorGracePeriod int `json:"two_factor_grace_period"`
-	UniqueIPsLimitEnabled bool `json:"unique_ips_limit_enabled"`
-	UniqueIPsLimitPerUser int `json:"unique_ips_limit_per_user"`
-	UniqueIPsLimitTimeWindow int `json:"unique_ips_limit_time_window"`
-	UpdatedAt *time.Time `json:"updated_at"`
-	UpdatingNameDisabledForUsers bool `json:"updating_name_disabled_for_users"`
-	UsagePingEnabled bool `json:"usage_ping_enabled"`
-	UsagePingFeaturesEnabled bool `json:"usage_ping_features_enabled"`
-	UserDeactivationEmailsEnabled bool `json:"user_deactivation_emails_enabled"`
-	UserDefaultExternal bool `json:"user_default_external"`
-	UserDefaultInternalRegex string `json:"user_default_internal_regex"`
-	UserOauthApplications bool `json:"user_oauth_applications"`
-	UserShowAddSSHKeyMessage bool `json:"user_show_add_ssh_key_message"`
-	UsersGetByIDLimit int `json:"users_get_by_id_limit"`
-	UsersGetByIDLimitAllowlistRaw string `json:"users_get_by_id_limit_allowlist_raw"`
-	VersionCheckEnabled bool `json:"version_check_enabled"`
-	WebIDEClientsidePreviewEnabled bool `json:"web_ide_clientside_preview_enabled"`
-	WhatsNewVariant string `json:"whats_new_variant"`
-	WikiPageMaxContentBytes int `json:"wiki_page_max_content_bytes"`
+	ID int `json:"id"`
+	AbuseNotificationEmail string `json:"abuse_notification_email"`
+	AdminMode bool `json:"admin_mode"`
+	AfterSignOutPath string `json:"after_sign_out_path"`
+	AfterSignUpText string `json:"after_sign_up_text"`
+	AkismetAPIKey string `json:"akismet_api_key"`
+	AkismetEnabled bool `json:"akismet_enabled"`
+	AllowAccountDeletion bool `json:"allow_account_deletion"`
+	AllowGroupOwnersToManageLDAP bool `json:"allow_group_owners_to_manage_ldap"`
+	AllowLocalRequestsFromSystemHooks bool `json:"allow_local_requests_from_system_hooks"`
+	AllowLocalRequestsFromWebHooksAndServices bool `json:"allow_local_requests_from_web_hooks_and_services"`
+	AllowProjectCreationForGuestAndBelow bool `json:"allow_project_creation_for_guest_and_below"`
+	AllowRunnerRegistrationToken bool `json:"allow_runner_registration_token"`
+	ArchiveBuildsInHumanReadable string `json:"archive_builds_in_human_readable"`
+	ASCIIDocMaxIncludes int `json:"asciidoc_max_includes"`
+	AssetProxyAllowlist []string `json:"asset_proxy_allowlist"`
+	AssetProxyEnabled bool `json:"asset_proxy_enabled"`
+	AssetProxyURL string `json:"asset_proxy_url"`
+	AssetProxySecretKey string `json:"asset_proxy_secret_key"`
+	AuthorizedKeysEnabled bool `json:"authorized_keys_enabled"`
+	AutoBanUserOnExcessiveProjectsDownload bool `json:"auto_ban_user_on_excessive_projects_download"`
+	AutoDevOpsDomain string `json:"auto_devops_domain"`
+	AutoDevOpsEnabled bool `json:"auto_devops_enabled"`
+	AutomaticPurchasedStorageAllocation bool `json:"automatic_purchased_storage_allocation"`
+	BulkImportConcurrentPipelineBatchLimit int `json:"bulk_import_concurrent_pipeline_batch_limit"`
+	BulkImportEnabled bool `json:"bulk_import_enabled"`
+	BulkImportMaxDownloadFileSize int `json:"bulk_import_max_download_file_size"`
+	CanCreateGroup bool `json:"can_create_group"`
+	CheckNamespacePlan bool `json:"check_namespace_plan"`
+	CIMaxIncludes int `json:"ci_max_includes"`
+	CIMaxTotalYAMLSizeBytes int `json:"ci_max_total_yaml_size_bytes"`
+	CommitEmailHostname string `json:"commit_email_hostname"`
+	ConcurrentBitbucketImportJobsLimit int `json:"concurrent_bitbucket_import_jobs_limit"`
+	ConcurrentBitbucketServerImportJobsLimit int `json:"concurrent_bitbucket_server_import_jobs_limit"`
+	ConcurrentGitHubImportJobsLimit int `json:"concurrent_github_import_jobs_limit"`
+	ContainerExpirationPoliciesEnableHistoricEntries bool `json:"container_expiration_policies_enable_historic_entries"`
+	ContainerRegistryCleanupTagsServiceMaxListSize int `json:"container_registry_cleanup_tags_service_max_list_size"`
+	ContainerRegistryDeleteTagsServiceTimeout int `json:"container_registry_delete_tags_service_timeout"`
+	ContainerRegistryExpirationPoliciesCaching bool `json:"container_registry_expiration_policies_caching"`
+	ContainerRegistryExpirationPoliciesWorkerCapacity int `json:"container_registry_expiration_policies_worker_capacity"`
+	ContainerRegistryImportCreatedBefore *time.Time `json:"container_registry_import_created_before"`
+	ContainerRegistryImportMaxRetries int `json:"container_registry_import_max_retries"`
+	ContainerRegistryImportMaxStepDuration int `json:"container_registry_import_max_step_duration"`
+	ContainerRegistryImportMaxTagsCount int `json:"container_registry_import_max_tags_count"`
+	ContainerRegistryImportStartMaxRetries int `json:"container_registry_import_start_max_retries"`
+	ContainerRegistryImportTargetPlan string `json:"container_registry_import_target_plan"`
+	ContainerRegistryTokenExpireDelay int `json:"container_registry_token_expire_delay"`
+	CreatedAt *time.Time `json:"created_at"`
+	CustomHTTPCloneURLRoot string `json:"custom_http_clone_url_root"`
+	DNSRebindingProtectionEnabled bool `json:"dns_rebinding_protection_enabled"`
+	DSAKeyRestriction int `json:"dsa_key_restriction"`
+	DeactivateDormantUsers bool `json:"deactivate_dormant_users"`
+	DeactivateDormantUsersPeriod int `json:"deactivate_dormant_users_period"`
+	DecompressArchiveFileTimeout int `json:"decompress_archive_file_timeout"`
+	DefaultArtifactsExpireIn string `json:"default_artifacts_expire_in"`
+	DefaultBranchName string `json:"default_branch_name"`
+	DefaultBranchProtection int `json:"default_branch_protection"`
+	DefaultBranchProtectionDefaults BranchProtectionDefaults `json:"default_branch_protection_defaults,omitempty"`
+	DefaultCiConfigPath string `json:"default_ci_config_path"`
+	DefaultGroupVisibility VisibilityValue `json:"default_group_visibility"`
+	DefaultPreferredLanguage string `json:"default_preferred_language"`
+	DefaultProjectCreation int `json:"default_project_creation"`
+	DefaultProjectDeletionProtection bool `json:"default_project_deletion_protection"`
+	DefaultProjectVisibility VisibilityValue `json:"default_project_visibility"`
+	DefaultProjectsLimit int `json:"default_projects_limit"`
+	DefaultSnippetVisibility VisibilityValue `json:"default_snippet_visibility"`
+	DefaultSyntaxHighlightingTheme int `json:"default_syntax_highlighting_theme"`
+	DelayedGroupDeletion bool `json:"delayed_group_deletion"`
+	DelayedProjectDeletion bool `json:"delayed_project_deletion"`
+	DeleteInactiveProjects bool `json:"delete_inactive_projects"`
+	DeleteUnconfirmedUsers bool `json:"delete_unconfirmed_users"`
+	DeletionAdjournedPeriod int `json:"deletion_adjourned_period"`
+	DiagramsnetEnabled bool `json:"diagramsnet_enabled"`
+	DiagramsnetURL string `json:"diagramsnet_url"`
+	DiffMaxFiles int `json:"diff_max_files"`
+	DiffMaxLines int `json:"diff_max_lines"`
+	DiffMaxPatchBytes int `json:"diff_max_patch_bytes"`
+	DisableAdminOAuthScopes bool `json:"disable_admin_oauth_scopes"`
+	DisableFeedToken bool `json:"disable_feed_token"`
+	DisableOverridingApproversPerMergeRequest bool `json:"disable_overriding_approvers_per_merge_request"`
+	DisablePersonalAccessTokens bool `json:"disable_personal_access_tokens"`
+	DisabledOauthSignInSources []string `json:"disabled_oauth_sign_in_sources"`
+	DomainAllowlist []string `json:"domain_allowlist"`
+	DomainDenylist []string `json:"domain_denylist"`
+	DomainDenylistEnabled bool `json:"domain_denylist_enabled"`
+	DownstreamPipelineTriggerLimitPerProjectUserSHA int `json:"downstream_pipeline_trigger_limit_per_project_user_sha"`
+	DuoFeaturesEnabled bool `json:"duo_features_enabled"`
+	ECDSAKeyRestriction int `json:"ecdsa_key_restriction"`
+	ECDSASKKeyRestriction int `json:"ecdsa_sk_key_restriction"`
+	EKSAccessKeyID string `json:"eks_access_key_id"`
+	EKSAccountID string `json:"eks_account_id"`
+	EKSIntegrationEnabled bool `json:"eks_integration_enabled"`
+	EKSSecretAccessKey string `json:"eks_secret_access_key"`
+	Ed25519KeyRestriction int `json:"ed25519_key_restriction"`
+	Ed25519SKKeyRestriction int `json:"ed25519_sk_key_restriction"`
+	ElasticsearchAWS bool `json:"elasticsearch_aws"`
+	ElasticsearchAWSAccessKey string `json:"elasticsearch_aws_access_key"`
+	ElasticsearchAWSRegion string `json:"elasticsearch_aws_region"`
+	ElasticsearchAWSSecretAccessKey string `json:"elasticsearch_aws_secret_access_key"`
+	ElasticsearchAnalyzersKuromojiEnabled bool `json:"elasticsearch_analyzers_kuromoji_enabled"`
+	ElasticsearchAnalyzersKuromojiSearch bool `json:"elasticsearch_analyzers_kuromoji_search"`
+	ElasticsearchAnalyzersSmartCNEnabled bool `json:"elasticsearch_analyzers_smartcn_enabled"`
+	ElasticsearchAnalyzersSmartCNSearch bool `json:"elasticsearch_analyzers_smartcn_search"`
+	ElasticsearchClientRequestTimeout int `json:"elasticsearch_client_request_timeout"`
+	ElasticsearchIndexedFieldLengthLimit int `json:"elasticsearch_indexed_field_length_limit"`
+	ElasticsearchIndexedFileSizeLimitKB int `json:"elasticsearch_indexed_file_size_limit_kb"`
+	ElasticsearchIndexing bool `json:"elasticsearch_indexing"`
+	ElasticsearchLimitIndexing bool `json:"elasticsearch_limit_indexing"`
+	ElasticsearchMaxBulkConcurrency int `json:"elasticsearch_max_bulk_concurrency"`
+	ElasticsearchMaxBulkSizeMB int `json:"elasticsearch_max_bulk_size_mb"`
+	ElasticsearchMaxCodeIndexingConcurrency int `json:"elasticsearch_max_code_indexing_concurrency"`
+	ElasticsearchNamespaceIDs []int `json:"elasticsearch_namespace_ids"`
+	ElasticsearchPassword string `json:"elasticsearch_password"`
+	ElasticsearchPauseIndexing bool `json:"elasticsearch_pause_indexing"`
+	ElasticsearchProjectIDs []int `json:"elasticsearch_project_ids"`
+	ElasticsearchReplicas int `json:"elasticsearch_replicas"`
+	ElasticsearchRequeueWorkers bool `json:"elasticsearch_requeue_workers"`
+	ElasticsearchSearch bool `json:"elasticsearch_search"`
+	ElasticsearchShards int `json:"elasticsearch_shards"`
+	ElasticsearchURL []string `json:"elasticsearch_url"`
+	ElasticsearchUsername string `json:"elasticsearch_username"`
+	ElasticsearchWorkerNumberOfShards int `json:"elasticsearch_worker_number_of_shards"`
+	EmailAdditionalText string `json:"email_additional_text"`
+	EmailAuthorInBody bool `json:"email_author_in_body"`
+	EmailConfirmationSetting string `json:"email_confirmation_setting"`
+	EmailRestrictions string `json:"email_restrictions"`
+	EmailRestrictionsEnabled bool `json:"email_restrictions_enabled"`
+	EnableArtifactExternalRedirectWarningPage bool `json:"enable_artifact_external_redirect_warning_page"`
+	EnabledGitAccessProtocol string `json:"enabled_git_access_protocol"`
+	EnforceNamespaceStorageLimit bool `json:"enforce_namespace_storage_limit"`
+	EnforcePATExpiration bool `json:"enforce_pat_expiration"`
+	EnforceSSHKeyExpiration bool `json:"enforce_ssh_key_expiration"`
+	EnforceTerms bool `json:"enforce_terms"`
+	ExternalAuthClientCert string `json:"external_auth_client_cert"`
+	ExternalAuthClientKey string `json:"external_auth_client_key"`
+	ExternalAuthClientKeyPass string `json:"external_auth_client_key_pass"`
+	ExternalAuthorizationServiceDefaultLabel string `json:"external_authorization_service_default_label"`
+	ExternalAuthorizationServiceEnabled bool `json:"external_authorization_service_enabled"`
+	ExternalAuthorizationServiceTimeout float64 `json:"external_authorization_service_timeout"`
+	ExternalAuthorizationServiceURL string `json:"external_authorization_service_url"`
+	ExternalPipelineValidationServiceTimeout int `json:"external_pipeline_validation_service_timeout"`
+	ExternalPipelineValidationServiceToken string `json:"external_pipeline_validation_service_token"`
+	ExternalPipelineValidationServiceURL string `json:"external_pipeline_validation_service_url"`
+	FailedLoginAttemptsUnlockPeriodInMinutes int `json:"failed_login_attempts_unlock_period_in_minutes"`
+	FileTemplateProjectID int `json:"file_template_project_id"`
+	FirstDayOfWeek int `json:"first_day_of_week"`
+	FlocEnabled bool `json:"floc_enabled"`
+	GeoNodeAllowedIPs string `json:"geo_node_allowed_ips"`
+	GeoStatusTimeout int `json:"geo_status_timeout"`
+	GitRateLimitUsersAlertlist []string `json:"git_rate_limit_users_alertlist"`
+	GitTwoFactorSessionExpiry int `json:"git_two_factor_session_expiry"`
+	GitalyTimeoutDefault int `json:"gitaly_timeout_default"`
+	GitalyTimeoutFast int `json:"gitaly_timeout_fast"`
+	GitalyTimeoutMedium int `json:"gitaly_timeout_medium"`
+	GitlabDedicatedInstance bool `json:"gitlab_dedicated_instance"`
+	GitlabEnvironmentToolkitInstance bool `json:"gitlab_environment_toolkit_instance"`
+	GitlabShellOperationLimit int `json:"gitlab_shell_operation_limit"`
+	GitpodEnabled bool `json:"gitpod_enabled"`
+	GitpodURL string `json:"gitpod_url"`
+	GitRateLimitUsersAllowlist []string `json:"git_rate_limit_users_allowlist"`
+	GloballyAllowedIPs string `json:"globally_allowed_ips"`
+	GrafanaEnabled bool `json:"grafana_enabled"`
+	GrafanaURL string `json:"grafana_url"`
+	GravatarEnabled bool `json:"gravatar_enabled"`
+	GroupDownloadExportLimit int `json:"group_download_export_limit"`
+	GroupExportLimit int `json:"group_export_limit"`
+	GroupImportLimit int `json:"group_import_limit"`
+	GroupOwnersCanManageDefaultBranchProtection bool `json:"group_owners_can_manage_default_branch_protection"`
+	GroupRunnerTokenExpirationInterval int `json:"group_runner_token_expiration_interval"`
+	HTMLEmailsEnabled bool `json:"html_emails_enabled"`
+	HashedStorageEnabled bool `json:"hashed_storage_enabled"`
+	HelpPageDocumentationBaseURL string `json:"help_page_documentation_base_url"`
+	HelpPageHideCommercialContent bool `json:"help_page_hide_commercial_content"`
+	HelpPageSupportURL string `json:"help_page_support_url"`
+	HelpPageText string `json:"help_page_text"`
+	HelpText string `json:"help_text"`
+	HideThirdPartyOffers bool `json:"hide_third_party_offers"`
+	HomePageURL string `json:"home_page_url"`
+	HousekeepingBitmapsEnabled bool `json:"housekeeping_bitmaps_enabled"`
+	HousekeepingEnabled bool `json:"housekeeping_enabled"`
+	HousekeepingFullRepackPeriod int `json:"housekeeping_full_repack_period"`
+	HousekeepingGcPeriod int `json:"housekeeping_gc_period"`
+	HousekeepingIncrementalRepackPeriod int `json:"housekeeping_incremental_repack_period"`
+	HousekeepingOptimizeRepositoryPeriod int `json:"housekeeping_optimize_repository_period"`
+	ImportSources []string `json:"import_sources"`
+	InactiveProjectsDeleteAfterMonths int `json:"inactive_projects_delete_after_months"`
+	InactiveProjectsMinSizeMB int `json:"inactive_projects_min_size_mb"`
+	InactiveProjectsSendWarningEmailAfterMonths int `json:"inactive_projects_send_warning_email_after_months"`
+	IncludeOptionalMetricsInServicePing bool `json:"include_optional_metrics_in_service_ping"`
+	InProductMarketingEmailsEnabled bool `json:"in_product_marketing_emails_enabled"`
+	InvisibleCaptchaEnabled bool `json:"invisible_captcha_enabled"`
+	IssuesCreateLimit int `json:"issues_create_limit"`
+	JiraConnectApplicationKey string `json:"jira_connect_application_key"`
+	JiraConnectPublicKeyStorageEnabled bool `json:"jira_connect_public_key_storage_enabled"`
+	JiraConnectProxyURL string `json:"jira_connect_proxy_url"`
+	KeepLatestArtifact bool `json:"keep_latest_artifact"`
+	KrokiEnabled bool `json:"kroki_enabled"`
+	KrokiFormats map[string]bool `json:"kroki_formats"`
+	KrokiURL string `json:"kroki_url"`
+	LocalMarkdownVersion int `json:"local_markdown_version"`
+	LockDuoFeaturesEnabled bool `json:"lock_duo_features_enabled"`
+	LockMembershipsToLDAP bool `json:"lock_memberships_to_ldap"`
+	LoginRecaptchaProtectionEnabled bool `json:"login_recaptcha_protection_enabled"`
+	MailgunEventsEnabled bool `json:"mailgun_events_enabled"`
+	MailgunSigningKey string `json:"mailgun_signing_key"`
+	MaintenanceMode bool `json:"maintenance_mode"`
+	MaintenanceModeMessage string `json:"maintenance_mode_message"`
+	MavenPackageRequestsForwarding bool `json:"maven_package_requests_forwarding"`
+	MaxArtifactsSize int `json:"max_artifacts_size"`
+	MaxAttachmentSize int `json:"max_attachment_size"`
+	MaxDecompressedArchiveSize int `json:"max_decompressed_archive_size"`
+	MaxExportSize int `json:"max_export_size"`
+	MaxImportRemoteFileSize int `json:"max_import_remote_file_size"`
+	MaxImportSize int `json:"max_import_size"`
+	MaxLoginAttempts int `json:"max_login_attempts"`
+	MaxNumberOfRepositoryDownloads int `json:"max_number_of_repository_downloads"`
+	MaxNumberOfRepositoryDownloadsWithinTimePeriod int `json:"max_number_of_repository_downloads_within_time_period"`
+	MaxPagesSize int `json:"max_pages_size"`
+	MaxPersonalAccessTokenLifetime int `json:"max_personal_access_token_lifetime"`
+	MaxSSHKeyLifetime int `json:"max_ssh_key_lifetime"`
+	MaxTerraformStateSizeBytes int `json:"max_terraform_state_size_bytes"`
+	MaxYAMLDepth int `json:"max_yaml_depth"`
+	MaxYAMLSizeBytes int `json:"max_yaml_size_bytes"`
+	MetricsMethodCallThreshold int `json:"metrics_method_call_threshold"`
+	MinimumPasswordLength int `json:"minimum_password_length"`
+	MirrorAvailable bool `json:"mirror_available"`
+	MirrorCapacityThreshold int `json:"mirror_capacity_threshold"`
+	MirrorMaxCapacity int `json:"mirror_max_capacity"`
+	MirrorMaxDelay int `json:"mirror_max_delay"`
+	NPMPackageRequestsForwarding bool `json:"npm_package_requests_forwarding"`
+	NotesCreateLimit int `json:"notes_create_limit"`
+	NotifyOnUnknownSignIn bool `json:"notify_on_unknown_sign_in"`
+	NugetSkipMetadataURLValidation bool `json:"nuget_skip_metadata_url_validation"`
+	OutboundLocalRequestsAllowlistRaw string `json:"outbound_local_requests_allowlist_raw"`
+	OutboundLocalRequestsWhitelist []string `json:"outbound_local_requests_whitelist"`
+	PackageMetadataPURLTypes []int `json:"package_metadata_purl_types"`
+	PackageRegistryAllowAnyoneToPullOption bool `json:"package_registry_allow_anyone_to_pull_option"`
+	PackageRegistryCleanupPoliciesWorkerCapacity int `json:"package_registry_cleanup_policies_worker_capacity"`
+	PagesDomainVerificationEnabled bool `json:"pages_domain_verification_enabled"`
+	PasswordAuthenticationEnabledForGit bool `json:"password_authentication_enabled_for_git"`
+	PasswordAuthenticationEnabledForWeb bool `json:"password_authentication_enabled_for_web"`
+	PasswordNumberRequired bool `json:"password_number_required"`
+	PasswordSymbolRequired bool `json:"password_symbol_required"`
+	PasswordUppercaseRequired bool `json:"password_uppercase_required"`
+	PasswordLowercaseRequired bool `json:"password_lowercase_required"`
+	PerformanceBarAllowedGroupID int `json:"performance_bar_allowed_group_id"`
+	PerformanceBarAllowedGroupPath string `json:"performance_bar_allowed_group_path"`
+	PerformanceBarEnabled bool `json:"performance_bar_enabled"`
+	PersonalAccessTokenPrefix string `json:"personal_access_token_prefix"`
+	PipelineLimitPerProjectUserSha int `json:"pipeline_limit_per_project_user_sha"`
+	PlantumlEnabled bool `json:"plantuml_enabled"`
+	PlantumlURL string `json:"plantuml_url"`
+	PollingIntervalMultiplier float64 `json:"polling_interval_multiplier,string"`
+	PreventMergeRequestsAuthorApproval bool `json:"prevent_merge_request_author_approval"`
+	PreventMergeRequestsCommittersApproval bool `json:"prevent_merge_request_committers_approval"`
+	ProjectDownloadExportLimit int `json:"project_download_export_limit"`
+	ProjectExportEnabled bool `json:"project_export_enabled"`
+	ProjectExportLimit int `json:"project_export_limit"`
+	ProjectImportLimit int `json:"project_import_limit"`
+	ProjectJobsAPIRateLimit int `json:"project_jobs_api_rate_limit"`
+	ProjectRunnerTokenExpirationInterval int `json:"project_runner_token_expiration_interval"`
+	ProjectsAPIRateLimitUnauthenticated int `json:"projects_api_rate_limit_unauthenticated"`
+	PrometheusMetricsEnabled bool `json:"prometheus_metrics_enabled"`
+	ProtectedCIVariables bool `json:"protected_ci_variables"`
+	PseudonymizerEnabled bool `json:"pseudonymizer_enabled"`
+	PushEventActivitiesLimit int `json:"push_event_activities_limit"`
+	PushEventHooksLimit int `json:"push_event_hooks_limit"`
+	PyPIPackageRequestsForwarding bool `json:"pypi_package_requests_forwarding"`
+	RSAKeyRestriction int `json:"rsa_key_restriction"`
+	RateLimitingResponseText string `json:"rate_limiting_response_text"`
+	RawBlobRequestLimit int `json:"raw_blob_request_limit"`
+	RecaptchaEnabled bool `json:"recaptcha_enabled"`
+	RecaptchaPrivateKey string `json:"recaptcha_private_key"`
+	RecaptchaSiteKey string `json:"recaptcha_site_key"`
+	ReceiveMaxInputSize int `json:"receive_max_input_size"`
+	ReceptiveClusterAgentsEnabled bool `json:"receptive_cluster_agents_enabled"`
+	RememberMeEnabled bool `json:"remember_me_enabled"`
+	RepositoryChecksEnabled bool `json:"repository_checks_enabled"`
+	RepositorySizeLimit int `json:"repository_size_limit"`
+	RepositoryStorages []string `json:"repository_storages"`
+	RepositoryStoragesWeighted map[string]int `json:"repository_storages_weighted"`
+	RequireAdminApprovalAfterUserSignup bool `json:"require_admin_approval_after_user_signup"`
+	RequireAdminTwoFactorAuthentication bool `json:"require_admin_two_factor_authentication"`
+	RequirePersonalAccessTokenExpiry bool `json:"require_personal_access_token_expiry"`
+	RequireTwoFactorAuthentication bool `json:"require_two_factor_authentication"`
+	RestrictedVisibilityLevels []VisibilityValue `json:"restricted_visibility_levels"`
+	RunnerTokenExpirationInterval int `json:"runner_token_expiration_interval"`
+	SearchRateLimit int `json:"search_rate_limit"`
+	SearchRateLimitUnauthenticated int `json:"search_rate_limit_unauthenticated"`
+	SecretDetectionRevocationTokenTypesURL string `json:"secret_detection_revocation_token_types_url"`
+	SecretDetectionTokenRevocationEnabled bool `json:"secret_detection_token_revocation_enabled"`
+	SecretDetectionTokenRevocationToken string `json:"secret_detection_token_revocation_token"`
+	SecretDetectionTokenRevocationURL string `json:"secret_detection_token_revocation_url"`
+	SecurityApprovalPoliciesLimit int `json:"security_approval_policies_limit"`
+	SecurityPolicyGlobalGroupApproversEnabled bool `json:"security_policy_global_group_approvers_enabled"`
+	SecurityTXTContent string `json:"security_txt_content"`
+	SendUserConfirmationEmail bool `json:"send_user_confirmation_email"`
+	SentryClientsideDSN string `json:"sentry_clientside_dsn"`
+	SentryDSN string `json:"sentry_dsn"`
+	SentryEnabled bool `json:"sentry_enabled"`
+	SentryEnvironment string `json:"sentry_environment"`
+	ServiceAccessTokensExpirationEnforced bool `json:"service_access_tokens_expiration_enforced"`
+	SessionExpireDelay int `json:"session_expire_delay"`
+	SharedRunnersEnabled bool `json:"shared_runners_enabled"`
+	SharedRunnersMinutes int `json:"shared_runners_minutes"`
+	SharedRunnersText string `json:"shared_runners_text"`
+	SidekiqJobLimiterCompressionThresholdBytes int `json:"sidekiq_job_limiter_compression_threshold_bytes"`
+	SidekiqJobLimiterLimitBytes int `json:"sidekiq_job_limiter_limit_bytes"`
+	SidekiqJobLimiterMode string `json:"sidekiq_job_limiter_mode"`
+	SignInText string `json:"sign_in_text"`
+	SignupEnabled bool `json:"signup_enabled"`
+	SilentAdminExportsEnabled bool `json:"silent_admin_exports_enabled"`
+	SilentModeEnabled bool `json:"silent_mode_enabled"`
+	SlackAppEnabled bool `json:"slack_app_enabled"`
+	SlackAppID string `json:"slack_app_id"`
+	SlackAppSecret string `json:"slack_app_secret"`
+	SlackAppSigningSecret string `json:"slack_app_signing_secret"`
+	SlackAppVerificationToken string `json:"slack_app_verification_token"`
+	SnippetSizeLimit int `json:"snippet_size_limit"`
+	SnowplowAppID string `json:"snowplow_app_id"`
+	SnowplowCollectorHostname string `json:"snowplow_collector_hostname"`
+	SnowplowCookieDomain string `json:"snowplow_cookie_domain"`
+	SnowplowDatabaseCollectorHostname string `json:"snowplow_database_collector_hostname"`
+	SnowplowEnabled bool `json:"snowplow_enabled"`
+	SourcegraphEnabled bool `json:"sourcegraph_enabled"`
+	SourcegraphPublicOnly bool `json:"sourcegraph_public_only"`
+	SourcegraphURL string `json:"sourcegraph_url"`
+	SpamCheckAPIKey string `json:"spam_check_api_key"`
+	SpamCheckEndpointEnabled bool `json:"spam_check_endpoint_enabled"`
+	SpamCheckEndpointURL string `json:"spam_check_endpoint_url"`
+	StaticObjectsExternalStorageAuthToken string `json:"static_objects_external_storage_auth_token"`
+	StaticObjectsExternalStorageURL string `json:"static_objects_external_storage_url"`
+	SuggestPipelineEnabled bool `json:"suggest_pipeline_enabled"`
+	TerminalMaxSessionTime int `json:"terminal_max_session_time"`
+	Terms string `json:"terms"`
+	ThrottleAuthenticatedAPIEnabled bool `json:"throttle_authenticated_api_enabled"`
+	ThrottleAuthenticatedAPIPeriodInSeconds int `json:"throttle_authenticated_api_period_in_seconds"`
+	ThrottleAuthenticatedAPIRequestsPerPeriod int `json:"throttle_authenticated_api_requests_per_period"`
+	ThrottleAuthenticatedDeprecatedAPIEnabled bool `json:"throttle_authenticated_deprecated_api_enabled"`
+	ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_authenticated_deprecated_api_period_in_seconds"`
+	ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_authenticated_deprecated_api_requests_per_period"`
+	ThrottleAuthenticatedFilesAPIEnabled bool `json:"throttle_authenticated_files_api_enabled"`
+	ThrottleAuthenticatedFilesAPIPeriodInSeconds int `json:"throttle_authenticated_files_api_period_in_seconds"`
+	ThrottleAuthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_authenticated_files_api_requests_per_period"`
+	ThrottleAuthenticatedGitLFSEnabled bool `json:"throttle_authenticated_git_lfs_enabled"`
+	ThrottleAuthenticatedGitLFSPeriodInSeconds int `json:"throttle_authenticated_git_lfs_period_in_seconds"`
+	ThrottleAuthenticatedGitLFSRequestsPerPeriod int `json:"throttle_authenticated_git_lfs_requests_per_period"`
+	ThrottleAuthenticatedPackagesAPIEnabled bool `json:"throttle_authenticated_packages_api_enabled"`
+	ThrottleAuthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_authenticated_packages_api_period_in_seconds"`
+	ThrottleAuthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_authenticated_packages_api_requests_per_period"`
+	ThrottleAuthenticatedWebEnabled bool `json:"throttle_authenticated_web_enabled"`
+	ThrottleAuthenticatedWebPeriodInSeconds int `json:"throttle_authenticated_web_period_in_seconds"`
+	ThrottleAuthenticatedWebRequestsPerPeriod int `json:"throttle_authenticated_web_requests_per_period"`
+	ThrottleIncidentManagementNotificationEnabled bool `json:"throttle_incident_management_notification_enabled"`
+	ThrottleIncidentManagementNotificationPerPeriod int `json:"throttle_incident_management_notification_per_period"`
+	ThrottleIncidentManagementNotificationPeriodInSeconds int `json:"throttle_incident_management_notification_period_in_seconds"`
+	ThrottleProtectedPathsEnabled bool `json:"throttle_protected_paths_enabled"`
+	ThrottleProtectedPathsPeriodInSeconds int `json:"throttle_protected_paths_period_in_seconds"`
+	ThrottleProtectedPathsRequestsPerPeriod int `json:"throttle_protected_paths_requests_per_period"`
+	ThrottleUnauthenticatedAPIEnabled bool `json:"throttle_unauthenticated_api_enabled"`
+	ThrottleUnauthenticatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_api_period_in_seconds"`
+	ThrottleUnauthenticatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_api_requests_per_period"`
+	ThrottleUnauthenticatedDeprecatedAPIEnabled bool `json:"throttle_unauthenticated_deprecated_api_enabled"`
+	ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_deprecated_api_period_in_seconds"`
+	ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_deprecated_api_requests_per_period"`
+	ThrottleUnauthenticatedFilesAPIEnabled bool `json:"throttle_unauthenticated_files_api_enabled"`
+	ThrottleUnauthenticatedFilesAPIPeriodInSeconds int `json:"throttle_unauthenticated_files_api_period_in_seconds"`
+	ThrottleUnauthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_files_api_requests_per_period"`
+	ThrottleUnauthenticatedGitLFSEnabled bool `json:"throttle_unauthenticated_git_lfs_enabled"`
+	ThrottleUnauthenticatedGitLFSPeriodInSeconds int `json:"throttle_unauthenticated_git_lfs_period_in_seconds"`
+	ThrottleUnauthenticatedGitLFSRequestsPerPeriod int `json:"throttle_unauthenticated_git_lfs_requests_per_period"`
+	ThrottleUnauthenticatedPackagesAPIEnabled bool `json:"throttle_unauthenticated_packages_api_enabled"`
+	ThrottleUnauthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_unauthenticated_packages_api_period_in_seconds"`
+	ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_packages_api_requests_per_period"`
+	ThrottleUnauthenticatedWebEnabled bool `json:"throttle_unauthenticated_web_enabled"`
+	ThrottleUnauthenticatedWebPeriodInSeconds int `json:"throttle_unauthenticated_web_period_in_seconds"`
+	ThrottleUnauthenticatedWebRequestsPerPeriod int `json:"throttle_unauthenticated_web_requests_per_period"`
+	TimeTrackingLimitToHours bool `json:"time_tracking_limit_to_hours"`
+	TwoFactorGracePeriod int `json:"two_factor_grace_period"`
+	UnconfirmedUsersDeleteAfterDays int `json:"unconfirmed_users_delete_after_days"`
+	UniqueIPsLimitEnabled bool `json:"unique_ips_limit_enabled"`
+	UniqueIPsLimitPerUser int `json:"unique_ips_limit_per_user"`
+	UniqueIPsLimitTimeWindow int `json:"unique_ips_limit_time_window"`
+	UpdateRunnerVersionsEnabled bool `json:"update_runner_versions_enabled"`
+	UpdatedAt *time.Time `json:"updated_at"`
+	UpdatingNameDisabledForUsers bool `json:"updating_name_disabled_for_users"`
+	UsagePingEnabled bool `json:"usage_ping_enabled"`
+	UsagePingFeaturesEnabled bool `json:"usage_ping_features_enabled"`
+	UseClickhouseForAnalytics bool `json:"use_clickhouse_for_analytics"`
+	UserDeactivationEmailsEnabled bool `json:"user_deactivation_emails_enabled"`
+	UserDefaultExternal bool `json:"user_default_external"`
+	UserDefaultInternalRegex string `json:"user_default_internal_regex"`
+	UserDefaultsToPrivateProfile bool `json:"user_defaults_to_private_profile"`
+	UserOauthApplications bool `json:"user_oauth_applications"`
+	UserShowAddSSHKeyMessage bool `json:"user_show_add_ssh_key_message"`
+	UsersGetByIDLimit int `json:"users_get_by_id_limit"`
+	UsersGetByIDLimitAllowlistRaw string `json:"users_get_by_id_limit_allowlist_raw"`
+	ValidRunnerRegistrars []string `json:"valid_runner_registrars"`
+	VersionCheckEnabled bool `json:"version_check_enabled"`
+	WebIDEClientsidePreviewEnabled bool `json:"web_ide_clientside_preview_enabled"`
+	WhatsNewVariant string `json:"whats_new_variant"`
+	WikiPageMaxContentBytes int `json:"wiki_page_max_content_bytes"`
 
 	// Deprecated: Use AbuseNotificationEmail instead.
 	AdminNotificationEmail string `json:"admin_notification_email"`
@@ -392,6 +462,42 @@ type Settings struct {
 	UserEmailLookupLimit int `json:"user_email_lookup_limit"`
 }
 
+// BranchProtectionDefaults represents default Git protected branch permissions.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults
+type BranchProtectionDefaults struct {
+	AllowedToPush []int `json:"allowed_to_push,omitempty"`
+	AllowForcePush bool `json:"allow_force_push,omitempty"`
+	AllowedToMerge []int `json:"allowed_to_merge,omitempty"`
+	DeveloperCanInitialPush bool `json:"developer_can_initial_push,omitempty"`
+}
+
+// Settings requires a custom unmarshaller in order to properly unmarshal
+// `container_registry_import_created_before` which is either a time.Time or
+// an empty string if no value is set.
+func (s *Settings) UnmarshalJSON(data []byte) error {
+	type Alias Settings
+
+	raw := make(map[string]interface{})
+	err := json.Unmarshal(data, &raw)
+	if err != nil {
+		return err
+	}
+
+	// If empty string, remove the value to leave it nil in the response.
+	if v, ok := raw["container_registry_import_created_before"]; ok && v == "" {
+		delete(raw, "container_registry_import_created_before")
+
+		data, err = json.Marshal(raw)
+		if err != nil {
+			return err
+		}
+	}
+
+	return json.Unmarshal(data, (*Alias)(s))
+}
+
 func (s Settings) String() string {
 	return Stringify(s)
 }
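The added UnmarshalJSON above combines two standard-library idioms: a defined type (`type Alias Settings`) that sheds the method set so the inner json.Unmarshal does not recurse back into the custom unmarshaller, and a round trip through a raw map that drops the `container_registry_import_created_before` key when the server sends `""` instead of a timestamp (which would otherwise fail to decode into a *time.Time). A minimal standalone sketch of the same pattern follows; the `Config` and `StartedAt` names are illustrative only, not part of this patch:

	package main

	import (
		"encoding/json"
		"fmt"
		"time"
	)

	// Config stands in for Settings: a *time.Time field whose JSON value
	// may arrive as "" rather than an RFC 3339 timestamp.
	type Config struct {
		StartedAt *time.Time `json:"started_at"`
	}

	func (c *Config) UnmarshalJSON(data []byte) error {
		// alias has Config's fields but none of its methods, so decoding
		// into it uses default behavior instead of recursing into this func.
		type alias Config

		raw := make(map[string]interface{})
		if err := json.Unmarshal(data, &raw); err != nil {
			return err
		}

		// "" cannot be parsed into time.Time; drop the key so the
		// field is simply left nil.
		if v, ok := raw["started_at"]; ok && v == "" {
			delete(raw, "started_at")
			var err error
			if data, err = json.Marshal(raw); err != nil {
				return err
			}
		}

		return json.Unmarshal(data, (*alias)(c))
	}

	func main() {
		var c Config
		if err := json.Unmarshal([]byte(`{"started_at": ""}`), &c); err != nil {
			panic(err)
		}
		fmt.Println(c.StartedAt == nil) // prints true: "" was treated as unset
	}

Without the custom unmarshaller, the same input fails with a time-parse error, which is exactly the case the vendored change guards against.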
ContainerRegistryCleanupTagsServiceMaxListSize *int `url:"container_registry_cleanup_tags_service_max_list_size,omitempty" json:"container_registry_cleanup_tags_service_max_list_size,omitempty"` - ContainerRegistryDeleteTagsServiceTimeout *int `url:"container_registry_delete_tags_service_timeout,omitempty" json:"container_registry_delete_tags_service_timeout,omitempty"` - ContainerRegistryExpirationPoliciesCaching *bool `url:"container_registry_expiration_policies_caching,omitempty" json:"container_registry_expiration_policies_caching,omitempty"` - ContainerRegistryExpirationPoliciesWorkerCapacity *int `url:"container_registry_expiration_policies_worker_capacity,omitempty" json:"container_registry_expiration_policies_worker_capacity,omitempty"` - ContainerRegistryImportCreatedBefore *time.Time `url:"container_registry_import_created_before,omitempty" json:"container_registry_import_created_before,omitempty"` - ContainerRegistryImportMaxRetries *int `url:"container_registry_import_max_retries,omitempty" json:"container_registry_import_max_retries,omitempty"` - ContainerRegistryImportMaxStepDuration *int `url:"container_registry_import_max_step_duration,omitempty" json:"container_registry_import_max_step_duration,omitempty"` - ContainerRegistryImportMaxTagsCount *int `url:"container_registry_import_max_tags_count,omitempty" json:"container_registry_import_max_tags_count,omitempty"` - ContainerRegistryImportStartMaxRetries *int `url:"container_registry_import_start_max_retries,omitempty" json:"container_registry_import_start_max_retries,omitempty"` - ContainerRegistryImportTargetPlan *string `url:"container_registry_import_target_plan,omitempty" json:"container_registry_import_target_plan,omitempty"` - ContainerRegistryTokenExpireDelay *int `url:"container_registry_token_expire_delay,omitempty" json:"container_registry_token_expire_delay,omitempty"` - CustomHTTPCloneURLRoot *string `url:"custom_http_clone_url_root,omitempty" json:"custom_http_clone_url_root,omitempty"` - DNSRebindingProtectionEnabled *bool `url:"dns_rebinding_protection_enabled,omitempty" json:"dns_rebinding_protection_enabled,omitempty"` - DSAKeyRestriction *int `url:"dsa_key_restriction,omitempty" json:"dsa_key_restriction,omitempty"` - DeactivateDormantUsers *bool `url:"deactivate_dormant_users,omitempty" json:"deactivate_dormant_users,omitempty"` - DefaultArtifactsExpireIn *string `url:"default_artifacts_expire_in,omitempty" json:"default_artifacts_expire_in,omitempty"` - DefaultBranchName *string `url:"default_branch_name,omitempty" json:"default_branch_name,omitempty"` - DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` - DefaultCiConfigPath *string `url:"default_ci_config_path,omitempty" json:"default_ci_config_path,omitempty"` - DefaultGroupVisibility *VisibilityValue `url:"default_group_visibility,omitempty" json:"default_group_visibility,omitempty"` - DefaultProjectCreation *int `url:"default_project_creation,omitempty" json:"default_project_creation,omitempty"` - DefaultProjectDeletionProtection *bool `url:"default_project_deletion_protection,omitempty" json:"default_project_deletion_protection,omitempty"` - DefaultProjectVisibility *VisibilityValue `url:"default_project_visibility,omitempty" json:"default_project_visibility,omitempty"` - DefaultProjectsLimit *int `url:"default_projects_limit,omitempty" json:"default_projects_limit,omitempty"` - DefaultSnippetVisibility *VisibilityValue `url:"default_snippet_visibility,omitempty" 
json:"default_snippet_visibility,omitempty"` - DelayedGroupDeletion *bool `url:"delayed_group_deletion,omitempty" json:"delayed_group_deletion,omitempty"` - DelayedProjectDeletion *bool `url:"delayed_project_deletion,omitempty" json:"delayed_project_deletion,omitempty"` - DeleteInactiveProjects *bool `url:"delete_inactive_projects,omitempty" json:"delete_inactive_projects,omitempty"` - DeletionAdjournedPeriod *int `url:"deletion_adjourned_period,omitempty" json:"deletion_adjourned_period,omitempty"` - DiffMaxFiles *int `url:"diff_max_files,omitempty" json:"diff_max_files,omitempty"` - DiffMaxLines *int `url:"diff_max_lines,omitempty" json:"diff_max_lines,omitempty"` - DiffMaxPatchBytes *int `url:"diff_max_patch_bytes,omitempty" json:"diff_max_patch_bytes,omitempty"` - DisableFeedToken *bool `url:"disable_feed_token,omitempty" json:"disable_feed_token,omitempty"` - DisableOverridingApproversPerMergeRequest *bool `url:"disable_overriding_approvers_per_merge_request,omitempty" json:"disable_overriding_approvers_per_merge_request,omitempty"` - DisabledOauthSignInSources *[]string `url:"disabled_oauth_sign_in_sources,omitempty" json:"disabled_oauth_sign_in_sources,omitempty"` - DomainAllowlist *[]string `url:"domain_allowlist,omitempty" json:"domain_allowlist,omitempty"` - DomainDenylist *[]string `url:"domain_denylist,omitempty" json:"domain_denylist,omitempty"` - DomainDenylistEnabled *bool `url:"domain_denylist_enabled,omitempty" json:"domain_denylist_enabled,omitempty"` - ECDSAKeyRestriction *int `url:"ecdsa_key_restriction,omitempty" json:"ecdsa_key_restriction,omitempty"` - ECDSASKKeyRestriction *int `url:"ecdsa_sk_key_restriction,omitempty" json:"ecdsa_sk_key_restriction,omitempty"` - EKSAccessKeyID *string `url:"eks_access_key_id,omitempty" json:"eks_access_key_id,omitempty"` - EKSAccountID *string `url:"eks_account_id,omitempty" json:"eks_account_id,omitempty"` - EKSIntegrationEnabled *bool `url:"eks_integration_enabled,omitempty" json:"eks_integration_enabled,omitempty"` - EKSSecretAccessKey *string `url:"eks_secret_access_key,omitempty" json:"eks_secret_access_key,omitempty"` - Ed25519KeyRestriction *int `url:"ed25519_key_restriction,omitempty" json:"ed25519_key_restriction,omitempty"` - Ed25519SKKeyRestriction *int `url:"ed25519_sk_key_restriction,omitempty" json:"ed25519_sk_key_restriction,omitempty"` - ElasticsearchAWS *bool `url:"elasticsearch_aws,omitempty" json:"elasticsearch_aws,omitempty"` - ElasticsearchAWSAccessKey *string `url:"elasticsearch_aws_access_key,omitempty" json:"elasticsearch_aws_access_key,omitempty"` - ElasticsearchAWSRegion *string `url:"elasticsearch_aws_region,omitempty" json:"elasticsearch_aws_region,omitempty"` - ElasticsearchAWSSecretAccessKey *string `url:"elasticsearch_aws_secret_access_key,omitempty" json:"elasticsearch_aws_secret_access_key,omitempty"` - ElasticsearchAnalyzersKuromojiEnabled *bool `url:"elasticsearch_analyzers_kuromoji_enabled,omitempty" json:"elasticsearch_analyzers_kuromoji_enabled,omitempty"` - ElasticsearchAnalyzersKuromojiSearch *int `url:"elasticsearch_analyzers_kuromoji_search,omitempty" json:"elasticsearch_analyzers_kuromoji_search,omitempty"` - ElasticsearchAnalyzersSmartCNEnabled *bool `url:"elasticsearch_analyzers_smartcn_enabled,omitempty" json:"elasticsearch_analyzers_smartcn_enabled,omitempty"` - ElasticsearchAnalyzersSmartCNSearch *int `url:"elasticsearch_analyzers_smartcn_search,omitempty" json:"elasticsearch_analyzers_smartcn_search,omitempty"` - ElasticsearchClientRequestTimeout *int 
`url:"elasticsearch_client_request_timeout,omitempty" json:"elasticsearch_client_request_timeout,omitempty"` - ElasticsearchIndexedFieldLengthLimit *int `url:"elasticsearch_indexed_field_length_limit,omitempty" json:"elasticsearch_indexed_field_length_limit,omitempty"` - ElasticsearchIndexedFileSizeLimitKB *int `url:"elasticsearch_indexed_file_size_limit_kb,omitempty" json:"elasticsearch_indexed_file_size_limit_kb,omitempty"` - ElasticsearchIndexing *bool `url:"elasticsearch_indexing,omitempty" json:"elasticsearch_indexing,omitempty"` - ElasticsearchLimitIndexing *bool `url:"elasticsearch_limit_indexing,omitempty" json:"elasticsearch_limit_indexing,omitempty"` - ElasticsearchMaxBulkConcurrency *int `url:"elasticsearch_max_bulk_concurrency,omitempty" json:"elasticsearch_max_bulk_concurrency,omitempty"` - ElasticsearchMaxBulkSizeMB *int `url:"elasticsearch_max_bulk_size_mb,omitempty" json:"elasticsearch_max_bulk_size_mb,omitempty"` - ElasticsearchNamespaceIDs *[]int `url:"elasticsearch_namespace_ids,omitempty" json:"elasticsearch_namespace_ids,omitempty"` - ElasticsearchPassword *string `url:"elasticsearch_password,omitempty" json:"elasticsearch_password,omitempty"` - ElasticsearchPauseIndexing *bool `url:"elasticsearch_pause_indexing,omitempty" json:"elasticsearch_pause_indexing,omitempty"` - ElasticsearchProjectIDs *[]int `url:"elasticsearch_project_ids,omitempty" json:"elasticsearch_project_ids,omitempty"` - ElasticsearchReplicas *int `url:"elasticsearch_replicas,omitempty" json:"elasticsearch_replicas,omitempty"` - ElasticsearchSearch *bool `url:"elasticsearch_search,omitempty" json:"elasticsearch_search,omitempty"` - ElasticsearchShards *int `url:"elasticsearch_shards,omitempty" json:"elasticsearch_shards,omitempty"` - ElasticsearchURL *string `url:"elasticsearch_url,omitempty" json:"elasticsearch_url,omitempty"` - ElasticsearchUsername *string `url:"elasticsearch_username,omitempty" json:"elasticsearch_username,omitempty"` - EmailAdditionalText *string `url:"email_additional_text,omitempty" json:"email_additional_text,omitempty"` - EmailAuthorInBody *bool `url:"email_author_in_body,omitempty" json:"email_author_in_body,omitempty"` - EmailRestrictions *string `url:"email_restrictions,omitempty" json:"email_restrictions,omitempty"` - EmailRestrictionsEnabled *bool `url:"email_restrictions_enabled,omitempty" json:"email_restrictions_enabled,omitempty"` - EnabledGitAccessProtocol *string `url:"enabled_git_access_protocol,omitempty" json:"enabled_git_access_protocol,omitempty"` - EnforceNamespaceStorageLimit *bool `url:"enforce_namespace_storage_limit,omitempty" json:"enforce_namespace_storage_limit,omitempty"` - EnforcePATExpiration *bool `url:"enforce_pat_expiration,omitempty" json:"enforce_pat_expiration,omitempty"` - EnforceSSHKeyExpiration *bool `url:"enforce_ssh_key_expiration,omitempty" json:"enforce_ssh_key_expiration,omitempty"` - EnforceTerms *bool `url:"enforce_terms,omitempty" json:"enforce_terms,omitempty"` - ExternalAuthClientCert *string `url:"external_auth_client_cert,omitempty" json:"external_auth_client_cert,omitempty"` - ExternalAuthClientKey *string `url:"external_auth_client_key,omitempty" json:"external_auth_client_key,omitempty"` - ExternalAuthClientKeyPass *string `url:"external_auth_client_key_pass,omitempty" json:"external_auth_client_key_pass,omitempty"` - ExternalAuthorizationServiceDefaultLabel *string `url:"external_authorization_service_default_label,omitempty" json:"external_authorization_service_default_label,omitempty"` - ExternalAuthorizationServiceEnabled 
*bool `url:"external_authorization_service_enabled,omitempty" json:"external_authorization_service_enabled,omitempty"` - ExternalAuthorizationServiceTimeout *float64 `url:"external_authorization_service_timeout,omitempty" json:"external_authorization_service_timeout,omitempty"` - ExternalAuthorizationServiceURL *string `url:"external_authorization_service_url,omitempty" json:"external_authorization_service_url,omitempty"` - ExternalPipelineValidationServiceTimeout *int `url:"external_pipeline_validation_service_timeout,omitempty" json:"external_pipeline_validation_service_timeout,omitempty"` - ExternalPipelineValidationServiceToken *string `url:"external_pipeline_validation_service_token,omitempty" json:"external_pipeline_validation_service_token,omitempty"` - ExternalPipelineValidationServiceURL *string `url:"external_pipeline_validation_service_url,omitempty" json:"external_pipeline_validation_service_url,omitempty"` - FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"` - FirstDayOfWeek *int `url:"first_day_of_week,omitempty" json:"first_day_of_week,omitempty"` - FlocEnabled *bool `url:"floc_enabled,omitempty" json:"floc_enabled,omitempty"` - GeoNodeAllowedIPs *string `url:"geo_node_allowed_ips,omitempty" json:"geo_node_allowed_ips,omitempty"` - GeoStatusTimeout *int `url:"geo_status_timeout,omitempty" json:"geo_status_timeout,omitempty"` - GitTwoFactorSessionExpiry *int `url:"git_two_factor_session_expiry,omitempty" json:"git_two_factor_session_expiry,omitempty"` - GitalyTimeoutDefault *int `url:"gitaly_timeout_default,omitempty" json:"gitaly_timeout_default,omitempty"` - GitalyTimeoutFast *int `url:"gitaly_timeout_fast,omitempty" json:"gitaly_timeout_fast,omitempty"` - GitalyTimeoutMedium *int `url:"gitaly_timeout_medium,omitempty" json:"gitaly_timeout_medium,omitempty"` - GitpodEnabled *bool `url:"gitpod_enabled,omitempty" json:"gitpod_enabled,omitempty"` - GitpodURL *string `url:"gitpod_url,omitempty" json:"gitpod_url,omitempty"` - GitRateLimitUsersAllowlist *[]string `url:"git_rate_limit_users_allowlist,omitempty" json:"git_rate_limit_users_allowlist,omitempty"` - GrafanaEnabled *bool `url:"grafana_enabled,omitempty" json:"grafana_enabled,omitempty"` - GrafanaURL *string `url:"grafana_url,omitempty" json:"grafana_url,omitempty"` - GravatarEnabled *bool `url:"gravatar_enabled,omitempty" json:"gravatar_enabled,omitempty"` - GroupDownloadExportLimit *int `url:"group_download_export_limit,omitempty" json:"group_download_export_limit,omitempty"` - GroupExportLimit *int `url:"group_export_limit,omitempty" json:"group_export_limit,omitempty"` - GroupImportLimit *int `url:"group_import_limit,omitempty" json:"group_import_limit,omitempty"` - GroupOwnersCanManageDefaultBranchProtection *bool `url:"group_owners_can_manage_default_branch_protection,omitempty" json:"group_owners_can_manage_default_branch_protection,omitempty"` - GroupRunnerTokenExpirationInterval *int `url:"group_runner_token_expiration_interval,omitempty" json:"group_runner_token_expiration_interval,omitempty"` - HTMLEmailsEnabled *bool `url:"html_emails_enabled,omitempty" json:"html_emails_enabled,omitempty"` - HashedStorageEnabled *bool `url:"hashed_storage_enabled,omitempty" json:"hashed_storage_enabled,omitempty"` - HelpPageDocumentationBaseURL *string `url:"help_page_documentation_base_url,omitempty" json:"help_page_documentation_base_url,omitempty"` - HelpPageHideCommercialContent *bool `url:"help_page_hide_commercial_content,omitempty" 
json:"help_page_hide_commercial_content,omitempty"` - HelpPageSupportURL *string `url:"help_page_support_url,omitempty" json:"help_page_support_url,omitempty"` - HelpPageText *string `url:"help_page_text,omitempty" json:"help_page_text,omitempty"` - HelpText *string `url:"help_text,omitempty" json:"help_text,omitempty"` - HideThirdPartyOffers *bool `url:"hide_third_party_offers,omitempty" json:"hide_third_party_offers,omitempty"` - HomePageURL *string `url:"home_page_url,omitempty" json:"home_page_url,omitempty"` - HousekeepingBitmapsEnabled *bool `url:"housekeeping_bitmaps_enabled,omitempty" json:"housekeeping_bitmaps_enabled,omitempty"` - HousekeepingEnabled *bool `url:"housekeeping_enabled,omitempty" json:"housekeeping_enabled,omitempty"` - HousekeepingFullRepackPeriod *int `url:"housekeeping_full_repack_period,omitempty" json:"housekeeping_full_repack_period,omitempty"` - HousekeepingGcPeriod *int `url:"housekeeping_gc_period,omitempty" json:"housekeeping_gc_period,omitempty"` - HousekeepingIncrementalRepackPeriod *int `url:"housekeeping_incremental_repack_period,omitempty" json:"housekeeping_incremental_repack_period,omitempty"` - HousekeepingOptimizeRepositoryPeriod *int `url:"housekeeping_optimize_repository_period,omitempty" json:"housekeeping_optimize_repository_period,omitempty"` - ImportSources *[]string `url:"import_sources,omitempty" json:"import_sources,omitempty"` - InactiveProjectsDeleteAfterMonths *int `url:"inactive_projects_delete_after_months,omitempty" json:"inactive_projects_delete_after_months,omitempty"` - InactiveProjectsMinSizeMB *int `url:"inactive_projects_min_size_mb,omitempty" json:"inactive_projects_min_size_mb,omitempty"` - InactiveProjectsSendWarningEmailAfterMonths *int `url:"inactive_projects_send_warning_email_after_months,omitempty" json:"inactive_projects_send_warning_email_after_months,omitempty"` - InProductMarketingEmailsEnabled *bool `url:"in_product_marketing_emails_enabled,omitempty" json:"in_product_marketing_emails_enabled,omitempty"` - InvisibleCaptchaEnabled *bool `url:"invisible_captcha_enabled,omitempty" json:"invisible_captcha_enabled,omitempty"` - IssuesCreateLimit *int `url:"issues_create_limit,omitempty" json:"issues_create_limit,omitempty"` - KeepLatestArtifact *bool `url:"keep_latest_artifact,omitempty" json:"keep_latest_artifact,omitempty"` - KrokiEnabled *bool `url:"kroki_enabled,omitempty" json:"kroki_enabled,omitempty"` - KrokiFormats *map[string]bool `url:"kroki_formats,omitempty" json:"kroki_formats,omitempty"` - KrokiURL *string `url:"kroki_url,omitempty" json:"kroki_url,omitempty"` - LocalMarkdownVersion *int `url:"local_markdown_version,omitempty" json:"local_markdown_version,omitempty"` - LockMembershipsToLDAP *bool `url:"lock_memberships_to_ldap,omitempty" json:"lock_memberships_to_ldap,omitempty"` - LoginRecaptchaProtectionEnabled *bool `url:"login_recaptcha_protection_enabled,omitempty" json:"login_recaptcha_protection_enabled,omitempty"` - MailgunEventsEnabled *bool `url:"mailgun_events_enabled,omitempty" json:"mailgun_events_enabled,omitempty"` - MailgunSigningKey *string `url:"mailgun_signing_key,omitempty" json:"mailgun_signing_key,omitempty"` - MaintenanceMode *bool `url:"maintenance_mode,omitempty" json:"maintenance_mode,omitempty"` - MaintenanceModeMessage *string `url:"maintenance_mode_message,omitempty" json:"maintenance_mode_message,omitempty"` - MaxArtifactsSize *int `url:"max_artifacts_size,omitempty" json:"max_artifacts_size,omitempty"` - MaxAttachmentSize *int `url:"max_attachment_size,omitempty" 
json:"max_attachment_size,omitempty"` - MaxExportSize *int `url:"max_export_size,omitempty" json:"max_export_size,omitempty"` - MaxImportSize *int `url:"max_import_size,omitempty" json:"max_import_size,omitempty"` - MaxNumberOfRepositoryDownloads *int `url:"max_number_of_repository_downloads,omitempty" json:"max_number_of_repository_downloads,omitempty"` - MaxNumberOfRepositoryDownloadsWithinTimePeriod *int `url:"max_number_of_repository_downloads_within_time_period,omitempty" json:"max_number_of_repository_downloads_within_time_period,omitempty"` - MaxPagesSize *int `url:"max_pages_size,omitempty" json:"max_pages_size,omitempty"` - MaxPersonalAccessTokenLifetime *int `url:"max_personal_access_token_lifetime,omitempty" json:"max_personal_access_token_lifetime,omitempty"` - MaxSSHKeyLifetime *int `url:"max_ssh_key_lifetime,omitempty" json:"max_ssh_key_lifetime,omitempty"` - MaxYAMLDepth *int `url:"max_yaml_depth,omitempty" json:"max_yaml_depth,omitempty"` - MaxYAMLSizeBytes *int `url:"max_yaml_size_bytes,omitempty" json:"max_yaml_size_bytes,omitempty"` - MetricsMethodCallThreshold *int `url:"metrics_method_call_threshold,omitempty" json:"metrics_method_call_threshold,omitempty"` - MinimumPasswordLength *int `url:"minimum_password_length,omitempty" json:"minimum_password_length,omitempty"` - MirrorAvailable *bool `url:"mirror_available,omitempty" json:"mirror_available,omitempty"` - MirrorCapacityThreshold *int `url:"mirror_capacity_threshold,omitempty" json:"mirror_capacity_threshold,omitempty"` - MirrorMaxCapacity *int `url:"mirror_max_capacity,omitempty" json:"mirror_max_capacity,omitempty"` - MirrorMaxDelay *int `url:"mirror_max_delay,omitempty" json:"mirror_max_delay,omitempty"` - NPMPackageRequestsForwarding *bool `url:"npm_package_requests_forwarding,omitempty" json:"npm_package_requests_forwarding,omitempty"` - NotesCreateLimit *int `url:"notes_create_limit,omitempty" json:"notes_create_limit,omitempty"` - NotifyOnUnknownSignIn *bool `url:"notify_on_unknown_sign_in,omitempty" json:"notify_on_unknown_sign_in,omitempty"` - OutboundLocalRequestsAllowlistRaw *string `url:"outbound_local_requests_allowlist_raw,omitempty" json:"outbound_local_requests_allowlist_raw,omitempty"` - OutboundLocalRequestsWhitelist *[]string `url:"outbound_local_requests_whitelist,omitempty" json:"outbound_local_requests_whitelist,omitempty"` - PackageRegistryCleanupPoliciesWorkerCapacity *int `url:"package_registry_cleanup_policies_worker_capacity,omitempty" json:"package_registry_cleanup_policies_worker_capacity,omitempty"` - PagesDomainVerificationEnabled *bool `url:"pages_domain_verification_enabled,omitempty" json:"pages_domain_verification_enabled,omitempty"` - PasswordAuthenticationEnabledForGit *bool `url:"password_authentication_enabled_for_git,omitempty" json:"password_authentication_enabled_for_git,omitempty"` - PasswordAuthenticationEnabledForWeb *bool `url:"password_authentication_enabled_for_web,omitempty" json:"password_authentication_enabled_for_web,omitempty"` - PasswordNumberRequired *bool `url:"password_number_required,omitempty" json:"password_number_required,omitempty"` - PasswordSymbolRequired *bool `url:"password_symbol_required,omitempty" json:"password_symbol_required,omitempty"` - PasswordUppercaseRequired *bool `url:"password_uppercase_required,omitempty" json:"password_uppercase_required,omitempty"` - PasswordLowercaseRequired *bool `url:"password_lowercase_required,omitempty" json:"password_lowercase_required,omitempty"` - PerformanceBarAllowedGroupID *int 
`url:"performance_bar_allowed_group_id,omitempty" json:"performance_bar_allowed_group_id,omitempty"` - PerformanceBarAllowedGroupPath *string `url:"performance_bar_allowed_group_path,omitempty" json:"performance_bar_allowed_group_path,omitempty"` - PerformanceBarEnabled *bool `url:"performance_bar_enabled,omitempty" json:"performance_bar_enabled,omitempty"` - PersonalAccessTokenPrefix *string `url:"personal_access_token_prefix,omitempty" json:"personal_access_token_prefix,omitempty"` - PlantumlEnabled *bool `url:"plantuml_enabled,omitempty" json:"plantuml_enabled,omitempty"` - PlantumlURL *string `url:"plantuml_url,omitempty" json:"plantuml_url,omitempty"` - PipelineLimitPerProjectUserSha *int `url:"pipeline_limit_per_project_user_sha,omitempty" json:"pipeline_limit_per_project_user_sha,omitempty"` - PollingIntervalMultiplier *float64 `url:"polling_interval_multiplier,omitempty" json:"polling_interval_multiplier,omitempty"` - PreventMergeRequestsAuthorApproval *bool `url:"prevent_merge_requests_author_approval,omitempty" json:"prevent_merge_requests_author_approval,omitempty"` - PreventMergeRequestsCommittersApproval *bool `url:"prevent_merge_requests_committers_approval,omitempty" json:"prevent_merge_requests_committers_approval,omitempty"` - ProjectDownloadExportLimit *int `url:"project_download_export_limit,omitempty" json:"project_download_export_limit,omitempty"` - ProjectExportEnabled *bool `url:"project_export_enabled,omitempty" json:"project_export_enabled,omitempty"` - ProjectExportLimit *int `url:"project_export_limit,omitempty" json:"project_export_limit,omitempty"` - ProjectImportLimit *int `url:"project_import_limit,omitempty" json:"project_import_limit,omitempty"` - ProjectRunnerTokenExpirationInterval *int `url:"project_runner_token_expiration_interval,omitempty" json:"project_runner_token_expiration_interval,omitempty"` - PrometheusMetricsEnabled *bool `url:"prometheus_metrics_enabled,omitempty" json:"prometheus_metrics_enabled,omitempty"` - ProtectedCIVariables *bool `url:"protected_ci_variables,omitempty" json:"protected_ci_variables,omitempty"` - PseudonymizerEnabled *bool `url:"pseudonymizer_enabled,omitempty" json:"pseudonymizer_enabled,omitempty"` - PushEventActivitiesLimit *int `url:"push_event_activities_limit,omitempty" json:"push_event_activities_limit,omitempty"` - PushEventHooksLimit *int `url:"push_event_hooks_limit,omitempty" json:"push_event_hooks_limit,omitempty"` - PyPIPackageRequestsForwarding *bool `url:"pypi_package_requests_forwarding,omitempty" json:"pypi_package_requests_forwarding,omitempty"` - RSAKeyRestriction *int `url:"rsa_key_restriction,omitempty" json:"rsa_key_restriction,omitempty"` - RateLimitingResponseText *string `url:"rate_limiting_response_text,omitempty" json:"rate_limiting_response_text,omitempty"` - RawBlobRequestLimit *int `url:"raw_blob_request_limit,omitempty" json:"raw_blob_request_limit,omitempty"` - RecaptchaEnabled *bool `url:"recaptcha_enabled,omitempty" json:"recaptcha_enabled,omitempty"` - RecaptchaPrivateKey *string `url:"recaptcha_private_key,omitempty" json:"recaptcha_private_key,omitempty"` - RecaptchaSiteKey *string `url:"recaptcha_site_key,omitempty" json:"recaptcha_site_key,omitempty"` - ReceiveMaxInputSize *int `url:"receive_max_input_size,omitempty" json:"receive_max_input_size,omitempty"` - RepositoryChecksEnabled *bool `url:"repository_checks_enabled,omitempty" json:"repository_checks_enabled,omitempty"` - RepositorySizeLimit *int `url:"repository_size_limit,omitempty" json:"repository_size_limit,omitempty"` - 
-	RepositoryStorages *[]string `url:"repository_storages,omitempty" json:"repository_storages,omitempty"`
-	RepositoryStoragesWeighted *map[string]int `url:"repository_storages_weighted,omitempty" json:"repository_storages_weighted,omitempty"`
-	RequireAdminApprovalAfterUserSignup *bool `url:"require_admin_approval_after_user_signup,omitempty" json:"require_admin_approval_after_user_signup,omitempty"`
-	RequireTwoFactorAuthentication *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"`
-	RestrictedVisibilityLevels *[]VisibilityValue `url:"restricted_visibility_levels,omitempty" json:"restricted_visibility_levels,omitempty"`
-	RunnerTokenExpirationInterval *int `url:"runner_token_expiration_interval,omitempty" json:"runner_token_expiration_interval,omitempty"`
-	SearchRateLimit *int `url:"search_rate_limit,omitempty" json:"search_rate_limit,omitempty"`
-	SearchRateLimitUnauthenticated *int `url:"search_rate_limit_unauthenticated,omitempty" json:"search_rate_limit_unauthenticated,omitempty"`
-	SecretDetectionRevocationTokenTypesURL *string `url:"secret_detection_revocation_token_types_url,omitempty" json:"secret_detection_revocation_token_types_url,omitempty"`
-	SecretDetectionTokenRevocationEnabled *bool `url:"secret_detection_token_revocation_enabled,omitempty" json:"secret_detection_token_revocation_enabled,omitempty"`
-	SecretDetectionTokenRevocationToken *string `url:"secret_detection_token_revocation_token,omitempty" json:"secret_detection_token_revocation_token,omitempty"`
-	SecretDetectionTokenRevocationURL *string `url:"secret_detection_token_revocation_url,omitempty" json:"secret_detection_token_revocation_url,omitempty"`
-	SendUserConfirmationEmail *bool `url:"send_user_confirmation_email,omitempty" json:"send_user_confirmation_email,omitempty"`
-	SentryClientsideDSN *string `url:"sentry_clientside_dsn,omitempty" json:"sentry_clientside_dsn,omitempty"`
-	SentryDSN *string `url:"sentry_dsn,omitempty" json:"sentry_dsn,omitempty"`
-	SentryEnabled *string `url:"sentry_enabled,omitempty" json:"sentry_enabled,omitempty"`
-	SentryEnvironment *string `url:"sentry_environment,omitempty" json:"sentry_environment,omitempty"`
-	SessionExpireDelay *int `url:"session_expire_delay,omitempty" json:"session_expire_delay,omitempty"`
-	SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"`
-	SharedRunnersMinutes *int `url:"shared_runners_minutes,omitempty" json:"shared_runners_minutes,omitempty"`
-	SharedRunnersText *string `url:"shared_runners_text,omitempty" json:"shared_runners_text,omitempty"`
-	SidekiqJobLimiterCompressionThresholdBytes *int `url:"sidekiq_job_limiter_compression_threshold_bytes,omitempty" json:"sidekiq_job_limiter_compression_threshold_bytes,omitempty"`
-	SidekiqJobLimiterLimitBytes *int `url:"sidekiq_job_limiter_limit_bytes,omitempty" json:"sidekiq_job_limiter_limit_bytes,omitempty"`
-	SidekiqJobLimiterMode *string `url:"sidekiq_job_limiter_mode,omitempty" json:"sidekiq_job_limiter_mode,omitempty"`
-	SignInText *string `url:"sign_in_text,omitempty" json:"sign_in_text,omitempty"`
-	SignupEnabled *bool `url:"signup_enabled,omitempty" json:"signup_enabled,omitempty"`
-	SlackAppEnabled *bool `url:"slack_app_enabled,omitempty" json:"slack_app_enabled,omitempty"`
-	SlackAppID *string `url:"slack_app_id,omitempty" json:"slack_app_id,omitempty"`
-	SlackAppSecret *string `url:"slack_app_secret,omitempty" json:"slack_app_secret,omitempty"`
-	SlackAppSigningSecret *string `url:"slack_app_signing_secret,omitempty" json:"slack_app_signing_secret,omitempty"`
-	SlackAppVerificationToken *string `url:"slack_app_verification_token,omitempty" json:"slack_app_verification_token,omitempty"`
-	SnippetSizeLimit *int `url:"snippet_size_limit,omitempty" json:"snippet_size_limit,omitempty"`
-	SnowplowAppID *string `url:"snowplow_app_id,omitempty" json:"snowplow_app_id,omitempty"`
-	SnowplowCollectorHostname *string `url:"snowplow_collector_hostname,omitempty" json:"snowplow_collector_hostname,omitempty"`
-	SnowplowCookieDomain *string `url:"snowplow_cookie_domain,omitempty" json:"snowplow_cookie_domain,omitempty"`
-	SnowplowEnabled *bool `url:"snowplow_enabled,omitempty" json:"snowplow_enabled,omitempty"`
-	SourcegraphEnabled *bool `url:"sourcegraph_enabled,omitempty" json:"sourcegraph_enabled,omitempty"`
-	SourcegraphPublicOnly *bool `url:"sourcegraph_public_only,omitempty" json:"sourcegraph_public_only,omitempty"`
-	SourcegraphURL *string `url:"sourcegraph_url,omitempty" json:"sourcegraph_url,omitempty"`
-	SpamCheckAPIKey *string `url:"spam_check_api_key,omitempty" json:"spam_check_api_key,omitempty"`
-	SpamCheckEndpointEnabled *bool `url:"spam_check_endpoint_enabled,omitempty" json:"spam_check_endpoint_enabled,omitempty"`
-	SpamCheckEndpointURL *string `url:"spam_check_endpoint_url,omitempty" json:"spam_check_endpoint_url,omitempty"`
-	SuggestPipelineEnabled *bool `url:"suggest_pipeline_enabled,omitempty" json:"suggest_pipeline_enabled,omitempty"`
-	TerminalMaxSessionTime *int `url:"terminal_max_session_time,omitempty" json:"terminal_max_session_time,omitempty"`
-	Terms *string `url:"terms,omitempty" json:"terms,omitempty"`
-	ThrottleAuthenticatedAPIEnabled *bool `url:"throttle_authenticated_api_enabled,omitempty" json:"throttle_authenticated_api_enabled,omitempty"`
-	ThrottleAuthenticatedAPIPeriodInSeconds *int `url:"throttle_authenticated_api_period_in_seconds,omitempty" json:"throttle_authenticated_api_period_in_seconds,omitempty"`
-	ThrottleAuthenticatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_api_requests_per_period,omitempty" json:"throttle_authenticated_api_requests_per_period,omitempty"`
-	ThrottleAuthenticatedDeprecatedAPIEnabled *bool `url:"throttle_authenticated_deprecated_api_enabled,omitempty" json:"throttle_authenticated_deprecated_api_enabled,omitempty"`
-	ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty"`
-	ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_authenticated_deprecated_api_requests_per_period,omitempty"`
-	ThrottleAuthenticatedFilesAPIEnabled *bool `url:"throttle_authenticated_files_api_enabled,omitempty" json:"throttle_authenticated_files_api_enabled,omitempty"`
-	ThrottleAuthenticatedFilesAPIPeriodInSeconds *int `url:"throttle_authenticated_files_api_period_in_seconds,omitempty" json:"throttle_authenticated_files_api_period_in_seconds,omitempty"`
-	ThrottleAuthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_authenticated_files_api_requests_per_period,omitempty" json:"throttle_authenticated_files_api_requests_per_period,omitempty"`
-	ThrottleAuthenticatedGitLFSEnabled *bool `url:"throttle_authenticated_git_lfs_enabled,omitempty" json:"throttle_authenticated_git_lfs_enabled,omitempty"`
-	ThrottleAuthenticatedGitLFSPeriodInSeconds *int `url:"throttle_authenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_authenticated_git_lfs_period_in_seconds,omitempty"`
-	ThrottleAuthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_authenticated_git_lfs_requests_per_period,omitempty" json:"throttle_authenticated_git_lfs_requests_per_period,omitempty"`
-	ThrottleAuthenticatedPackagesAPIEnabled *bool `url:"throttle_authenticated_packages_api_enabled,omitempty" json:"throttle_authenticated_packages_api_enabled,omitempty"`
-	ThrottleAuthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_authenticated_packages_api_period_in_seconds,omitempty" json:"throttle_authenticated_packages_api_period_in_seconds,omitempty"`
-	ThrottleAuthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_authenticated_packages_api_requests_per_period,omitempty" json:"throttle_authenticated_packages_api_requests_per_period,omitempty"`
-	ThrottleAuthenticatedWebEnabled *bool `url:"throttle_authenticated_web_enabled,omitempty" json:"throttle_authenticated_web_enabled,omitempty"`
-	ThrottleAuthenticatedWebPeriodInSeconds *int `url:"throttle_authenticated_web_period_in_seconds,omitempty" json:"throttle_authenticated_web_period_in_seconds,omitempty"`
-	ThrottleAuthenticatedWebRequestsPerPeriod *int `url:"throttle_authenticated_web_requests_per_period,omitempty" json:"throttle_authenticated_web_requests_per_period,omitempty"`
-	ThrottleIncidentManagementNotificationEnabled *bool `url:"throttle_incident_management_notification_enabled,omitempty" json:"throttle_incident_management_notification_enabled,omitempty"`
-	ThrottleIncidentManagementNotificationPerPeriod *int `url:"throttle_incident_management_notification_per_period,omitempty" json:"throttle_incident_management_notification_per_period,omitempty"`
-	ThrottleIncidentManagementNotificationPeriodInSeconds *int `url:"throttle_incident_management_notification_period_in_seconds,omitempty" json:"throttle_incident_management_notification_period_in_seconds,omitempty"`
-	ThrottleProtectedPathsEnabled *bool `url:"throttle_protected_paths_enabled_enabled,omitempty" json:"throttle_protected_paths_enabled,omitempty"`
-	ThrottleProtectedPathsPeriodInSeconds *int `url:"throttle_protected_paths_enabled_period_in_seconds,omitempty" json:"throttle_protected_paths_period_in_seconds,omitempty"`
-	ThrottleProtectedPathsRequestsPerPeriod *int `url:"throttle_protected_paths_enabled_requests_per_period,omitempty" json:"throttle_protected_paths_per_period,omitempty"`
-	ThrottleUnauthenticatedAPIEnabled *bool `url:"throttle_unauthenticated_api_enabled,omitempty" json:"throttle_unauthenticated_api_enabled,omitempty"`
-	ThrottleUnauthenticatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_api_period_in_seconds,omitempty"`
-	ThrottleUnauthenticatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_api_requests_per_period,omitempty"`
-	ThrottleUnauthenticatedDeprecatedAPIEnabled *bool `url:"throttle_unauthenticated_deprecated_api_enabled,omitempty" json:"throttle_unauthenticated_deprecated_api_enabled,omitempty"`
-	ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty"`
-	ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty"`
-	ThrottleUnauthenticatedEnabled *bool `url:"throttle_unauthenticated_enabled,omitempty" json:"throttle_unauthenticated_enabled,omitempty"`
-	ThrottleUnauthenticatedFilesAPIEnabled *bool `url:"throttle_unauthenticated_files_api_enabled,omitempty" json:"throttle_unauthenticated_files_api_enabled,omitempty"`
-	ThrottleUnauthenticatedFilesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_files_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_files_api_period_in_seconds,omitempty"`
-	ThrottleUnauthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_files_api_requests_per_period,omitempty" json:"throttle_unauthenticated_files_api_requests_per_period,omitempty"`
-	ThrottleUnauthenticatedGitLFSEnabled *bool `url:"throttle_unauthenticated_git_lfs_enabled,omitempty" json:"throttle_unauthenticated_git_lfs_enabled,omitempty"`
-	ThrottleUnauthenticatedGitLFSPeriodInSeconds *int `url:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty"`
-	ThrottleUnauthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty" json:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty"`
-	ThrottleUnauthenticatedPackagesAPIEnabled *bool `url:"throttle_unauthenticated_packages_api_enabled,omitempty" json:"throttle_unauthenticated_packages_api_enabled,omitempty"`
-	ThrottleUnauthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty"`
-	ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_packages_api_requests_per_period,omitempty" json:"throttle_unauthenticated_packages_api_requests_per_period,omitempty"`
-	ThrottleUnauthenticatedPeriodInSeconds *int `url:"throttle_unauthenticated_period_in_seconds,omitempty" json:"throttle_unauthenticated_period_in_seconds,omitempty"`
-	ThrottleUnauthenticatedRequestsPerPeriod *int `url:"throttle_unauthenticated_requests_per_period,omitempty" json:"throttle_unauthenticated_requests_per_period,omitempty"`
-	ThrottleUnauthenticatedWebEnabled *bool `url:"throttle_unauthenticated_web_enabled,omitempty" json:"throttle_unauthenticated_web_enabled,omitempty"`
-	ThrottleUnauthenticatedWebPeriodInSeconds *int `url:"throttle_unauthenticated_web_period_in_seconds,omitempty" json:"throttle_unauthenticated_web_period_in_seconds,omitempty"`
-	ThrottleUnauthenticatedWebRequestsPerPeriod *int `url:"throttle_unauthenticated_web_requests_per_period,omitempty" json:"throttle_unauthenticated_web_requests_per_period,omitempty"`
-	TimeTrackingLimitToHours *bool `url:"time_tracking_limit_to_hours,omitempty" json:"time_tracking_limit_to_hours,omitempty"`
-	TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"`
-	UniqueIPsLimitEnabled *bool `url:"unique_ips_limit_enabled,omitempty" json:"unique_ips_limit_enabled,omitempty"`
-	UniqueIPsLimitPerUser *int `url:"unique_ips_limit_per_user,omitempty" json:"unique_ips_limit_per_user,omitempty"`
-	UniqueIPsLimitTimeWindow *int `url:"unique_ips_limit_time_window,omitempty" json:"unique_ips_limit_time_window,omitempty"`
-	UpdatingNameDisabledForUsers *bool `url:"updating_name_disabled_for_users,omitempty" json:"updating_name_disabled_for_users,omitempty"`
-	UsagePingEnabled *bool `url:"usage_ping_enabled,omitempty" json:"usage_ping_enabled,omitempty"`
-	UsagePingFeaturesEnabled *bool `url:"usage_ping_features_enabled,omitempty" json:"usage_ping_features_enabled,omitempty"`
-	UserDeactivationEmailsEnabled *bool `url:"user_deactivation_emails_enabled,omitempty" json:"user_deactivation_emails_enabled,omitempty"`
-	UserDefaultExternal *bool `url:"user_default_external,omitempty" json:"user_default_external,omitempty"`
-	UserDefaultInternalRegex *string `url:"user_default_internal_regex,omitempty" json:"user_default_internal_regex,omitempty"`
-	UserEmailLookupLimit *int `url:"user_email_lookup_limit,omitempty" json:"user_email_lookup_limit,omitempty"`
-	UserOauthApplications *bool `url:"user_oauth_applications,omitempty" json:"user_oauth_applications,omitempty"`
-	UserShowAddSSHKeyMessage *bool `url:"user_show_add_ssh_key_message,omitempty" json:"user_show_add_ssh_key_message,omitempty"`
-	UsersGetByIDLimit *int `url:"users_get_by_id_limit,omitempty" json:"users_get_by_id_limit,omitempty"`
-	UsersGetByIDLimitAllowlistRaw *string `url:"users_get_by_id_limit_allowlist_raw,omitempty" json:"users_get_by_id_limit_allowlist_raw,omitempty"`
-	VersionCheckEnabled *bool `url:"version_check_enabled,omitempty" json:"version_check_enabled,omitempty"`
-	WebIDEClientsidePreviewEnabled *bool `url:"web_ide_clientside_preview_enabled,omitempty" json:"web_ide_clientside_preview_enabled,omitempty"`
-	WhatsNewVariant *string `url:"whats_new_variant,omitempty" json:"whats_new_variant,omitempty"`
-	WikiPageMaxContentBytes *int `url:"wiki_page_max_content_bytes,omitempty" json:"wiki_page_max_content_bytes,omitempty"`
+	AbuseNotificationEmail *string `url:"abuse_notification_email,omitempty" json:"abuse_notification_email,omitempty"`
+	AdminMode *bool `url:"admin_mode,omitempty" json:"admin_mode,omitempty"`
+	AdminNotificationEmail *string `url:"admin_notification_email,omitempty" json:"admin_notification_email,omitempty"`
+	AfterSignOutPath *string `url:"after_sign_out_path,omitempty" json:"after_sign_out_path,omitempty"`
+	AfterSignUpText *string `url:"after_sign_up_text,omitempty" json:"after_sign_up_text,omitempty"`
+	AkismetAPIKey *string `url:"akismet_api_key,omitempty" json:"akismet_api_key,omitempty"`
+	AkismetEnabled *bool `url:"akismet_enabled,omitempty" json:"akismet_enabled,omitempty"`
+	AllowAccountDeletion *bool `url:"allow_account_deletion,omitempty" json:"allow_account_deletion,omitempty"`
+	AllowGroupOwnersToManageLDAP *bool `url:"allow_group_owners_to_manage_ldap,omitempty" json:"allow_group_owners_to_manage_ldap,omitempty"`
+	AllowLocalRequestsFromHooksAndServices *bool `url:"allow_local_requests_from_hooks_and_services,omitempty" json:"allow_local_requests_from_hooks_and_services,omitempty"`
+	AllowLocalRequestsFromSystemHooks *bool `url:"allow_local_requests_from_system_hooks,omitempty" json:"allow_local_requests_from_system_hooks,omitempty"`
+	AllowLocalRequestsFromWebHooksAndServices *bool `url:"allow_local_requests_from_web_hooks_and_services,omitempty" json:"allow_local_requests_from_web_hooks_and_services,omitempty"`
+	AllowProjectCreationForGuestAndBelow *bool `url:"allow_project_creation_for_guest_and_below,omitempty" json:"allow_project_creation_for_guest_and_below,omitempty"`
+	AllowRunnerRegistrationToken *bool `url:"allow_runner_registration_token,omitempty" json:"allow_runner_registration_token,omitempty"`
+	ArchiveBuildsInHumanReadable *string `url:"archive_builds_in_human_readable,omitempty" json:"archive_builds_in_human_readable,omitempty"`
+	ASCIIDocMaxIncludes *int `url:"asciidoc_max_includes,omitempty" json:"asciidoc_max_includes,omitempty"`
+	AssetProxyAllowlist *[]string `url:"asset_proxy_allowlist,omitempty" json:"asset_proxy_allowlist,omitempty"`
+	AssetProxyEnabled *bool `url:"asset_proxy_enabled,omitempty" json:"asset_proxy_enabled,omitempty"`
+	AssetProxySecretKey *string `url:"asset_proxy_secret_key,omitempty" json:"asset_proxy_secret_key,omitempty"`
+	AssetProxyURL *string `url:"asset_proxy_url,omitempty" json:"asset_proxy_url,omitempty"`
+	AssetProxyWhitelist *[]string `url:"asset_proxy_whitelist,omitempty" json:"asset_proxy_whitelist,omitempty"`
+	AuthorizedKeysEnabled *bool `url:"authorized_keys_enabled,omitempty" json:"authorized_keys_enabled,omitempty"`
+	AutoBanUserOnExcessiveProjectsDownload *bool `url:"auto_ban_user_on_excessive_projects_download,omitempty" json:"auto_ban_user_on_excessive_projects_download,omitempty"`
+	AutoDevOpsDomain *string `url:"auto_devops_domain,omitempty" json:"auto_devops_domain,omitempty"`
+	AutoDevOpsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"`
+	AutomaticPurchasedStorageAllocation *bool `url:"automatic_purchased_storage_allocation,omitempty" json:"automatic_purchased_storage_allocation,omitempty"`
+	BulkImportConcurrentPipelineBatchLimit *int `url:"bulk_import_concurrent_pipeline_batch_limit,omitempty" json:"bulk_import_concurrent_pipeline_batch_limit,omitempty"`
+	BulkImportEnabled *bool `url:"bulk_import_enabled,omitempty" json:"bulk_import_enabled,omitempty"`
+	BulkImportMaxDownloadFileSize *int `url:"bulk_import_max_download_file_size,omitempty" json:"bulk_import_max_download_file_size,omitempty"`
+	CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"`
+	CheckNamespacePlan *bool `url:"check_namespace_plan,omitempty" json:"check_namespace_plan,omitempty"`
+	CIMaxIncludes *int `url:"ci_max_includes,omitempty" json:"ci_max_includes,omitempty"`
+	CIMaxTotalYAMLSizeBytes *int `url:"ci_max_total_yaml_size_bytes,omitempty" json:"ci_max_total_yaml_size_bytes,omitempty"`
+	CommitEmailHostname *string `url:"commit_email_hostname,omitempty" json:"commit_email_hostname,omitempty"`
+	ConcurrentBitbucketImportJobsLimit *int `url:"concurrent_bitbucket_import_jobs_limit,omitempty" json:"concurrent_bitbucket_import_jobs_limit,omitempty"`
+	ConcurrentBitbucketServerImportJobsLimit *int `url:"concurrent_bitbucket_server_import_jobs_limit,omitempty" json:"concurrent_bitbucket_server_import_jobs_limit,omitempty"`
+	ConcurrentGitHubImportJobsLimit *int `url:"concurrent_github_import_jobs_limit,omitempty" json:"concurrent_github_import_jobs_limit,omitempty"`
+	ContainerExpirationPoliciesEnableHistoricEntries *bool `url:"container_expiration_policies_enable_historic_entries,omitempty" json:"container_expiration_policies_enable_historic_entries,omitempty"`
+	ContainerRegistryCleanupTagsServiceMaxListSize *int `url:"container_registry_cleanup_tags_service_max_list_size,omitempty" json:"container_registry_cleanup_tags_service_max_list_size,omitempty"`
+	ContainerRegistryDeleteTagsServiceTimeout *int `url:"container_registry_delete_tags_service_timeout,omitempty" json:"container_registry_delete_tags_service_timeout,omitempty"`
+	ContainerRegistryExpirationPoliciesCaching *bool `url:"container_registry_expiration_policies_caching,omitempty" json:"container_registry_expiration_policies_caching,omitempty"`
+	ContainerRegistryExpirationPoliciesWorkerCapacity *int `url:"container_registry_expiration_policies_worker_capacity,omitempty" json:"container_registry_expiration_policies_worker_capacity,omitempty"`
+	ContainerRegistryImportCreatedBefore *time.Time `url:"container_registry_import_created_before,omitempty" json:"container_registry_import_created_before,omitempty"`
+	ContainerRegistryImportMaxRetries *int `url:"container_registry_import_max_retries,omitempty" json:"container_registry_import_max_retries,omitempty"`
+	ContainerRegistryImportMaxStepDuration *int `url:"container_registry_import_max_step_duration,omitempty" json:"container_registry_import_max_step_duration,omitempty"`
+	ContainerRegistryImportMaxTagsCount *int `url:"container_registry_import_max_tags_count,omitempty" json:"container_registry_import_max_tags_count,omitempty"`
+	ContainerRegistryImportStartMaxRetries *int `url:"container_registry_import_start_max_retries,omitempty" json:"container_registry_import_start_max_retries,omitempty"`
+	ContainerRegistryImportTargetPlan *string `url:"container_registry_import_target_plan,omitempty" json:"container_registry_import_target_plan,omitempty"`
+	ContainerRegistryTokenExpireDelay *int `url:"container_registry_token_expire_delay,omitempty" json:"container_registry_token_expire_delay,omitempty"`
+	CustomHTTPCloneURLRoot *string `url:"custom_http_clone_url_root,omitempty" json:"custom_http_clone_url_root,omitempty"`
+	DNSRebindingProtectionEnabled *bool `url:"dns_rebinding_protection_enabled,omitempty" json:"dns_rebinding_protection_enabled,omitempty"`
+	DSAKeyRestriction *int `url:"dsa_key_restriction,omitempty" json:"dsa_key_restriction,omitempty"`
+	DeactivateDormantUsers *bool `url:"deactivate_dormant_users,omitempty" json:"deactivate_dormant_users,omitempty"`
+	DeactivateDormantUsersPeriod *int `url:"deactivate_dormant_users_period,omitempty" json:"deactivate_dormant_users_period,omitempty"`
+	DecompressArchiveFileTimeout *int `url:"decompress_archive_file_timeout,omitempty" json:"decompress_archive_file_timeout,omitempty"`
+	DefaultArtifactsExpireIn *string `url:"default_artifacts_expire_in,omitempty" json:"default_artifacts_expire_in,omitempty"`
+	DefaultBranchName *string `url:"default_branch_name,omitempty" json:"default_branch_name,omitempty"`
+	DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"`
+	DefaultBranchProtectionDefaults *BranchProtectionDefaultsOptions `url:"default_branch_protection_defaults,omitempty" json:"default_branch_protection_defaults,omitempty"`
+	DefaultCiConfigPath *string `url:"default_ci_config_path,omitempty" json:"default_ci_config_path,omitempty"`
+	DefaultGroupVisibility *VisibilityValue `url:"default_group_visibility,omitempty" json:"default_group_visibility,omitempty"`
+	DefaultPreferredLanguage *string `url:"default_preferred_language,omitempty" json:"default_preferred_language,omitempty"`
+	DefaultProjectCreation *int `url:"default_project_creation,omitempty" json:"default_project_creation,omitempty"`
+	DefaultProjectDeletionProtection *bool `url:"default_project_deletion_protection,omitempty" json:"default_project_deletion_protection,omitempty"`
+	DefaultProjectVisibility *VisibilityValue `url:"default_project_visibility,omitempty" json:"default_project_visibility,omitempty"`
+	DefaultProjectsLimit *int `url:"default_projects_limit,omitempty" json:"default_projects_limit,omitempty"`
+	DefaultSnippetVisibility *VisibilityValue `url:"default_snippet_visibility,omitempty" json:"default_snippet_visibility,omitempty"`
+	DefaultSyntaxHighlightingTheme *int `url:"default_syntax_highlighting_theme,omitempty" json:"default_syntax_highlighting_theme,omitempty"`
+	DelayedGroupDeletion *bool `url:"delayed_group_deletion,omitempty" json:"delayed_group_deletion,omitempty"`
+	DelayedProjectDeletion *bool `url:"delayed_project_deletion,omitempty" json:"delayed_project_deletion,omitempty"`
+	DeleteInactiveProjects *bool `url:"delete_inactive_projects,omitempty" json:"delete_inactive_projects,omitempty"`
+	DeleteUnconfirmedUsers *bool `url:"delete_unconfirmed_users,omitempty" json:"delete_unconfirmed_users,omitempty"`
+	DeletionAdjournedPeriod *int `url:"deletion_adjourned_period,omitempty" json:"deletion_adjourned_period,omitempty"`
+	DiagramsnetEnabled *bool `url:"diagramsnet_enabled,omitempty" json:"diagramsnet_enabled,omitempty"`
+	DiagramsnetURL *string `url:"diagramsnet_url,omitempty" json:"diagramsnet_url,omitempty"`
+	DiffMaxFiles *int `url:"diff_max_files,omitempty" json:"diff_max_files,omitempty"`
+	DiffMaxLines *int `url:"diff_max_lines,omitempty" json:"diff_max_lines,omitempty"`
+	DiffMaxPatchBytes *int `url:"diff_max_patch_bytes,omitempty" json:"diff_max_patch_bytes,omitempty"`
+	DisableFeedToken *bool `url:"disable_feed_token,omitempty" json:"disable_feed_token,omitempty"`
+	DisableAdminOAuthScopes *bool `url:"disable_admin_oauth_scopes,omitempty" json:"disable_admin_oauth_scopes,omitempty"`
+	DisableOverridingApproversPerMergeRequest *bool `url:"disable_overriding_approvers_per_merge_request,omitempty" json:"disable_overriding_approvers_per_merge_request,omitempty"`
+	DisablePersonalAccessTokens *bool `url:"disable_personal_access_tokens,omitempty" json:"disable_personal_access_tokens,omitempty"`
+	DisabledOauthSignInSources *[]string `url:"disabled_oauth_sign_in_sources,omitempty" json:"disabled_oauth_sign_in_sources,omitempty"`
+	DomainAllowlist *[]string `url:"domain_allowlist,omitempty" json:"domain_allowlist,omitempty"`
+	DomainDenylist *[]string `url:"domain_denylist,omitempty" json:"domain_denylist,omitempty"`
+	DomainDenylistEnabled *bool `url:"domain_denylist_enabled,omitempty" json:"domain_denylist_enabled,omitempty"`
+	DownstreamPipelineTriggerLimitPerProjectUserSHA *int `url:"downstream_pipeline_trigger_limit_per_project_user_sha,omitempty" json:"downstream_pipeline_trigger_limit_per_project_user_sha,omitempty"`
+	DuoFeaturesEnabled *bool `url:"duo_features_enabled,omitempty" json:"duo_features_enabled,omitempty"`
+	ECDSAKeyRestriction *int `url:"ecdsa_key_restriction,omitempty" json:"ecdsa_key_restriction,omitempty"`
+	ECDSASKKeyRestriction *int `url:"ecdsa_sk_key_restriction,omitempty" json:"ecdsa_sk_key_restriction,omitempty"`
+	EKSAccessKeyID *string `url:"eks_access_key_id,omitempty" json:"eks_access_key_id,omitempty"`
+	EKSAccountID *string `url:"eks_account_id,omitempty" json:"eks_account_id,omitempty"`
+	EKSIntegrationEnabled *bool `url:"eks_integration_enabled,omitempty" json:"eks_integration_enabled,omitempty"`
+	EKSSecretAccessKey *string `url:"eks_secret_access_key,omitempty" json:"eks_secret_access_key,omitempty"`
+	Ed25519KeyRestriction *int `url:"ed25519_key_restriction,omitempty" json:"ed25519_key_restriction,omitempty"`
+	Ed25519SKKeyRestriction *int `url:"ed25519_sk_key_restriction,omitempty" json:"ed25519_sk_key_restriction,omitempty"`
+	ElasticsearchAWS *bool `url:"elasticsearch_aws,omitempty" json:"elasticsearch_aws,omitempty"`
+	ElasticsearchAWSAccessKey *string `url:"elasticsearch_aws_access_key,omitempty" json:"elasticsearch_aws_access_key,omitempty"`
+	ElasticsearchAWSRegion *string `url:"elasticsearch_aws_region,omitempty" json:"elasticsearch_aws_region,omitempty"`
+	ElasticsearchAWSSecretAccessKey *string `url:"elasticsearch_aws_secret_access_key,omitempty" json:"elasticsearch_aws_secret_access_key,omitempty"`
+	ElasticsearchAnalyzersKuromojiEnabled *bool `url:"elasticsearch_analyzers_kuromoji_enabled,omitempty" json:"elasticsearch_analyzers_kuromoji_enabled,omitempty"`
+	ElasticsearchAnalyzersKuromojiSearch *int `url:"elasticsearch_analyzers_kuromoji_search,omitempty" json:"elasticsearch_analyzers_kuromoji_search,omitempty"`
+	ElasticsearchAnalyzersSmartCNEnabled *bool `url:"elasticsearch_analyzers_smartcn_enabled,omitempty" json:"elasticsearch_analyzers_smartcn_enabled,omitempty"`
+	ElasticsearchAnalyzersSmartCNSearch *int `url:"elasticsearch_analyzers_smartcn_search,omitempty" json:"elasticsearch_analyzers_smartcn_search,omitempty"`
+	ElasticsearchClientRequestTimeout *int `url:"elasticsearch_client_request_timeout,omitempty" json:"elasticsearch_client_request_timeout,omitempty"`
+	ElasticsearchIndexedFieldLengthLimit *int `url:"elasticsearch_indexed_field_length_limit,omitempty" json:"elasticsearch_indexed_field_length_limit,omitempty"`
+	ElasticsearchIndexedFileSizeLimitKB *int `url:"elasticsearch_indexed_file_size_limit_kb,omitempty" json:"elasticsearch_indexed_file_size_limit_kb,omitempty"`
+	ElasticsearchIndexing *bool `url:"elasticsearch_indexing,omitempty" json:"elasticsearch_indexing,omitempty"`
+	ElasticsearchLimitIndexing *bool `url:"elasticsearch_limit_indexing,omitempty" json:"elasticsearch_limit_indexing,omitempty"`
+	ElasticsearchMaxBulkConcurrency *int `url:"elasticsearch_max_bulk_concurrency,omitempty" json:"elasticsearch_max_bulk_concurrency,omitempty"`
+	ElasticsearchMaxBulkSizeMB *int `url:"elasticsearch_max_bulk_size_mb,omitempty" json:"elasticsearch_max_bulk_size_mb,omitempty"`
+	ElasticsearchMaxCodeIndexingConcurrency *int `url:"elasticsearch_max_code_indexing_concurrency,omitempty" json:"elasticsearch_max_code_indexing_concurrency,omitempty"`
+	ElasticsearchNamespaceIDs *[]int `url:"elasticsearch_namespace_ids,omitempty" json:"elasticsearch_namespace_ids,omitempty"`
+	ElasticsearchPassword *string `url:"elasticsearch_password,omitempty" json:"elasticsearch_password,omitempty"`
+	ElasticsearchPauseIndexing *bool `url:"elasticsearch_pause_indexing,omitempty" json:"elasticsearch_pause_indexing,omitempty"`
+	ElasticsearchProjectIDs *[]int `url:"elasticsearch_project_ids,omitempty" json:"elasticsearch_project_ids,omitempty"`
+	ElasticsearchReplicas *int `url:"elasticsearch_replicas,omitempty" json:"elasticsearch_replicas,omitempty"`
+	ElasticsearchRequeueWorkers *bool `url:"elasticsearch_requeue_workers,omitempty" json:"elasticsearch_requeue_workers,omitempty"`
+	ElasticsearchSearch *bool `url:"elasticsearch_search,omitempty" json:"elasticsearch_search,omitempty"`
+	ElasticsearchShards *int `url:"elasticsearch_shards,omitempty" json:"elasticsearch_shards,omitempty"`
+	ElasticsearchURL *string `url:"elasticsearch_url,omitempty" json:"elasticsearch_url,omitempty"`
+	ElasticsearchUsername *string `url:"elasticsearch_username,omitempty" json:"elasticsearch_username,omitempty"`
+	ElasticsearchWorkerNumberOfShards *int `url:"elasticsearch_worker_number_of_shards,omitempty" json:"elasticsearch_worker_number_of_shards,omitempty"`
+	EmailAdditionalText *string `url:"email_additional_text,omitempty" json:"email_additional_text,omitempty"`
+	EmailAuthorInBody *bool `url:"email_author_in_body,omitempty" json:"email_author_in_body,omitempty"`
+	EmailConfirmationSetting *string `url:"email_confirmation_setting,omitempty" json:"email_confirmation_setting,omitempty"`
+	EmailRestrictions *string `url:"email_restrictions,omitempty" json:"email_restrictions,omitempty"`
+	EmailRestrictionsEnabled *bool `url:"email_restrictions_enabled,omitempty" json:"email_restrictions_enabled,omitempty"`
+	EnableArtifactExternalRedirectWarningPage *bool `url:"enable_artifact_external_redirect_warning_page,omitempty" json:"enable_artifact_external_redirect_warning_page,omitempty"`
+	EnabledGitAccessProtocol *string `url:"enabled_git_access_protocol,omitempty" json:"enabled_git_access_protocol,omitempty"`
+	EnforceNamespaceStorageLimit *bool `url:"enforce_namespace_storage_limit,omitempty" json:"enforce_namespace_storage_limit,omitempty"`
+	EnforcePATExpiration *bool `url:"enforce_pat_expiration,omitempty" json:"enforce_pat_expiration,omitempty"`
+	EnforceSSHKeyExpiration *bool `url:"enforce_ssh_key_expiration,omitempty" json:"enforce_ssh_key_expiration,omitempty"`
+	EnforceTerms *bool `url:"enforce_terms,omitempty" json:"enforce_terms,omitempty"`
+	ExternalAuthClientCert *string `url:"external_auth_client_cert,omitempty" json:"external_auth_client_cert,omitempty"`
+	ExternalAuthClientKey *string `url:"external_auth_client_key,omitempty" json:"external_auth_client_key,omitempty"`
+	ExternalAuthClientKeyPass *string `url:"external_auth_client_key_pass,omitempty" json:"external_auth_client_key_pass,omitempty"`
+	ExternalAuthorizationServiceDefaultLabel *string `url:"external_authorization_service_default_label,omitempty" json:"external_authorization_service_default_label,omitempty"`
+	ExternalAuthorizationServiceEnabled *bool `url:"external_authorization_service_enabled,omitempty" json:"external_authorization_service_enabled,omitempty"`
+	ExternalAuthorizationServiceTimeout *float64 `url:"external_authorization_service_timeout,omitempty" json:"external_authorization_service_timeout,omitempty"`
+	ExternalAuthorizationServiceURL *string `url:"external_authorization_service_url,omitempty" json:"external_authorization_service_url,omitempty"`
+	ExternalPipelineValidationServiceTimeout *int `url:"external_pipeline_validation_service_timeout,omitempty" json:"external_pipeline_validation_service_timeout,omitempty"`
+	ExternalPipelineValidationServiceToken *string `url:"external_pipeline_validation_service_token,omitempty" json:"external_pipeline_validation_service_token,omitempty"`
+	ExternalPipelineValidationServiceURL *string `url:"external_pipeline_validation_service_url,omitempty" json:"external_pipeline_validation_service_url,omitempty"`
+	FailedLoginAttemptsUnlockPeriodInMinutes *int `url:"failed_login_attempts_unlock_period_in_minutes,omitempty" json:"failed_login_attempts_unlock_period_in_minutes,omitempty"`
+	FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"`
+	FirstDayOfWeek *int `url:"first_day_of_week,omitempty" json:"first_day_of_week,omitempty"`
+	FlocEnabled *bool `url:"floc_enabled,omitempty" json:"floc_enabled,omitempty"`
+	GeoNodeAllowedIPs *string `url:"geo_node_allowed_ips,omitempty" json:"geo_node_allowed_ips,omitempty"`
+	GeoStatusTimeout *int `url:"geo_status_timeout,omitempty" json:"geo_status_timeout,omitempty"`
+	GitRateLimitUsersAlertlist *[]string `url:"git_rate_limit_users_alertlist,omitempty" json:"git_rate_limit_users_alertlist,omitempty"`
+	GitTwoFactorSessionExpiry *int `url:"git_two_factor_session_expiry,omitempty" json:"git_two_factor_session_expiry,omitempty"`
+	GitalyTimeoutDefault *int `url:"gitaly_timeout_default,omitempty" json:"gitaly_timeout_default,omitempty"`
+	GitalyTimeoutFast *int `url:"gitaly_timeout_fast,omitempty" json:"gitaly_timeout_fast,omitempty"`
+	GitalyTimeoutMedium *int `url:"gitaly_timeout_medium,omitempty" json:"gitaly_timeout_medium,omitempty"`
+	GitlabDedicatedInstance *bool `url:"gitlab_dedicated_instance,omitempty" json:"gitlab_dedicated_instance,omitempty"`
+	GitlabEnvironmentToolkitInstance *bool `url:"gitlab_environment_toolkit_instance,omitempty" json:"gitlab_environment_toolkit_instance,omitempty"`
+	GitlabShellOperationLimit *int `url:"gitlab_shell_operation_limit,omitempty" json:"gitlab_shell_operation_limit,omitempty"`
+	GitpodEnabled *bool `url:"gitpod_enabled,omitempty" json:"gitpod_enabled,omitempty"`
+	GitpodURL *string `url:"gitpod_url,omitempty" json:"gitpod_url,omitempty"`
+	GitRateLimitUsersAllowlist *[]string `url:"git_rate_limit_users_allowlist,omitempty" json:"git_rate_limit_users_allowlist,omitempty"`
+	GloballyAllowedIPs *string `url:"globally_allowed_ips,omitempty" json:"globally_allowed_ips,omitempty"`
+	GrafanaEnabled *bool `url:"grafana_enabled,omitempty" json:"grafana_enabled,omitempty"`
+	GrafanaURL *string `url:"grafana_url,omitempty" json:"grafana_url,omitempty"`
+	GravatarEnabled *bool `url:"gravatar_enabled,omitempty" json:"gravatar_enabled,omitempty"`
+	GroupDownloadExportLimit *int `url:"group_download_export_limit,omitempty" json:"group_download_export_limit,omitempty"`
+	GroupExportLimit *int `url:"group_export_limit,omitempty" json:"group_export_limit,omitempty"`
+	GroupImportLimit *int `url:"group_import_limit,omitempty" json:"group_import_limit,omitempty"`
+	GroupOwnersCanManageDefaultBranchProtection *bool `url:"group_owners_can_manage_default_branch_protection,omitempty" json:"group_owners_can_manage_default_branch_protection,omitempty"`
+	GroupRunnerTokenExpirationInterval *int `url:"group_runner_token_expiration_interval,omitempty" json:"group_runner_token_expiration_interval,omitempty"`
+	HTMLEmailsEnabled *bool `url:"html_emails_enabled,omitempty" json:"html_emails_enabled,omitempty"`
+	HashedStorageEnabled *bool `url:"hashed_storage_enabled,omitempty" json:"hashed_storage_enabled,omitempty"`
+	HelpPageDocumentationBaseURL *string `url:"help_page_documentation_base_url,omitempty" json:"help_page_documentation_base_url,omitempty"`
+	HelpPageHideCommercialContent *bool `url:"help_page_hide_commercial_content,omitempty" json:"help_page_hide_commercial_content,omitempty"`
+	HelpPageSupportURL *string `url:"help_page_support_url,omitempty" json:"help_page_support_url,omitempty"`
+	HelpPageText *string `url:"help_page_text,omitempty" json:"help_page_text,omitempty"`
+	HelpText *string `url:"help_text,omitempty" json:"help_text,omitempty"`
+	HideThirdPartyOffers *bool `url:"hide_third_party_offers,omitempty" json:"hide_third_party_offers,omitempty"`
+	HomePageURL *string `url:"home_page_url,omitempty" json:"home_page_url,omitempty"`
+	HousekeepingBitmapsEnabled *bool `url:"housekeeping_bitmaps_enabled,omitempty" json:"housekeeping_bitmaps_enabled,omitempty"`
+	HousekeepingEnabled *bool `url:"housekeeping_enabled,omitempty" json:"housekeeping_enabled,omitempty"`
+	HousekeepingFullRepackPeriod *int `url:"housekeeping_full_repack_period,omitempty" json:"housekeeping_full_repack_period,omitempty"`
+	HousekeepingGcPeriod *int `url:"housekeeping_gc_period,omitempty" json:"housekeeping_gc_period,omitempty"`
+	HousekeepingIncrementalRepackPeriod *int `url:"housekeeping_incremental_repack_period,omitempty" json:"housekeeping_incremental_repack_period,omitempty"`
+	HousekeepingOptimizeRepositoryPeriod *int `url:"housekeeping_optimize_repository_period,omitempty" json:"housekeeping_optimize_repository_period,omitempty"`
+	ImportSources *[]string `url:"import_sources,omitempty" json:"import_sources,omitempty"`
+	InactiveProjectsDeleteAfterMonths *int `url:"inactive_projects_delete_after_months,omitempty" json:"inactive_projects_delete_after_months,omitempty"`
+	InactiveProjectsMinSizeMB *int `url:"inactive_projects_min_size_mb,omitempty" json:"inactive_projects_min_size_mb,omitempty"`
+	InactiveProjectsSendWarningEmailAfterMonths *int `url:"inactive_projects_send_warning_email_after_months,omitempty" json:"inactive_projects_send_warning_email_after_months,omitempty"`
+	IncludeOptionalMetricsInServicePing *bool `url:"include_optional_metrics_in_service_ping,omitempty" json:"include_optional_metrics_in_service_ping,omitempty"`
+	InProductMarketingEmailsEnabled *bool `url:"in_product_marketing_emails_enabled,omitempty" json:"in_product_marketing_emails_enabled,omitempty"`
+	InvisibleCaptchaEnabled *bool `url:"invisible_captcha_enabled,omitempty" json:"invisible_captcha_enabled,omitempty"`
+	IssuesCreateLimit *int `url:"issues_create_limit,omitempty" json:"issues_create_limit,omitempty"`
+	JiraConnectApplicationKey *string `url:"jira_connect_application_key,omitempty" json:"jira_connect_application_key,omitempty"`
+	JiraConnectPublicKeyStorageEnabled *bool `url:"jira_connect_public_key_storage_enabled,omitempty" json:"jira_connect_public_key_storage_enabled,omitempty"`
+	JiraConnectProxyURL *string `url:"jira_connect_proxy_url,omitempty" json:"jira_connect_proxy_url,omitempty"`
+	KeepLatestArtifact *bool `url:"keep_latest_artifact,omitempty" json:"keep_latest_artifact,omitempty"`
+	KrokiEnabled *bool `url:"kroki_enabled,omitempty" json:"kroki_enabled,omitempty"`
+	KrokiFormats *map[string]bool `url:"kroki_formats,omitempty" json:"kroki_formats,omitempty"`
+	KrokiURL *string `url:"kroki_url,omitempty" json:"kroki_url,omitempty"`
+	LocalMarkdownVersion *int `url:"local_markdown_version,omitempty" json:"local_markdown_version,omitempty"`
+	LockDuoFeaturesEnabled *bool `url:"lock_duo_features_enabled,omitempty" json:"lock_duo_features_enabled,omitempty"`
+	LockMembershipsToLDAP *bool `url:"lock_memberships_to_ldap,omitempty" json:"lock_memberships_to_ldap,omitempty"`
+	LoginRecaptchaProtectionEnabled *bool `url:"login_recaptcha_protection_enabled,omitempty" json:"login_recaptcha_protection_enabled,omitempty"`
+	MailgunEventsEnabled *bool `url:"mailgun_events_enabled,omitempty" json:"mailgun_events_enabled,omitempty"`
+	MailgunSigningKey *string `url:"mailgun_signing_key,omitempty" json:"mailgun_signing_key,omitempty"`
+	MaintenanceMode *bool `url:"maintenance_mode,omitempty" json:"maintenance_mode,omitempty"`
+	MaintenanceModeMessage *string `url:"maintenance_mode_message,omitempty" json:"maintenance_mode_message,omitempty"`
+	MavenPackageRequestsForwarding *bool `url:"maven_package_requests_forwarding,omitempty" json:"maven_package_requests_forwarding,omitempty"`
+	MaxArtifactsSize *int `url:"max_artifacts_size,omitempty" json:"max_artifacts_size,omitempty"`
+	MaxAttachmentSize *int `url:"max_attachment_size,omitempty" json:"max_attachment_size,omitempty"`
+	MaxDecompressedArchiveSize *int `url:"max_decompressed_archive_size,omitempty" json:"max_decompressed_archive_size,omitempty"`
+	MaxExportSize *int `url:"max_export_size,omitempty" json:"max_export_size,omitempty"`
+	MaxImportRemoteFileSize *int `url:"max_import_remote_file_size,omitempty" json:"max_import_remote_file_size,omitempty"`
+	MaxImportSize *int `url:"max_import_size,omitempty" json:"max_import_size,omitempty"`
+	MaxLoginAttempts *int `url:"max_login_attempts,omitempty" json:"max_login_attempts,omitempty"`
+	MaxNumberOfRepositoryDownloads *int `url:"max_number_of_repository_downloads,omitempty" json:"max_number_of_repository_downloads,omitempty"`
+	MaxNumberOfRepositoryDownloadsWithinTimePeriod *int `url:"max_number_of_repository_downloads_within_time_period,omitempty" json:"max_number_of_repository_downloads_within_time_period,omitempty"`
+	MaxPagesSize *int `url:"max_pages_size,omitempty" json:"max_pages_size,omitempty"`
+	MaxPersonalAccessTokenLifetime *int `url:"max_personal_access_token_lifetime,omitempty" json:"max_personal_access_token_lifetime,omitempty"`
+	MaxSSHKeyLifetime *int `url:"max_ssh_key_lifetime,omitempty" json:"max_ssh_key_lifetime,omitempty"`
+	MaxTerraformStateSizeBytes *int `url:"max_terraform_state_size_bytes,omitempty" json:"max_terraform_state_size_bytes,omitempty"`
+	MaxYAMLDepth *int `url:"max_yaml_depth,omitempty" json:"max_yaml_depth,omitempty"`
+	MaxYAMLSizeBytes *int `url:"max_yaml_size_bytes,omitempty" json:"max_yaml_size_bytes,omitempty"`
+	MetricsMethodCallThreshold *int `url:"metrics_method_call_threshold,omitempty" json:"metrics_method_call_threshold,omitempty"`
+	MinimumPasswordLength *int `url:"minimum_password_length,omitempty" json:"minimum_password_length,omitempty"`
+	MirrorAvailable *bool `url:"mirror_available,omitempty" json:"mirror_available,omitempty"`
+	MirrorCapacityThreshold *int `url:"mirror_capacity_threshold,omitempty" json:"mirror_capacity_threshold,omitempty"`
+	MirrorMaxCapacity *int `url:"mirror_max_capacity,omitempty" json:"mirror_max_capacity,omitempty"`
+	MirrorMaxDelay *int `url:"mirror_max_delay,omitempty" json:"mirror_max_delay,omitempty"`
+	NPMPackageRequestsForwarding *bool `url:"npm_package_requests_forwarding,omitempty" json:"npm_package_requests_forwarding,omitempty"`
+	NotesCreateLimit *int `url:"notes_create_limit,omitempty" json:"notes_create_limit,omitempty"`
+	NotifyOnUnknownSignIn *bool `url:"notify_on_unknown_sign_in,omitempty" json:"notify_on_unknown_sign_in,omitempty"`
+	NugetSkipMetadataURLValidation *bool `url:"nuget_skip_metadata_url_validation,omitempty" json:"nuget_skip_metadata_url_validation,omitempty"`
+	OutboundLocalRequestsAllowlistRaw *string `url:"outbound_local_requests_allowlist_raw,omitempty" json:"outbound_local_requests_allowlist_raw,omitempty"`
+	OutboundLocalRequestsWhitelist *[]string `url:"outbound_local_requests_whitelist,omitempty" json:"outbound_local_requests_whitelist,omitempty"`
+	PackageMetadataPURLTypes *[]int `url:"package_metadata_purl_types,omitempty" json:"package_metadata_purl_types,omitempty"`
+	PackageRegistryAllowAnyoneToPullOption *bool `url:"package_registry_allow_anyone_to_pull_option,omitempty" json:"package_registry_allow_anyone_to_pull_option,omitempty"`
+	PackageRegistryCleanupPoliciesWorkerCapacity *int `url:"package_registry_cleanup_policies_worker_capacity,omitempty" json:"package_registry_cleanup_policies_worker_capacity,omitempty"`
+	PagesDomainVerificationEnabled *bool `url:"pages_domain_verification_enabled,omitempty" json:"pages_domain_verification_enabled,omitempty"`
+	PasswordAuthenticationEnabledForGit *bool `url:"password_authentication_enabled_for_git,omitempty" json:"password_authentication_enabled_for_git,omitempty"`
+	PasswordAuthenticationEnabledForWeb *bool `url:"password_authentication_enabled_for_web,omitempty" json:"password_authentication_enabled_for_web,omitempty"`
+	PasswordNumberRequired *bool `url:"password_number_required,omitempty" json:"password_number_required,omitempty"`
+	PasswordSymbolRequired *bool `url:"password_symbol_required,omitempty" json:"password_symbol_required,omitempty"`
+	PasswordUppercaseRequired *bool `url:"password_uppercase_required,omitempty" json:"password_uppercase_required,omitempty"`
+	PasswordLowercaseRequired *bool `url:"password_lowercase_required,omitempty" json:"password_lowercase_required,omitempty"`
+	PerformanceBarAllowedGroupID *int `url:"performance_bar_allowed_group_id,omitempty" json:"performance_bar_allowed_group_id,omitempty"`
+	PerformanceBarAllowedGroupPath *string `url:"performance_bar_allowed_group_path,omitempty" json:"performance_bar_allowed_group_path,omitempty"`
+	PerformanceBarEnabled *bool `url:"performance_bar_enabled,omitempty" json:"performance_bar_enabled,omitempty"`
+	PersonalAccessTokenPrefix *string `url:"personal_access_token_prefix,omitempty" json:"personal_access_token_prefix,omitempty"`
+	PlantumlEnabled *bool `url:"plantuml_enabled,omitempty" json:"plantuml_enabled,omitempty"`
+	PlantumlURL *string `url:"plantuml_url,omitempty" json:"plantuml_url,omitempty"`
+	PipelineLimitPerProjectUserSha *int `url:"pipeline_limit_per_project_user_sha,omitempty" json:"pipeline_limit_per_project_user_sha,omitempty"`
+	PollingIntervalMultiplier *float64 `url:"polling_interval_multiplier,omitempty" json:"polling_interval_multiplier,omitempty"`
+	PreventMergeRequestsAuthorApproval *bool `url:"prevent_merge_requests_author_approval,omitempty" json:"prevent_merge_requests_author_approval,omitempty"`
+	PreventMergeRequestsCommittersApproval *bool `url:"prevent_merge_requests_committers_approval,omitempty" json:"prevent_merge_requests_committers_approval,omitempty"`
+	ProjectDownloadExportLimit *int `url:"project_download_export_limit,omitempty" json:"project_download_export_limit,omitempty"`
+	ProjectExportEnabled *bool `url:"project_export_enabled,omitempty" json:"project_export_enabled,omitempty"`
+	ProjectExportLimit *int `url:"project_export_limit,omitempty" json:"project_export_limit,omitempty"`
+	ProjectImportLimit *int `url:"project_import_limit,omitempty" json:"project_import_limit,omitempty"`
+	ProjectJobsAPIRateLimit *int `url:"project_jobs_api_rate_limit,omitempty" json:"project_jobs_api_rate_limit,omitempty"`
+	ProjectRunnerTokenExpirationInterval *int `url:"project_runner_token_expiration_interval,omitempty" json:"project_runner_token_expiration_interval,omitempty"`
+	ProjectsAPIRateLimitUnauthenticated *int `url:"projects_api_rate_limit_unauthenticated,omitempty" json:"projects_api_rate_limit_unauthenticated,omitempty"`
+	PrometheusMetricsEnabled *bool `url:"prometheus_metrics_enabled,omitempty" json:"prometheus_metrics_enabled,omitempty"`
+	ProtectedCIVariables *bool `url:"protected_ci_variables,omitempty" json:"protected_ci_variables,omitempty"`
+	PseudonymizerEnabled *bool `url:"pseudonymizer_enabled,omitempty" json:"pseudonymizer_enabled,omitempty"`
+	PushEventActivitiesLimit *int `url:"push_event_activities_limit,omitempty" json:"push_event_activities_limit,omitempty"`
+	PushEventHooksLimit *int `url:"push_event_hooks_limit,omitempty" json:"push_event_hooks_limit,omitempty"`
+	PyPIPackageRequestsForwarding *bool `url:"pypi_package_requests_forwarding,omitempty" json:"pypi_package_requests_forwarding,omitempty"`
+	RSAKeyRestriction *int `url:"rsa_key_restriction,omitempty" json:"rsa_key_restriction,omitempty"`
+	RateLimitingResponseText *string `url:"rate_limiting_response_text,omitempty" json:"rate_limiting_response_text,omitempty"`
+	RawBlobRequestLimit *int `url:"raw_blob_request_limit,omitempty" json:"raw_blob_request_limit,omitempty"`
+	RecaptchaEnabled *bool `url:"recaptcha_enabled,omitempty" json:"recaptcha_enabled,omitempty"`
+	RecaptchaPrivateKey *string `url:"recaptcha_private_key,omitempty" json:"recaptcha_private_key,omitempty"`
+	RecaptchaSiteKey *string `url:"recaptcha_site_key,omitempty" json:"recaptcha_site_key,omitempty"`
+	ReceiveMaxInputSize *int `url:"receive_max_input_size,omitempty" json:"receive_max_input_size,omitempty"`
+	ReceptiveClusterAgentsEnabled *bool `url:"receptive_cluster_agents_enabled,omitempty" json:"receptive_cluster_agents_enabled,omitempty"`
+	RememberMeEnabled *bool `url:"remember_me_enabled,omitempty" json:"remember_me_enabled,omitempty"`
+	RepositoryChecksEnabled *bool `url:"repository_checks_enabled,omitempty" json:"repository_checks_enabled,omitempty"`
+	RepositorySizeLimit *int `url:"repository_size_limit,omitempty" json:"repository_size_limit,omitempty"`
+	RepositoryStorages *[]string `url:"repository_storages,omitempty" json:"repository_storages,omitempty"`
+	RepositoryStoragesWeighted *map[string]int `url:"repository_storages_weighted,omitempty" json:"repository_storages_weighted,omitempty"`
+	RequireAdminApprovalAfterUserSignup *bool `url:"require_admin_approval_after_user_signup,omitempty" json:"require_admin_approval_after_user_signup,omitempty"`
+	RequireAdminTwoFactorAuthentication *bool `url:"require_admin_two_factor_authentication,omitempty" json:"require_admin_two_factor_authentication,omitempty"`
+	RequirePersonalAccessTokenExpiry *bool `url:"require_personal_access_token_expiry,omitempty" json:"require_personal_access_token_expiry,omitempty"`
+	RequireTwoFactorAuthentication *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"`
+	RestrictedVisibilityLevels *[]VisibilityValue `url:"restricted_visibility_levels,omitempty" json:"restricted_visibility_levels,omitempty"`
+	RunnerTokenExpirationInterval *int `url:"runner_token_expiration_interval,omitempty" json:"runner_token_expiration_interval,omitempty"`
+	SearchRateLimit *int `url:"search_rate_limit,omitempty" json:"search_rate_limit,omitempty"`
+	SearchRateLimitUnauthenticated *int `url:"search_rate_limit_unauthenticated,omitempty" json:"search_rate_limit_unauthenticated,omitempty"`
+	SecretDetectionRevocationTokenTypesURL *string `url:"secret_detection_revocation_token_types_url,omitempty" json:"secret_detection_revocation_token_types_url,omitempty"`
+	SecretDetectionTokenRevocationEnabled *bool `url:"secret_detection_token_revocation_enabled,omitempty" json:"secret_detection_token_revocation_enabled,omitempty"`
+	SecretDetectionTokenRevocationToken *string `url:"secret_detection_token_revocation_token,omitempty" json:"secret_detection_token_revocation_token,omitempty"`
+	SecretDetectionTokenRevocationURL *string `url:"secret_detection_token_revocation_url,omitempty" json:"secret_detection_token_revocation_url,omitempty"`
+	SecurityApprovalPoliciesLimit *int `url:"security_approval_policies_limit,omitempty" json:"security_approval_policies_limit,omitempty"`
+	SecurityPolicyGlobalGroupApproversEnabled *bool `url:"security_policy_global_group_approvers_enabled,omitempty" json:"security_policy_global_group_approvers_enabled,omitempty"`
+	SecurityTXTContent *string `url:"security_txt_content,omitempty" json:"security_txt_content,omitempty"`
+	SendUserConfirmationEmail *bool `url:"send_user_confirmation_email,omitempty" json:"send_user_confirmation_email,omitempty"`
+	SentryClientsideDSN *string `url:"sentry_clientside_dsn,omitempty" json:"sentry_clientside_dsn,omitempty"`
+	SentryDSN *string `url:"sentry_dsn,omitempty" json:"sentry_dsn,omitempty"`
+	SentryEnabled *string `url:"sentry_enabled,omitempty" json:"sentry_enabled,omitempty"`
+	SentryEnvironment *string `url:"sentry_environment,omitempty" json:"sentry_environment,omitempty"`
+	ServiceAccessTokensExpirationEnforced *bool `url:"service_access_tokens_expiration_enforced,omitempty" json:"service_access_tokens_expiration_enforced,omitempty"`
+	SessionExpireDelay *int `url:"session_expire_delay,omitempty" json:"session_expire_delay,omitempty"`
+	SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"`
+	SharedRunnersMinutes *int `url:"shared_runners_minutes,omitempty" json:"shared_runners_minutes,omitempty"`
+	SharedRunnersText *string `url:"shared_runners_text,omitempty" json:"shared_runners_text,omitempty"`
+	SidekiqJobLimiterCompressionThresholdBytes *int `url:"sidekiq_job_limiter_compression_threshold_bytes,omitempty" json:"sidekiq_job_limiter_compression_threshold_bytes,omitempty"`
+	SidekiqJobLimiterLimitBytes *int `url:"sidekiq_job_limiter_limit_bytes,omitempty" json:"sidekiq_job_limiter_limit_bytes,omitempty"`
+	SidekiqJobLimiterMode *string `url:"sidekiq_job_limiter_mode,omitempty" json:"sidekiq_job_limiter_mode,omitempty"`
+	SignInText *string `url:"sign_in_text,omitempty" json:"sign_in_text,omitempty"`
+	SignupEnabled *bool `url:"signup_enabled,omitempty" json:"signup_enabled,omitempty"`
+	SilentAdminExportsEnabled *bool `url:"silent_admin_exports_enabled,omitempty" json:"silent_admin_exports_enabled,omitempty"`
+	SilentModeEnabled *bool `url:"silent_mode_enabled,omitempty" json:"silent_mode_enabled,omitempty"`
+	SlackAppEnabled *bool `url:"slack_app_enabled,omitempty" json:"slack_app_enabled,omitempty"`
+	SlackAppID *string `url:"slack_app_id,omitempty" json:"slack_app_id,omitempty"`
+	SlackAppSecret *string `url:"slack_app_secret,omitempty" json:"slack_app_secret,omitempty"`
+	SlackAppSigningSecret *string `url:"slack_app_signing_secret,omitempty" json:"slack_app_signing_secret,omitempty"`
+	SlackAppVerificationToken *string `url:"slack_app_verification_token,omitempty" json:"slack_app_verification_token,omitempty"`
+	SnippetSizeLimit *int `url:"snippet_size_limit,omitempty" json:"snippet_size_limit,omitempty"`
+	SnowplowAppID *string `url:"snowplow_app_id,omitempty" json:"snowplow_app_id,omitempty"`
+	SnowplowCollectorHostname *string `url:"snowplow_collector_hostname,omitempty" json:"snowplow_collector_hostname,omitempty"`
+	SnowplowCookieDomain *string `url:"snowplow_cookie_domain,omitempty" json:"snowplow_cookie_domain,omitempty"`
+	SnowplowDatabaseCollectorHostname *string `url:"snowplow_database_collector_hostname,omitempty" json:"snowplow_database_collector_hostname,omitempty"`
+	SnowplowEnabled *bool `url:"snowplow_enabled,omitempty" json:"snowplow_enabled,omitempty"`
+	SourcegraphEnabled *bool `url:"sourcegraph_enabled,omitempty" json:"sourcegraph_enabled,omitempty"`
+	SourcegraphPublicOnly *bool `url:"sourcegraph_public_only,omitempty" json:"sourcegraph_public_only,omitempty"`
+	SourcegraphURL *string `url:"sourcegraph_url,omitempty" json:"sourcegraph_url,omitempty"`
+	SpamCheckAPIKey *string `url:"spam_check_api_key,omitempty" json:"spam_check_api_key,omitempty"`
+	SpamCheckEndpointEnabled *bool `url:"spam_check_endpoint_enabled,omitempty" json:"spam_check_endpoint_enabled,omitempty"`
+	SpamCheckEndpointURL *string `url:"spam_check_endpoint_url,omitempty" json:"spam_check_endpoint_url,omitempty"`
+	StaticObjectsExternalStorageAuthToken *string `url:"static_objects_external_storage_auth_token,omitempty" json:"static_objects_external_storage_auth_token,omitempty"`
+	StaticObjectsExternalStorageURL *string `url:"static_objects_external_storage_url,omitempty" json:"static_objects_external_storage_url,omitempty"`
+	SuggestPipelineEnabled *bool `url:"suggest_pipeline_enabled,omitempty" json:"suggest_pipeline_enabled,omitempty"`
+	TerminalMaxSessionTime *int `url:"terminal_max_session_time,omitempty" json:"terminal_max_session_time,omitempty"`
+	Terms *string `url:"terms,omitempty" json:"terms,omitempty"`
+	ThrottleAuthenticatedAPIEnabled *bool `url:"throttle_authenticated_api_enabled,omitempty" json:"throttle_authenticated_api_enabled,omitempty"`
+	ThrottleAuthenticatedAPIPeriodInSeconds *int `url:"throttle_authenticated_api_period_in_seconds,omitempty" json:"throttle_authenticated_api_period_in_seconds,omitempty"`
+	ThrottleAuthenticatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_api_requests_per_period,omitempty" json:"throttle_authenticated_api_requests_per_period,omitempty"`
+	ThrottleAuthenticatedDeprecatedAPIEnabled *bool `url:"throttle_authenticated_deprecated_api_enabled,omitempty" json:"throttle_authenticated_deprecated_api_enabled,omitempty"`
+	ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty"`
+	ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_authenticated_deprecated_api_requests_per_period,omitempty"`
+	ThrottleAuthenticatedFilesAPIEnabled *bool `url:"throttle_authenticated_files_api_enabled,omitempty" json:"throttle_authenticated_files_api_enabled,omitempty"`
+	ThrottleAuthenticatedFilesAPIPeriodInSeconds *int `url:"throttle_authenticated_files_api_period_in_seconds,omitempty" json:"throttle_authenticated_files_api_period_in_seconds,omitempty"`
+	ThrottleAuthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_authenticated_files_api_requests_per_period,omitempty" json:"throttle_authenticated_files_api_requests_per_period,omitempty"`
+	ThrottleAuthenticatedGitLFSEnabled *bool `url:"throttle_authenticated_git_lfs_enabled,omitempty" json:"throttle_authenticated_git_lfs_enabled,omitempty"`
+	ThrottleAuthenticatedGitLFSPeriodInSeconds *int `url:"throttle_authenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_authenticated_git_lfs_period_in_seconds,omitempty"`
+	ThrottleAuthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_authenticated_git_lfs_requests_per_period,omitempty" json:"throttle_authenticated_git_lfs_requests_per_period,omitempty"`
+	ThrottleAuthenticatedPackagesAPIEnabled *bool `url:"throttle_authenticated_packages_api_enabled,omitempty" json:"throttle_authenticated_packages_api_enabled,omitempty"`
+	ThrottleAuthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_authenticated_packages_api_period_in_seconds,omitempty" json:"throttle_authenticated_packages_api_period_in_seconds,omitempty"`
ThrottleAuthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_authenticated_packages_api_requests_per_period,omitempty" json:"throttle_authenticated_packages_api_requests_per_period,omitempty"` + ThrottleAuthenticatedWebEnabled *bool `url:"throttle_authenticated_web_enabled,omitempty" json:"throttle_authenticated_web_enabled,omitempty"` + ThrottleAuthenticatedWebPeriodInSeconds *int `url:"throttle_authenticated_web_period_in_seconds,omitempty" json:"throttle_authenticated_web_period_in_seconds,omitempty"` + ThrottleAuthenticatedWebRequestsPerPeriod *int `url:"throttle_authenticated_web_requests_per_period,omitempty" json:"throttle_authenticated_web_requests_per_period,omitempty"` + ThrottleIncidentManagementNotificationEnabled *bool `url:"throttle_incident_management_notification_enabled,omitempty" json:"throttle_incident_management_notification_enabled,omitempty"` + ThrottleIncidentManagementNotificationPerPeriod *int `url:"throttle_incident_management_notification_per_period,omitempty" json:"throttle_incident_management_notification_per_period,omitempty"` + ThrottleIncidentManagementNotificationPeriodInSeconds *int `url:"throttle_incident_management_notification_period_in_seconds,omitempty" json:"throttle_incident_management_notification_period_in_seconds,omitempty"` + ThrottleProtectedPathsEnabled *bool `url:"throttle_protected_paths_enabled_enabled,omitempty" json:"throttle_protected_paths_enabled,omitempty"` + ThrottleProtectedPathsPeriodInSeconds *int `url:"throttle_protected_paths_enabled_period_in_seconds,omitempty" json:"throttle_protected_paths_period_in_seconds,omitempty"` + ThrottleProtectedPathsRequestsPerPeriod *int `url:"throttle_protected_paths_enabled_requests_per_period,omitempty" json:"throttle_protected_paths_per_period,omitempty"` + ThrottleUnauthenticatedAPIEnabled *bool `url:"throttle_unauthenticated_api_enabled,omitempty" json:"throttle_unauthenticated_api_enabled,omitempty"` + ThrottleUnauthenticatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_api_requests_per_period,omitempty"` + ThrottleUnauthenticatedDeprecatedAPIEnabled *bool `url:"throttle_unauthenticated_deprecated_api_enabled,omitempty" json:"throttle_unauthenticated_deprecated_api_enabled,omitempty"` + ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty"` + ThrottleUnauthenticatedEnabled *bool `url:"throttle_unauthenticated_enabled,omitempty" json:"throttle_unauthenticated_enabled,omitempty"` + ThrottleUnauthenticatedFilesAPIEnabled *bool `url:"throttle_unauthenticated_files_api_enabled,omitempty" json:"throttle_unauthenticated_files_api_enabled,omitempty"` + ThrottleUnauthenticatedFilesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_files_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_files_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_files_api_requests_per_period,omitempty" 
json:"throttle_unauthenticated_files_api_requests_per_period,omitempty"` + ThrottleUnauthenticatedGitLFSEnabled *bool `url:"throttle_unauthenticated_git_lfs_enabled,omitempty" json:"throttle_unauthenticated_git_lfs_enabled,omitempty"` + ThrottleUnauthenticatedGitLFSPeriodInSeconds *int `url:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty"` + ThrottleUnauthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty" json:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty"` + ThrottleUnauthenticatedPackagesAPIEnabled *bool `url:"throttle_unauthenticated_packages_api_enabled,omitempty" json:"throttle_unauthenticated_packages_api_enabled,omitempty"` + ThrottleUnauthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_packages_api_requests_per_period,omitempty" json:"throttle_unauthenticated_packages_api_requests_per_period,omitempty"` + ThrottleUnauthenticatedPeriodInSeconds *int `url:"throttle_unauthenticated_period_in_seconds,omitempty" json:"throttle_unauthenticated_period_in_seconds,omitempty"` + ThrottleUnauthenticatedRequestsPerPeriod *int `url:"throttle_unauthenticated_requests_per_period,omitempty" json:"throttle_unauthenticated_requests_per_period,omitempty"` + ThrottleUnauthenticatedWebEnabled *bool `url:"throttle_unauthenticated_web_enabled,omitempty" json:"throttle_unauthenticated_web_enabled,omitempty"` + ThrottleUnauthenticatedWebPeriodInSeconds *int `url:"throttle_unauthenticated_web_period_in_seconds,omitempty" json:"throttle_unauthenticated_web_period_in_seconds,omitempty"` + ThrottleUnauthenticatedWebRequestsPerPeriod *int `url:"throttle_unauthenticated_web_requests_per_period,omitempty" json:"throttle_unauthenticated_web_requests_per_period,omitempty"` + TimeTrackingLimitToHours *bool `url:"time_tracking_limit_to_hours,omitempty" json:"time_tracking_limit_to_hours,omitempty"` + TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` + UnconfirmedUsersDeleteAfterDays *int `url:"unconfirmed_users_delete_after_days,omitempty" json:"unconfirmed_users_delete_after_days,omitempty"` + UniqueIPsLimitEnabled *bool `url:"unique_ips_limit_enabled,omitempty" json:"unique_ips_limit_enabled,omitempty"` + UniqueIPsLimitPerUser *int `url:"unique_ips_limit_per_user,omitempty" json:"unique_ips_limit_per_user,omitempty"` + UniqueIPsLimitTimeWindow *int `url:"unique_ips_limit_time_window,omitempty" json:"unique_ips_limit_time_window,omitempty"` + UpdateRunnerVersionsEnabled *bool `url:"update_runner_versions_enabled,omitempty" json:"update_runner_versions_enabled,omitempty"` + UpdatingNameDisabledForUsers *bool `url:"updating_name_disabled_for_users,omitempty" json:"updating_name_disabled_for_users,omitempty"` + UsagePingEnabled *bool `url:"usage_ping_enabled,omitempty" json:"usage_ping_enabled,omitempty"` + UsagePingFeaturesEnabled *bool `url:"usage_ping_features_enabled,omitempty" json:"usage_ping_features_enabled,omitempty"` + UseClickhouseForAnalytics *bool `url:"use_clickhouse_for_analytics,omitempty" json:"use_clickhouse_for_analytics,omitempty"` + UserDeactivationEmailsEnabled *bool `url:"user_deactivation_emails_enabled,omitempty" 
json:"user_deactivation_emails_enabled,omitempty"` + UserDefaultExternal *bool `url:"user_default_external,omitempty" json:"user_default_external,omitempty"` + UserDefaultInternalRegex *string `url:"user_default_internal_regex,omitempty" json:"user_default_internal_regex,omitempty"` + UserDefaultsToPrivateProfile *bool `url:"user_defaults_to_private_profile,omitempty" json:"user_defaults_to_private_profile,omitempty"` + UserEmailLookupLimit *int `url:"user_email_lookup_limit,omitempty" json:"user_email_lookup_limit,omitempty"` + UserOauthApplications *bool `url:"user_oauth_applications,omitempty" json:"user_oauth_applications,omitempty"` + UserShowAddSSHKeyMessage *bool `url:"user_show_add_ssh_key_message,omitempty" json:"user_show_add_ssh_key_message,omitempty"` + UsersGetByIDLimit *int `url:"users_get_by_id_limit,omitempty" json:"users_get_by_id_limit,omitempty"` + UsersGetByIDLimitAllowlistRaw *string `url:"users_get_by_id_limit_allowlist_raw,omitempty" json:"users_get_by_id_limit_allowlist_raw,omitempty"` + ValidRunnerRegistrars *[]string `url:"valid_runner_registrars,omitempty" json:"valid_runner_registrars,omitempty"` + VersionCheckEnabled *bool `url:"version_check_enabled,omitempty" json:"version_check_enabled,omitempty"` + WebIDEClientsidePreviewEnabled *bool `url:"web_ide_clientside_preview_enabled,omitempty" json:"web_ide_clientside_preview_enabled,omitempty"` + WhatsNewVariant *string `url:"whats_new_variant,omitempty" json:"whats_new_variant,omitempty"` + WikiPageMaxContentBytes *int `url:"wiki_page_max_content_bytes,omitempty" json:"wiki_page_max_content_bytes,omitempty"` +} + +// BranchProtectionDefaultsOptions represents default Git protected branch permissions options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults +type BranchProtectionDefaultsOptions struct { + AllowedToPush *[]int `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"` + AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"` + AllowedToMerge *[]int `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"` + DeveloperCanInitialPush *bool `url:"developer_can_initial_push,omitempty" json:"developer_can_initial_push,omitempty"` } // UpdateSettings updates the application settings. 
diff --git a/vendor/github.com/xanzy/go-gitlab/snippets.go b/vendor/github.com/xanzy/go-gitlab/snippets.go index c25548b123..3cb482773c 100644 --- a/vendor/github.com/xanzy/go-gitlab/snippets.go +++ b/vendor/github.com/xanzy/go-gitlab/snippets.go @@ -20,7 +20,6 @@ import ( "bytes" "fmt" "net/http" - "net/url" "time" ) @@ -51,6 +50,7 @@ type Snippet struct { } `json:"author"` UpdatedAt *time.Time `json:"updated_at"` CreatedAt *time.Time `json:"created_at"` + ProjectID int `json:"project_id"` WebURL string `json:"web_url"` RawURL string `json:"raw_url"` Files []struct { @@ -136,7 +136,7 @@ func (s *SnippetsService) SnippetContent(snippet int, options ...RequestOptionFu // GitLab API docs: // https://docs.gitlab.com/ee/api/snippets.html#snippet-repository-file-content func (s *SnippetsService) SnippetFileContent(snippet int, ref, filename string, options ...RequestOptionFunc) ([]byte, *Response, error) { - filepath := url.QueryEscape(filename) + filepath := PathEscape(filename) u := fmt.Sprintf("snippets/%d/files/%s/%s/raw", snippet, ref, filepath) req, err := s.client.NewRequest(http.MethodGet, u, nil, options) diff --git a/vendor/github.com/xanzy/go-gitlab/todos.go b/vendor/github.com/xanzy/go-gitlab/todos.go index 7ea26d01b7..2e26b70779 100644 --- a/vendor/github.com/xanzy/go-gitlab/todos.go +++ b/vendor/github.com/xanzy/go-gitlab/todos.go @@ -58,8 +58,8 @@ type TodoTarget struct { CreatedAt *time.Time `json:"created_at"` Description string `json:"description"` Downvotes int `json:"downvotes"` - ID int `json:"id"` - IID interface{} `json:"iid"` + ID interface{} `json:"id"` + IID int `json:"iid"` Labels []string `json:"labels"` Milestone *Milestone `json:"milestone"` ProjectID int `json:"project_id"` @@ -82,6 +82,7 @@ type TodoTarget struct { Weight int `json:"weight"` // Only available for type MergeRequest + MergedAt *time.Time `json:"merged_at"` ApprovalsBeforeMerge int `json:"approvals_before_merge"` ForceRemoveSourceBranch bool `json:"force_remove_source_branch"` MergeCommitSHA string `json:"merge_commit_sha"` diff --git a/vendor/github.com/xanzy/go-gitlab/types.go b/vendor/github.com/xanzy/go-gitlab/types.go index e0dc755c2f..9ce13d735c 100644 --- a/vendor/github.com/xanzy/go-gitlab/types.go +++ b/vendor/github.com/xanzy/go-gitlab/types.go @@ -28,7 +28,7 @@ import ( "time" ) -// Ptr is a helper returns a pointer to v. +// Ptr is a helper that returns a pointer to v. func Ptr[T any](v T) *T { return &v } @@ -62,7 +62,7 @@ func AccessControl(v AccessControlValue) *AccessControlValue { // GitLab API docs: https://docs.gitlab.com/ee/user/permissions.html type AccessLevelValue int -// List of available access levels +// List of available access levels. // // GitLab API docs: https://docs.gitlab.com/ee/user/permissions.html const ( @@ -113,7 +113,7 @@ func ApproverIDs(v interface{}) *ApproverIDsValue { } } -// EncodeValues implements the query.Encoder interface +// EncodeValues implements the query.Encoder interface. func (a *ApproverIDsValue) EncodeValues(key string, v *url.Values) error { switch value := a.value.(type) { case UserIDValue: @@ -128,12 +128,12 @@ func (a *ApproverIDsValue) EncodeValues(key string, v *url.Values) error { return nil } -// MarshalJSON implements the json.Marshaler interface +// MarshalJSON implements the json.Marshaler interface. func (a ApproverIDsValue) MarshalJSON() ([]byte, error) { return json.Marshal(a.value) } -// UnmarshalJSON implements the json.Unmarshaler interface +// UnmarshalJSON implements the json.Unmarshaler interface. 
func (a *ApproverIDsValue) UnmarshalJSON(bytes []byte) error { return json.Unmarshal(bytes, a.value) } @@ -153,7 +153,7 @@ func AssigneeID(v interface{}) *AssigneeIDValue { } } -// EncodeValues implements the query.Encoder interface +// EncodeValues implements the query.Encoder interface. func (a *AssigneeIDValue) EncodeValues(key string, v *url.Values) error { switch value := a.value.(type) { case UserIDValue: @@ -164,12 +164,12 @@ func (a *AssigneeIDValue) EncodeValues(key string, v *url.Values) error { return nil } -// MarshalJSON implements the json.Marshaler interface +// MarshalJSON implements the json.Marshaler interface. func (a AssigneeIDValue) MarshalJSON() ([]byte, error) { return json.Marshal(a.value) } -// UnmarshalJSON implements the json.Unmarshaler interface +// UnmarshalJSON implements the json.Unmarshaler interface. func (a *AssigneeIDValue) UnmarshalJSON(bytes []byte) error { return json.Unmarshal(bytes, a.value) } @@ -189,7 +189,7 @@ func ReviewerID(v interface{}) *ReviewerIDValue { } } -// EncodeValues implements the query.Encoder interface +// EncodeValues implements the query.Encoder interface. func (a *ReviewerIDValue) EncodeValues(key string, v *url.Values) error { switch value := a.value.(type) { case UserIDValue: @@ -200,12 +200,12 @@ func (a *ReviewerIDValue) EncodeValues(key string, v *url.Values) error { return nil } -// MarshalJSON implements the json.Marshaler interface +// MarshalJSON implements the json.Marshaler interface. func (a ReviewerIDValue) MarshalJSON() ([]byte, error) { return json.Marshal(a.value) } -// UnmarshalJSON implements the json.Unmarshaler interface +// UnmarshalJSON implements the json.Unmarshaler interface. func (a *ReviewerIDValue) UnmarshalJSON(bytes []byte) error { return json.Unmarshal(bytes, a.value) } @@ -256,6 +256,42 @@ func BuildState(v BuildStateValue) *BuildStateValue { return Ptr(v) } +// CommentEventAction identifies if a comment has been newly created or updated. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#comment-events +type CommentEventAction string + +const ( + CommentEventActionCreate CommentEventAction = "create" + CommentEventActionUpdate CommentEventAction = "update" +) + +// ContainerRegistryStatus represents the status of a Container Registry. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/container_registry.html#list-registry-repositories +type ContainerRegistryStatus string + +// ContainerRegistryStatus represents all valid statuses of a Container Registry. +// +// Undocumented, see code at: +// https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/models/container_repository.rb?ref_type=heads#L35 +const ( + ContainerRegistryStatusDeleteScheduled ContainerRegistryStatus = "delete_scheduled" + ContainerRegistryStatusDeleteFailed ContainerRegistryStatus = "delete_failed" + ContainerRegistryStatusDeleteOngoing ContainerRegistryStatus = "delete_ongoing" +) + +// DeploymentApprovalStatus represents a Gitlab deployment approval status. +type DeploymentApprovalStatus string + +// These constants represent all valid deployment approval statuses. +const ( + DeploymentApprovalStatusApproved DeploymentApprovalStatus = "approved" + DeploymentApprovalStatusRejected DeploymentApprovalStatus = "rejected" +) + // DeploymentStatusValue represents a Gitlab deployment status. 
type DeploymentStatusValue string @@ -276,12 +312,43 @@ func DeploymentStatus(v DeploymentStatusValue) *DeploymentStatusValue { return Ptr(v) } -// EventTypeValue represents actions type for contribution events +// DORAMetricType represents all valid DORA metrics types. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html +type DORAMetricType string + +// List of available DORA metric type names. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html +const ( + DORAMetricDeploymentFrequency DORAMetricType = "deployment_frequency" + DORAMetricLeadTimeForChanges DORAMetricType = "lead_time_for_changes" + DORAMetricTimeToRestoreService DORAMetricType = "time_to_restore_service" + DORAMetricChangeFailureRate DORAMetricType = "change_failure_rate" +) + +// DORAMetricInterval represents the time period over which the +// metrics are aggregated. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html +type DORAMetricInterval string + +// List of available DORA metric interval types. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html +const ( + DORAMetricIntervalDaily DORAMetricInterval = "daily" + DORAMetricIntervalMonthly DORAMetricInterval = "monthly" + DORAMetricIntervalAll DORAMetricInterval = "all" +) + +// EventTypeValue represents actions type for contribution events. type EventTypeValue string -// List of available action type +// List of available action type. // -// GitLab API docs: https://docs.gitlab.com/ee/user/profile/contributions_calendar.html#user-contribution-events +// GitLab API docs: +// https://docs.gitlab.com/ee/user/profile/contributions_calendar.html#user-contribution-events const ( CreatedEventType EventTypeValue = "created" UpdatedEventType EventTypeValue = "updated" @@ -296,10 +363,10 @@ const ( ExpiredEventType EventTypeValue = "expired" ) -// EventTargetTypeValue represents actions type value for contribution events +// EventTargetTypeValue represents actions type value for contribution events. type EventTargetTypeValue string -// List of available action type +// List of available action type. // // GitLab API docs: https://docs.gitlab.com/ee/api/events.html#target-types const ( @@ -314,7 +381,8 @@ const ( // FileActionValue represents the available actions that can be performed on a file. // -// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#create-a-commit-with-multiple-files-and-actions +// GitLab API docs: +// https://docs.gitlab.com/ee/api/commits.html#create-a-commit-with-multiple-files-and-actions type FileActionValue string // The available file actions. @@ -370,7 +438,7 @@ func GenericPackageStatus(v GenericPackageStatusValue) *GenericPackageStatusValu // ISOTime represents an ISO 8601 formatted date. type ISOTime time.Time -// ISO 8601 date format +// ISO 8601 date format. const iso8601 = "2006-01-02" // ParseISOTime parses an ISO 8601 formatted date. @@ -448,7 +516,7 @@ func (l *LabelOptions) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, (*alias)(l)) } -// EncodeValues implements the query.EncodeValues interface +// EncodeValues implements the query.EncodeValues interface. func (l *LabelOptions) EncodeValues(key string, v *url.Values) error { v.Set(key, strings.Join(*l, ",")) return nil @@ -629,6 +697,46 @@ func ProjectCreationLevel(v ProjectCreationLevelValue) *ProjectCreationLevelValu return Ptr(v) } +// ProjectHookEvent represents a project hook event. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#hook-events +type ProjectHookEvent string + +// List of available project hook events. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#hook-events +const ( + ProjectHookEventPush ProjectHookEvent = "push_events" + ProjectHookEventTagPush ProjectHookEvent = "tag_push_events" + ProjectHookEventIssues ProjectHookEvent = "issues_events" + ProjectHookEventConfidentialIssues ProjectHookEvent = "confidential_issues_events" + ProjectHookEventNote ProjectHookEvent = "note_events" + ProjectHookEventMergeRequests ProjectHookEvent = "merge_requests_events" + ProjectHookEventJob ProjectHookEvent = "job_events" + ProjectHookEventPipeline ProjectHookEvent = "pipeline_events" + ProjectHookEventWiki ProjectHookEvent = "wiki_page_events" + ProjectHookEventReleases ProjectHookEvent = "releases_events" + ProjectHookEventEmoji ProjectHookEvent = "emoji_events" + ProjectHookEventResourceAccessToken ProjectHookEvent = "resource_access_token_events" +) + +// ResourceGroupProcessMode represents a process mode for a resource group +// within a GitLab project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/ci/resource_groups/index.html#process-modes +type ResourceGroupProcessMode string + +// List of available resource group process modes. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/ci/resource_groups/index.html#process-modes +const ( + Unordered ResourceGroupProcessMode = "unordered" + OldestFirst ResourceGroupProcessMode = "oldest_first" + NewestFirst ResourceGroupProcessMode = "newest_first" +) + // SharedRunnersSettingValue determines whether shared runners are enabled for a // group’s subgroups and projects. // @@ -645,7 +753,8 @@ const ( DisabledAndOverridableSharedRunnersSettingValue SharedRunnersSettingValue = "disabled_and_overridable" DisabledAndUnoverridableSharedRunnersSettingValue SharedRunnersSettingValue = "disabled_and_unoverridable" - // Deprecated: DisabledWithOverrideSharedRunnersSettingValue is deprecated in favor of DisabledAndOverridableSharedRunnersSettingValue + // Deprecated: DisabledWithOverrideSharedRunnersSettingValue is deprecated + // in favor of DisabledAndOverridableSharedRunnersSettingValue. DisabledWithOverrideSharedRunnersSettingValue SharedRunnersSettingValue = "disabled_with_override" ) diff --git a/vendor/github.com/xanzy/go-gitlab/users.go b/vendor/github.com/xanzy/go-gitlab/users.go index 5ad87ca00e..f463952ac8 100644 --- a/vendor/github.com/xanzy/go-gitlab/users.go +++ b/vendor/github.com/xanzy/go-gitlab/users.go @@ -17,11 +17,15 @@ package gitlab import ( + "encoding/json" "errors" "fmt" + "io" "net" "net/http" "time" + + "github.com/hashicorp/go-retryablehttp" ) // List a couple of standard errors. @@ -84,6 +88,7 @@ type User struct { LastActivityOn *ISOTime `json:"last_activity_on"` ColorSchemeID int `json:"color_scheme_id"` IsAdmin bool `json:"is_admin"` + IsAuditor bool `json:"is_auditor"` AvatarURL string `json:"avatar_url"` CanCreateGroup bool `json:"can_create_group"` CanCreateProject bool `json:"can_create_project"` @@ -112,6 +117,23 @@ type UserIdentity struct { ExternUID string `json:"extern_uid"` } +// UserAvatar represents a GitLab user avatar. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html +type UserAvatar struct { + Filename string + Image io.Reader +} + +// MarshalJSON implements the json.Marshaler interface. 
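+// An empty UserAvatar (no Filename and no Image) deliberately marshals to the
+// empty JSON string ""; the alias type below avoids recursing into this method.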
+func (a *UserAvatar) MarshalJSON() ([]byte, error) { + if a.Filename == "" && a.Image == nil { + return []byte(`""`), nil + } + type alias UserAvatar + return json.Marshal((*alias)(a)) +} + // ListUsersOptions represents the available ListUsers() options. // // GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-users @@ -188,37 +210,53 @@ func (s *UsersService) GetUser(user int, opt GetUsersOptions, options ...Request // // GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-creation type CreateUserOptions struct { - Email *string `url:"email,omitempty" json:"email,omitempty"` - Password *string `url:"password,omitempty" json:"password,omitempty"` - ResetPassword *bool `url:"reset_password,omitempty" json:"reset_password,omitempty"` - ForceRandomPassword *bool `url:"force_random_password,omitempty" json:"force_random_password,omitempty"` - Username *string `url:"username,omitempty" json:"username,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - Skype *string `url:"skype,omitempty" json:"skype,omitempty"` - Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"` - Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"` - WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"` - Organization *string `url:"organization,omitempty" json:"organization,omitempty"` - JobTitle *string `url:"job_title,omitempty" json:"job_title,omitempty"` - ProjectsLimit *int `url:"projects_limit,omitempty" json:"projects_limit,omitempty"` - ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` - Provider *string `url:"provider,omitempty" json:"provider,omitempty"` - Bio *string `url:"bio,omitempty" json:"bio,omitempty"` - Location *string `url:"location,omitempty" json:"location,omitempty"` - Admin *bool `url:"admin,omitempty" json:"admin,omitempty"` - CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` - SkipConfirmation *bool `url:"skip_confirmation,omitempty" json:"skip_confirmation,omitempty"` - External *bool `url:"external,omitempty" json:"external,omitempty"` - PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"` - Note *string `url:"note,omitempty" json:"note,omitempty"` - ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"` + Admin *bool `url:"admin,omitempty" json:"admin,omitempty"` + Avatar *UserAvatar `url:"-" json:"-"` + Bio *string `url:"bio,omitempty" json:"bio,omitempty"` + CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` + Email *string `url:"email,omitempty" json:"email,omitempty"` + External *bool `url:"external,omitempty" json:"external,omitempty"` + ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` + ForceRandomPassword *bool `url:"force_random_password,omitempty" json:"force_random_password,omitempty"` + JobTitle *string `url:"job_title,omitempty" json:"job_title,omitempty"` + Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"` + Location *string `url:"location,omitempty" json:"location,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + Note *string `url:"note,omitempty" json:"note,omitempty"` + Organization *string `url:"organization,omitempty" json:"organization,omitempty"` + Password *string `url:"password,omitempty" json:"password,omitempty"` + PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"` + ProjectsLimit *int `url:"projects_limit,omitempty" 
json:"projects_limit,omitempty"` + Provider *string `url:"provider,omitempty" json:"provider,omitempty"` + ResetPassword *bool `url:"reset_password,omitempty" json:"reset_password,omitempty"` + SkipConfirmation *bool `url:"skip_confirmation,omitempty" json:"skip_confirmation,omitempty"` + Skype *string `url:"skype,omitempty" json:"skype,omitempty"` + ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"` + Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"` } // CreateUser creates a new user. Note only administrators can create new users. // // GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-creation func (s *UsersService) CreateUser(opt *CreateUserOptions, options ...RequestOptionFunc) (*User, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "users", opt, options) + var err error + var req *retryablehttp.Request + + if opt.Avatar == nil { + req, err = s.client.NewRequest(http.MethodPost, "users", opt, options) + } else { + req, err = s.client.UploadRequest( + http.MethodPost, + "users", + opt.Avatar.Image, + opt.Avatar.Filename, + UploadAvatar, + opt, + options, + ) + } if err != nil { return nil, nil, err } @@ -236,30 +274,31 @@ func (s *UsersService) CreateUser(opt *CreateUserOptions, options ...RequestOpti // // GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-modification type ModifyUserOptions struct { - Email *string `url:"email,omitempty" json:"email,omitempty"` - Password *string `url:"password,omitempty" json:"password,omitempty"` - Username *string `url:"username,omitempty" json:"username,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - Skype *string `url:"skype,omitempty" json:"skype,omitempty"` - Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"` - Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"` - WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"` - Organization *string `url:"organization,omitempty" json:"organization,omitempty"` - JobTitle *string `url:"job_title,omitempty" json:"job_title,omitempty"` - ProjectsLimit *int `url:"projects_limit,omitempty" json:"projects_limit,omitempty"` - ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` - Provider *string `url:"provider,omitempty" json:"provider,omitempty"` - Bio *string `url:"bio,omitempty" json:"bio,omitempty"` - Location *string `url:"location,omitempty" json:"location,omitempty"` - Admin *bool `url:"admin,omitempty" json:"admin,omitempty"` - CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` - SkipReconfirmation *bool `url:"skip_reconfirmation,omitempty" json:"skip_reconfirmation,omitempty"` - External *bool `url:"external,omitempty" json:"external,omitempty"` - PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"` - Note *string `url:"note,omitempty" json:"note,omitempty"` - ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"` - PublicEmail *string `url:"public_email,omitempty" json:"public_email,omitempty"` - CommitEmail *string `url:"commit_email,omitempty" json:"commit_email,omitempty"` + Admin *bool `url:"admin,omitempty" json:"admin,omitempty"` + Avatar *UserAvatar `url:"-" json:"avatar,omitempty"` + Bio *string `url:"bio,omitempty" json:"bio,omitempty"` + CanCreateGroup *bool 
`url:"can_create_group,omitempty" json:"can_create_group,omitempty"` + CommitEmail *string `url:"commit_email,omitempty" json:"commit_email,omitempty"` + Email *string `url:"email,omitempty" json:"email,omitempty"` + External *bool `url:"external,omitempty" json:"external,omitempty"` + ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` + JobTitle *string `url:"job_title,omitempty" json:"job_title,omitempty"` + Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"` + Location *string `url:"location,omitempty" json:"location,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + Note *string `url:"note,omitempty" json:"note,omitempty"` + Organization *string `url:"organization,omitempty" json:"organization,omitempty"` + Password *string `url:"password,omitempty" json:"password,omitempty"` + PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"` + ProjectsLimit *int `url:"projects_limit,omitempty" json:"projects_limit,omitempty"` + Provider *string `url:"provider,omitempty" json:"provider,omitempty"` + PublicEmail *string `url:"public_email,omitempty" json:"public_email,omitempty"` + SkipReconfirmation *bool `url:"skip_reconfirmation,omitempty" json:"skip_reconfirmation,omitempty"` + Skype *string `url:"skype,omitempty" json:"skype,omitempty"` + ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"` + Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"` } // ModifyUser modifies an existing user. Only administrators can change attributes @@ -267,9 +306,23 @@ type ModifyUserOptions struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-modification func (s *UsersService) ModifyUser(user int, opt *ModifyUserOptions, options ...RequestOptionFunc) (*User, *Response, error) { + var err error + var req *retryablehttp.Request u := fmt.Sprintf("users/%d", user) - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if opt.Avatar == nil || (opt.Avatar.Filename == "" && opt.Avatar.Image == nil) { + req, err = s.client.NewRequest(http.MethodPut, u, opt, options) + } else { + req, err = s.client.UploadRequest( + http.MethodPut, + u, + opt.Avatar.Image, + opt.Avatar.Filename, + UploadAvatar, + opt, + options, + ) + } if err != nil { return nil, nil, err } @@ -1507,3 +1560,32 @@ func (s *UsersService) CreateServiceAccountUser(options ...RequestOptionFunc) (* return usr, resp, nil } + +// UploadAvatar uploads an avatar to the current user. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#upload-a-current-user-avatar +func (s *UsersService) UploadAvatar(avatar io.Reader, filename string, options ...RequestOptionFunc) (*User, *Response, error) { + u := "user/avatar" + + req, err := s.client.UploadRequest( + http.MethodPut, + u, + avatar, + filename, + UploadAvatar, + nil, + options, + ) + if err != nil { + return nil, nil, err + } + + usr := new(User) + resp, err := s.client.Do(req, usr) + if err != nil { + return nil, resp, err + } + + return usr, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/validate.go b/vendor/github.com/xanzy/go-gitlab/validate.go index 1a0f56a148..cb79ac8380 100644 --- a/vendor/github.com/xanzy/go-gitlab/validate.go +++ b/vendor/github.com/xanzy/go-gitlab/validate.go @@ -121,6 +121,8 @@ func (s *ValidateService) ProjectNamespaceLint(pid interface{}, opt *ProjectName // GitLab API docs: // https://docs.gitlab.com/ee/api/lint.html#validate-a-projects-ci-configuration type ProjectLintOptions struct { + ContentRef *string `url:"content_ref,omitempty" json:"content_ref,omitempty"` + DryRunRef *string `url:"dry_run_ref,omitempty" json:"dry_run_ref,omitempty"` DryRun *bool `url:"dry_run,omitempty" json:"dry_run,omitempty"` IncludeJobs *bool `url:"include_jobs,omitempty" json:"include_jobs,omitempty"` Ref *string `url:"ref,omitempty" json:"ref,omitempty"` diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 92b8cf73c9..6aae83bfd2 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -23,7 +12,7 @@ import ( ) // DefaultClient is the default Client and is used by Get, Head, Post and PostForm. -// Please be careful of intitialization order - for example, if you change +// Please be careful of initialization order - for example, if you change // the global propagator, the DefaultClient might still be using the old one. var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index cabf645a5b..5d6e6156b7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -29,13 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Server HTTP metrics. -const ( - serverRequestSize = "http.server.request.size" // Incoming request bytes total - serverResponseSize = "http.server.response.size" // Incoming response bytes total - serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds -) - // Client HTTP metrics. const ( clientRequestSize = "http.client.request.size" // Outgoing request bytes total diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index a1b5b5e5aa..a01bfafbe0 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -19,6 +8,8 @@ import ( "net/http" "net/http/httptrace" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" @@ -44,8 +35,9 @@ type config struct { SpanNameFormatter func(string, *http.Request) string ClientTrace func(context.Context) *httptrace.ClientTrace - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + MetricAttributesFn func(*http.Request) []attribute.KeyValue } // Option interface used for setting optional config properties. @@ -111,7 +103,7 @@ func WithPublicEndpoint() Option { }) } -// WithPublicEndpointFn runs with every request, and allows conditionnally +// WithPublicEndpointFn runs with every request, and allows conditionally // configuring the Handler to link the span with an incoming span context. If // this option is not provided or returns false, then the association is a // child association instead of a link. @@ -205,3 +197,11 @@ func WithServerName(server string) Option { c.ServerName = server }) } + +// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue. 
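+// The callback receives the *http.Request, so attributes can be derived from
+// per-request properties such as the URL or headers.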
+// These attributes will be included in metrics for every request. +func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option { + return optionFunc(func(c *config) { + c.MetricAttributesFn = metricAttributesFn + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go index 38c7f01c71..56b24b982a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package otelhttp provides an http.Handler and functions that are intended // to be used to add tracing by wrapping existing handlers (with Handler) and diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 1fc15019e6..33580a35b7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -1,32 +1,18 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" import ( - "io" "net/http" "time" "github.com/felixge/httpsnoop" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" "go.opentelemetry.io/otel/trace" ) @@ -36,7 +22,6 @@ type middleware struct { server string tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption readEvent bool @@ -46,9 +31,7 @@ type middleware struct { publicEndpoint bool publicEndpointFn func(*http.Request) bool - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - serverLatencyMeasure metric.Float64Histogram + semconv semconv.HTTPServer } func defaultHandlerFormatter(operation string, _ *http.Request) string { @@ -76,7 +59,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han c := newConfig(append(defaultOpts, opts...)...) h.configure(c) - h.createMeasures() return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -87,7 +69,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han func (h *middleware) configure(c *config) { h.tracer = c.Tracer - h.meter = c.Meter h.propagators = c.Propagators h.spanStartOptions = c.SpanStartOptions h.readEvent = c.ReadEvent @@ -97,6 +78,7 @@ func (h *middleware) configure(c *config) { h.publicEndpoint = c.PublicEndpoint h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName + h.semconv = semconv.NewHTTPServer(c.Meter) } func handleErr(err error) { @@ -105,30 +87,6 @@ func handleErr(err error) { } } -func (h *middleware) createMeasures() { - var err error - h.requestBytesCounter, err = h.meter.Int64Counter( - serverRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - h.responseBytesCounter, err = h.meter.Int64Counter( - serverResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - h.serverLatencyMeasure, err = h.meter.Float64Histogram( - serverDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of inbound HTTP requests."), - ) - handleErr(err) -} - // serveHTTP sets up tracing and calls the given next http.Handler with the span // context injected into the request context. func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { @@ -143,12 +101,9 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...), - } - if h.server != "" { - hostAttr := semconv.NetHostName(h.server) - opts = append(opts, trace.WithAttributes(hostAttr)) + trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...), } + opts = append(opts, h.spanStartOptions...) 
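+ // Caller-supplied span start options are appended after the semconv
+ // request attributes computed above.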
if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { opts = append(opts, trace.WithNewRoot()) @@ -178,14 +133,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, readRecordFunc) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - bw.record = readRecordFunc - r.Body = &bw + r.Body = bw } writeRecordFunc := func(int64) {} @@ -195,13 +148,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - rww := &respWriterWrapper{ - ResponseWriter: w, - record: writeRecordFunc, - ctx: ctx, - props: h.propagators, - statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything - } + rww := request.NewRespWriterWrapper(w, writeRecordFunc) // Wrap w to use our ResponseWriter methods while also exposing // other interfaces that w may implement (http.CloseNotifier, @@ -217,61 +164,48 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { return rww.WriteHeader }, + Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc { + return rww.Flush + }, }) - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } next.ServeHTTP(w, r.WithContext(ctx)) - setAfterServeAttributes(span, bw.read.Load(), rww.written, rww.statusCode, bw.err, rww.err) - - // Add metrics - attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) - if rww.statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) - } - o := metric.WithAttributes(attributes...) - h.requestBytesCounter.Add(ctx, bw.read.Load(), o) - h.responseBytesCounter.Add(ctx, rww.written, o) + statusCode := rww.StatusCode() + bytesWritten := rww.BytesWritten() + span.SetStatus(h.semconv.Status(statusCode)) + span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ + StatusCode: statusCode, + ReadBytes: bw.BytesRead(), + ReadError: bw.Error(), + WriteBytes: bytesWritten, + WriteError: rww.Error(), + })...) // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.serverLatencyMeasure.Record(ctx, elapsedTime, o) -} - -func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { - attributes := []attribute.KeyValue{} - - // TODO: Consider adding an event after each read and write, possibly as an - // option (defaulting to off), so as to not create needlessly verbose spans. 
- if read > 0 { - attributes = append(attributes, ReadBytesKey.Int64(read)) - } - if rerr != nil && rerr != io.EOF { - attributes = append(attributes, ReadErrorKey.String(rerr.Error())) - } - if wrote > 0 { - attributes = append(attributes, WroteBytesKey.Int64(wrote)) - } - if statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) - } - span.SetStatus(semconvutil.HTTPServerStatus(statusCode)) - - if werr != nil && werr != io.EOF { - attributes = append(attributes, WriteErrorKey.String(werr.Error())) - } - span.SetAttributes(attributes...) + h.semconv.RecordMetrics(ctx, semconv.MetricData{ + ServerName: h.server, + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + RequestSize: bw.BytesRead(), + ResponseSize: bytesWritten, + ElapsedTime: elapsedTime, + }) } // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. func WithRouteTag(route string, h http.Handler) http.Handler { + attr := semconv.NewHTTPServer(nil).Route(route) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - attr := semconv.HTTPRouteKey.String(route) - span := trace.SpanFromContext(r.Context()) span.SetAttributes(attr) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go new file mode 100644 index 0000000000..a945f55661 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "io" + "sync" +) + +var _ io.ReadCloser = &BodyWrapper{} + +// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number +// of bytes read and the last error. +type BodyWrapper struct { + io.ReadCloser + OnRead func(n int64) // must not be nil + + mu sync.Mutex + read int64 + err error +} + +// NewBodyWrapper creates a new BodyWrapper. +// +// The onRead attribute is a callback that will be called every time the data +// is read, with the number of bytes being read. +func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper { + return &BodyWrapper{ + ReadCloser: body, + OnRead: onRead, + } +} + +// Read reads the data from the io.ReadCloser, and stores the number of bytes +// read and the error. +func (w *BodyWrapper) Read(b []byte) (int, error) { + n, err := w.ReadCloser.Read(b) + n1 := int64(n) + + w.updateReadData(n1, err) + w.OnRead(n1) + return n, err +} + +func (w *BodyWrapper) updateReadData(n int64, err error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.read += n + if err != nil { + w.err = err + } +} + +// Close closes the io.ReadCloser. +func (w *BodyWrapper) Close() error { + return w.ReadCloser.Close() +} + +// BytesRead returns the number of bytes read up to this point. +func (w *BodyWrapper) BytesRead() int64 { + w.mu.Lock() + defer w.mu.Unlock() + + return w.read +} + +// Error returns the last error.
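+// The value is read under the wrapper's mutex, so Error is safe to call
+// concurrently with Read.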
+func (w *BodyWrapper) Error() error { + w.mu.Lock() + defer w.mu.Unlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go new file mode 100644 index 0000000000..aea171fb26 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -0,0 +1,112 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "net/http" + "sync" +) + +var _ http.ResponseWriter = &RespWriterWrapper{} + +// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of +// bytes written, the last error, and to catch the first written statusCode. +// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional +// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc) +// that may be useful when using it in real life situations. +type RespWriterWrapper struct { + http.ResponseWriter + OnWrite func(n int64) // must not be nil + + mu sync.RWMutex + written int64 + statusCode int + err error + wroteHeader bool +} + +// NewRespWriterWrapper creates a new RespWriterWrapper. +// +// The onWrite attribute is a callback that will be called every time the data +// is written, with the number of bytes that were written. +func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper { + return &RespWriterWrapper{ + ResponseWriter: w, + OnWrite: onWrite, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } +} + +// Write writes the bytes array into the [ResponseWriter], and tracks the +// number of bytes written and last error. +func (w *RespWriterWrapper) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(http.StatusOK) + + n, err := w.ResponseWriter.Write(p) + n1 := int64(n) + w.OnWrite(n1) + w.written += n1 + w.err = err + return n, err +} + +// WriteHeader persists initial statusCode for span attribution. +// All calls to WriteHeader will be propagated to the underlying ResponseWriter +// and will persist the statusCode from the first call. +// Blocking consecutive calls to WriteHeader alters expected behavior and will +// remove warning logs from net/http where developers will notice incorrect handler implementations. +func (w *RespWriterWrapper) WriteHeader(statusCode int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(statusCode) +} + +// writeHeader persists the status code for span attribution, and propagates +// the call to the underlying ResponseWriter. +// It does not acquire a lock, and therefore assumes that is being handled by a +// parent method. +func (w *RespWriterWrapper) writeHeader(statusCode int) { + if !w.wroteHeader { + w.wroteHeader = true + w.statusCode = statusCode + } + w.ResponseWriter.WriteHeader(statusCode) +} + +// Flush implements [http.Flusher]. +func (w *RespWriterWrapper) Flush() { + w.WriteHeader(http.StatusOK) + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} + +// BytesWritten returns the number of bytes written. +func (w *RespWriterWrapper) BytesWritten() int64 { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.written +} + +// StatusCode returns the HTTP status code that was sent.
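+// If the wrapped handler never called WriteHeader or Write, this reports the
+// default of http.StatusOK set by NewRespWriterWrapper.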
+func (w *RespWriterWrapper) StatusCode() int { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.statusCode +} + +// Error returns the last error. +func (w *RespWriterWrapper) Error() error { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go new file mode 100644 index 0000000000..9cae4cab86 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -0,0 +1,165 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "context" + "fmt" + "net/http" + "os" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" +) + +type ResponseTelemetry struct { + StatusCode int + ReadBytes int64 + ReadError error + WriteBytes int64 + WriteError error +} + +type HTTPServer struct { + duplicate bool + + // Old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + serverLatencyMeasure metric.Float64Histogram +} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + } + return oldHTTPServer{}.RequestTraceAttrs(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + } + return oldHTTPServer{}.ResponseTraceAttrs(resp) +} + +// Route returns the attribute for the route. +func (s HTTPServer) Route(route string) attribute.KeyValue { + return oldHTTPServer{}.Route(route) +} + +// Status returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. 
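+// Codes outside the 100-599 range are treated as invalid and reported as an
+// error with an explanatory message.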
+func (s HTTPServer) Status(code int) (codes.Code, string) {
+	if code < 100 || code >= 600 {
+		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+	}
+	if code >= 500 {
+		return codes.Error, ""
+	}
+	return codes.Unset, ""
+}
+
+type MetricData struct {
+	ServerName           string
+	Req                  *http.Request
+	StatusCode           int
+	AdditionalAttributes []attribute.KeyValue
+
+	RequestSize  int64
+	ResponseSize int64
+	ElapsedTime  float64
+}
+
+func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) {
+	if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil {
+		// This will happen if an HTTPServer{} is used instead of NewHTTPServer.
+		return
+	}
+
+	attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
+	o := metric.WithAttributeSet(attribute.NewSet(attributes...))
+	addOpts := []metric.AddOption{o} // Allocate vararg slice once.
+	s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...)
+	s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...)
+	s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
+
+	// TODO: Duplicate Metrics
+}
+
+func NewHTTPServer(meter metric.Meter) HTTPServer {
+	env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
+	duplicate := env == "http/dup"
+	server := HTTPServer{
+		duplicate: duplicate,
+	}
+	server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter)
+	return server
+}
+
+type HTTPClient struct {
+	duplicate bool
+}
+
+func NewHTTPClient() HTTPClient {
+	env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
+	return HTTPClient{duplicate: env == "http/dup"}
+}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
+func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+	if c.duplicate {
+		return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...)
+	}
+	return oldHTTPClient{}.RequestTraceAttrs(req)
+}
+
+// ResponseTraceAttrs returns trace attributes for an HTTP response received by a client.
+func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+	if c.duplicate {
+		return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...)
+	}
+
+	return oldHTTPClient{}.ResponseTraceAttrs(resp)
+}
+
+func (c HTTPClient) Status(code int) (codes.Code, string) {
+	if code < 100 || code >= 600 {
+		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+	}
+	if code >= 400 {
+		return codes.Error, ""
+	}
+	return codes.Unset, ""
+}
+
+func (c HTTPClient) ErrorType(err error) attribute.KeyValue {
+	if c.duplicate {
+		return newHTTPClient{}.ErrorType(err)
+	}
+
+	return attribute.KeyValue{}
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
new file mode 100644
index 0000000000..745b8c67bc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
@@ -0,0 +1,348 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+	semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+type newHTTPServer struct{}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue {
+	count := 3 // ServerAddress, Method, Scheme
+
+	var host string
+	var p int
+	if server == "" {
+		host, p = splitHostPort(req.Host)
+	} else {
+		// Prioritize the primary server name.
+		host, p = splitHostPort(server)
+		if p < 0 {
+			_, p = splitHostPort(req.Host)
+		}
+	}
+
+	hostPort := requiredHTTPPort(req.TLS != nil, p)
+	if hostPort > 0 {
+		count++
+	}
+
+	method, methodOriginal := n.method(req.Method)
+	if methodOriginal != (attribute.KeyValue{}) {
+		count++
+	}
+
+	scheme := n.scheme(req.TLS != nil)
+
+	if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
+		// file-path that would be interpreted with a sock family.
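		// Illustrative note (not part of the vendored source): a typical
		// RemoteAddr such as "192.0.2.1:54321" contributes both
		// network.peer.address ("192.0.2.1") and network.peer.port (54321)
		// to the attribute count below.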
+		count++
+		if peerPort > 0 {
+			count++
+		}
+	}
+
+	useragent := req.UserAgent()
+	if useragent != "" {
+		count++
+	}
+
+	clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+	if clientIP != "" {
+		count++
+	}
+
+	if req.URL != nil && req.URL.Path != "" {
+		count++
+	}
+
+	protoName, protoVersion := netProtocol(req.Proto)
+	if protoName != "" && protoName != "http" {
+		count++
+	}
+	if protoVersion != "" {
+		count++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, count)
+	attrs = append(attrs,
+		semconvNew.ServerAddress(host),
+		method,
+		scheme,
+	)
+
+	if hostPort > 0 {
+		attrs = append(attrs, semconvNew.ServerPort(hostPort))
+	}
+	if methodOriginal != (attribute.KeyValue{}) {
+		attrs = append(attrs, methodOriginal)
+	}
+
+	if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
+		// file-path that would be interpreted with a sock family.
+		attrs = append(attrs, semconvNew.NetworkPeerAddress(peer))
+		if peerPort > 0 {
+			attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort))
+		}
+	}
+
+	if useragent := req.UserAgent(); useragent != "" {
+		attrs = append(attrs, semconvNew.UserAgentOriginal(useragent))
+	}
+
+	if clientIP != "" {
+		attrs = append(attrs, semconvNew.ClientAddress(clientIP))
+	}
+
+	if req.URL != nil && req.URL.Path != "" {
+		attrs = append(attrs, semconvNew.URLPath(req.URL.Path))
+	}
+
+	if protoName != "" && protoName != "http" {
+		attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
+	}
+	if protoVersion != "" {
+		attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
+	}
+
+	return attrs
+}
+
+func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) {
+	if method == "" {
+		return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
+	}
+	if attr, ok := methodLookup[method]; ok {
+		return attr, attribute.KeyValue{}
+	}
+
+	orig := semconvNew.HTTPRequestMethodOriginal(method)
+	if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
+		return attr, orig
+	}
+	return semconvNew.HTTPRequestMethodGet, orig
+}
+
+func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
+	if https {
+		return semconvNew.URLScheme("https")
+	}
+	return semconvNew.URLScheme("http")
+}
+
+// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
+//
+// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
+func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
+	var count int
+
+	if resp.ReadBytes > 0 {
+		count++
+	}
+	if resp.WriteBytes > 0 {
+		count++
+	}
+	if resp.StatusCode > 0 {
+		count++
+	}
+
+	attributes := make([]attribute.KeyValue, 0, count)
+
+	if resp.ReadBytes > 0 {
+		attributes = append(attributes,
+			semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)),
+		)
+	}
+	if resp.WriteBytes > 0 {
+		attributes = append(attributes,
+			semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)),
+		)
+	}
+	if resp.StatusCode > 0 {
+		attributes = append(attributes,
+			semconvNew.HTTPResponseStatusCode(resp.StatusCode),
+		)
+	}
+
+	return attributes
+}
+
+// Route returns the attribute for the route.
+func (n newHTTPServer) Route(route string) attribute.KeyValue {
+	return semconvNew.HTTPRoute(route)
+}
+
+type newHTTPClient struct{}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
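
The duplication switch defined in env.go keys off OTEL_SEMCONV_STABILITY_OPT_IN, which is read once when the HTTPServer or HTTPClient value is constructed. A minimal sketch of that behavior follows; the semconv package is internal, so this only compiles from within the otelhttp module and is purely illustrative.

package main

import (
	"fmt"
	"net/http/httptest"
	"os"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
)

func main() {
	// "http/dup" asks the helpers to emit both the v1.20.0 and the v1.26.0
	// attribute sets; the variable is consulted only at construction time.
	os.Setenv("OTEL_SEMCONV_STABILITY_OPT_IN", "http/dup")
	srv := semconv.NewHTTPServer(nil) // nil meter: the metric instruments become no-ops
	req := httptest.NewRequest("GET", "http://example.com/hello", nil)
	for _, kv := range srv.RequestTraceAttrs("example.com", req) {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
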
+func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+	/*
+	   below attributes are returned:
+	   - http.request.method
+	   - http.request.method.original
+	   - url.full
+	   - server.address
+	   - server.port
+	   - network.protocol.name
+	   - network.protocol.version
+	*/
+	numOfAttributes := 3 // URL, server address, and method are always set.
+
+	var urlHost string
+	if req.URL != nil {
+		urlHost = req.URL.Host
+	}
+	var requestHost string
+	var requestPort int
+	for _, hostport := range []string{urlHost, req.Header.Get("Host")} {
+		requestHost, requestPort = splitHostPort(hostport)
+		if requestHost != "" || requestPort > 0 {
+			break
+		}
+	}
+
+	eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
+	if eligiblePort > 0 {
+		numOfAttributes++
+	}
+	useragent := req.UserAgent()
+	if useragent != "" {
+		numOfAttributes++
+	}
+
+	protoName, protoVersion := netProtocol(req.Proto)
+	if protoName != "" && protoName != "http" {
+		numOfAttributes++
+	}
+	if protoVersion != "" {
+		numOfAttributes++
+	}
+
+	method, originalMethod := n.method(req.Method)
+	if originalMethod != (attribute.KeyValue{}) {
+		numOfAttributes++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, numOfAttributes)
+
+	attrs = append(attrs, method)
+	if originalMethod != (attribute.KeyValue{}) {
+		attrs = append(attrs, originalMethod)
+	}
+
+	var u string
+	if req.URL != nil {
+		// Remove any username/password info that may be in the URL.
+		userinfo := req.URL.User
+		req.URL.User = nil
+		u = req.URL.String()
+		// Restore any username/password info that was removed.
+		req.URL.User = userinfo
+	}
+	attrs = append(attrs, semconvNew.URLFull(u))
+
+	attrs = append(attrs, semconvNew.ServerAddress(requestHost))
+	if eligiblePort > 0 {
+		attrs = append(attrs, semconvNew.ServerPort(eligiblePort))
+	}
+
+	if protoName != "" && protoName != "http" {
+		attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
+	}
+	if protoVersion != "" {
+		attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
+	}
+
+	return attrs
+}
+
+// ResponseTraceAttrs returns trace attributes for an HTTP response received by a client.
+func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+	/*
+	   below attributes are returned:
+	   - http.response.status_code
+	   - error.type
+	*/
+	var count int
+	if resp.StatusCode > 0 {
+		count++
+	}
+
+	if isErrorStatusCode(resp.StatusCode) {
+		count++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, count)
+	if resp.StatusCode > 0 {
+		attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode))
+	}
+
+	if isErrorStatusCode(resp.StatusCode) {
+		errorType := strconv.Itoa(resp.StatusCode)
+		attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType))
+	}
+	return attrs
+}
+
+func (n newHTTPClient) ErrorType(err error) attribute.KeyValue {
+	t := reflect.TypeOf(err)
+	var value string
+	if t.PkgPath() == "" && t.Name() == "" {
+		// Likely a builtin type.
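		// Illustrative note (not part of the vendored source): pointer types
		// also report an empty PkgPath and Name, so an error like *url.Error
		// lands here and value becomes t.String() ("*url.Error"), while a
		// named value type such as syscall.Errno takes the else branch and is
		// rendered as "syscall.Errno".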
+		value = t.String()
+	} else {
+		value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+	}
+
+	if value == "" {
+		return semconvNew.ErrorTypeOther
+	}
+
+	return semconvNew.ErrorTypeKey.String(value)
+}
+
+func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) {
+	if method == "" {
+		return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
+	}
+	if attr, ok := methodLookup[method]; ok {
+		return attr, attribute.KeyValue{}
+	}
+
+	orig := semconvNew.HTTPRequestMethodOriginal(method)
+	if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
+		return attr, orig
+	}
+	return semconvNew.HTTPRequestMethodGet, orig
+}
+
+func isErrorStatusCode(code int) bool {
+	return code >= 400 || code < 100
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
new file mode 100644
index 0000000000..e6e14924f5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
@@ -0,0 +1,98 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+// splitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone]", "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func splitHostPort(hostport string) (host string, port int) {
+	port = -1
+
+	if strings.HasPrefix(hostport, "[") {
+		addrEnd := strings.LastIndex(hostport, "]")
+		if addrEnd < 0 {
+			// Invalid hostport.
+			return
+		}
+		if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+			host = hostport[1:addrEnd]
+			return
+		}
+	} else {
+		if i := strings.LastIndex(hostport, ":"); i < 0 {
+			host = hostport
+			return
+		}
+	}
+
+	host, pStr, err := net.SplitHostPort(hostport)
+	if err != nil {
+		return
+	}
+
+	p, err := strconv.ParseUint(pStr, 10, 16)
+	if err != nil {
+		return
+	}
+	return host, int(p) // nolint: gosec // Bit size checked to be 16 above.
+} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} + +var methodLookup = map[string]attribute.KeyValue{ + http.MethodConnect: semconvNew.HTTPRequestMethodConnect, + http.MethodDelete: semconvNew.HTTPRequestMethodDelete, + http.MethodGet: semconvNew.HTTPRequestMethodGet, + http.MethodHead: semconvNew.HTTPRequestMethodHead, + http.MethodOptions: semconvNew.HTTPRequestMethodOptions, + http.MethodPatch: semconvNew.HTTPRequestMethodPatch, + http.MethodPost: semconvNew.HTTPRequestMethodPost, + http.MethodPut: semconvNew.HTTPRequestMethodPut, + http.MethodTrace: semconvNew.HTTPRequestMethodTrace, +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go new file mode 100644 index 0000000000..c999b05e67 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -0,0 +1,192 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "errors" + "io" + "net/http" + "slices" + "strings" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" +) + +type oldHTTPServer struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPServerRequest(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
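
To make the helpers above concrete, here is a freestanding, simplified re-creation of the splitHostPort pattern (net.SplitHostPort plus a 16-bit port parse). The hostPort function is hypothetical and skips the bracket/zone special cases that the vendored version handles.

package main

import (
	"fmt"
	"net"
	"strconv"
)

// hostPort mirrors the core of splitHostPort: defer to net.SplitHostPort,
// then bounds-check the port by parsing it as an unsigned 16-bit integer.
func hostPort(hostport string) (string, int) {
	h, pStr, err := net.SplitHostPort(hostport)
	if err != nil {
		return hostport, -1 // no parsable "host:port" pair; simplified fallback
	}
	p, err := strconv.ParseUint(pStr, 10, 16)
	if err != nil {
		return h, -1
	}
	return h, int(p)
}

func main() {
	fmt.Println(hostPort("example.com:8080")) // example.com 8080
	fmt.Println(hostPort("[::1]:443"))        // ::1 443
	fmt.Println(hostPort("example.com"))      // example.com -1
}
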
+func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + attributes := []attribute.KeyValue{} + + if resp.ReadBytes > 0 { + attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) + } + if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error())) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes))) + } + if resp.StatusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode)) + } + if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error())) + } + + return attributes +} + +// Route returns the attribute for the route. +func (o oldHTTPServer) Route(route string) attribute.KeyValue { + return semconv.HTTPRoute(route) +} + +// HTTPStatusCode returns the attribute for the HTTP status code. +// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added. +func HTTPStatusCode(status int) attribute.KeyValue { + return semconv.HTTPStatusCode(status) +} + +// Server HTTP metrics. +const ( + serverRequestSize = "http.server.request.size" // Incoming request bytes total + serverResponseSize = "http.server.response.size" // Incoming response bytes total + serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds +) + +func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + var err error + requestBytesCounter, err := meter.Int64Counter( + serverRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + serverResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + serverLatencyMeasure, err := meter.Float64Histogram( + serverDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of inbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, serverLatencyMeasure +} + +func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + n := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + o.methodMetric(req.Method), + o.scheme(req.TLS != nil), + semconv.NetHostName(host)) + + if hostPort > 0 { + attributes = append(attributes, semconv.NetHostPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} + +func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconv.HTTPSchemeHTTPS + } + return semconv.HTTPSchemeHTTP +} + +type oldHTTPClient struct{} + +func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req) +} + +func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go index edf4ce3d31..7aa5f99e81 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go index 0efd5261f6..a73bb06e90 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -2,18 +2,7 @@ // source: internal/shared/semconvutil/httpconv.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index d3a06e0cad..b80a1db61f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -2,17 +2,7 @@ // source: internal/shared/semconvutil/netconv.go.tmpl // Copyright The OpenTelemetry Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" @@ -102,7 +92,7 @@ func (c *netConv) Host(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.HostName(h)) if p > 0 { - attrs = append(attrs, c.HostPort(int(p))) + attrs = append(attrs, c.HostPort(p)) } return attrs } @@ -148,7 +138,7 @@ func (c *netConv) Peer(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.PeerName(h)) if p > 0 { - attrs = append(attrs, c.PeerPort(int(p))) + attrs = append(attrs, c.PeerPort(p)) } return attrs } @@ -205,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Bitsize checked to be 16 above. 
} func netProtocol(proto string) (name string, version string) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go index 26a51a1805..ea504e396f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -48,8 +37,12 @@ type labelerContextKeyType int const lablelerContextKey labelerContextKeyType = 0 -func injectLabeler(ctx context.Context, l *Labeler) context.Context { - return context.WithValue(ctx, lablelerContextKey, l) +// ContextWithLabeler returns a new context with the provided Labeler instance. +// Attributes added to the specified labeler will be injected into metrics +// emitted by the instrumentation. Only one labeller can be injected into the +// context. Injecting it multiple times will override the previous calls. +func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { + return context.WithValue(parent, lablelerContextKey, l) } // LabelerFromContext retrieves a Labeler instance from the provided context if diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 43e937a67a..b4119d3438 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -22,15 +11,16 @@ import ( "sync/atomic" "time" - "go.opentelemetry.io/otel/metric" - + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" ) // Transport implements the http.RoundTripper interface and wraps @@ -38,14 +28,16 @@ import ( type Transport struct { rt http.RoundTripper - tracer trace.Tracer - meter metric.Meter - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - filters []Filter - spanNameFormatter func(string, *http.Request) string - clientTrace func(context.Context) *httptrace.ClientTrace + tracer trace.Tracer + meter metric.Meter + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + filters []Filter + spanNameFormatter func(string, *http.Request) string + clientTrace func(context.Context) *httptrace.ClientTrace + metricAttributesFn func(*http.Request) []attribute.KeyValue + semconv semconv.HTTPClient requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter latencyMeasure metric.Float64Histogram @@ -65,7 +57,8 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ - rt: base, + rt: base, + semconv: semconv.NewHTTPClient(), } defaultOpts := []Option{ @@ -88,6 +81,7 @@ func (t *Transport) applyConfig(c *config) { t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace + t.metricAttributesFn = c.MetricAttributesFn } func (t *Transport) createMeasures() { @@ -148,49 +142,56 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) } - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. - // use a body wrapper to determine the request size - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, func(int64) {}) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - // noop to prevent nil panic. not using this record fun yet. - bw.record = func(int64) {} - r.Body = &bw + r.Body = bw } - span.SetAttributes(semconvutil.HTTPClientRequest(r)...) + span.SetAttributes(t.semconv.RequestTraceAttrs(r)...) t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) res, err := t.rt.RoundTrip(r) if err != nil { - span.RecordError(err) + // set error type attribute if the error is part of the predefined + // error types. 
+ // otherwise, record it as an exception + if errType := t.semconv.ErrorType(err); errType.Valid() { + span.SetAttributes(errType) + } else { + span.RecordError(err) + } + span.SetStatus(codes.Error, err.Error()) span.End() return res, err } // metrics - metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...) + metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) if res.StatusCode > 0 { metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) } - o := metric.WithAttributes(metricAttrs...) - t.requestBytesCounter.Add(ctx, bw.read.Load(), o) + o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + + t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { t.responseBytesCounter.Add(ctx, n, o) } // traces - span.SetAttributes(semconvutil.HTTPClientResponse(res)...) - span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) + span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...) + span.SetStatus(t.semconv.Status(res.StatusCode)) res.Body = newWrappedBody(span, readRecordFunc, res.Body) @@ -202,6 +203,14 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { return res, err } +func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if t.metricAttributesFn != nil { + attributeForRequest = t.metricAttributesFn(r) + } + return attributeForRequest +} + // newWrappedBody returns a new and appropriately scoped *wrappedBody as an // io.ReadCloser. If the passed body implements io.Writer, the returned value // will implement io.ReadWriteCloser. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 35254e888f..502c1bdafc 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -1,22 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" // Version is the current release version of the otelhttp instrumentation. 
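
With ContextWithLabeler exported in the labeler.go hunk above, and RoundTrip now reusing a Labeler already present in the request context instead of always installing its own, callers can attach extra metric attributes per request. A short usage sketch follows; the operation name and the "tenant" attribute are made up for illustration.

package main

import (
	"log"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	handler := otelhttp.NewHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Attributes added to the request's Labeler are appended to the
		// metrics the instrumentation records for this request.
		labeler, _ := otelhttp.LabelerFromContext(r.Context())
		labeler.Add(attribute.String("tenant", "acme")) // hypothetical attribute
		w.WriteHeader(http.StatusOK)
	}), "hello") // "hello" is the operation name used for the span
	log.Fatal(http.ListenAndServe(":8080", handler))
}

On the client side, a caller can likewise pre-seed a request context with otelhttp.ContextWithLabeler before the Transport runs, which the patched RoundTrip now honors rather than overwriting.
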
func Version() string { - return "0.49.0" + return "0.54.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go deleted file mode 100644 index 2852ec9717..0000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - -import ( - "context" - "io" - "net/http" - "sync/atomic" - - "go.opentelemetry.io/otel/propagation" -) - -var _ io.ReadCloser = &bodyWrapper{} - -// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number -// of bytes read and the last error. -type bodyWrapper struct { - io.ReadCloser - record func(n int64) // must not be nil - - read atomic.Int64 - err error -} - -func (w *bodyWrapper) Read(b []byte) (int, error) { - n, err := w.ReadCloser.Read(b) - n1 := int64(n) - w.read.Add(n1) - w.err = err - w.record(n1) - return n, err -} - -func (w *bodyWrapper) Close() error { - return w.ReadCloser.Close() -} - -var _ http.ResponseWriter = &respWriterWrapper{} - -// respWriterWrapper wraps a http.ResponseWriter in order to track the number of -// bytes written, the last error, and to catch the first written statusCode. -// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional -// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) -// that may be useful when using it in real life situations. -type respWriterWrapper struct { - http.ResponseWriter - record func(n int64) // must not be nil - - // used to inject the header - ctx context.Context - - props propagation.TextMapPropagator - - written int64 - statusCode int - err error - wroteHeader bool -} - -func (w *respWriterWrapper) Header() http.Header { - return w.ResponseWriter.Header() -} - -func (w *respWriterWrapper) Write(p []byte) (int, error) { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - n, err := w.ResponseWriter.Write(p) - n1 := int64(n) - w.record(n1) - w.written += n1 - w.err = err - return n, err -} - -// WriteHeader persists initial statusCode for span attribution. -// All calls to WriteHeader will be propagated to the underlying ResponseWriter -// and will persist the statusCode from the first call. -// Blocking consecutive calls to WriteHeader alters expected behavior and will -// remove warning logs from net/http where developers will notice incorrect handler implementations. 
-func (w *respWriterWrapper) WriteHeader(statusCode int) { - if !w.wroteHeader { - w.wroteHeader = true - w.statusCode = statusCode - } - w.ResponseWriter.WriteHeader(statusCode) -} diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 120b63a9c7..6bf3abc41e 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -5,3 +5,5 @@ collison consequentially ans nam +valu +thirdparty diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc index 4afbb1fb3b..e2cb3ea944 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellrc +++ b/vendor/go.opentelemetry.io/otel/.codespellrc @@ -5,6 +5,6 @@ check-filenames = check-hidden = ignore-words = .codespellignore interactive = 1 -skip = .git,go.mod,go.sum,semconv,venv,.tools +skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools uri-ignore-words-list = * write = diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules deleted file mode 100644 index 38a1f56982..0000000000 --- a/vendor/go.opentelemetry.io/otel/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "opentelemetry-proto"] - path = exporters/otlp/internal/opentelemetry-proto - url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index a62511f382..d9abe194d9 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,8 +9,11 @@ linters: disable-all: true # Specifically enable linters we want to use. enable: + - asasalint + - bodyclose - depguard - errcheck + - errorlint - godot - gofumpt - goimports @@ -21,8 +24,11 @@ linters: - misspell - revive - staticcheck + - tenv - typecheck + - unconvert - unused + - unparam issues: # Maximum issues count per one linter. @@ -124,6 +130,8 @@ linters-settings: - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" + - "**/log/*.go" + - "**/log/**/*.go" deny: - pkg: "go.opentelemetry.io/otel/internal$" desc: Do not use cross-module internal packages. diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 98f2d20438..6107c17b89 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,218 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] + + + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. + +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. 
(#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. 
(#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + +## [1.28.0/0.50.0/0.4.0] 2024-07-02 + +### Added + +- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`. + This method is used to check if an `Instrument` instance is a zero-value. (#5431) +- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468) +- The `go.opentelemetry.io/otel/semconv/v1.26.0` package. + The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476) +- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499) +- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530) + +### Changed + +- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490) + - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes. +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490) +- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice which makes it possible to have a zero-allocation log processing using `SimpleProcessor`. (#5493) +- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497) +- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520) +- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. 
(#5545)
+
+### Fixed
+
+- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376)
+- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434)
+- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435)
+- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456)
+- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464)
+- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508)
+- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514)
+- Fix stale timestamps reported by the last-value aggregation. (#5517)
+- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
+- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/otel/attribute` by reducing the number of allocations. (#5549)
+- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528)
+
+## [1.27.0/0.49.0/0.3.0] 2024-05-21
+
+### Added
+
+- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263)
+- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276)
+- Add metrics in the `otel-collector` example. (#5283)
+- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304)
+  - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`.
+  - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument.
+- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349)
+
+### Changed
+
+- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189)
+- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189)
+- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when the `WithoutTimestamps` option is set.
(#5241)
+- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now, instead it prints the `DroppedAttributes` field. (#5272)
+- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286)
+- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305)
+- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315)
+- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374)
+
+### Fixed
+
+- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306)
+- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311)
+- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365)
+- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371)
+- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375)
+
+## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24
+
+### Added
+
+- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134)
+- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194)
+- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`.
+  This new module contains the Go implementation of the OpenTelemetry Logs SDK.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`.
+  This new module contains an OTLP exporter that transmits log telemetry using HTTP.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`.
+  This new module contains an exporter that prints log records to STDOUT.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- The `go.opentelemetry.io/otel/semconv/v1.25.0` package.
+  The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254)
+
+### Changed
+
+- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177)
+- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214)
+- The `otel-collector` example now uses docker compose to bring up services instead of kubernetes. (#5244)
+
+### Fixed
+
+- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159)
+
+## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05
+
+### Added
+
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`.
+
+## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05
+
+### Added
+
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906)
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4906)
+- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032)
+- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`.
+  This method is used to notify users if a log record will be emitted or not. (#5071)
+- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`.
+  This value represents an unset severity level. (#5072)
+- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076)
+- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`.
+  This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes.
+  At that point, users will be required to migrate their code, and this package will be deprecated and then removed. (#5085)
+- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100)
+- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108)
+- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116)
+- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117)
+- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111)
+- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528)
+
+### Changed
+
+- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151)
+  Because of that, `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError).
+  Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext), which was used before, is now deprecated.
+
+### Fixed
+
+- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027)
+- Prevent default `ErrorHandler` self-delegation. (#5137)
+- Update all dependencies to address [GO-2024-2687]. (#5139)
+
+### Removed
+
+- Drop support for [Go 1.20]. (#4967)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734)
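+
+An editor's migration sketch for the deprecations above; `NewSet` sorts and de-duplicates without a caller-provided `Sortable`:
+
+```go
+package main
+
+import "go.opentelemetry.io/otel/attribute"
+
+func main() {
+	kvs := []attribute.KeyValue{
+		attribute.String("b", "2"),
+		attribute.String("a", "1"),
+	}
+
+	// Before (deprecated):
+	//   var srt attribute.Sortable
+	//   set := attribute.NewSetWithSortable(kvs, &srt)
+
+	// After:
+	set := attribute.NewSet(kvs...)
+	_ = set
+}
+```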
+
 ## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23

 This release is the last to support [Go 1.20].
@@ -22,6 +234,7 @@ The next release will require at least [Go 1.21].
   This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
   This module is in an alpha state; it is subject to breaking changes.
   See our [versioning policy](./VERSIONING.md) for more info. (#4961)
+- Add ARM64 platform to the compatibility testing suite. (#4994)

 ### Fixed

@@ -138,7 +351,7 @@ See our [versioning policy](VERSIONING.md) for more information about these stab

 ## [1.20.0/0.43.0] 2023-11-10

-This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
+This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.

 ### Added

@@ -170,15 +383,15 @@ This release brings a breaking change for custom trace API implementations. Some

 - `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583)
 - The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
   This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
-  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
   See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
 - The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
   This extends the `Tracer` interface and is a breaking change for any existing implementation.
-  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
   See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
 - The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
   This extends the `Span` interface and is a breaking change for any existing implementation.
-  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
   See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
 - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
 - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
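+
+A minimal sketch of the embedding described above (an editor's illustration); it assumes delegation to the `go.opentelemetry.io/otel/trace/noop` implementation as the chosen default behavior:
+
+```go
+package mytracer
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+	"go.opentelemetry.io/otel/trace/noop"
+)
+
+// tracer embeds embedded.Tracer, as the extended trace.Tracer
+// interface now requires.
+type tracer struct {
+	embedded.Tracer
+}
+
+// Compile-time check that the interface is satisfied.
+var _ trace.Tracer = tracer{}
+
+// Start delegates to a no-op tracer; a real implementation would
+// create and return its own span here.
+func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+	return noop.NewTracerProvider().Tracer("noop").Start(ctx, name, opts...)
+}
+```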
@@ -814,7 +1027,7 @@ The next release will require at least [Go 1.19].
 - Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
 - `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
 - Re-enabled Attribute Filters in the Metric SDK. (#3396)
-- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
 - Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
 - Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
 - Prevent duplicate Prometheus description, unit, and type. (#3469)
@@ -1859,7 +2072,7 @@ with major version 0.
 - `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
 - Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
 - Unify the endpoint API related to the OTel exporter. (#1401)
-- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435)
+- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435)
 - Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430)
 - Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
 - `SamplingResult` is now passed a `Tracestate` from the parent `SpanContext`. (#1432)
@@ -2849,7 +3062,12 @@ It contains api and sdk for trace and meter.
 - CircleCI build CI manifest files.
 - CODEOWNERS file to track owners of this project.

-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.24.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...HEAD
+[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0
+[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0
+[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
+[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
+[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0
 [1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0
 [1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1
 [1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0
@@ -2928,6 +3146,9 @@ It contains api and sdk for trace and meter.
 [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0

+
+
+[Go 1.23]: https://go.dev/doc/go1.23
 [Go 1.22]: https://go.dev/doc/go1.22
 [Go 1.21]: https://go.dev/doc/go1.21
 [Go 1.20]: https://go.dev/doc/go1.20
@@ -2937,3 +3158,5 @@ It contains api and sdk for trace and meter.
[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric [metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric [trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace + +[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 31d336d922..5904bb7070 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -5,13 +5,13 @@ ##################################################### # # Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md # # # Learn about CODEOWNERS file format: # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole \ No newline at end of file +CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index c9f2bac55b..b7402576f9 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -201,6 +201,16 @@ You can install and run a "local Go Doc site" in the following way: [`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is an example of a very well-documented package. +### README files + +Each (non-internal, non-test, non-documentation) package must contain a +`README.md` file containing at least a title, and a `pkg.go.dev` badge. + +The README should not be a repetition of Go doc comments. + +You can verify the presence of all README files with the `make verify-readmes` +command. + ## Style Guide One of the primary goals of this project is that it is actually used by @@ -560,6 +570,9 @@ functionality should be added, each one will need their own super-set interfaces and will duplicate the pattern. For this reason, the simple targeted interface that defines the specific functionality should be preferred. +See also: +[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces). + ### Testing The tests should never leak goroutines. @@ -615,17 +628,15 @@ should be canceled. ### Approvers -- [Evan Torrie](https://github.com/evantorrie), Verizon Media -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Damien Mathieu](https://github.com/dmathieu), Elastic -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS ### Maintainers -- [David Ashpole](https://github.com/dashpole), Google - [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Tyler Yahn](https://github.com/MrAlias), Splunk ### Emeritus @@ -633,11 +644,13 @@ should be canceled. 
- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Josh MacDonald](https://github.com/jmacd), LightStep +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Evan Torrie](https://github.com/evantorrie), Yahoo ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). [Approver]: #approvers [Maintainer]: #maintainers diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 6de95219be..070b1e57df 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -1,16 +1,5 @@ # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# SPDX-License-Identifier: Apache-2.0 TOOLS_MOD_DIR := ./internal/tools @@ -25,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default -ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage +precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -34,7 +23,7 @@ TOOLS = $(CURDIR)/.tools $(TOOLS): @mkdir -p $@ -$(TOOLS)/%: | $(TOOLS) +$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) cd $(TOOLS_MOD_DIR) && \ $(GO) build -o $@ $(PACKAGE) @@ -50,9 +39,6 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit -DBOTCONF = $(TOOLS)/dbotconf -$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf - GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint @@ -81,7 +67,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -110,7 +96,7 @@ $(PYTOOLS): @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" # Install python packages into the virtual environment. 
-$(PYTOOLS)/%: | $(PYTOOLS) +$(PYTOOLS)/%: $(PYTOOLS) @$(DOCKERPY) $(PIP) install -r requirements.txt CODESPELL = $(PYTOOLS)/codespell @@ -124,18 +110,18 @@ generate: go-generate vanity-import-fix .PHONY: go-generate go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) go-generate/%: DIR=$* -go-generate/%: | $(STRINGER) $(GOTMPL) +go-generate/%: $(STRINGER) $(GOTMPL) @echo "$(GO) generate $(DIR)/..." \ && cd $(DIR) \ && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... .PHONY: vanity-import-fix -vanity-import-fix: | $(PORTO) +vanity-import-fix: $(PORTO) @$(PORTO) --include-internal -w . # Generate go.work file for local development. .PHONY: go-work -go-work: | $(CROSSLINK) +go-work: $(CROSSLINK) $(CROSSLINK) work --root=$(shell pwd) # Build @@ -178,7 +164,7 @@ test/%: COVERAGE_MODE = atomic COVERAGE_PROFILE = coverage.out .PHONY: test-coverage -test-coverage: | $(GOCOVMERGE) +test-coverage: $(GOCOVMERGE) @set -e; \ printf "" > coverage.txt; \ for dir in $(ALL_COVERAGE_MOD_DIRS); do \ @@ -192,40 +178,37 @@ test-coverage: | $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt -# Adding a directory will include all benchmarks in that directory if a filter is not specified. -BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark -benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) -BENCHMARK_FILTER = . -# You can override the filter for a particular directory by adding a rule here. -benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ && cd $* \ - $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix golangci-lint-fix: golangci-lint golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) golangci-lint/%: DIR=$* -golangci-lint/%: | $(GOLANGCI_LINT) +golangci-lint/%: $(GOLANGCI_LINT) @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ && cd $(DIR) \ && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) .PHONY: crosslink -crosslink: | $(CROSSLINK) +crosslink: $(CROSSLINK) @echo "Updating intra-repository dependencies in all go modules" \ && $(CROSSLINK) --root=$(shell pwd) --prune .PHONY: go-mod-tidy go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) go-mod-tidy/%: DIR=$* -go-mod-tidy/%: | crosslink +go-mod-tidy/%: crosslink @echo "$(GO) mod tidy in $(DIR)" \ && cd $(DIR) \ - && $(GO) mod tidy -compat=1.20 + && $(GO) mod tidy -compat=1.21 .PHONY: lint-modules lint-modules: go-mod-tidy @@ -234,23 +217,23 @@ lint-modules: go-mod-tidy lint: misspell lint-modules golangci-lint govulncheck .PHONY: vanity-import-check -vanity-import-check: | $(PORTO) +vanity-import-check: $(PORTO) @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) .PHONY: misspell -misspell: | $(MISSPELL) +misspell: $(MISSPELL) @$(MISSPELL) -w $(ALL_DOCS) .PHONY: govulncheck govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) govulncheck/%: DIR=$* -govulncheck/%: | $(GOVULNCHECK) +govulncheck/%: $(GOVULNCHECK) @echo "govulncheck ./... in $(DIR)" \ && cd $(DIR) \ && $(GOVULNCHECK) ./... 
.PHONY: codespell -codespell: | $(CODESPELL) +codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) .PHONY: license-check @@ -263,15 +246,6 @@ license-check: exit 1; \ fi -DEPENDABOT_CONFIG = .github/dependabot.yml -.PHONY: dependabot-check -dependabot-check: | $(DBOTCONF) - @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) - -.PHONY: dependabot-generate -dependabot-generate: | $(DBOTCONF) - @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) - .PHONY: check-clean-work-tree check-clean-work-tree: @if ! git diff --quiet; then \ @@ -284,13 +258,11 @@ check-clean-work-tree: SEMCONVPKG ?= "semconv/" .PHONY: semconv-generate -semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) +semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" .PHONY: gorelease @@ -302,17 +274,25 @@ gorelease/%:| $(GORELEASE) && $(GORELEASE) \ || echo "" +.PHONY: verify-mods +verify-mods: $(MULTIMOD) + $(MULTIMOD) verify + .PHONY: prerelease -prerelease: | $(MULTIMOD) +prerelease: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + $(MULTIMOD) prerelease -m ${MODSET} COMMIT ?= "HEAD" .PHONY: add-tags -add-tags: | $(MULTIMOD) +add-tags: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} .PHONY: lint-markdown -lint-markdown: +lint-markdown: docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + +.PHONY: verify-readmes +verify-readmes: + ./verify_readmes.sh diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 7766259a5c..657df34710 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -15,7 +15,7 @@ It provides a set of APIs to directly measure performance and behavior of your s |---------|--------------------| | Traces | Stable | | Metrics | Stable | -| Logs | In development[^1] | +| Logs | Beta[^1] | Progress and status specific to this repository is tracked in our [project boards](https://github.com/open-telemetry/opentelemetry-go/projects) @@ -47,23 +47,29 @@ stop ensuring compatibility with these versions in the following manner: Currently, this project supports the following environments. 
-| OS | Go Version | Architecture |
-|---------|------------|--------------|
-| Ubuntu | 1.22 | amd64 |
-| Ubuntu | 1.21 | amd64 |
-| Ubuntu | 1.20 | amd64 |
-| Ubuntu | 1.22 | 386 |
-| Ubuntu | 1.21 | 386 |
-| Ubuntu | 1.20 | 386 |
-| MacOS | 1.22 | amd64 |
-| MacOS | 1.21 | amd64 |
-| MacOS | 1.20 | amd64 |
-| Windows | 1.22 | amd64 |
-| Windows | 1.21 | amd64 |
-| Windows | 1.20 | amd64 |
-| Windows | 1.22 | 386 |
-| Windows | 1.21 | 386 |
-| Windows | 1.20 | 386 |
+| OS | Go Version | Architecture |
+|----------|------------|--------------|
+| Ubuntu | 1.23 | amd64 |
+| Ubuntu | 1.22 | amd64 |
+| Ubuntu | 1.21 | amd64 |
+| Ubuntu | 1.23 | 386 |
+| Ubuntu | 1.22 | 386 |
+| Ubuntu | 1.21 | 386 |
+| Linux | 1.23 | arm64 |
+| Linux | 1.22 | arm64 |
+| Linux | 1.21 | arm64 |
+| macOS 13 | 1.23 | amd64 |
+| macOS 13 | 1.22 | amd64 |
+| macOS 13 | 1.21 | amd64 |
+| macOS | 1.23 | arm64 |
+| macOS | 1.22 | arm64 |
+| macOS | 1.21 | arm64 |
+| Windows | 1.23 | amd64 |
+| Windows | 1.22 | amd64 |
+| Windows | 1.21 | amd64 |
+| Windows | 1.23 | 386 |
+| Windows | 1.22 | 386 |
+| Windows | 1.21 | 386 |

 While this project should work for other systems, no compatibility
 guarantees are made for those systems currently.
@@ -100,12 +106,12 @@ export pipeline to send that telemetry to an observability platform.

 All officially supported exporters for the OpenTelemetry project are contained in
 the [exporters directory](./exporters).

-| Exporter | Metrics | Traces |
-|---------------------------------------|:-------:|:------:|
-| [OTLP](./exporters/otlp/) | ✓ | ✓ |
-| [Prometheus](./exporters/prometheus/) | ✓ | |
-| [stdout](./exporters/stdout/) | ✓ | ✓ |
-| [Zipkin](./exporters/zipkin/) | | ✓ |
+| Exporter | Logs | Metrics | Traces |
+|---------------------------------------|:----:|:-------:|:------:|
+| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ |
+| [Prometheus](./exporters/prometheus/) | | ✓ | |
+| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ |
+| [Zipkin](./exporters/zipkin/) | | | ✓ |

 ## Contributing
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index d2691d0bd8..59992984d4 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -27,6 +27,12 @@ You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org

 You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).

+## Verify changes for contrib repository
+
+If the changes in the main repository are going to affect the contrib repository, verify that they are compatible with it.
+
+Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes.
+
 ## Pre-Release

 First, decide which module sets will be released and update their versions
@@ -63,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
   ```

 - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+  - Make sure the new section is under the comment for the released section, like `<!-- Released section -->`, so it is protected from being overwritten in the future.
 - Update all the appropriate links at the bottom.
 4. Push the changes to upstream and create a Pull Request on GitHub.
diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md new file mode 100644 index 0000000000..5b3da8f14c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/README.md @@ -0,0 +1,3 @@ +# Attribute + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute) diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go index dafe7424df..eef51ebc2a 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/doc.go +++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package attribute provides key and value attributes. package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index fe2bc5766c..318e42fcab 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go index 638c213d59..be9cd922d8 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go index 841b271fb7..f2ba89ce4b 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go index 0656a04e43..d9a22c6502 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go index 1ddf3ce058..3028f9a40f 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index fb6da51450..bff9c7fdbb 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -1,24 +1,14 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

 package attribute // import "go.opentelemetry.io/otel/attribute"

 import (
+	"cmp"
 	"encoding/json"
 	"reflect"
+	"slices"
 	"sort"
-	"sync"
 )

 type (
@@ -26,23 +16,33 @@ type (
 	// immutable set of attributes, with an internal cache for storing
 	// attribute encodings.
 	//
-	// This type supports the Equivalent method of comparison using values of
-	// type Distinct.
+	// This type will remain comparable for backwards compatibility. The
+	// equivalence of Sets across versions is not guaranteed to be stable.
+	// Prior versions may find two Sets to be equal or not when compared
+	// directly (i.e. ==), but subsequent versions may not. Users should use
+	// the Equals method to ensure stable equivalence checking.
+	//
+	// Users should also use the Distinct returned from Equivalent as a map key
+	// instead of a Set directly. In addition to that type providing guarantees
+	// on stable equivalence, it may also provide performance improvements.
 	Set struct {
 		equivalent Distinct
 	}

-	// Distinct wraps a variable-size array of KeyValue, constructed with keys
-	// in sorted order. This can be used as a map key or for equality checking
-	// between Sets.
+	// Distinct is a unique identifier of a Set.
+	//
+	// Distinct is designed to ensure equivalence stability: comparisons
+	// will return the same value across versions. For this reason, Distinct
+	// should always be used as a map key instead of a Set.
 	Distinct struct {
 		iface interface{}
 	}

-	// Sortable implements sort.Interface, used for sorting KeyValue. This is
-	// an exported type to support a memory optimization. A pointer to one of
-	// these is needed for the call to sort.Stable(), which the caller may
-	// provide in order to avoid an allocation. See NewSetWithSortable().
+	// Sortable implements sort.Interface, used for sorting KeyValue.
+	//
+	// Deprecated: This type is no longer used. It was added as a performance
+	// optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no
+	// longer supported by the module).
 	Sortable []KeyValue
 )

@@ -56,12 +56,6 @@ var (
 			iface: [0]KeyValue{},
 		},
 	}
-
-	// sortables is a pool of Sortables used to create Sets with a user does
-	// not provide one.
-	sortables = sync.Pool{
-		New: func() interface{} { return new(Sortable) },
-	}
 )

 // EmptySet returns a reference to a Set with no elements.
@@ -187,13 +181,7 @@ func empty() Set {
 // Except for empty sets, this method adds an additional allocation compared
 // with calls that include a Sortable.
 func NewSet(kvs ...KeyValue) Set {
-	// Check for empty set.
-	if len(kvs) == 0 {
-		return empty()
-	}
-	srt := sortables.Get().(*Sortable)
-	s, _ := NewSetWithSortableFiltered(kvs, srt, nil)
-	sortables.Put(srt)
+	s, _ := NewSetWithFiltered(kvs, nil)
 	return s
 }

@@ -201,12 +189,10 @@ func NewSet(kvs ...KeyValue) Set {
 // NewSetWithSortableFiltered for more details.
 //
 // This call includes a Sortable option as a memory optimization.
-func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
-	// Check for empty set.
-	if len(kvs) == 0 {
-		return empty()
-	}
-	s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
+//
+// Deprecated: Use [NewSet] instead.
+func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set {
+	s, _ := NewSetWithFiltered(kvs, nil)
 	return s
 }

@@ -220,48 +206,12 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
 	if len(kvs) == 0 {
 		return empty(), nil
 	}
-	srt := sortables.Get().(*Sortable)
-	s, filtered := NewSetWithSortableFiltered(kvs, srt, filter)
-	sortables.Put(srt)
-	return s, filtered
-}
-
-// NewSetWithSortableFiltered returns a new Set.
-//
-// Duplicate keys are eliminated by taking the last value. This
-// re-orders the input slice so that unique last-values are contiguous
-// at the end of the slice.
-//
-// This ensures the following:
-//
-//   - Last-value-wins semantics
-//   - Caller sees the reordering, but doesn't lose values
-//   - Repeated call preserve last-value wins.
-//
-// Note that methods are defined on Set, although this returns Set. Callers
-// can avoid memory allocations by:
-//
-//   - allocating a Sortable for use as a temporary in this method
-//   - allocating a Set for storing the return value of this constructor.
-//
-// The result maintains a cache of encoded attributes, by attribute.EncoderID.
-// This value should not be copied after its first use.
-//
-// The second []KeyValue return value is a list of attributes that were
-// excluded by the Filter (if non-nil).
-func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
-	// Check for empty set.
-	if len(kvs) == 0 {
-		return empty(), nil
-	}
-
-	*tmp = kvs

 	// Stable sort so the following de-duplication can implement
 	// last-value-wins semantics.
-	sort.Stable(tmp)
-
-	*tmp = nil
+	slices.SortStableFunc(kvs, func(a, b KeyValue) int {
+		return cmp.Compare(a.Key, b.Key)
+	})

 	position := len(kvs) - 1
 	offset := position - 1
@@ -289,6 +239,35 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
 	return Set{equivalent: computeDistinct(kvs)}, nil
 }

+// NewSetWithSortableFiltered returns a new Set.
+//
+// Duplicate keys are eliminated by taking the last value. This
+// re-orders the input slice so that unique last-values are contiguous
+// at the end of the slice.
+//
+// This ensures the following:
+//
+//   - Last-value-wins semantics
+//   - Caller sees the reordering, but doesn't lose values
+//   - Repeated calls preserve last-value-wins.
+//
+// Note that methods are defined on Set, although this returns Set. Callers
+// can avoid memory allocations by:
+//
+//   - allocating a Sortable for use as a temporary in this method
+//   - allocating a Set for storing the return value of this constructor.
+//
+// The result maintains a cache of encoded attributes, by attribute.EncoderID.
+// This value should not be copied after its first use.
+//
+// The second []KeyValue return value is a list of attributes that were
+// excluded by the Filter (if non-nil).
+//
+// Deprecated: Use [NewSetWithFiltered] instead.
+func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) {
+	return NewSetWithFiltered(kvs, filter)
+}
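+
+// exampleDistinctAsMapKey is an editor's sketch (not part of the upstream
+// change): per the updated Distinct documentation above, use the Distinct
+// returned from Equivalent as a map key rather than the Set itself.
+func exampleDistinctAsMapKey() {
+	seen := make(map[Distinct]bool)
+	s := NewSet(String("k", "v"))
+	seen[s.Equivalent()] = true
+	_ = seen
+}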

 // filteredToFront filters slice in-place using keep function. All KeyValues that need to
 // be removed are moved to the front. All KeyValues that need to be kept are
 // moved (in-order) to the back. The index for the first KeyValue to be kept is
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
index cb21dd5c09..9ea0ecbbd2 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/value.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

 package attribute // import "go.opentelemetry.io/otel/attribute"

@@ -242,15 +231,27 @@ func (v Value) Emit() string {
 	case BOOL:
 		return strconv.FormatBool(v.AsBool())
 	case INT64SLICE:
-		return fmt.Sprint(v.asInt64Slice())
+		j, err := json.Marshal(v.asInt64Slice())
+		if err != nil {
+			return fmt.Sprintf("invalid: %v", v.asInt64Slice())
+		}
+		return string(j)
 	case INT64:
 		return strconv.FormatInt(v.AsInt64(), 10)
 	case FLOAT64SLICE:
-		return fmt.Sprint(v.asFloat64Slice())
+		j, err := json.Marshal(v.asFloat64Slice())
+		if err != nil {
+			return fmt.Sprintf("invalid: %v", v.asFloat64Slice())
+		}
+		return string(j)
 	case FLOAT64:
 		return fmt.Sprint(v.AsFloat64())
 	case STRINGSLICE:
-		return fmt.Sprint(v.asStringSlice())
+		j, err := json.Marshal(v.asStringSlice())
+		if err != nil {
+			return fmt.Sprintf("invalid: %v", v.asStringSlice())
+		}
+		return string(j)
 	case STRING:
 		return v.stringly
 	default:
diff --git a/vendor/go.opentelemetry.io/otel/baggage/README.md b/vendor/go.opentelemetry.io/otel/baggage/README.md
new file mode 100644
index 0000000000..7d798435e1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/README.md
@@ -0,0 +1,3 @@
+# Baggage
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage)
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index 7d27cf77d5..b3569e95e5 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

 package baggage // import "go.opentelemetry.io/otel/baggage"

@@ -19,6 +8,7 @@ import (
 	"fmt"
 	"net/url"
 	"strings"
+	"unicode/utf8"

 	"go.opentelemetry.io/otel/internal/baggage"
 )
@@ -54,9 +44,15 @@ type Property struct {

 // NewKeyProperty returns a new Property for key.
 //
+// The passed key must be a valid, non-empty UTF-8 string.
 // If key is invalid, an error will be returned.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on the Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alpha-numeric values are strongly recommended to be used as Property keys.
 func NewKeyProperty(key string) (Property, error) {
-	if !validateKey(key) {
+	if !validateBaggageName(key) {
 		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
 	}

@@ -67,11 +63,15 @@ func NewKeyProperty(key string) (Property, error) {
 // NewKeyValueProperty returns a new Property for key with value.
 //
 // The passed key must be compliant with W3C Baggage specification.
-// The passed value must be precent-encoded as defined in W3C Baggage specification.
+// The passed value must be percent-encoded as defined in W3C Baggage specification.
 //
 // Notice: Consider using [NewKeyValuePropertyRaw] instead
-// that does not require precent-encoding of the value.
+// that does not require percent-encoding of the value.
 func NewKeyValueProperty(key, value string) (Property, error) {
+	if !validateKey(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
 	if !validateValue(value) {
 		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
 	}
@@ -84,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) {

 // NewKeyValuePropertyRaw returns a new Property for key with value.
 //
-// The passed key must be compliant with W3C Baggage specification.
+// The passed key must be a valid, non-empty UTF-8 string.
+// The passed value must be a valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on the Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alpha-numeric values are strongly recommended to be used as Property keys.
 func NewKeyValuePropertyRaw(key, value string) (Property, error) {
-	if !validateKey(key) {
+	if !validateBaggageName(key) {
 		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
 	}
+	if !validateBaggageValue(value) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}

 	p := Property{
 		key:   key,
@@ -125,12 +134,15 @@ func (p Property) validate() error {
 		return fmt.Errorf("invalid property: %w", err)
 	}

-	if !validateKey(p.key) {
+	if !validateBaggageName(p.key) {
 		return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
 	}
 	if !p.hasValue && p.value != "" {
 		return errFunc(errors.New("inconsistent value"))
 	}
+	if p.hasValue && !validateBaggageValue(p.value) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
+	}
 	return nil
 }
@@ -148,7 +160,15 @@ func (p Property) Value() (string, bool) {

 // String encodes Property into a header string compliant with the W3C Baggage
 // specification.
+// It returns an empty string if the key is not valid per the W3C Baggage
+// specification. This can happen for a UTF-8 key, as it may contain
+// invalid characters.
 func (p Property) String() string {
+	// W3C Baggage specification does not allow percent-encoded keys.
+	if !validateKey(p.key) {
+		return ""
+	}
+
 	if p.hasValue {
 		return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
 	}
@@ -213,9 +233,14 @@ func (p properties) validate() error {
 // String encodes properties into a header string compliant with the W3C Baggage
 // specification.
 func (p properties) String() string {
-	props := make([]string, len(p))
-	for i, prop := range p {
-		props[i] = prop.String()
+	props := make([]string, 0, len(p))
+	for _, prop := range p {
+		s := prop.String()
+
+		// Ignore empty properties.
+		if s != "" {
+			props = append(props, s)
+		}
 	}
 	return strings.Join(props, propertyDelimiter)
 }
@@ -232,14 +257,18 @@ type Member struct {
 	hasData bool
 }

-// NewMemberRaw returns a new Member from the passed arguments.
+// NewMember returns a new Member from the passed arguments.
 //
 // The passed key must be compliant with W3C Baggage specification.
-// The passed value must be precent-encoded as defined in W3C Baggage specification.
+// The passed value must be percent-encoded as defined in W3C Baggage specification.
 //
 // Notice: Consider using [NewMemberRaw] instead
-// that does not require precent-encoding of the value.
+// that does not require percent-encoding of the value.
 func NewMember(key, value string, props ...Property) (Member, error) {
+	if !validateKey(key) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
 	if !validateValue(value) {
 		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
 	}
@@ -252,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) {

 // NewMemberRaw returns a new Member from the passed arguments.
 //
-// The passed key must be compliant with W3C Baggage specification.
+// The passed key must be a valid, non-empty UTF-8 string.
+// The passed value must be a valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on the baggage key.
+// For example, the W3C Baggage specification restricts the baggage keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alpha-numeric values are strongly recommended to be used as baggage keys.
 func NewMemberRaw(key, value string, props ...Property) (Member, error) {
 	m := Member{
 		key:      key,
@@ -304,19 +339,45 @@ func parseMember(member string) (Member, error) {
 		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
 	}

-	val := strings.TrimSpace(v)
-	if !validateValue(val) {
+	rawVal := strings.TrimSpace(v)
+	if !validateValue(rawVal) {
 		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
 	}

-	// Decode a precent-encoded value.
-	value, err := url.PathUnescape(val)
+	// Decode a percent-encoded value.
+	unescapeVal, err := url.PathUnescape(rawVal)
 	if err != nil {
-		return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err)
+		return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err)
 	}
+
+	value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
 	return Member{key: key, value: value, properties: props, hasData: true}, nil
 }
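+
+// exampleParseReplacement is an editor's sketch (not part of the upstream
+// change): "%80" passes value validation and url.PathUnescape, but decodes
+// to an invalid UTF-8 byte, which parseMember replaces with U+FFFD.
+func exampleParseReplacement() {
+	b, _ := Parse("key=%80")
+	fmt.Println(b.Member("key").Value()) // prints the single rune '�'
+}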
+// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'.
+func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string {
+	if utf8.ValidString(unescapeVal) {
+		return unescapeVal
+	}
+	// W3C baggage spec:
+	// https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69
+
+	var b strings.Builder
+	b.Grow(cap)
+	for i := 0; i < len(unescapeVal); {
+		r, size := utf8.DecodeRuneInString(unescapeVal[i:])
+		if r == utf8.RuneError && size == 1 {
+			// Invalid UTF-8 sequence found, replace it with '�'.
+			_, _ = b.WriteString("�")
+		} else {
+			_, _ = b.WriteRune(r)
+		}
+		i += size
+	}
+
+	return b.String()
+}
+
 // validate ensures m conforms to the W3C Baggage specification.
 // A key must be an ASCII string, returning an error otherwise.
 func (m Member) validate() error {
@@ -324,9 +385,12 @@ func (m Member) validate() error {
 		return fmt.Errorf("%w: %q", errInvalidMember, m)
 	}

-	if !validateKey(m.key) {
+	if !validateBaggageName(m.key) {
 		return fmt.Errorf("%w: %q", errInvalidKey, m.key)
 	}
+	if !validateBaggageValue(m.value) {
+		return fmt.Errorf("%w: %q", errInvalidValue, m.value)
+	}
 	return m.properties.validate()
 }
@@ -341,13 +405,18 @@ func (m Member) Properties() []Property { return m.properties.Copy() }

 // String encodes Member into a header string compliant with the W3C Baggage
 // specification.
+// It returns an empty string if the key is not valid per the W3C Baggage
+// specification. This can happen for a UTF-8 key, as it may contain
+// invalid characters.
 func (m Member) String() string {
-	// A key is just an ASCII string. A value is restricted to be
-	// US-ASCII characters excluding CTLs, whitespace,
-	// DQUOTE, comma, semicolon, and backslash.
-	s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value))
+	// W3C Baggage specification does not allow percent-encoded keys.
+	if !validateKey(m.key) {
+		return ""
+	}
+
+	s := m.key + keyValueDelimiter + valueEscape(m.value)
 	if len(m.properties) > 0 {
-		s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
+		s += propertyDelimiter + m.properties.String()
 	}
 	return s
 }
@@ -458,7 +527,7 @@ func (b Baggage) Member(key string) Member {
 }

 // Members returns all the baggage list-members.
-// The order of the returned list-members does not have significance.
+// The order of the returned list-members is not significant.
 //
 // The returned members are not validated, as we assume the validation happened
 // when they were added to the Baggage.
@@ -479,8 +548,8 @@ func (b Baggage) Members() []Member {
 	return members
 }

-// SetMember returns a copy the Baggage with the member included. If the
-// baggage contains a Member with the same key the existing Member is
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key, the existing Member is
 // replaced.
 //
 // If member is invalid according to the W3C Baggage specification, an error
@@ -538,14 +607,22 @@ func (b Baggage) Len() int {

 // String encodes Baggage into a header string compliant with the W3C Baggage
 // specification.
+// It ignores members whose keys are not valid per the W3C Baggage
+// specification. This can happen for a UTF-8 key, as it may contain
+// invalid characters.
 func (b Baggage) String() string {
 	members := make([]string, 0, len(b.list))
 	for k, v := range b.list {
-		members = append(members, Member{
+		s := Member{
 			key:        k,
 			value:      v.Value,
 			properties: fromInternalProperties(v.Properties),
-		}.String())
+		}.String()
+
+		// Ignore empty members.
+		if s != "" {
+			members = append(members, s)
+		}
 	}
 	return strings.Join(members, listDelimiter)
 }
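+
+// exampleStringDropsInvalidKeys is an editor's sketch (not part of the
+// upstream change): NewMemberRaw accepts any valid UTF-8 key, but
+// Baggage.String drops members whose keys are not valid W3C Baggage keys.
+func exampleStringDropsInvalidKeys() {
+	ok, _ := NewMemberRaw("userID", "42")
+	bad, _ := NewMemberRaw("user id", "42") // space: valid UTF-8, not a W3C key
+	b, _ := New(ok, bad)
+	fmt.Println(b.String()) // userID=42
+}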
@@ -616,11 +693,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
 		return
 	}

-	// Decode a precent-encoded value.
-	value, err := url.PathUnescape(s[valueStart:valueEnd])
+	// Decode a percent-encoded value.
+	rawVal := s[valueStart:valueEnd]
+	unescapeVal, err := url.PathUnescape(rawVal)
 	if err != nil {
 		return
 	}
+	value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)

 	ok = true
 	p.key = s[keyStart:keyEnd]
@@ -641,6 +720,113 @@ func skipSpace(s string, offset int) int {
 	return i
 }

+var safeKeyCharset = [utf8.RuneSelf]bool{
+	// 0x23 to 0x27
+	'#':  true,
+	'$':  true,
+	'%':  true,
+	'&':  true,
+	'\'': true,
+
+	// 0x30 to 0x39
+	'0': true,
+	'1': true,
+	'2': true,
+	'3': true,
+	'4': true,
+	'5': true,
+	'6': true,
+	'7': true,
+	'8': true,
+	'9': true,
+
+	// 0x41 to 0x5a
+	'A': true,
+	'B': true,
+	'C': true,
+	'D': true,
+	'E': true,
+	'F': true,
+	'G': true,
+	'H': true,
+	'I': true,
+	'J': true,
+	'K': true,
+	'L': true,
+	'M': true,
+	'N': true,
+	'O': true,
+	'P': true,
+	'Q': true,
+	'R': true,
+	'S': true,
+	'T': true,
+	'U': true,
+	'V': true,
+	'W': true,
+	'X': true,
+	'Y': true,
+	'Z': true,
+
+	// 0x5e to 0x7a
+	'^': true,
+	'_': true,
+	'`': true,
+	'a': true,
+	'b': true,
+	'c': true,
+	'd': true,
+	'e': true,
+	'f': true,
+	'g': true,
+	'h': true,
+	'i': true,
+	'j': true,
+	'k': true,
+	'l': true,
+	'm': true,
+	'n': true,
+	'o': true,
+	'p': true,
+	'q': true,
+	'r': true,
+	's': true,
+	't': true,
+	'u': true,
+	'v': true,
+	'w': true,
+	'x': true,
+	'y': true,
+	'z': true,
+
+	// remainder
+	'!': true,
+	'*': true,
+	'+': true,
+	'-': true,
+	'.': true,
+	'|': true,
+	'~': true,
+}
+
+// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
+// A Baggage name is a valid, non-empty UTF-8 string.
+func validateBaggageName(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+
+	return utf8.ValidString(s)
+}
+
+// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value.
+// A Baggage value is a valid UTF-8 string; the empty string is also valid.
+func validateBaggageValue(s string) bool {
+	return utf8.ValidString(s)
+}
+
+// validateKey checks if the string is a valid W3C Baggage key.
 func validateKey(s string) bool {
 	if len(s) == 0 {
 		return false
@@ -656,19 +842,10 @@ func validateKey(s string) bool {
 }

 func validateKeyChar(c int32) bool {
-	return (c >= 0x23 && c <= 0x27) ||
-		(c >= 0x30 && c <= 0x39) ||
-		(c >= 0x41 && c <= 0x5a) ||
-		(c >= 0x5e && c <= 0x7a) ||
-		c == 0x21 ||
-		c == 0x2a ||
-		c == 0x2b ||
-		c == 0x2d ||
-		c == 0x2e ||
-		c == 0x7c ||
-		c == 0x7e
+	return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c]
 }

+// validateValue checks if the string is a valid W3C Baggage value.
func validateValue(s string) bool { for _, c := range s { if !validateValueChar(c) { @@ -679,12 +856,109 @@ func validateValue(s string) bool { return true } +var safeValueCharset = [utf8.RuneSelf]bool{ + '!': true, // 0x21 + + // 0x23 to 0x2b + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + + // 0x2d to 0x3a + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + + // 0x3c to 0x5b + '<': true, // 0x3C + '=': true, // 0x3D + '>': true, // 0x3E + '?': true, // 0x3F + '@': true, // 0x40 + 'A': true, // 0x41 + 'B': true, // 0x42 + 'C': true, // 0x43 + 'D': true, // 0x44 + 'E': true, // 0x45 + 'F': true, // 0x46 + 'G': true, // 0x47 + 'H': true, // 0x48 + 'I': true, // 0x49 + 'J': true, // 0x4A + 'K': true, // 0x4B + 'L': true, // 0x4C + 'M': true, // 0x4D + 'N': true, // 0x4E + 'O': true, // 0x4F + 'P': true, // 0x50 + 'Q': true, // 0x51 + 'R': true, // 0x52 + 'S': true, // 0x53 + 'T': true, // 0x54 + 'U': true, // 0x55 + 'V': true, // 0x56 + 'W': true, // 0x57 + 'X': true, // 0x58 + 'Y': true, // 0x59 + 'Z': true, // 0x5A + '[': true, // 0x5B + + // 0x5d to 0x7e + ']': true, // 0x5D + '^': true, // 0x5E + '_': true, // 0x5F + '`': true, // 0x60 + 'a': true, // 0x61 + 'b': true, // 0x62 + 'c': true, // 0x63 + 'd': true, // 0x64 + 'e': true, // 0x65 + 'f': true, // 0x66 + 'g': true, // 0x67 + 'h': true, // 0x68 + 'i': true, // 0x69 + 'j': true, // 0x6A + 'k': true, // 0x6B + 'l': true, // 0x6C + 'm': true, // 0x6D + 'n': true, // 0x6E + 'o': true, // 0x6F + 'p': true, // 0x70 + 'q': true, // 0x71 + 'r': true, // 0x72 + 's': true, // 0x73 + 't': true, // 0x74 + 'u': true, // 0x75 + 'v': true, // 0x76 + 'w': true, // 0x77 + 'x': true, // 0x78 + 'y': true, // 0x79 + 'z': true, // 0x7A + '{': true, // 0x7B + '|': true, // 0x7C + '}': true, // 0x7D + '~': true, // 0x7E +} + func validateValueChar(c int32) bool { - return c == 0x21 || - (c >= 0x23 && c <= 0x2b) || - (c >= 0x2d && c <= 0x3a) || - (c >= 0x3c && c <= 0x5b) || - (c >= 0x5d && c <= 0x7e) + return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c] } // valueEscape escapes the string so it can be safely placed inside a baggage value, diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go index 24b34b7564..a572461a05 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/context.go +++ b/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
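
Illustrative sketch (not part of the patch): with the baggage changes above, percent-encoded input that decodes to invalid UTF-8 is sanitized with U+FFFD rather than rejected. This assumes parsing routes decoded member values through replaceInvalidUTF8Sequences, as the parsePropertyInternal hunk does for property values:

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/baggage"
	)

	func main() {
		// "%C3%28" percent-decodes to the bytes 0xC3 0x28, which are not
		// valid UTF-8: 0xC3 opens a 2-byte sequence that 0x28 cannot close.
		b, err := baggage.Parse("key=%C3%28")
		if err != nil {
			panic(err)
		}
		// The invalid byte is replaced with U+FFFD, so this prints "�(".
		fmt.Println(b.Member("key").Value())
	}
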
+// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go index 4545100df6..b51d87cab7 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/doc.go +++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package baggage provides functionality for storing and retrieving diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md new file mode 100644 index 0000000000..24c52b387d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/README.md @@ -0,0 +1,3 @@ +# Codes + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes) diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 587ebae4e3..2acbac3546 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package codes // import "go.opentelemetry.io/otel/codes" @@ -94,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return fmt.Errorf("invalid code: %q", ci) } - *c = Code(ci) + *c = Code(ci) // nolint: gosec // Bit size of 32 check above. return nil } return fmt.Errorf("invalid code: %q", string(b)) diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go index 4e328fbb4b..ee8db448b8 100644 --- a/vendor/go.opentelemetry.io/otel/codes/doc.go +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
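
Illustrative sketch (not part of the patch): the UnmarshalJSON hunk above parses a numeric JSON code, and the new gosec annotation documents that the 32-bit range was already checked before the conversion:

	package main

	import (
		"encoding/json"
		"fmt"

		"go.opentelemetry.io/otel/codes"
	)

	func main() {
		var c codes.Code
		// A numeric status code round-trips through the checked conversion.
		if err := json.Unmarshal([]byte("2"), &c); err != nil {
			panic(err)
		}
		fmt.Println(c) // Ok
	}
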
+// SPDX-License-Identifier: Apache-2.0 /* Package codes defines the canonical error codes used by OpenTelemetry. diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go index 36d7c24e88..921f85961a 100644 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package otel provides global access to the OpenTelemetry API. The subpackages of @@ -28,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace. To read more about metrics, see go.opentelemetry.io/otel/metric. +To read more about logs, see go.opentelemetry.io/otel/log. + To read more about propagation, see go.opentelemetry.io/otel/propagation and go.opentelemetry.io/otel/baggage. */ diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go index 72fad85412..67414c71e0 100644 --- a/vendor/go.opentelemetry.io/otel/error_handler.go +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh index 9a58fb1d37..93e80ea306 100644 --- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh +++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -1,18 +1,7 @@ #!/usr/bin/env bash # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
+# SPDX-License-Identifier: Apache-2.0
 
 set -euo pipefail
 
diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go
index 4115fe3bbb..07623b6791 100644
--- a/vendor/go.opentelemetry.io/otel/handler.go
+++ b/vendor/go.opentelemetry.io/otel/handler.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package otel // import "go.opentelemetry.io/otel"
 
@@ -18,12 +7,8 @@ import (
 	"go.opentelemetry.io/otel/internal/global"
 )
 
-var (
-	// Compile-time check global.ErrDelegator implements ErrorHandler.
-	_ ErrorHandler = (*global.ErrDelegator)(nil)
-	// Compile-time check global.ErrLogger implements ErrorHandler.
-	_ ErrorHandler = (*global.ErrLogger)(nil)
-)
+// Compile-time check global.ErrDelegator implements ErrorHandler.
+var _ ErrorHandler = (*global.ErrDelegator)(nil)
 
 // GetErrorHandler returns the global ErrorHandler instance.
 //
@@ -44,5 +29,5 @@ func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() }
 // delegate errors to h.
 func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) }
 
-// Handle is a convenience function for ErrorHandler().Handle(err).
-func Handle(err error) { global.Handle(err) }
+// Handle is a convenience function for GetErrorHandler().Handle(err).
+func Handle(err error) { global.GetErrorHandler().Handle(err) }
diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
index 622c3ee3f2..822d847947 100644
--- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 /*
 Package attribute provides several helper functions for some commonly used
@@ -25,33 +14,33 @@ import (
 
 // BoolSliceValue converts a bool slice into an array with same elements as slice.
 func BoolSliceValue(v []bool) interface{} {
 	var zero bool
-	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
-	copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v)
-	return cp.Elem().Interface()
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+	reflect.Copy(cp, reflect.ValueOf(v))
+	return cp.Interface()
 }
 
 // Int64SliceValue converts an int64 slice into an array with same elements as slice.
 func Int64SliceValue(v []int64) interface{} {
 	var zero int64
-	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
-	copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
-	return cp.Elem().Interface()
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+	reflect.Copy(cp, reflect.ValueOf(v))
+	return cp.Interface()
 }
 
 // Float64SliceValue converts a float64 slice into an array with same elements as slice.
 func Float64SliceValue(v []float64) interface{} {
 	var zero float64
-	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
-	copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v)
-	return cp.Elem().Interface()
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+	reflect.Copy(cp, reflect.ValueOf(v))
+	return cp.Interface()
 }
 
 // StringSliceValue converts a string slice into an array with same elements as slice.
 func StringSliceValue(v []string) interface{} {
 	var zero string
-	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
-	copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
-	return cp.Elem().Interface()
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+	reflect.Copy(cp, reflect.ValueOf(v))
+	return cp.Interface()
 }
 
 // AsBoolSlice converts a bool array into a slice with the same elements as the array.
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
index b96e5408e6..b4f85f44a9 100644
--- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 /*
 Package baggage provides base types and functionality to store and retrieve
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
index 4469700d9c..3aea9c491f 100644
--- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
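
Standalone sketch of the conversion pattern these helpers now share (illustrative, not part of the patch): reflect.New yields an addressable array value, so reflect.Copy can fill it directly instead of slicing the array first:

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		v := []string{"a", "b", "c"}
		// Allocate a [3]string and take its addressable element value.
		arr := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(""))).Elem()
		// reflect.Copy copies min(len(dst), len(src)) elements, like copy().
		reflect.Copy(arr, reflect.ValueOf(v))
		fmt.Println(arr.Interface()) // [a b c]
	}
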
+// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/internal/baggage" diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go index f532f07e9e..4259f0320d 100644 --- a/vendor/go.opentelemetry.io/otel/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/internal" diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go index 5e9b830479..c657ff8e75 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -1,38 +1,13 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" import ( "log" - "os" "sync/atomic" ) -var ( - // GlobalErrorHandler provides an ErrorHandler that can be used - // throughout an OpenTelemetry instrumented project. When a user - // specified ErrorHandler is registered (`SetErrorHandler`) all calls to - // `Handle` and will be delegated to the registered ErrorHandler. - GlobalErrorHandler = defaultErrorHandler() - - // Compile-time check that delegator implements ErrorHandler. - _ ErrorHandler = (*ErrDelegator)(nil) - // Compile-time check that errLogger implements ErrorHandler. - _ ErrorHandler = (*ErrLogger)(nil) -) - // ErrorHandler handles irremediable events. type ErrorHandler interface { // Handle handles any error deemed irremediable by an OpenTelemetry @@ -44,59 +19,18 @@ type ErrDelegator struct { delegate atomic.Pointer[ErrorHandler] } -func (d *ErrDelegator) Handle(err error) { - d.getDelegate().Handle(err) -} +// Compile-time check that delegator implements ErrorHandler. +var _ ErrorHandler = (*ErrDelegator)(nil) -func (d *ErrDelegator) getDelegate() ErrorHandler { - return *d.delegate.Load() +func (d *ErrDelegator) Handle(err error) { + if eh := d.delegate.Load(); eh != nil { + (*eh).Handle(err) + return + } + log.Print(err) } // setDelegate sets the ErrorHandler delegate. 
func (d *ErrDelegator) setDelegate(eh ErrorHandler) { d.delegate.Store(&eh) } - -func defaultErrorHandler() *ErrDelegator { - d := &ErrDelegator{} - d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) - return d -} - -// ErrLogger logs errors if no delegate is set, otherwise they are delegated. -type ErrLogger struct { - l *log.Logger -} - -// Handle logs err if no delegate is set, otherwise it is delegated. -func (h *ErrLogger) Handle(err error) { - h.l.Print(err) -} - -// GetErrorHandler returns the global ErrorHandler instance. -// -// The default ErrorHandler instance returned will log all errors to STDERR -// until an override ErrorHandler is set with SetErrorHandler. All -// ErrorHandler returned prior to this will automatically forward errors to -// the set instance instead of logging. -// -// Subsequent calls to SetErrorHandler after the first will not forward errors -// to the new ErrorHandler for prior returned instances. -func GetErrorHandler() ErrorHandler { - return GlobalErrorHandler -} - -// SetErrorHandler sets the global ErrorHandler to h. -// -// The first time this is called all ErrorHandler previously returned from -// GetErrorHandler will send errors to h instead of the default logging -// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not -// delegate errors to h. -func SetErrorHandler(h ErrorHandler) { - GlobalErrorHandler.setDelegate(h) -} - -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { - GetErrorHandler().Handle(err) -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index ebb13c2067..3a0cc42f6a 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -292,6 +281,32 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco } } +type sfGauge struct { + embedded.Float64Gauge + + name string + opts []metric.Float64GaugeOption + + delegate atomic.Value // metric.Float64Gauge +} + +var _ metric.Float64Gauge = (*sfGauge)(nil) + +func (i *sfGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Gauge).Record(ctx, x, opts...) + } +} + type siCounter struct { embedded.Int64Counter @@ -369,3 +384,29 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record ctr.(metric.Int64Histogram).Record(ctx, x, opts...) 
} } + +type siGauge struct { + embedded.Int64Gauge + + name string + opts []metric.Int64GaugeOption + + delegate atomic.Value // metric.Int64Gauge +} + +var _ metric.Int64Gauge = (*siGauge)(nil) + +func (i *siGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Gauge).Record(ctx, x, opts...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go index c6f305a2b7..adbca7d347 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -23,17 +12,20 @@ import ( "github.com/go-logr/stdr" ) -// globalLogger is the logging interface used within the otel api and sdk provide details of the internals. +// globalLogger holds a reference to the [logr.Logger] used within +// go.opentelemetry.io/otel. // // The default logger uses stdr which is backed by the standard `log.Logger` // interface. This logger will only show messages at the Error Level. -var globalLogger atomic.Pointer[logr.Logger] +var globalLogger = func() *atomic.Pointer[logr.Logger] { + l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) -func init() { - SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) -} + p := new(atomic.Pointer[logr.Logger]) + p.Store(&l) + return p +}() -// SetLogger overrides the globalLogger with l. +// SetLogger sets the global Logger to l. // // To see Warn messages use a logger with `l.V(1).Enabled() == true` // To see Info messages use a logger with `l.V(4).Enabled() == true` @@ -42,28 +34,29 @@ func SetLogger(l logr.Logger) { globalLogger.Store(&l) } -func getLogger() logr.Logger { +// GetLogger returns the global logger. +func GetLogger() logr.Logger { return *globalLogger.Load() } // Info prints messages about the general state of the API or SDK. // This should usually be less than 5 messages a minute. func Info(msg string, keysAndValues ...interface{}) { - getLogger().V(4).Info(msg, keysAndValues...) + GetLogger().V(4).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. func Error(err error, msg string, keysAndValues ...interface{}) { - getLogger().Error(err, msg, keysAndValues...) + GetLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. func Debug(msg string, keysAndValues ...interface{}) { - getLogger().V(8).Info(msg, keysAndValues...) 
+ GetLogger().V(8).Info(msg, keysAndValues...) } // Warn prints messages about warnings in the API or SDK. // Not an error but is likely more important than an informational event. func Warn(msg string, keysAndValues ...interface{}) { - getLogger().V(1).Info(msg, keysAndValues...) + GetLogger().V(1).Info(msg, keysAndValues...) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index 7ed61c0e25..cfd1df9bfa 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -76,6 +65,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), } if p.meters == nil { @@ -175,6 +165,17 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti return i, nil } +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Gauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Int64ObservableCounter(name, options...) @@ -241,6 +242,17 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram return i, nil } +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Gauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Float64ObservableCounter(name, options...) diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go index 06bac35c2f..38560ff991 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
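
Illustrative usage of the synchronous Gauge support wired into the global meter above (not part of the patch; the instrument name and unit are hypothetical):

	package main

	import (
		"context"

		"go.opentelemetry.io/otel"
		"go.opentelemetry.io/otel/metric"
	)

	func main() {
		meter := otel.Meter("example/meter")
		// Until a concrete MeterProvider is installed, this returns the
		// sfGauge placeholder above, which delegates once one is set.
		gauge, err := meter.Float64Gauge("cpu.fan.speed", metric.WithUnit("{rpm}"))
		if err != nil {
			panic(err)
		}
		gauge.Record(context.Background(), 1500)
	}
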
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package global // import "go.opentelemetry.io/otel/internal/global"
 
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go
index 386c8bfdc0..204ea142a5 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/state.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package global // import "go.opentelemetry.io/otel/internal/global"
 
@@ -25,6 +14,10 @@ import (
 )
 
 type (
+	errorHandlerHolder struct {
+		eh ErrorHandler
+	}
+
 	tracerProviderHolder struct {
 		tp trace.TracerProvider
 	}
@@ -39,15 +32,59 @@ type (
 )
 
 var (
+	globalErrorHandler  = defaultErrorHandler()
 	globalTracer        = defaultTracerValue()
 	globalPropagators   = defaultPropagatorsValue()
 	globalMeterProvider = defaultMeterProvider()
 
+	delegateErrorHandlerOnce      sync.Once
 	delegateTraceOnce             sync.Once
 	delegateTextMapPropagatorOnce sync.Once
 	delegateMeterOnce             sync.Once
 )
 
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandler returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for prior returned instances.
+func GetErrorHandler() ErrorHandler {
+	return globalErrorHandler.Load().(errorHandlerHolder).eh
+}
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called all ErrorHandler previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) {
+	current := GetErrorHandler()
+
+	if _, cOk := current.(*ErrDelegator); cOk {
+		if _, ehOk := h.(*ErrDelegator); ehOk && current == h {
+			// Do not assign the default ErrDelegator's delegate to
+			// itself.
+ Error( + errors.New("no ErrorHandler delegate configured"), + "ErrorHandler remains its current value.", + ) + return + } + } + + delegateErrorHandlerOnce.Do(func() { + if def, ok := current.(*ErrDelegator); ok { + def.setDelegate(h) + } + }) + globalErrorHandler.Store(errorHandlerHolder{eh: h}) +} + // TracerProvider is the internal implementation for global.TracerProvider. func TracerProvider() trace.TracerProvider { return globalTracer.Load().(tracerProviderHolder).tp @@ -137,6 +174,12 @@ func SetMeterProvider(mp metric.MeterProvider) { globalMeterProvider.Store(meterProviderHolder{mp: mp}) } +func defaultErrorHandler() *atomic.Value { + v := &atomic.Value{} + v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) + return v +} + func defaultTracerValue() *atomic.Value { v := &atomic.Value{} v.Store(tracerProviderHolder{tp: &tracerProvider{}}) diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 3f61ec12a3..e31f442b48 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -97,6 +86,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), } if p.tracers == nil { @@ -112,10 +102,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct { - name string - version string -} +type il struct{ name, version, schema string } // tracer is a placeholder for a trace.Tracer. // @@ -193,6 +180,9 @@ func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} // AddEvent does nothing. func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} +// AddLink does nothing. +func (nonRecordingSpan) AddLink(trace.Link) {} + // SetName does nothing. func (nonRecordingSpan) SetName(string) {} diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index e07e794000..9b1da2c02b 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
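
Illustrative sketch of the delegation flow above (not part of the patch): before SetErrorHandler is called, ErrDelegator.Handle falls back to log.Print; afterwards, errors reach the registered handler:

	package main

	import (
		"errors"
		"log"

		"go.opentelemetry.io/otel"
	)

	func main() {
		// Handled by the default ErrDelegator, which log.Prints the error.
		otel.Handle(errors.New("before delegation"))

		// The first SetErrorHandler call also becomes the delegate of the
		// default ErrDelegator, so previously returned handlers forward to it.
		otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
			log.Printf("otel: %v", err)
		}))
		otel.Handle(errors.New("after delegation"))
	}
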
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/internal" @@ -35,7 +24,8 @@ func Int64ToRaw(i int64) uint64 { } func RawToInt64(r uint64) int64 { - return int64(r) + // Assumes original was a valid int64 (overflow not checked). + return int64(r) // nolint: gosec } func Float64ToRaw(f float64) uint64 { @@ -47,9 +37,11 @@ func RawToFloat64(r uint64) float64 { } func RawPtrToFloat64Ptr(r *uint64) *float64 { - return (*float64)(unsafe.Pointer(r)) + // Assumes original was a valid *float64 (overflow not checked). + return (*float64)(unsafe.Pointer(r)) // nolint: gosec } func RawPtrToInt64Ptr(r *uint64) *int64 { - return (*int64)(unsafe.Pointer(r)) + // Assumes original was a valid *int64 (overflow not checked). + return (*int64)(unsafe.Pointer(r)) // nolint: gosec } diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go index c4f8acd5d8..6de7f2e4d8 100644 --- a/vendor/go.opentelemetry.io/otel/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go index f955171951..1e6473b32f 100644 --- a/vendor/go.opentelemetry.io/otel/metric.go +++ b/vendor/go.opentelemetry.io/otel/metric.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md new file mode 100644 index 0000000000..0cf902e01f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/README.md @@ -0,0 +1,3 @@ +# Metric API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric) diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index 072baa8e8d..cf23db7780 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -50,7 +39,7 @@ type Float64ObservableCounter interface { } // Float64ObservableCounterConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableCounterConfig struct { description string unit string @@ -108,7 +97,7 @@ type Float64ObservableUpDownCounter interface { } // Float64ObservableUpDownCounterConfig contains options for asynchronous -// counter instruments that record int64 values. +// counter instruments that record float64 values. type Float64ObservableUpDownCounterConfig struct { description string unit string @@ -165,7 +154,7 @@ type Float64ObservableGauge interface { } // Float64ObservableGaugeConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableGaugeConfig struct { description string unit string diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index 9bd6ebf020..c82ba5324e 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0
 
 package metric // import "go.opentelemetry.io/otel/metric"
 
diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go
index 778ad2d748..d9e3b13e4d 100644
--- a/vendor/go.opentelemetry.io/otel/metric/config.go
+++ b/vendor/go.opentelemetry.io/otel/metric/config.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package metric // import "go.opentelemetry.io/otel/metric"
 
diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go
index 54716e13b3..f153745b00 100644
--- a/vendor/go.opentelemetry.io/otel/metric/doc.go
+++ b/vendor/go.opentelemetry.io/otel/metric/doc.go
@@ -1,16 +1,5 @@
 // Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 /*
 Package metric provides the OpenTelemetry API used to measure metrics about
@@ -68,6 +57,23 @@ asynchronous measurement, a Gauge ([Int64ObservableGauge] and
 See the [OpenTelemetry documentation] for more information about instruments
 and their intended use.
 
+# Instrument Name
+
+OpenTelemetry defines an [instrument name syntax] that restricts what
+instrument names are allowed.
+
+Instrument names should ...
+
+  - Not be empty.
+  - Have an alphabetic character as their first letter.
+  - Have any character after the first be alphanumeric, ‘_’, ‘.’,
+    ‘-’, or ‘/’.
+  - Have a maximum length of 255 characters.
+
+To ensure compatibility with observability platforms, all instruments created
+need to conform to this syntax. Not all implementations of the API will validate
+these names; it is the caller's responsibility to ensure compliance.
+
 # Measurements
 
 Measurements are made by recording values and information about the values with
@@ -164,6 +170,7 @@ It is strongly recommended that authors only embed
 That implementation is the only one OpenTelemetry authors can guarantee will
 fully implement all the API interfaces when a user updates their API.
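
A hedged sketch of the name syntax documented above (illustrative; the regexp is an assumption derived from the listed rules, and the API itself may not enforce them):

	package main

	import (
		"fmt"
		"regexp"
	)

	// instrumentName encodes the documented rules: non-empty, alphabetic first
	// character, then alphanumerics, '_', '.', '-', or '/', and at most 255
	// characters in total.
	var instrumentName = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_./-]{0,254}$`)

	func main() {
		fmt.Println(instrumentName.MatchString("http.server.duration")) // true
		fmt.Println(instrumentName.MatchString("1bad-name"))            // false
	}
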
+[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax [OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ [GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider */ diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md new file mode 100644 index 0000000000..1f6e0efa73 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md @@ -0,0 +1,3 @@ +# Metric Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded) diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go index ae0bdbd2e6..1a9dc68093 100644 --- a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package embedded provides interfaces embedded within the [OpenTelemetry // metric API]. @@ -113,6 +102,16 @@ type Float64Counter interface{ float64Counter() } // the API package). type Float64Histogram interface{ float64Histogram() } +// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64Gauge interface{ float64Gauge() } + // Float64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. // @@ -185,6 +184,16 @@ type Int64Counter interface{ int64Counter() } // the API package). type Int64Histogram interface{ int64Histogram() } +// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience +// a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64Gauge interface{ int64Gauge() } + // Int64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. 
// diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index be89cd5334..ea52e40233 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -27,6 +16,7 @@ type InstrumentOption interface { Int64CounterOption Int64UpDownCounterOption Int64HistogramOption + Int64GaugeOption Int64ObservableCounterOption Int64ObservableUpDownCounterOption Int64ObservableGaugeOption @@ -34,6 +24,7 @@ type InstrumentOption interface { Float64CounterOption Float64UpDownCounterOption Float64HistogramOption + Float64GaugeOption Float64ObservableCounterOption Float64ObservableUpDownCounterOption Float64ObservableGaugeOption @@ -62,6 +53,11 @@ func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.description = string(o) return c @@ -92,6 +88,11 @@ func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.description = string(o) return c @@ -127,6 +128,11 @@ func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.unit = string(o) return c @@ -157,6 +163,11 @@ func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.unit = string(o) return c diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 2520bc74af..14e08c24a4 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -58,17 +47,41 @@ type Meter interface { // Int64Counter returns a new Int64Counter instrument identified by name // and configured with options. The instrument is used to synchronously // record increasing int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + + // Int64Gauge returns a new Int64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) + // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -78,7 +91,12 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. 
The // instrument is used to asynchronously record int64 measurements once per @@ -88,7 +106,12 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous int64 measurements once per a @@ -98,23 +121,51 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) // Float64Counter returns a new Float64Counter instrument identified by // name and configured with options. The instrument is used to // synchronously record increasing float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + + // Float64Gauge returns a new Float64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous float64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) + // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. 
The // instrument is used to asynchronously record increasing float64 @@ -124,7 +175,12 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and // configured with options. The instrument is used to asynchronously record @@ -134,7 +190,12 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous float64 measurements once per a @@ -144,6 +205,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) // RegisterCallback registers f to be called during the collection of a @@ -189,6 +254,7 @@ type Observer interface { // ObserveFloat64 records the float64 value for obsrv. ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) + // ObserveInt64 records the int64 value for obsrv. ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) } diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md new file mode 100644 index 0000000000..bb89694356 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/noop/README.md @@ -0,0 +1,3 @@ +# Metric Noop + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop) diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go new file mode 100644 index 0000000000..ca6fcbdc09 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -0,0 +1,281 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package noop provides an implementation of the OpenTelemetry metric API that +// produces no telemetry and minimizes the computation resources used.
+// +// Using this package to implement the OpenTelemetry metric API will +// effectively disable OpenTelemetry. +// +// This implementation can be embedded in other implementations of the +// OpenTelemetry metric API. Doing so will mean the implementation defaults to +// no operation for methods it does not implement. +package noop // import "go.opentelemetry.io/otel/metric/noop" + +import ( + "context" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +var ( + // Compile-time check this implements the OpenTelemetry API. + + _ metric.MeterProvider = MeterProvider{} + _ metric.Meter = Meter{} + _ metric.Observer = Observer{} + _ metric.Registration = Registration{} + _ metric.Int64Counter = Int64Counter{} + _ metric.Float64Counter = Float64Counter{} + _ metric.Int64UpDownCounter = Int64UpDownCounter{} + _ metric.Float64UpDownCounter = Float64UpDownCounter{} + _ metric.Int64Histogram = Int64Histogram{} + _ metric.Float64Histogram = Float64Histogram{} + _ metric.Int64Gauge = Int64Gauge{} + _ metric.Float64Gauge = Float64Gauge{} + _ metric.Int64ObservableCounter = Int64ObservableCounter{} + _ metric.Float64ObservableCounter = Float64ObservableCounter{} + _ metric.Int64ObservableGauge = Int64ObservableGauge{} + _ metric.Float64ObservableGauge = Float64ObservableGauge{} + _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{} + _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{} + _ metric.Int64Observer = Int64Observer{} + _ metric.Float64Observer = Float64Observer{} +) + +// MeterProvider is an OpenTelemetry No-Op MeterProvider. +type MeterProvider struct{ embedded.MeterProvider } + +// NewMeterProvider returns a MeterProvider that does not record any telemetry. +func NewMeterProvider() MeterProvider { + return MeterProvider{} +} + +// Meter returns an OpenTelemetry Meter that does not record any telemetry. +func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter { + return Meter{} +} + +// Meter is an OpenTelemetry No-Op Meter. +type Meter struct{ embedded.Meter } + +// Int64Counter returns a Counter used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) { + return Int64Counter{}, nil +} + +// Int64UpDownCounter returns an UpDownCounter used to record int64 +// measurements that produces no telemetry. +func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + return Int64UpDownCounter{}, nil +} + +// Int64Histogram returns a Histogram used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + return Int64Histogram{}, nil +} + +// Int64Gauge returns a Gauge used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + return Int64Gauge{}, nil +} + +// Int64ObservableCounter returns an ObservableCounter used to record int64 +// measurements that produces no telemetry. +func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + return Int64ObservableCounter{}, nil +} + +// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to +// record int64 measurements that produces no telemetry. 
+func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + return Int64ObservableUpDownCounter{}, nil +} + +// Int64ObservableGauge returns an ObservableGauge used to record int64 +// measurements that produces no telemetry. +func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + return Int64ObservableGauge{}, nil +} + +// Float64Counter returns a Counter used to record float64 measurements that +// produces no telemetry. +func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) { + return Float64Counter{}, nil +} + +// Float64UpDownCounter returns an UpDownCounter used to record float64 +// measurements that produces no telemetry. +func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { + return Float64UpDownCounter{}, nil +} + +// Float64Histogram returns a Histogram used to record float64 measurements +// that produces no telemetry. +func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { + return Float64Histogram{}, nil +} + +// Float64Gauge returns a Gauge used to record float64 measurements that +// produces no telemetry. +func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + return Float64Gauge{}, nil +} + +// Float64ObservableCounter returns an ObservableCounter used to record float64 +// measurements that produces no telemetry. +func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { + return Float64ObservableCounter{}, nil +} + +// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to +// record float64 measurements that produces no telemetry. +func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { + return Float64ObservableUpDownCounter{}, nil +} + +// Float64ObservableGauge returns an ObservableGauge used to record float64 +// measurements that produces no telemetry. +func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { + return Float64ObservableGauge{}, nil +} + +// RegisterCallback performs no operation. +func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) { + return Registration{}, nil +} + +// Observer acts as a recorder of measurements for multiple instruments in a +// Callback; it performs no operation. +type Observer struct{ embedded.Observer } + +// ObserveFloat64 performs no operation. +func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) { +} + +// ObserveInt64 performs no operation. +func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) { +} + +// Registration is the registration of a Callback with a No-Op Meter. +type Registration struct{ embedded.Registration } + +// Unregister unregisters the Callback the Registration represents with the +// No-Op Meter. This will always return nil because the No-Op Meter performs no +// operation, including holding any record of registrations. +func (Registration) Unregister() error { return nil } + +// Int64Counter is an OpenTelemetry Counter used to record int64 measurements. +// It produces no telemetry.
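+//
+// For example (editor's sketch; ctx is an assumed context.Context and metric
+// refers to go.opentelemetry.io/otel/metric): the zero value is usable, and
+// every call is a no-op.
+//
+//	var c metric.Int64Counter = noop.Int64Counter{}
+//	c.Add(ctx, 1) // compiles and runs, but records nothing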
+type Int64Counter struct{ embedded.Int64Counter } + +// Add performs no operation. +func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {} + +// Float64Counter is an OpenTelemetry Counter used to record float64 +// measurements. It produces no telemetry. +type Float64Counter struct{ embedded.Float64Counter } + +// Add performs no operation. +func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {} + +// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64 +// measurements. It produces no telemetry. +type Int64UpDownCounter struct{ embedded.Int64UpDownCounter } + +// Add performs no operation. +func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {} + +// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record +// float64 measurements. It produces no telemetry. +type Float64UpDownCounter struct{ embedded.Float64UpDownCounter } + +// Add performs no operation. +func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {} + +// Int64Histogram is an OpenTelemetry Histogram used to record int64 +// measurements. It produces no telemetry. +type Int64Histogram struct{ embedded.Int64Histogram } + +// Record performs no operation. +func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Histogram is an OpenTelemetry Histogram used to record float64 +// measurements. It produces no telemetry. +type Float64Histogram struct{ embedded.Float64Histogram } + +// Record performs no operation. +func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} + +// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64 +// measurements. It produces no telemetry. +type Int64Gauge struct{ embedded.Int64Gauge } + +// Record performs no operation. +func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64 +// measurements. It produces no telemetry. +type Float64Gauge struct{ embedded.Float64Gauge } + +// Record performs no operation. +func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {} + +// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableCounter struct { + metric.Int64Observable + embedded.Int64ObservableCounter +} + +// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableCounter struct { + metric.Float64Observable + embedded.Float64ObservableCounter +} + +// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableGauge struct { + metric.Int64Observable + embedded.Int64ObservableGauge +} + +// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableGauge struct { + metric.Float64Observable + embedded.Float64ObservableGauge +} + +// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record int64 measurements. It produces no telemetry. +type Int64ObservableUpDownCounter struct { + metric.Int64Observable + embedded.Int64ObservableUpDownCounter +} + +// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record float64 measurements. It produces no telemetry. 
+type Float64ObservableUpDownCounter struct { + metric.Float64Observable + embedded.Float64ObservableUpDownCounter +} + +// Int64Observer is a recorder of int64 measurements that performs no operation. +type Int64Observer struct{ embedded.Int64Observer } + +// Observe performs no operation. +func (Int64Observer) Observe(int64, ...metric.ObserveOption) {} + +// Float64Observer is a recorder of float64 measurements that performs no +// operation. +type Float64Observer struct{ embedded.Float64Observer } + +// Observe performs no operation. +func (Float64Observer) Observe(float64, ...metric.ObserveOption) {} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go index 0a4825ae6a..8403a4bad2 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -39,7 +28,7 @@ type Float64Counter interface { } // Float64CounterConfig contains options for synchronous counter instruments that -// record int64 values. +// record float64 values. type Float64CounterConfig struct { description string unit string @@ -92,7 +81,7 @@ type Float64UpDownCounter interface { } // Float64UpDownCounterConfig contains options for synchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64UpDownCounterConfig struct { description string unit string @@ -144,8 +133,8 @@ type Float64Histogram interface { Record(ctx context.Context, incr float64, options ...RecordOption) } -// Float64HistogramConfig contains options for synchronous counter instruments -// that record int64 values. +// Float64HistogramConfig contains options for synchronous histogram +// instruments that record float64 values. type Float64HistogramConfig struct { description string unit string @@ -183,3 +172,55 @@ func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Float64HistogramOption interface { applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig } + +// Float64Gauge is an instrument that records instantaneous float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. 
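+	//
+	// For example (editor's sketch; gauge is an assumed Float64Gauge, ctx an
+	// assumed context.Context, and attribute refers to
+	// go.opentelemetry.io/otel/attribute):
+	//
+	//	gauge.Record(ctx, 98.6, WithAttributes(attribute.String("scale", "fahrenheit")))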
+ Record(ctx context.Context, value float64, options ...RecordOption) +} + +// Float64GaugeConfig contains options for synchronous gauge instruments that +// record float64 values. +type Float64GaugeConfig struct { + description string + unit string +} + +// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts +// applied. +func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig { + var config Float64GaugeConfig + for _, o := range opts { + config = o.applyFloat64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64GaugeConfig) Unit() string { + return c.unit +} + +// Float64GaugeOption applies options to a [Float64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64GaugeOption. +type Float64GaugeOption interface { + applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go index 56667d32fc..783fdfba77 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -144,7 +133,7 @@ type Int64Histogram interface { Record(ctx context.Context, incr int64, options ...RecordOption) } -// Int64HistogramConfig contains options for synchronous counter instruments +// Int64HistogramConfig contains options for synchronous histogram instruments // that record int64 values. type Int64HistogramConfig struct { description string @@ -183,3 +172,55 @@ func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Int64HistogramOption interface { applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig } + +// Int64Gauge is an instrument that records instantaneous int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value int64, options ...RecordOption) +} + +// Int64GaugeConfig contains options for synchronous gauge instruments that +// record int64 values. 
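+//
+// A brief sketch (editor's illustration) of how an implementation might
+// resolve the options it is handed:
+//
+//	cfg := NewInt64GaugeConfig(opts...)
+//	desc, unit := cfg.Description(), cfg.Unit()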
+type Int64GaugeConfig struct { + description string + unit string +} + +// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts +// applied. +func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig { + var config Int64GaugeConfig + for _, o := range opts { + config = o.applyInt64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64GaugeConfig) Unit() string { + return c.unit +} + +// Int64GaugeOption applies options to a [Int64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Int64GaugeOption. +type Int64GaugeOption interface { + applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go index d29aaa32c0..2fd9497338 100644 --- a/vendor/go.opentelemetry.io/otel/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/propagation/README.md b/vendor/go.opentelemetry.io/otel/propagation/README.md new file mode 100644 index 0000000000..e2959ac747 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/README.md @@ -0,0 +1,3 @@ +# Propagation + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation) diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go index 303cdf1cbf..552263ba73 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go index c119eb2858..33a3baf15f 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/doc.go +++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package propagation contains OpenTelemetry context propagators. diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go index c94438f73a..8c8286aab4 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 63e5d62221..6870e316dc 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" @@ -46,7 +35,7 @@ var ( versionPart = fmt.Sprintf("%.2X", supportedVersion) ) -// Inject set tracecontext from the Context into the carrier. +// Inject injects the trace context from ctx into carrier. 
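+//
+// For example (editor's sketch; req is an assumed *http.Request and ctx an
+// assumed context.Context):
+//
+//	var tc TraceContext
+//	tc.Inject(ctx, HeaderCarrier(req.Header))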
func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { sc := trace.SpanContextFromContext(ctx) if !sc.IsValid() { diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json new file mode 100644 index 0000000000..8c5ac55ca9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -0,0 +1,24 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "ignorePaths": [], + "labels": ["Skip Changelog", "dependencies"], + "postUpdateOptions" : [ + "gomodTidy" + ], + "packageRules": [ + { + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": true + }, + { + "matchFileNames": ["internal/tools/**"], + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": false + } + ] +} diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index e0a43e1384..ab09daf9d5 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.2.6 +codespell==2.3.0 diff --git a/vendor/go.opentelemetry.io/otel/sdk/README.md b/vendor/go.opentelemetry.io/otel/sdk/README.md new file mode 100644 index 0000000000..f81b1576ad --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/README.md @@ -0,0 +1,3 @@ +# SDK + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk) diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md new file mode 100644 index 0000000000..06e6d86854 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md @@ -0,0 +1,3 @@ +# SDK Instrumentation + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/instrumentation)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/instrumentation) diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go index 6e923acab4..a4faa6a03d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package instrumentation provides types to represent the code libraries that // provide OpenTelemetry instrumentation. 
These types are used in the diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go index 39f025a171..f2cdf3c651 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -1,19 +1,9 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" // Library represents the instrumentation library. -// Deprecated: please use Scope instead. +// +// Deprecated: use [Scope] instead. type Library = Scope diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go index 09c6d93f6d..728115045b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go index 59dcfab250..07923ed8d9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package env // import "go.opentelemetry.io/otel/sdk/internal/env" @@ -33,7 +22,7 @@ const ( BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE" // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e. // 512). 
Note: it must be less than or equal to - // EnvBatchSpanProcessorMaxQueueSize. + // BatchSpanProcessorMaxQueueSize. BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" // AttributeValueLengthKey is the maximum allowed attribute value size. diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go b/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go deleted file mode 100644 index bd84f624b4..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal // import "go.opentelemetry.io/otel/sdk/internal" - -//go:generate gotmpl --body=../../internal/shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go -//go:generate gotmpl --body=../../internal/shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go -//go:generate gotmpl --body=../../internal/shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go - -//go:generate gotmpl --body=../../internal/shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go -//go:generate gotmpl --body=../../internal/shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go -//go:generate gotmpl --body=../../internal/shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go -//go:generate gotmpl --body=../../internal/shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go -//go:generate gotmpl --body=../../internal/shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/sdk/internal/matchers\"}" --out=internaltest/harness.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md new file mode 100644 index 0000000000..fab61647c2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md @@ -0,0 +1,46 @@ +# Experimental Features + +The SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These features may change in backwards-incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Resource](#resource) + +### Resource + +[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental. +To have experimental semantic conventions added by [resource detectors], set the `OTEL_GO_X_RESOURCE` environment variable. +The value must be the case-insensitive string `"true"` to enable the feature. +All other values are ignored. + + + +[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/ +[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector + +#### Examples + +Enable experimental resource semantic conventions. + +```console +export OTEL_GO_X_RESOURCE=true +``` + +Disable experimental resource semantic conventions. + +```console +unset OTEL_GO_X_RESOURCE +``` + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied by a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go new file mode 100644 index 0000000000..68d296cbed --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x contains support for OTel SDK experimental features. +// +// This package should only be used for features defined in the specification. +// It should not be used for experiments or new project ideas. +package x // import "go.opentelemetry.io/otel/sdk/internal/x" + +import ( + "os" + "strings" +) + +// Resource is an experimental feature flag that defines whether resource +// detectors should include experimental semantic conventions. +// +// To enable this feature, set the OTEL_GO_X_RESOURCE environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Resource = newFeature("RESOURCE", func(v string) (string, bool) { + if strings.ToLower(v) == "true" { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user-configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned.
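+//
+// For example (editor's sketch) using the Resource flag defined above:
+//
+//	if v, ok := Resource.Lookup(); ok {
+//		_ = v // OTEL_GO_X_RESOURCE was set to an accepted value.
+//	}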
+func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled returns if the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/README.md b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md new file mode 100644 index 0000000000..4ad864d716 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md @@ -0,0 +1,3 @@ +# SDK Resource + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/resource)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go index aed756c5e7..95a61d61d4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 6a2c08293a..6ac1cdbf7b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package resource // import "go.opentelemetry.io/otel/sdk/resource" @@ -20,9 +9,11 @@ import ( "os" "path/filepath" + "github.com/google/uuid" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type ( @@ -47,6 +38,8 @@ type ( } defaultServiceNameDetector struct{} + + defaultServiceInstanceIDDetector struct{} ) var ( @@ -54,6 +47,7 @@ var ( _ Detector = host{} _ Detector = stringDetector{} _ Detector = defaultServiceNameDetector{} + _ Detector = defaultServiceInstanceIDDetector{} ) // Detect returns a *Resource that describes the OpenTelemetry SDK used. @@ -106,3 +100,19 @@ func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) }, ).Detect(ctx) } + +// Detect implements Detector. +func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceInstanceIDKey, + func() (string, error) { + version4Uuid, err := uuid.NewRandom() + if err != nil { + return "", err + } + + return version4Uuid.String(), nil + }, + ).Detect(ctx) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go index f263919f6e..0d6e213d92 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index c1b47193fe..5ecd859a52 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package resource // import "go.opentelemetry.io/otel/sdk/resource" @@ -22,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type containerIDProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go index d55a50b0dc..64939a2713 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package resource provides detecting and representing resources. // diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index be4cbe423e..813f056242 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package resource // import "go.opentelemetry.io/otel/sdk/resource" @@ -23,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go index f579329c2c..2d0f65498a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package resource // import "go.opentelemetry.io/otel/sdk/resource" @@ -19,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type hostIDProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go index 1778bbacf0..cc8b8938ed 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 //go:build dragonfly || freebsd || netbsd || openbsd || solaris // +build dragonfly freebsd netbsd openbsd solaris diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go index ba41409b23..b09fde3b73 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go index 207acb0ed3..d9e5d1a8ff 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 //go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go index 410579b8fc..f84f173240 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 //go:build linux // +build linux diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go index 721e3ca6e7..6354b35602 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 //go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go index 89df9d6882..df12c44c56 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go @@ -1,25 +1,8 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 -// +build !darwin -// +build !dragonfly -// +build !freebsd -// +build !linux -// +build !netbsd -// +build !openbsd -// +build !solaris -// +build !windows +//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go index 5b431c6ee6..71386e2da4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 //go:build windows // +build windows diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 8fbf071c17..8a48ab4fa3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package resource // import "go.opentelemetry.io/otel/sdk/resource" @@ -19,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type osDescriptionProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go index 24ec85793d..ce455dc544 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
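For readers unfamiliar with the constraint rewrite in host_id_unsupported.go above: in the pre-Go 1.17 "// +build" syntax, separate lines are ANDed together, commas mean AND within a line, and spaces mean OR. The "//go:build" form (Go 1.17+) expresses the same constraint with ordinary boolean operators, and the tooling keeps the legacy line in sync for older toolchains. A minimal illustration of the equivalence:

//go:build !darwin && !linux
// +build !darwin,!linux

// The two lines above state the same constraint. The older spelling as
// separate lines, each line ANDed with the next, would be:
//
//	// +build !darwin
//	// +build !linux

package example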
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go index c771942dee..f537e5ca5c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos // +build aix dragonfly freebsd linux netbsd openbsd solaris zos diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go index 1c84afc185..a6ff26a4d2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go index 3ebcb534f2..a77742b077 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go @@ -1,27 +1,8 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 -// +build !aix -// +build !darwin -// +build !dragonfly -// +build !freebsd -// +build !linux -// +build !netbsd -// +build !openbsd -// +build !solaris -// +build !windows -// +build !zos +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go index faad64d8da..5e3d199d78 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index 739ea4512a..085fe68fd7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package resource // import "go.opentelemetry.io/otel/sdk/resource" @@ -22,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index cb1ee0a9ce..ad4b50df40 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package resource // import "go.opentelemetry.io/otel/sdk/resource" @@ -22,6 +11,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" ) // Resource describes an entity about which identifying information @@ -229,11 +219,17 @@ func Empty() *Resource { func Default() *Resource { defaultResourceOnce.Do(func() { var err error - defaultResource, err = Detect( - context.Background(), + defaultDetectors := []Detector{ defaultServiceNameDetector{}, fromEnv{}, telemetrySDK{}, + } + if x.Resource.Enabled() { + defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...) + } + defaultResource, err = Detect( + context.Background(), + defaultDetectors..., ) if err != nil { otel.Handle(err) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md new file mode 100644 index 0000000000..f2936e1439 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md @@ -0,0 +1,3 @@ +# SDK Trace + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index fca26f2e70..1d399a75db 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" @@ -392,7 +381,7 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R } } -func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { +func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool { if !sd.SpanContext().IsSampled() { return false } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go index 0285e99be0..1f60524e3e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
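The resource.go hunk above now builds the default detector list explicitly so that an experimental service.instance.id detector can be prepended when the internal x.Resource flag is on. In this SDK line that flag appears to be driven by the OTEL_GO_X_RESOURCE environment variable (an assumption worth verifying against the vendored sdk/internal/x package). A sketch of observing the effect:

package main

import (
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// The flag must be set before the first Default() call: the result is
	// cached behind a sync.Once, so later changes have no effect.
	os.Setenv("OTEL_GO_X_RESOURCE", "true")

	// With the flag on, a service.instance.id attribute should appear next
	// to service.name and the telemetry.sdk.* attributes.
	for _, kv := range resource.Default().Attributes() {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value.Emit())
	}
}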
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package trace contains support for OpenTelemetry distributed tracing. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go index 1e3b426757..60a7ed1349 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go index d1c86e59b2..821c83faa1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -1,36 +1,45 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" +import ( + "slices" + "sync" + + "go.opentelemetry.io/otel/internal/global" +) + // evictedQueue is a FIFO queue with a configurable capacity. -type evictedQueue struct { - queue []interface{} +type evictedQueue[T any] struct { + queue []T capacity int droppedCount int + logDropped func() } -func newEvictedQueue(capacity int) evictedQueue { +func newEvictedQueueEvent(capacity int) evictedQueue[Event] { // Do not pre-allocate queue, do this lazily. - return evictedQueue{capacity: capacity} + return evictedQueue[Event]{ + capacity: capacity, + logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Event") }), + } +} + +func newEvictedQueueLink(capacity int) evictedQueue[Link] { + // Do not pre-allocate queue, do this lazily. + return evictedQueue[Link]{ + capacity: capacity, + logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Link") }), + } } // add adds value to the evictedQueue eq. If eq is at capacity, the oldest // queued value will be discarded and the drop count incremented. 
-func (eq *evictedQueue) add(value interface{}) { +func (eq *evictedQueue[T]) add(value T) { if eq.capacity == 0 { eq.droppedCount++ + eq.logDropped() return } @@ -39,6 +48,12 @@ func (eq *evictedQueue) add(value interface{}) { copy(eq.queue[:eq.capacity-1], eq.queue[1:]) eq.queue = eq.queue[:eq.capacity-1] eq.droppedCount++ + eq.logDropped() } eq.queue = append(eq.queue, value) } + +// copy returns a copy of the evictedQueue. +func (eq *evictedQueue[T]) copy() []T { + return slices.Clone(eq.queue) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go index bba246041a..925bcf9930 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" @@ -52,7 +41,12 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace gen.Lock() defer gen.Unlock() sid := trace.SpanID{} - _, _ = gen.randSource.Read(sid[:]) + for { + _, _ = gen.randSource.Read(sid[:]) + if sid.IsValid() { + break + } + } return sid } @@ -62,9 +56,19 @@ func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace. gen.Lock() defer gen.Unlock() tid := trace.TraceID{} - _, _ = gen.randSource.Read(tid[:]) sid := trace.SpanID{} - _, _ = gen.randSource.Read(sid[:]) + for { + _, _ = gen.randSource.Read(tid[:]) + if tid.IsValid() { + break + } + } + for { + _, _ = gen.randSource.Read(sid[:]) + if sid.IsValid() { + break + } + } return tid, sid } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go index 19cfea4ba4..c03bdc90f6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
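The evictedqueue.go rewrite above replaces the interface{}-based queue with a generic evictedQueue[T], which lets callers take snapshots via slices.Clone instead of copying element by element through type assertions (the interfaceArrayTo*Array helpers deleted later in this patch). It also wires in sync.OnceFunc (Go 1.21) so the capacity warning is logged at most once per queue. A self-contained sketch of the same pattern, with invented names:

package main

import (
	"fmt"
	"sync"
)

// boundedQueue mirrors the evictedQueue shape: a generic FIFO with a hard
// capacity that evicts the oldest element and warns only once.
type boundedQueue[T any] struct {
	queue      []T
	capacity   int
	dropped    int
	logDropped func()
}

func newBoundedQueue[T any](capacity int, what string) boundedQueue[T] {
	return boundedQueue[T]{
		capacity:   capacity,
		logDropped: sync.OnceFunc(func() { fmt.Println("limit reached: dropping", what) }),
	}
}

func (q *boundedQueue[T]) add(v T) {
	if q.capacity == 0 {
		q.dropped++
		q.logDropped()
		return
	}
	if len(q.queue) == q.capacity {
		copy(q.queue[:q.capacity-1], q.queue[1:]) // shift left, evicting the oldest
		q.queue = q.queue[:q.capacity-1]
		q.dropped++
		q.logDropped() // no-op after the first call
	}
	q.queue = append(q.queue, v)
}

func main() {
	q := newBoundedQueue[int](2, "int")
	for i := 0; i < 4; i++ {
		q.add(i)
	}
	fmt.Println(q.queue, "dropped:", q.dropped) // [2 3] dropped: 2
}

The id_generator.go hunk in the same chunk is related hardening rather than a refactor: an all-zero TraceID or SpanID is defined as invalid, so each randSource.Read is wrapped in a loop that redraws until IsValid reports true. The retry is astronomically unlikely to fire, but it makes the generator's contract unconditional.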
+// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index b1ac608464..14c2e5bebd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" @@ -302,7 +291,7 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error { retErr = err } else { // Poor man's list of errors - retErr = fmt.Errorf("%v; %v", retErr, err) + retErr = fmt.Errorf("%w; %w", retErr, err) } } } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go index 02053b318a..d2d1f72466 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index a7bc125b9e..ebb6df6c90 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
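The provider.go hunk above upgrades the accumulated shutdown error from %v to %w. Since Go 1.20, fmt.Errorf accepts several %w verbs in one call, and errors.Is/errors.As traverse every wrapped error, so a caller can still test for a specific exporter failure after multiple processors have failed. A self-contained sketch:

package main

import (
	"errors"
	"fmt"
)

var (
	errExporterA = errors.New("exporter A failed")
	errExporterB = errors.New("exporter B failed")
)

func main() {
	// With %v the causes were flattened into plain text; with %w (several
	// allowed per call since Go 1.20) both stay inspectable.
	err := fmt.Errorf("%w; %w", errExporterA, errExporterB)
	fmt.Println(errors.Is(err, errExporterA)) // true
	fmt.Println(errors.Is(err, errExporterB)) // true
}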
+// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go index f8770fff79..554111bb4a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" @@ -36,10 +25,10 @@ var _ SpanProcessor = (*simpleSpanProcessor)(nil) // send completed spans to the exporter immediately. // // This SpanProcessor is not recommended for production use. The synchronous -// nature of this SpanProcessor make it good for testing, debugging, or -// showing examples of other feature, but it will be slow and have a high -// computation resource usage overhead. The BatchSpanProcessor is recommended -// for production use instead. +// nature of this SpanProcessor makes it good for testing, debugging, or showing +// examples of other features, but it will be slow and have a high computation +// resource usage overhead. The BatchSpanProcessor is recommended for production +// use instead. func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { ssp := &simpleSpanProcessor{ exporter: exporter, @@ -80,10 +69,10 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { // // A closure is used to keep reference to the exporter and then the // field is zeroed. This ensures the simpleSpanProcessor is shut down - // before the exporter. This order is important as it avoids a - // potential deadlock. If the exporter shut down operation generates a - // span, that span would need to be exported. Meaning, OnEnd would be - // called and try acquiring the lock that is held here. + // before the exporter. This order is important as it avoids a potential + // deadlock. If the exporter shut down operation generates a span, that + // span would need to be exported. Meaning, OnEnd would be called and + // try acquiring the lock that is held here. ssp.exporterMu.Lock() done, shutdown := stopFunc(ssp.exporter) ssp.exporter = nil @@ -95,15 +84,15 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { select { case err = <-done: case <-ctx.Done(): - // It is possible for the exporter to have immediately shut down - // and the context to be done simultaneously. In that case this - // outer select statement will randomly choose a case. This will - // result in a different returned error for similar scenarios. - // Instead, double check if the exporter shut down at the same - // time and return that error if so. This will ensure consistency - // as well as ensure the caller knows the exporter shut down - // successfully (they can already determine if the deadline is - // expired given they passed the context). 
+ // It is possible for the exporter to have immediately shut down and + // the context to be done simultaneously. In that case this outer + // select statement will randomly choose a case. This will result in + // a different returned error for similar scenarios. Instead, double + // check if the exporter shut down at the same time and return that + // error if so. This will ensure consistency as well as ensure + // the caller knows the exporter shut down successfully (they can + // already determine if the deadline is expired given they passed + // the context). select { case err = <-done: default: @@ -119,7 +108,8 @@ func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error { return nil } -// MarshalLog is the marshaling function used by the logging system to represent this Span Processor. +// MarshalLog is the marshaling function used by the logging system to represent +// this Span Processor. func (ssp *simpleSpanProcessor) MarshalLog() interface{} { return struct { Type string diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go index 0349b2f198..d511d0f271 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" @@ -110,7 +99,7 @@ func (s snapshot) InstrumentationScope() instrumentation.Scope { // InstrumentationLibrary returns information about the instrumentation // library that created the span. -func (s snapshot) InstrumentationLibrary() instrumentation.Library { +func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility return s.instrumentationScope } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 85bc702a01..4945f50830 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
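The re-wrapped comment above is worth unpacking: when both cases of a select are ready, Go picks one pseudo-randomly, so Shutdown double-checks the completion channel before reporting the context error, keeping the returned error deterministic. A minimal sketch of the same pattern, with invented names:

package main

import (
	"context"
	"fmt"
)

func shutdown(ctx context.Context, done <-chan error) error {
	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		// The exporter may have finished at the same instant the context
		// expired; prefer the completion result when it is available.
		select {
		case err := <-done:
			return err
		default:
		}
		return ctx.Err()
	}
}

func main() {
	done := make(chan error, 1)
	done <- nil // the exporter already shut down cleanly
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // and the context is done at the same time
	// Without the inner re-check this would sometimes print context.Canceled.
	fmt.Println(shutdown(ctx, done)) // always <nil>
}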
+// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" @@ -20,6 +9,7 @@ import ( "reflect" "runtime" rt "runtime/trace" + "slices" "strings" "sync" "time" @@ -27,10 +17,10 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/internal" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -72,7 +62,7 @@ type ReadOnlySpan interface { // InstrumentationLibrary returns information about the instrumentation // library that created the span. // Deprecated: please use InstrumentationScope instead. - InstrumentationLibrary() instrumentation.Library + InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility // Resource returns information about the entity that produced the span. Resource() *resource.Resource // DroppedAttributes returns the number of attributes dropped by the span @@ -147,12 +137,13 @@ type recordingSpan struct { // ReadOnlySpan exported when the span ends. attributes []attribute.KeyValue droppedAttributes int + logDropAttrsOnce sync.Once // events are stored in FIFO queue capped by configured limit. - events evictedQueue + events evictedQueue[Event] // links are stored in FIFO queue capped by configured limit. - links evictedQueue + links evictedQueue[Link] // executionTracerTaskEnd ends the execution tracer span. executionTracerTaskEnd func() @@ -208,16 +199,6 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) { s.status = status } -// ensureAttributesCapacity inlines functionality from slices.Grow -// so that we can avoid needing to import golang.org/x/exp for go1.20. -// Once support for go1.20 is dropped, we can use slices.Grow available since go1.21 instead. -// Tracking issue: https://github.com/open-telemetry/opentelemetry-go/issues/4819. -func (s *recordingSpan) ensureAttributesCapacity(minCapacity int) { - if n := minCapacity - cap(s.attributes); n > 0 { - s.attributes = append(s.attributes[:cap(s.attributes)], make([]attribute.KeyValue, n)...)[:len(s.attributes)] - } -} - // SetAttributes sets attributes of this span. // // If a key from attributes already exists the value associated with that key @@ -239,7 +220,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { limit := s.tracer.provider.spanLimits.AttributeCountLimit if limit == 0 { // No attributes allowed. - s.droppedAttributes += len(attributes) + s.addDroppedAttr(len(attributes)) return } @@ -252,11 +233,11 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { // Otherwise, add without deduplication. When attributes are read they // will be deduplicated, optimizing the operation. - s.ensureAttributesCapacity(len(s.attributes) + len(attributes)) + s.attributes = slices.Grow(s.attributes, len(s.attributes)+len(attributes)) for _, a := range attributes { if !a.Valid() { // Drop all invalid attributes. - s.droppedAttributes++ + s.addDroppedAttr(1) continue } a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) @@ -264,6 +245,22 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { } } +// Declared as a var so tests can override. 
+var logDropAttrs = func() { + global.Warn("limit reached: dropping trace Span attributes") +} + +// addDroppedAttr adds incr to the count of dropped attributes. +// +// The first, and only the first, time this method is called a warning will be +// logged. +// +// This method assumes s.mu.Lock is held by the caller. +func (s *recordingSpan) addDroppedAttr(incr int) { + s.droppedAttributes += incr + s.logDropAttrsOnce.Do(logDropAttrs) +} + // addOverCapAttrs adds the attributes attrs to the span s while // de-duplicating the attributes of s and attrs and dropping attributes that // exceed the limit. @@ -288,16 +285,12 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { // Now that s.attributes is deduplicated, adding unique attributes up to // the capacity of s will not over allocate s.attributes. - if sum := len(attrs) + len(s.attributes); sum < limit { - // After support for go1.20 is dropped, simplify if-else to min(sum, limit). - s.ensureAttributesCapacity(sum) - } else { - s.ensureAttributesCapacity(limit) - } + sum := len(attrs) + len(s.attributes) + s.attributes = slices.Grow(s.attributes, min(sum, limit)) for _, a := range attrs { if !a.Valid() { // Drop all invalid attributes. - s.droppedAttributes++ + s.addDroppedAttr(1) continue } @@ -310,7 +303,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { if len(s.attributes) >= limit { // Do not just drop all of the remaining attributes, make sure // updates are checked and performed. - s.droppedAttributes++ + s.addDroppedAttr(1) } else { a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) s.attributes = append(s.attributes, a) @@ -391,7 +384,7 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { // Store the end time as soon as possible to avoid artificially increasing // the span's duration in case some operation below takes a while. - et := internal.MonotonicEndTime(s.startTime) + et := monotonicEndTime(s.startTime) // Do relative expensive check now that we have an end time and see if we // need to do any more processing. @@ -442,6 +435,16 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } } +// monotonicEndTime returns the end time at present but offset from start, +// monotonically. +// +// The monotonic clock is used in subtractions hence the duration since start +// added back to start gives end as a monotonic time. See +// https://golang.org/pkg/time/#hdr-Monotonic_Clocks +func monotonicEndTime(start time.Time) time.Time { + return start.Add(time.Since(start)) +} + // RecordError will record err as a span event for this span. An additional call to // SetStatus is required if the Status of the Span should be set to Error, this method // does not change the Span status. If this span is not being recorded or err is nil @@ -609,7 +612,7 @@ func (s *recordingSpan) Links() []Link { if len(s.links.queue) == 0 { return []Link{} } - return s.interfaceArrayToLinksArray() + return s.links.copy() } // Events returns the events of this span. @@ -619,7 +622,7 @@ func (s *recordingSpan) Events() []Event { if len(s.events.queue) == 0 { return []Event{} } - return s.interfaceArrayToEventArray() + return s.events.copy() } // Status returns the status of this span. @@ -639,7 +642,7 @@ func (s *recordingSpan) InstrumentationScope() instrumentation.Scope { // InstrumentationLibrary returns the instrumentation.Library associated with // the Tracer that created this span. 
-func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { +func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility s.mu.Lock() defer s.mu.Unlock() return s.tracer.instrumentationScope @@ -653,8 +656,12 @@ func (s *recordingSpan) Resource() *resource.Resource { return s.tracer.provider.resource } -func (s *recordingSpan) addLink(link trace.Link) { - if !s.IsRecording() || !link.SpanContext.IsValid() { +func (s *recordingSpan) AddLink(link trace.Link) { + if !s.IsRecording() { + return + } + if !link.SpanContext.IsValid() && len(link.Attributes) == 0 && + link.SpanContext.TraceState().Len() == 0 { return } @@ -737,32 +744,16 @@ func (s *recordingSpan) snapshot() ReadOnlySpan { } sd.droppedAttributeCount = s.droppedAttributes if len(s.events.queue) > 0 { - sd.events = s.interfaceArrayToEventArray() + sd.events = s.events.copy() sd.droppedEventCount = s.events.droppedCount } if len(s.links.queue) > 0 { - sd.links = s.interfaceArrayToLinksArray() + sd.links = s.links.copy() sd.droppedLinkCount = s.links.droppedCount } return &sd } -func (s *recordingSpan) interfaceArrayToLinksArray() []Link { - linkArr := make([]Link, 0) - for _, value := range s.links.queue { - linkArr = append(linkArr, value.(Link)) - } - return linkArr -} - -func (s *recordingSpan) interfaceArrayToEventArray() []Event { - eventArr := make([]Event, 0) - for _, value := range s.events.queue { - eventArr = append(eventArr, value.(Event)) - } - return eventArr -} - func (s *recordingSpan) addChild() { if !s.IsRecording() { return @@ -827,6 +818,9 @@ func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} // AddEvent does nothing. func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} +// AddLink does nothing. +func (nonRecordingSpan) AddLink(trace.Link) {} + // SetName does nothing. func (nonRecordingSpan) SetName(string) {} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go index c9bd52f7ad..6bdda3d94a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go index aa4d4221db..bec5e20978 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
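The span.go hunk above promotes the unexported addLink to AddLink, matching the method added to the trace.Span interface in the v1.28 API line this bump pulls in, and it loosens the guard: a link whose SpanContext is invalid is still recorded if it carries attributes or TraceState. A sketch of attaching a link to a live span, with invented names:

package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tp := sdktrace.NewTracerProvider()
	defer func() { _ = tp.Shutdown(context.Background()) }()
	tracer := tp.Tracer("example")

	_, parent := tracer.Start(context.Background(), "parent")
	defer parent.End()

	_, child := tracer.Start(context.Background(), "child")
	// Previously links could only be supplied at Start; AddLink attaches
	// one after creation.
	child.AddLink(trace.Link{
		SpanContext: parent.SpanContext(),
		Attributes:  []attribute.KeyValue{attribute.String("link.kind", "follows-from")},
	})
	child.End()
}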
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go index 9c53657a71..af7f9177fc 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index 301e1a7abc..43419d3b54 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" @@ -143,13 +132,13 @@ func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr Sa spanKind: trace.ValidateSpanKind(config.SpanKind()), name: name, startTime: startTime, - events: newEvictedQueue(tr.provider.spanLimits.EventCountLimit), - links: newEvictedQueue(tr.provider.spanLimits.LinkCountLimit), + events: newEvictedQueueEvent(tr.provider.spanLimits.EventCountLimit), + links: newEvictedQueueLink(tr.provider.spanLimits.LinkCountLimit), tracer: tr, } for _, l := range config.Links() { - s.addLink(l) + s.AddLink(l) } s.SetAttributes(sr.Attributes...) 
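Two smaller modernizations in the span.go hunks above deserve a note. SetAttributes and addOverCapAttrs now rely on slices.Grow and the min builtin (both Go 1.21), retiring the hand-rolled ensureAttributesCapacity helper, and the end-time logic moves in-package as monotonicEndTime, which leans on Go's monotonic clock to keep span durations non-negative. A combined sketch:

package main

import (
	"fmt"
	"slices"
	"time"
)

func main() {
	// slices.Grow guarantees capacity for n more elements without changing
	// the length; min clamps the growth to the configured attribute limit.
	attrs := make([]string, 0)
	limit := 4
	attrs = slices.Grow(attrs, min(len(attrs)+8, limit))
	fmt.Println(len(attrs), cap(attrs) >= limit) // 0 true

	// time.Since reads the monotonic clock carried inside start, so
	// start.Add(time.Since(start)) cannot move backwards even if the wall
	// clock is stepped between span start and end.
	start := time.Now()
	end := start.Add(time.Since(start))
	fmt.Println(!end.Before(start)) // true, guaranteed by the monotonic reading
}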
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go index d3457ed135..b84dd2c5ee 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 42de0b9a7c..b7cede891c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -1,20 +1,9 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.24.0" + return "1.29.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go index 12d6b520f5..09e094de9a 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/semconv/internal/v2" diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go index 4a711133a0..1a820bdb30 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/semconv/internal/v2" @@ -72,7 +61,7 @@ func (c *NetConv) Host(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.HostName(h)) if p > 0 { - attrs = append(attrs, c.HostPort(int(p))) + attrs = append(attrs, c.HostPort(p)) } return attrs } @@ -263,7 +252,7 @@ func (c *NetConv) Peer(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.PeerName(h)) if p > 0 { - attrs = append(attrs, c.PeerPort(int(p))) + attrs = append(attrs, c.PeerPort(p)) } return attrs } @@ -320,5 +309,5 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Bit size of 16 checked above. } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md new file mode 100644 index 0000000000..87b842c5d1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.17.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go index 71a1f7748d..e087c9c04d 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package semconv implements OpenTelemetry semantic conventions. 
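The net.go hunk above drops two redundant int conversions and annotates the remaining one. The annotation is justified rather than cosmetic: parsing the port with a bit size of 16 already bounds it to 0..65535, so the later int conversion cannot overflow, which is what the gosec suppression records. A minimal sketch of the same idea, with invented names:

package main

import (
	"fmt"
	"net"
	"strconv"
)

func parsePort(hostport string) (string, int, error) {
	host, portStr, err := net.SplitHostPort(hostport)
	if err != nil {
		return "", 0, err
	}
	p, err := strconv.ParseUint(portStr, 10, 16) // bit size 16: 0..65535 only
	if err != nil {
		return "", 0, err
	}
	return host, int(p), nil // safe: range already checked by ParseUint
}

func main() {
	host, port, err := parsePort("collector.example.com:4317")
	fmt.Println(host, port, err) // collector.example.com 4317 <nil>
}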
// diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go index 679c40c4de..c7b804bbe2 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go index 9b8c559de4..137acc67de 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go index d5c4b5c136..d318221e59 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/README.md new file mode 100644 index 0000000000..18ee2f3d62 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/README.md @@ -0,0 +1,3 @@ +# Semconv v1.17.0 HTTP conv + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go index fc43808fe4..76d1dc86b4 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package httpconv provides OpenTelemetry HTTP semantic conventions for // tracing telemetry. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go index 39a2eab3a6..7e365e82ce 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go index 42fc525d16..634a1dce07 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go index 8c4a7299d2..21497bb6bc 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md new file mode 100644 index 0000000000..82e1f46b4e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.20.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go index 67d1d4c44d..6685c392b5 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go index 359c5a6962..0d1f55a8fe 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package semconv implements OpenTelemetry semantic conventions. // diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go index 8ac9350d2b..6377639321 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go index 09ff4dfdbf..f40c97825a 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go index 342aede95f..9c1840631b 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go index a2b906742a..3d44dae275 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go index e449e5c3b9..95d0210e38 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go index 8517741485..90b1b0452c 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. 
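The semconv diffs above are header-only: each file swaps the full Apache-2.0 boilerplate for the SPDX short identifier, leaving the generated attribute API untouched. As a minimal sketch of how that API is typically consumed (not part of the upstream change; it assumes the vendored v1.20.0 package with its HTTPMethod/HTTPStatusCode constructors and SchemaURL constant, names taken from that version's generated code rather than from this patch):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

func main() {
	// Each generated constructor returns an attribute.KeyValue carrying the
	// convention's canonical key ("http.method", "http.status_code", ...).
	attrs := []attribute.KeyValue{
		semconv.HTTPMethod("GET"),
		semconv.HTTPStatusCode(200),
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
	// Every versioned semconv package also pins the schema it was generated
	// from, so instrumentation can report a matching schema URL.
	fmt.Println(semconv.SchemaURL)
}

The same pattern applies to the v1.17.0 files above; only the import path (and any conventions renamed between spec versions) differ.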
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go deleted file mode 100644 index 31726598d6..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go +++ /dev/null @@ -1,4398 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// Describes FaaS attributes. -const ( - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function. - // - // Type: string - // RequirementLevel: ConditionallyRequired (For some cloud providers, like - // AWS or GCP, the region in which a function is hosted is essential to - // uniquely identify the function and also part of its endpoint. Since it's - // part of the endpoint being called, the region is always known to - // clients. In these cases, `faas.invoked_region` MUST be set accordingly. - // If the region is unknown to the client or not required for identifying - // the invoked function, setting `faas.invoked_region` is optional.) - // Stability: experimental - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") - - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" - // semantic conventions. It represents the type of the trigger which caused - // this function invocation. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - FaaSTriggerKey = attribute.Key("faas.trigger") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It represents the identifies the class / type of - // event. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'browser.mouse.click', 'device.app.lifecycle' - // Note: Event names are subject to the same rules as [attribute - // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md). - // Notably, event names are namespaced to avoid collisions and provide a - // clean separation of semantics for events in separate domains like - // browser, mobile, and kubernetes. - EventNameKey = attribute.Key("event.name") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It represents the identifies the class / type of -// event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// The attributes described in this section are rather generic. They may be -// used in any Log Record they apply to. -const ( - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log - // Record. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. 
- // The id MAY be an [Universally Unique Lexicographically Sortable - // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers - // (e.g. UUID) may be used as needed. - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogRecordUID returns an attribute KeyValue conforming to the -// "log.record.uid" semantic conventions. It represents a unique identifier for -// the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Describes Log attributes -const ( - // LogIostreamKey is the attribute Key conforming to the "log.iostream" - // semantic conventions. It represents the stream associated with the log. - // See below for a list of well-known values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - LogIostreamKey = attribute.Key("log.iostream") -) - -var ( - // Logs from stdout stream - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// A file to which log was emitted. -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'audit.log' - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the - // basename of the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'uuid.log' - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. It represents the full path to the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/log/mysql/audit.log' - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full - // path to the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/lib/docker/uuid.log' - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") -) - -// LogFileName returns an attribute KeyValue conforming to the -// "log.file.name" semantic conventions. It represents the basename of the -// file. -func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the -// "log.file.path" semantic conventions. It represents the full path to the -// file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path -// to the file, with symlinks resolved. 
-func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// Describes Database attributes -const ( - // PoolNameKey is the attribute Key conforming to the "pool.name" semantic - // conventions. It represents the name of the connection pool; unique - // within the instrumented application. In case the connection pool - // implementation doesn't provide a name, then the - // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) - // should be used - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'myDataSource' - PoolNameKey = attribute.Key("pool.name") - - // StateKey is the attribute Key conforming to the "state" semantic - // conventions. It represents the state of a connection in the pool - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Examples: 'idle' - StateKey = attribute.Key("state") -) - -var ( - // idle - StateIdle = StateKey.String("idle") - // used - StateUsed = StateKey.String("used") -) - -// PoolName returns an attribute KeyValue conforming to the "pool.name" -// semantic conventions. It represents the name of the connection pool; unique -// within the instrumented application. In case the connection pool -// implementation doesn't provide a name, then the -// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) -// should be used -func PoolName(val string) attribute.KeyValue { - return PoolNameKey.String(val) -} - -// ASP.NET Core attributes -const ( - // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to - // the "aspnetcore.diagnostics.handler.type" semantic conventions. It - // represents the full type name of the - // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) - // implementation that handled the exception. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if and only if the exception - // was handled by this handler.) - // Stability: experimental - // Examples: 'Contoso.MyHandler' - AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") - - // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.policy" semantic conventions. It represents - // the rate limiting policy name. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if the matched endpoint for the - // request had a rate-limiting policy.) - // Stability: experimental - // Examples: 'fixed', 'sliding', 'token' - AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") - - // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.result" semantic conventions. It represents - // the rate-limiting result, shows whether the lease was acquired or - // contains a rejection reason - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Examples: 'acquired', 'request_canceled' - AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") - - // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the - // "aspnetcore.request.is_unhandled" semantic conventions. It represents - // the flag indicating if request was handled by the application pipeline. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (if and only if the request was - // not handled.) 
- // Stability: experimental - // Examples: True - AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") - - // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the - // "aspnetcore.routing.is_fallback" semantic conventions. It represents a - // value that indicates whether the matched route is a fallback route. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (If and only if a route was - // successfully matched.) - // Stability: experimental - // Examples: True - AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") -) - -var ( - // Lease was acquired - AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") - // Lease request was rejected by the endpoint limiter - AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") - // Lease request was rejected by the global limiter - AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") - // Lease request was canceled - AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") -) - -// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming -// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It -// represents the full type name of the -// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) -// implementation that handled the exception. -func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { - return AspnetcoreDiagnosticsHandlerTypeKey.String(val) -} - -// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to -// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents -// the rate limiting policy name. -func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { - return AspnetcoreRateLimitingPolicyKey.String(val) -} - -// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to -// the "aspnetcore.request.is_unhandled" semantic conventions. It represents -// the flag indicating if request was handled by the application pipeline. -func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { - return AspnetcoreRequestIsUnhandledKey.Bool(val) -} - -// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to -// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a -// value that indicates whether the matched route is a fallback route. -func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { - return AspnetcoreRoutingIsFallbackKey.Bool(val) -} - -// SignalR attributes -const ( - // SignalrConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the - // signalR HTTP connection closure status. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'app_shutdown', 'timeout' - SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalrTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions. 
It represents the [SignalR - // transport - // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'web_sockets', 'long_polling' - SignalrTransportKey = attribute.Key("signalr.transport") -) - -var ( - // The connection was closed normally - SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout - SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down - SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") -) - -var ( - // ServerSentEvents protocol - SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") - // LongPolling protocol - SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") - // WebSockets protocol - SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") -) - -// Describes JVM buffer metric attributes. -const ( - // JvmBufferPoolNameKey is the attribute Key conforming to the - // "jvm.buffer.pool.name" semantic conventions. It represents the name of - // the buffer pool. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'mapped', 'direct' - // Note: Pool names are generally obtained via - // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). - JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") -) - -// JvmBufferPoolName returns an attribute KeyValue conforming to the -// "jvm.buffer.pool.name" semantic conventions. It represents the name of the -// buffer pool. -func JvmBufferPoolName(val string) attribute.KeyValue { - return JvmBufferPoolNameKey.String(val) -} - -// Describes JVM memory metric attributes. -const ( - // JvmMemoryPoolNameKey is the attribute Key conforming to the - // "jvm.memory.pool.name" semantic conventions. It represents the name of - // the memory pool. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' - // Note: Pool names are generally obtained via - // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). - JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") - - // JvmMemoryTypeKey is the attribute Key conforming to the - // "jvm.memory.type" semantic conventions. It represents the type of - // memory. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'heap', 'non_heap' - JvmMemoryTypeKey = attribute.Key("jvm.memory.type") -) - -var ( - // Heap memory - JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") - // Non-heap memory - JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") -) - -// JvmMemoryPoolName returns an attribute KeyValue conforming to the -// "jvm.memory.pool.name" semantic conventions. It represents the name of the -// memory pool. -func JvmMemoryPoolName(val string) attribute.KeyValue { - return JvmMemoryPoolNameKey.String(val) -} - -// Describes System metric attributes -const ( - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. 
It represents the device identifier - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '(identifier)' - SystemDeviceKey = attribute.Key("system.device") -) - -// SystemDevice returns an attribute KeyValue conforming to the -// "system.device" semantic conventions. It represents the device identifier -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// Describes System CPU metric attributes -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the - // logical CPU number [0..n-1] - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") - - // SystemCPUStateKey is the attribute Key conforming to the - // "system.cpu.state" semantic conventions. It represents the state of the - // CPU - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle', 'interrupt' - SystemCPUStateKey = attribute.Key("system.cpu.state") -) - -var ( - // user - SystemCPUStateUser = SystemCPUStateKey.String("user") - // system - SystemCPUStateSystem = SystemCPUStateKey.String("system") - // nice - SystemCPUStateNice = SystemCPUStateKey.String("nice") - // idle - SystemCPUStateIdle = SystemCPUStateKey.String("idle") - // iowait - SystemCPUStateIowait = SystemCPUStateKey.String("iowait") - // interrupt - SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") - // steal - SystemCPUStateSteal = SystemCPUStateKey.String("steal") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. It represents the logical -// CPU number [0..n-1] -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} - -// Describes System Memory metric attributes -const ( - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. It represents the memory - // state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free', 'cached' - SystemMemoryStateKey = attribute.Key("system.memory.state") -) - -var ( - // used - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // shared - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") - // buffers - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Describes System Memory Paging metric attributes -const ( - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'in' - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. 
It represents the memory - // paging state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free' - SystemPagingStateKey = attribute.Key("system.paging.state") - - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. It represents the memory - // paging type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'minor' - SystemPagingTypeKey = attribute.Key("system.paging.type") -) - -var ( - // in - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -var ( - // used - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -var ( - // major - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) - -// Describes Filesystem metric attributes -const ( - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the - // filesystem mode - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'rw, ro' - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/mnt/data' - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the - // filesystem state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'used' - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. It represents the - // filesystem type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ext4' - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") -) - -var ( - // used - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -var ( - // fat32 - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. 
It represents the filesystem -// mode -func SystemFilesystemMode(val string) attribute.KeyValue { - return SystemFilesystemModeKey.String(val) -} - -// SystemFilesystemMountpoint returns an attribute KeyValue conforming to -// the "system.filesystem.mountpoint" semantic conventions. It represents the -// filesystem mount path -func SystemFilesystemMountpoint(val string) attribute.KeyValue { - return SystemFilesystemMountpointKey.String(val) -} - -// Describes Network metric attributes -const ( - // SystemNetworkStateKey is the attribute Key conforming to the - // "system.network.state" semantic conventions. It represents a stateless - // protocol MUST NOT set this attribute - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'close_wait' - SystemNetworkStateKey = attribute.Key("system.network.state") -) - -var ( - // close - SystemNetworkStateClose = SystemNetworkStateKey.String("close") - // close_wait - SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") - // closing - SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") - // delete - SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") - // established - SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") - // fin_wait_1 - SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") - // fin_wait_2 - SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") - // last_ack - SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") - // listen - SystemNetworkStateListen = SystemNetworkStateKey.String("listen") - // syn_recv - SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") - // syn_sent - SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") - // time_wait - SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") -) - -// Describes System Process metric attributes -const ( - // SystemProcessesStatusKey is the attribute Key conforming to the - // "system.processes.status" semantic conventions. It represents the - // process state, e.g., [Linux Process State - // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'running' - SystemProcessesStatusKey = attribute.Key("system.processes.status") -) - -var ( - // running - SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running") - // sleeping - SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping") - // stopped - SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped") - // defunct - SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct") -) - -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. 
It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent the client address - // behind any intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.port` SHOULD represent the client port behind - // any intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the -// "client.address" semantic conventions. It represents the client address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// The attributes used to describe telemetry in the context of databases. -const ( - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. 
how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - - // DBCassandraTableKey is the attribute Key conforming to the - // "db.cassandra.table" semantic conventions. It represents the name of the - // primary Cassandra table that the operation is acting upon, including the - // keyspace name (if applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra - // rather than sql. It is not recommended to attempt any client-side - // parsing of `db.statement` just to get this property, but it should be - // set if it is provided by the library being instrumented. If the - // operation is acting upon an anonymous table, or more than one table, - // this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") - - // DBConnectionStringKey is the attribute Key conforming to the - // "db.connection_string" semantic conventions. It represents the - // connection string used to connect to the database. It is recommended to - // remove embedded credentials. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBContainerKey is the attribute Key conforming to the - // "db.cosmosdb.container" semantic conventions. It represents the cosmos - // DB container name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'anystring' - DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU - // consumed for that operation - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") - - // DBElasticsearchClusterNameKey is the attribute Key conforming to the - // "db.elasticsearch.cluster.name" semantic conventions. It represents the - // represents the identifier of an Elasticsearch cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' - DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") - - // DBElasticsearchNodeNameKey is the attribute Key conforming to the - // "db.elasticsearch.node.name" semantic conventions. It represents the - // represents the human-readable identifier of the node/instance to which a - // request was routed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-0000000001' - DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") - - // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id" - // semantic conventions. It represents an identifier (address, unique name, - // or any other identifier) of the database instance that is executing - // queries or mutations on the current connection. This is useful in cases - // where the database is running in a clustered environment and the - // instrumentation is able to record the node executing the query. The - // client may obtain this value in databases like MySQL using queries like - // `select @@hostname`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mysql-e26b99z.example.com' - DBInstanceIDKey = attribute.Key("db.instance.id") - - // DBJDBCDriverClassnameKey is the attribute Key conforming to the - // "db.jdbc.driver_classname" semantic conventions. It represents the - // fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) - // driver used to connect. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - - // DBMongoDBCollectionKey is the attribute Key conforming to the - // "db.mongodb.collection" semantic conventions. It represents the MongoDB - // collection being accessed within the database stated in `db.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") - - // DBMSSQLInstanceNameKey is the attribute Key conforming to the - // "db.mssql.instance_name" semantic conventions. It represents the - // Microsoft SQL Server [instance - // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named - // instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer - // required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") - - // DBNameKey is the attribute Key conforming to the "db.name" semantic - // conventions. It represents the this attribute is used to report the name - // of the database being accessed. For commands that switch the database, - // this should be set to the target database (even if the command fails). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called - // "schema name". In case there are multiple layers that could be - // considered for database name (e.g. Oracle instance name and schema - // name), the database name to be used is the more specific layer (e.g. - // Oracle schema name). - DBNameKey = attribute.Key("db.name") - - // DBOperationKey is the attribute Key conforming to the "db.operation" - // semantic conventions. It represents the name of the operation being - // executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to - // attempt any client-side parsing of `db.statement` just to get this - // property, but it should be set if the operation name is provided by the - // library being instrumented. If the SQL statement has an ambiguous - // operation, or performs more than one operation, this value may be - // omitted. - DBOperationKey = attribute.Key("db.operation") - - // DBRedisDBIndexKey is the attribute Key conforming to the - // "db.redis.database_index" semantic conventions. It represents the index - // of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To - // be used instead of the generic `db.name` attribute. 
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1, 15
- DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
-
- // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
- // semantic conventions. It represents the name of the primary table that
- // the operation is acting upon, including the database name (if
- // applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'public.users', 'customers'
- // Note: It is not recommended to attempt any client-side parsing of
- // `db.statement` just to get this property, but it should be set if it is
- // provided by the library being instrumented. If the operation is acting
- // upon an anonymous table, or more than one table, this value MUST NOT be
- // set.
- DBSQLTableKey = attribute.Key("db.sql.table")
-
- // DBStatementKey is the attribute Key conforming to the "db.statement"
- // semantic conventions. It represents the database statement being
- // executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
- DBStatementKey = attribute.Key("db.statement")
-
- // DBSystemKey is the attribute Key conforming to the "db.system" semantic
- // conventions. It represents an identifier for the database management
- // system (DBMS) product being used. See below for a list of well-known
- // identifiers.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBSystemKey = attribute.Key("db.system")
-
- // DBUserKey is the attribute Key conforming to the "db.user" semantic
- // conventions. It represents the username for accessing the database.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'readonly_user', 'reporting_user'
- DBUserKey = attribute.Key("db.user")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-var (
- // Gateway (HTTP) connections mode
- DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
- // Direct connection
- DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
-)
-
-var (
- // invalid
- DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
- // create
- DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
- // patch
- DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
- // read
- DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
- // read_feed
- DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
- // delete
- DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
- // replace
- DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
- // execute
- DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
- // query
- DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
- // head
- DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
- // head_feed
- DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
- // upsert
- DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
- // batch
- DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
- // query_plan
- DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
- // execute_javascript
- DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
-)
-
-var (
- // Some other SQL database. Fallback only. See notes
- DBSystemOtherSQL = DBSystemKey.String("other_sql")
- // Microsoft SQL Server
- DBSystemMSSQL = DBSystemKey.String("mssql")
- // Microsoft SQL Server Compact
- DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
- // MySQL
- DBSystemMySQL = DBSystemKey.String("mysql")
- // Oracle Database
- DBSystemOracle = DBSystemKey.String("oracle")
- // IBM DB2
- DBSystemDB2 = DBSystemKey.String("db2")
- // PostgreSQL
- DBSystemPostgreSQL = DBSystemKey.String("postgresql")
- // Amazon Redshift
- DBSystemRedshift = DBSystemKey.String("redshift")
- // Apache Hive
- DBSystemHive = DBSystemKey.String("hive")
- // Cloudscape
- DBSystemCloudscape = DBSystemKey.String("cloudscape")
- // HyperSQL DataBase
- DBSystemHSQLDB = DBSystemKey.String("hsqldb")
- // Progress Database
- DBSystemProgress = DBSystemKey.String("progress")
- // SAP MaxDB
- DBSystemMaxDB = DBSystemKey.String("maxdb")
- // SAP HANA
- DBSystemHanaDB = DBSystemKey.String("hanadb")
- // Ingres
- DBSystemIngres = DBSystemKey.String("ingres")
- // FirstSQL
- DBSystemFirstSQL = DBSystemKey.String("firstsql")
- // EnterpriseDB
- DBSystemEDB = DBSystemKey.String("edb")
- // InterSystems Caché
- DBSystemCache = DBSystemKey.String("cache")
- // Adabas (Adaptable Database System)
- DBSystemAdabas = DBSystemKey.String("adabas")
- // Firebird
- DBSystemFirebird = DBSystemKey.String("firebird")
- // Apache Derby
- DBSystemDerby = DBSystemKey.String("derby")
- // FileMaker
- DBSystemFilemaker = DBSystemKey.String("filemaker")
- // Informix
- DBSystemInformix = DBSystemKey.String("informix")
- // InstantDB
- DBSystemInstantDB = DBSystemKey.String("instantdb")
- // InterBase
- DBSystemInterbase = DBSystemKey.String("interbase")
- // MariaDB
- DBSystemMariaDB = DBSystemKey.String("mariadb")
- // Netezza
- DBSystemNetezza = DBSystemKey.String("netezza")
- // Pervasive PSQL
- DBSystemPervasive = DBSystemKey.String("pervasive")
- // PointBase
- DBSystemPointbase = DBSystemKey.String("pointbase")
- // SQLite
- DBSystemSqlite = DBSystemKey.String("sqlite")
- // Sybase
- DBSystemSybase = DBSystemKey.String("sybase")
- // Teradata
- DBSystemTeradata = DBSystemKey.String("teradata")
- // Vertica
- DBSystemVertica = DBSystemKey.String("vertica")
- // H2
- DBSystemH2 = DBSystemKey.String("h2")
- // ColdFusion IMQ
- DBSystemColdfusion = DBSystemKey.String("coldfusion")
- // Apache Cassandra
- DBSystemCassandra = DBSystemKey.String("cassandra")
- // Apache HBase
- DBSystemHBase = DBSystemKey.String("hbase")
- // MongoDB
- DBSystemMongoDB = DBSystemKey.String("mongodb")
- // Redis
- DBSystemRedis = DBSystemKey.String("redis")
- // Couchbase
- DBSystemCouchbase = DBSystemKey.String("couchbase")
- // CouchDB
- DBSystemCouchDB = DBSystemKey.String("couchdb")
- // Microsoft Azure Cosmos DB
- DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
- // Amazon DynamoDB
- DBSystemDynamoDB = DBSystemKey.String("dynamodb")
- // Neo4j
- DBSystemNeo4j = DBSystemKey.String("neo4j")
- // Apache Geode
- DBSystemGeode = DBSystemKey.String("geode")
- // Elasticsearch
- DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
- // Memcached
- DBSystemMemcached = DBSystemKey.String("memcached")
- // CockroachDB
- DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
- // OpenSearch
- DBSystemOpensearch = DBSystemKey.String("opensearch")
- // ClickHouse
- DBSystemClickhouse = DBSystemKey.String("clickhouse")
- // Cloud Spanner
- DBSystemSpanner = DBSystemKey.String("spanner")
- // Trino
- DBSystemTrino = DBSystemKey.String("trino")
-)
-
-// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
- return DBCassandraCoordinatorDCKey.String(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
- return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents whether
-// or not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
- return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
- return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
- return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// DBCassandraTable returns an attribute KeyValue conforming to the
-// "db.cassandra.table" semantic conventions. It represents the name of the
-// primary Cassandra table that the operation is acting upon, including the
-// keyspace name (if applicable).
-func DBCassandraTable(val string) attribute.KeyValue {
- return DBCassandraTableKey.String(val)
-}
-
-// DBConnectionString returns an attribute KeyValue conforming to the
-// "db.connection_string" semantic conventions. It represents the connection
-// string used to connect to the database. It is recommended to remove embedded
-// credentials.
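For review context, a minimal sketch of how the DB attribute keys and helpers above are typically applied to a client span; the tracer name, span name, and semconv import path/version are illustrative assumptions, not part of this diff:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // version assumed; match the vendored file
	"go.opentelemetry.io/otel/trace"
)

// queryWithSpan annotates a client span with the DB attributes defined above.
func queryWithSpan(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "SELECT customers",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.DBSystemPostgreSQL,                       // db.system enum value from the list above
			semconv.DBName("customers"),                      // db.name
			semconv.DBOperation("SELECT"),                    // db.operation
			semconv.DBStatement("SELECT * FROM wuser_table"), // db.statement
		),
	)
	defer span.End()
}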
-func DBConnectionString(val string) attribute.KeyValue {
- return DBConnectionStringKey.String(val)
-}
-
-// DBCosmosDBClientID returns an attribute KeyValue conforming to the
-// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-// Cosmos client instance id.
-func DBCosmosDBClientID(val string) attribute.KeyValue {
- return DBCosmosDBClientIDKey.String(val)
-}
-
-// DBCosmosDBContainer returns an attribute KeyValue conforming to the
-// "db.cosmosdb.container" semantic conventions. It represents the Cosmos DB
-// container name.
-func DBCosmosDBContainer(val string) attribute.KeyValue {
- return DBCosmosDBContainerKey.String(val)
-}
-
-// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
-// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
-// consumed for that operation.
-func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
- return DBCosmosDBRequestChargeKey.Float64(val)
-}
-
-// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
-// to the "db.cosmosdb.request_content_length" semantic conventions. It
-// represents the request payload size in bytes.
-func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
- return DBCosmosDBRequestContentLengthKey.Int(val)
-}
-
-// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos DB
-// status code.
-func DBCosmosDBStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.sub_status_code" semantic conventions. It represents the Cosmos
-// DB sub status code.
-func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBSubStatusCodeKey.Int(val)
-}
-
-// DBElasticsearchClusterName returns an attribute KeyValue conforming to
-// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
-// identifier of an Elasticsearch cluster.
-func DBElasticsearchClusterName(val string) attribute.KeyValue {
- return DBElasticsearchClusterNameKey.String(val)
-}
-
-// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
-// "db.elasticsearch.node.name" semantic conventions. It represents the
-// human-readable identifier of the node/instance to which a request was
-// routed.
-func DBElasticsearchNodeName(val string) attribute.KeyValue {
- return DBElasticsearchNodeNameKey.String(val)
-}
-
-// DBInstanceID returns an attribute KeyValue conforming to the
-// "db.instance.id" semantic conventions. It represents an identifier (address,
-// unique name, or any other identifier) of the database instance that is
-// executing queries or mutations on the current connection. This is useful in
-// cases where the database is running in a clustered environment and the
-// instrumentation is able to record the node executing the query. The client
-// may obtain this value in databases like MySQL using queries like `select
-// @@hostname`.
-func DBInstanceID(val string) attribute.KeyValue {
- return DBInstanceIDKey.String(val)
-}
-
-// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
-// "db.jdbc.driver_classname" semantic conventions. It represents the
-// fully-qualified class name of the [Java Database Connectivity
-// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
-// used to connect.
-func DBJDBCDriverClassname(val string) attribute.KeyValue {
- return DBJDBCDriverClassnameKey.String(val)
-}
-
-// DBMongoDBCollection returns an attribute KeyValue conforming to the
-// "db.mongodb.collection" semantic conventions. It represents the MongoDB
-// collection being accessed within the database stated in `db.name`.
-func DBMongoDBCollection(val string) attribute.KeyValue {
- return DBMongoDBCollectionKey.String(val)
-}
-
-// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
-// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
-// SQL Server [instance
-// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
-// being connected to. This name is used to determine the port of a named instance.
-func DBMSSQLInstanceName(val string) attribute.KeyValue {
- return DBMSSQLInstanceNameKey.String(val)
-}
-
-// DBName returns an attribute KeyValue conforming to the "db.name" semantic
-// conventions. It represents the name of the database being accessed. For
-// commands that switch the database, this should be set to the target database
-// (even if the command fails).
-func DBName(val string) attribute.KeyValue {
- return DBNameKey.String(val)
-}
-
-// DBOperation returns an attribute KeyValue conforming to the
-// "db.operation" semantic conventions. It represents the name of the operation
-// being executed, e.g. the [MongoDB command
-// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
-// such as `findAndModify`, or the SQL keyword.
-func DBOperation(val string) attribute.KeyValue {
- return DBOperationKey.String(val)
-}
-
-// DBRedisDBIndex returns an attribute KeyValue conforming to the
-// "db.redis.database_index" semantic conventions. It represents the index of
-// the database being accessed as used in the [`SELECT`
-// command](https://redis.io/commands/select), provided as an integer. To be
-// used instead of the generic `db.name` attribute.
-func DBRedisDBIndex(val int) attribute.KeyValue {
- return DBRedisDBIndexKey.Int(val)
-}
-
-// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
-// semantic conventions. It represents the name of the primary table that the
-// operation is acting upon, including the database name (if applicable).
-func DBSQLTable(val string) attribute.KeyValue {
- return DBSQLTableKey.String(val)
-}
-
-// DBStatement returns an attribute KeyValue conforming to the
-// "db.statement" semantic conventions. It represents the database statement
-// being executed.
-func DBStatement(val string) attribute.KeyValue {
- return DBStatementKey.String(val)
-}
-
-// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
-// conventions. It represents the username for accessing the database.
-func DBUser(val string) attribute.KeyValue {
- return DBUserKey.String(val)
-}
-
-// Describes deprecated HTTP attributes.
-const (
- // HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
- // semantic conventions.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: deprecated
- // Deprecated: use `network.protocol.name` instead.
- HTTPFlavorKey = attribute.Key("http.flavor")
-
- // HTTPMethodKey is the attribute Key conforming to the "http.method"
- // semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'GET', 'POST', 'HEAD'
- // Deprecated: use `http.request.method` instead.
- HTTPMethodKey = attribute.Key("http.method")
-
- // HTTPRequestContentLengthKey is the attribute Key conforming to the
- // "http.request_content_length" semantic conventions.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 3495
- // Deprecated: use `http.request.header.content-length` instead.
- HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
-
- // HTTPResponseContentLengthKey is the attribute Key conforming to the
- // "http.response_content_length" semantic conventions.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 3495
- // Deprecated: use `http.response.header.content-length` instead.
- HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
-
- // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
- // semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'http', 'https'
- // Deprecated: use `url.scheme` instead.
- HTTPSchemeKey = attribute.Key("http.scheme")
-
- // HTTPStatusCodeKey is the attribute Key conforming to the
- // "http.status_code" semantic conventions.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 200
- // Deprecated: use `http.response.status_code` instead.
- HTTPStatusCodeKey = attribute.Key("http.status_code")
-
- // HTTPTargetKey is the attribute Key conforming to the "http.target"
- // semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '/search?q=OpenTelemetry#SemConv'
- // Deprecated: use `url.path` and `url.query` instead.
- HTTPTargetKey = attribute.Key("http.target")
-
- // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
- // conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
- // Deprecated: use `url.full` instead.
- HTTPURLKey = attribute.Key("http.url")
-
- // HTTPUserAgentKey is the attribute Key conforming to the
- // "http.user_agent" semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
- // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
- // Version/14.1.2 Mobile/15E148 Safari/604.1'
- // Deprecated: use `user_agent.original` instead.
- HTTPUserAgentKey = attribute.Key("http.user_agent")
-)
-
-var (
- // HTTP/1.0
- //
- // Deprecated: use `network.protocol.name` instead.
- HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
- // HTTP/1.1
- //
- // Deprecated: use `network.protocol.name` instead.
- HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
- // HTTP/2
- //
- // Deprecated: use `network.protocol.name` instead.
- HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
- // HTTP/3
- //
- // Deprecated: use `network.protocol.name` instead.
- HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
- // SPDY protocol
- //
- // Deprecated: use `network.protocol.name` instead.
- HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
- // QUIC protocol
- //
- // Deprecated: use `network.protocol.name` instead.
- HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
-)
-
-// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
-// semantic conventions.
-//
-// Deprecated: use `http.request.method` instead.
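Each Deprecated note above names its replacement attribute. A hedged sketch of the substitution on an existing span (the span value, import path, and version are assumptions for illustration):

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // version assumed
	"go.opentelemetry.io/otel/trace"
)

// migrateHTTPAttrs contrasts the deprecated helpers in this block with the
// replacements named in their Deprecated notes.
func migrateHTTPAttrs(span trace.Span) {
	// Old, deprecated helpers:
	span.SetAttributes(
		semconv.HTTPMethod("GET"),   // http.method
		semconv.HTTPStatusCode(200), // http.status_code
	)
	// Current replacements:
	span.SetAttributes(
		semconv.HTTPRequestMethodGet,        // http.request.method
		semconv.HTTPResponseStatusCode(200), // http.response.status_code
	)
}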
-func HTTPMethod(val string) attribute.KeyValue {
- return HTTPMethodKey.String(val)
-}
-
-// HTTPRequestContentLength returns an attribute KeyValue conforming to the
-// "http.request_content_length" semantic conventions.
-//
-// Deprecated: use `http.request.header.content-length` instead.
-func HTTPRequestContentLength(val int) attribute.KeyValue {
- return HTTPRequestContentLengthKey.Int(val)
-}
-
-// HTTPResponseContentLength returns an attribute KeyValue conforming to the
-// "http.response_content_length" semantic conventions.
-//
-// Deprecated: use `http.response.header.content-length` instead.
-func HTTPResponseContentLength(val int) attribute.KeyValue {
- return HTTPResponseContentLengthKey.Int(val)
-}
-
-// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
-// semantic conventions.
-//
-// Deprecated: use `url.scheme` instead.
-func HTTPScheme(val string) attribute.KeyValue {
- return HTTPSchemeKey.String(val)
-}
-
-// HTTPStatusCode returns an attribute KeyValue conforming to the
-// "http.status_code" semantic conventions.
-//
-// Deprecated: use `http.response.status_code` instead.
-func HTTPStatusCode(val int) attribute.KeyValue {
- return HTTPStatusCodeKey.Int(val)
-}
-
-// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
-// semantic conventions.
-//
-// Deprecated: use `url.path` and `url.query` instead.
-func HTTPTarget(val string) attribute.KeyValue {
- return HTTPTargetKey.String(val)
-}
-
-// HTTPURL returns an attribute KeyValue conforming to the "http.url"
-// semantic conventions.
-//
-// Deprecated: use `url.full` instead.
-func HTTPURL(val string) attribute.KeyValue {
- return HTTPURLKey.String(val)
-}
-
-// HTTPUserAgent returns an attribute KeyValue conforming to the
-// "http.user_agent" semantic conventions.
-//
-// Deprecated: use `user_agent.original` instead.
-func HTTPUserAgent(val string) attribute.KeyValue {
- return HTTPUserAgentKey.String(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetHostNameKey is the attribute Key conforming to the "net.host.name"
- // semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'example.com'
- // Deprecated: use `server.address`.
- NetHostNameKey = attribute.Key("net.host.name")
-
- // NetHostPortKey is the attribute Key conforming to the "net.host.port"
- // semantic conventions.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 8080
- // Deprecated: use `server.port`.
- NetHostPortKey = attribute.Key("net.host.port")
-
- // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
- // semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'example.com'
- // Deprecated: use `server.address` on client spans and `client.address` on
- // server spans.
- NetPeerNameKey = attribute.Key("net.peer.name")
-
- // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
- // semantic conventions.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 8080
- // Deprecated: use `server.port` on client spans and `client.port` on
- // server spans.
- NetPeerPortKey = attribute.Key("net.peer.port")
-
- // NetProtocolNameKey is the attribute Key conforming to the
- // "net.protocol.name" semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'amqp', 'http', 'mqtt'
- // Deprecated: use `network.protocol.name`.
- NetProtocolNameKey = attribute.Key("net.protocol.name")
-
- // NetProtocolVersionKey is the attribute Key conforming to the
- // "net.protocol.version" semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '3.1.1'
- // Deprecated: use `network.protocol.version`.
- NetProtocolVersionKey = attribute.Key("net.protocol.version")
-
- // NetSockFamilyKey is the attribute Key conforming to the
- // "net.sock.family" semantic conventions.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: deprecated
- // Deprecated: use `network.transport` and `network.type`.
- NetSockFamilyKey = attribute.Key("net.sock.family")
-
- // NetSockHostAddrKey is the attribute Key conforming to the
- // "net.sock.host.addr" semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '/var/my.sock'
- // Deprecated: use `network.local.address`.
- NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
-
- // NetSockHostPortKey is the attribute Key conforming to the
- // "net.sock.host.port" semantic conventions.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 8080
- // Deprecated: use `network.local.port`.
- NetSockHostPortKey = attribute.Key("net.sock.host.port")
-
- // NetSockPeerAddrKey is the attribute Key conforming to the
- // "net.sock.peer.addr" semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '192.168.0.1'
- // Deprecated: use `network.peer.address`.
- NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
-
- // NetSockPeerNameKey is the attribute Key conforming to the
- // "net.sock.peer.name" semantic conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '/var/my.sock'
- // Deprecated: no replacement at this time.
- NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
-
- // NetSockPeerPortKey is the attribute Key conforming to the
- // "net.sock.peer.port" semantic conventions.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 65531
- // Deprecated: use `network.peer.port`.
- NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
-
- // NetTransportKey is the attribute Key conforming to the "net.transport"
- // semantic conventions.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: deprecated
- // Deprecated: use `network.transport`.
- NetTransportKey = attribute.Key("net.transport")
-)
-
-var (
- // IPv4 address
- //
- // Deprecated: use `network.transport` and `network.type`.
- NetSockFamilyInet = NetSockFamilyKey.String("inet")
- // IPv6 address
- //
- // Deprecated: use `network.transport` and `network.type`.
- NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
- // Unix domain socket path
- //
- // Deprecated: use `network.transport` and `network.type`.
- NetSockFamilyUnix = NetSockFamilyKey.String("unix")
-)
-
-var (
- // ip_tcp
- //
- // Deprecated: use `network.transport`.
- NetTransportTCP = NetTransportKey.String("ip_tcp")
- // ip_udp
- //
- // Deprecated: use `network.transport`.
- NetTransportUDP = NetTransportKey.String("ip_udp")
- // Named or anonymous pipe
- //
- // Deprecated: use `network.transport`.
- NetTransportPipe = NetTransportKey.String("pipe")
- // In-process communication
- //
- // Deprecated: use `network.transport`.
- NetTransportInProc = NetTransportKey.String("inproc")
- // Something else (non IP-based)
- //
- // Deprecated: use `network.transport`.
- NetTransportOther = NetTransportKey.String("other")
-)
-
-// NetHostName returns an attribute KeyValue conforming to the
-// "net.host.name" semantic conventions.
-//
-// Deprecated: use `server.address`.
-func NetHostName(val string) attribute.KeyValue {
- return NetHostNameKey.String(val)
-}
-
-// NetHostPort returns an attribute KeyValue conforming to the
-// "net.host.port" semantic conventions.
-//
-// Deprecated: use `server.port`.
-func NetHostPort(val int) attribute.KeyValue {
- return NetHostPortKey.Int(val)
-}
-
-// NetPeerName returns an attribute KeyValue conforming to the
-// "net.peer.name" semantic conventions.
-//
-// Deprecated: use `server.address` on client spans and `client.address` on
-// server spans.
-func NetPeerName(val string) attribute.KeyValue {
- return NetPeerNameKey.String(val)
-}
-
-// NetPeerPort returns an attribute KeyValue conforming to the
-// "net.peer.port" semantic conventions.
-//
-// Deprecated: use `server.port` on client spans and `client.port` on server
-// spans.
-func NetPeerPort(val int) attribute.KeyValue {
- return NetPeerPortKey.Int(val)
-}
-
-// NetProtocolName returns an attribute KeyValue conforming to the
-// "net.protocol.name" semantic conventions.
-//
-// Deprecated: use `network.protocol.name`.
-func NetProtocolName(val string) attribute.KeyValue {
- return NetProtocolNameKey.String(val)
-}
-
-// NetProtocolVersion returns an attribute KeyValue conforming to the
-// "net.protocol.version" semantic conventions.
-//
-// Deprecated: use `network.protocol.version`.
-func NetProtocolVersion(val string) attribute.KeyValue {
- return NetProtocolVersionKey.String(val)
-}
-
-// NetSockHostAddr returns an attribute KeyValue conforming to the
-// "net.sock.host.addr" semantic conventions.
-//
-// Deprecated: use `network.local.address`.
-func NetSockHostAddr(val string) attribute.KeyValue {
- return NetSockHostAddrKey.String(val)
-}
-
-// NetSockHostPort returns an attribute KeyValue conforming to the
-// "net.sock.host.port" semantic conventions.
-//
-// Deprecated: use `network.local.port`.
-func NetSockHostPort(val int) attribute.KeyValue {
- return NetSockHostPortKey.Int(val)
-}
-
-// NetSockPeerAddr returns an attribute KeyValue conforming to the
-// "net.sock.peer.addr" semantic conventions.
-//
-// Deprecated: use `network.peer.address`.
-func NetSockPeerAddr(val string) attribute.KeyValue {
- return NetSockPeerAddrKey.String(val)
-}
-
-// NetSockPeerName returns an attribute KeyValue conforming to the
-// "net.sock.peer.name" semantic conventions.
-//
-// Deprecated: no replacement at this time.
-func NetSockPeerName(val string) attribute.KeyValue {
- return NetSockPeerNameKey.String(val)
-}
-
-// NetSockPeerPort returns an attribute KeyValue conforming to the
-// "net.sock.peer.port" semantic conventions.
-//
-// Deprecated: use `network.peer.port`.
-func NetSockPeerPort(val int) attribute.KeyValue {
- return NetSockPeerPortKey.Int(val)
-}
-
-// These attributes may be used to describe the receiver of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
- // DestinationAddressKey is the attribute Key conforming to the
- // "destination.address" semantic conventions. It represents the
- // destination address - domain name if available without reverse DNS
- // lookup; otherwise, IP address or Unix domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the source side, and when communicating through
- // an intermediary, `destination.address` SHOULD represent the destination
- // address behind any intermediaries, for example proxies, if it's
- // available.
- DestinationAddressKey = attribute.Key("destination.address")
-
- // DestinationPortKey is the attribute Key conforming to the
- // "destination.port" semantic conventions. It represents the destination
- // port number.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3389, 2888
- DestinationPortKey = attribute.Key("destination.port")
-)
-
-// DestinationAddress returns an attribute KeyValue conforming to the
-// "destination.address" semantic conventions. It represents the destination
-// address - domain name if available without reverse DNS lookup; otherwise, IP
-// address or Unix domain socket name.
-func DestinationAddress(val string) attribute.KeyValue {
- return DestinationAddressKey.String(val)
-}
-
-// DestinationPort returns an attribute KeyValue conforming to the
-// "destination.port" semantic conventions. It represents the destination port
-// number.
-func DestinationPort(val int) attribute.KeyValue {
- return DestinationPortKey.Int(val)
-}
-
-// These attributes may be used for any disk related operation.
-const (
- // DiskIoDirectionKey is the attribute Key conforming to the
- // "disk.io.direction" semantic conventions. It represents the disk IO
- // operation direction.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'read'
- DiskIoDirectionKey = attribute.Key("disk.io.direction")
-)
-
-var (
- // read
- DiskIoDirectionRead = DiskIoDirectionKey.String("read")
- // write
- DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
-)
-
-// The shared attributes used to report an error.
-const (
- // ErrorTypeKey is the attribute Key conforming to the "error.type"
- // semantic conventions. It represents a class of error the
- // operation ended with.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'timeout', 'java.net.UnknownHostException',
- // 'server_certificate_invalid', '500'
- // Note: The `error.type` SHOULD be predictable and SHOULD have low
- // cardinality.
- // Instrumentations SHOULD document the list of errors they report.
- //
- // The cardinality of `error.type` within one instrumentation library
- // SHOULD be low.
- // Telemetry consumers that aggregate data from multiple instrumentation
- // libraries and applications
- // should be prepared for `error.type` to have high cardinality at query
- // time when no
- // additional filters are applied.
- //
- // If the operation has completed successfully, instrumentations SHOULD NOT
- // set `error.type`.
- //
- // If a specific domain defines its own set of error identifiers (such as
- // HTTP or gRPC status codes),
- // it's RECOMMENDED to:
- //
- // * Use a domain-specific attribute
- // * Set `error.type` to capture all errors, regardless of whether they are
- // defined within the domain-specific set or not.
- ErrorTypeKey = attribute.Key("error.type")
-)
-
-var (
- // A fallback error value to be used when the instrumentation doesn't define a custom value
- ErrorTypeOther = ErrorTypeKey.String("_OTHER")
-)
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
- // ExceptionEscapedKey is the attribute Key conforming to the
- // "exception.escaped" semantic conventions. It SHOULD be
- // set to true if the exception event is recorded at a point where it is
- // known that the exception is escaping the scope of the span.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: An exception is considered to have escaped (or left) the scope of
- // a span,
- // if that span is ended while the exception is still logically "in
- // flight".
- // This may be actually "in flight" in some languages (e.g. if the
- // exception
- // is passed to a Context manager's `__exit__` method in Python) but will
- // usually be caught at the point of recording the exception in most
- // languages.
- //
- // It is usually not possible to determine at the point where an exception
- // is thrown
- // whether it will escape the scope of a span.
- // However, it is trivial to know that an exception
- // will escape, if one checks for an active exception just before ending
- // the span,
- // as done in the [example for recording span
- // exceptions](#recording-an-exception).
- //
- // It follows that an exception may still escape the scope of the span
- // even if the `exception.escaped` attribute was not set or set to false,
- // since the event might have been recorded at a time where it was not
- // clear whether the exception will escape.
- ExceptionEscapedKey = attribute.Key("exception.escaped")
-
- // ExceptionMessageKey is the attribute Key conforming to the
- // "exception.message" semantic conventions. It represents the exception
- // message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Division by zero', "Can't convert 'int' object to str
- // implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
-
- // ExceptionStacktraceKey is the attribute Key conforming to the
- // "exception.stacktrace" semantic conventions. It represents a stacktrace
- // as a string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-
- // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
- // semantic conventions. It represents the type of the exception (its
- // fully-qualified class name, if applicable). The dynamic type of the
- // exception should be preferred over the static type in languages that
- // support it.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It SHOULD be set to
-// true if the exception event is recorded at a point where it is known that
-// the exception is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
- return ExceptionEscapedKey.Bool(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
- return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
- return ExceptionStacktraceKey.String(val)
-}
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
- return ExceptionTypeKey.String(val)
-}
-
-// Semantic convention attributes in the HTTP namespace.
-const (
- // HTTPRequestBodySizeKey is the attribute Key conforming to the
- // "http.request.body.size" semantic conventions. It represents the size of
- // the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
-
- // HTTPRequestMethodKey is the attribute Key conforming to the
- // "http.request.method" semantic conventions. It represents the HTTP
- // request method.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- // Note: HTTP request method value SHOULD be "known" to the
- // instrumentation.
- // By default, this convention defines "known" methods as the ones listed
- // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
- // and the PATCH method defined in
- // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
- //
- // If the HTTP request method is not known to instrumentation, it MUST set
- // the `http.request.method` attribute to `_OTHER`.
- //
- // If the HTTP instrumentation could end up converting valid HTTP request
- // methods to `_OTHER`, then it MUST provide a way to override
- // the list of known HTTP methods. If this override is done via environment
- // variable, then the environment variable MUST be named
- // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
- // list of case-sensitive known HTTP methods
- // (this list MUST be a full override of the default known methods; it is
- // not a list of known methods in addition to the defaults).
- //
- // HTTP method names are case-sensitive and the `http.request.method`
- // attribute value MUST match a known HTTP method name exactly.
- // Instrumentations for specific web frameworks that consider HTTP methods
- // to be case insensitive SHOULD populate a canonical equivalent.
- // Tracing instrumentations that do so MUST also set
- // `http.request.method_original` to the original value.
- HTTPRequestMethodKey = attribute.Key("http.request.method")
-
- // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
- // "http.request.method_original" semantic conventions. It represents the
- // original HTTP method sent by the client in the request line.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GeT', 'ACL', 'foo'
- HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
-
- // HTTPRequestResendCountKey is the attribute Key conforming to the
- // "http.request.resend_count" semantic conventions. It represents the
- // ordinal number of request resending attempt (for any reason, including
- // redirects).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3
- // Note: The resend count SHOULD be updated each time an HTTP request gets
- // resent by the client, regardless of what was the cause of the resending
- // (e.g. redirection, authorization failure, 503 Server Unavailable,
- // network issues, or any other).
- HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
-
- // HTTPResponseBodySizeKey is the attribute Key conforming to the
- // "http.response.body.size" semantic conventions. It represents the size
- // of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
-
- // HTTPResponseStatusCodeKey is the attribute Key conforming to the
- // "http.response.status_code" semantic conventions. It represents the
- // [HTTP response status
- // code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 200
- HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
-
- // HTTPRouteKey is the attribute Key conforming to the "http.route"
- // semantic conventions. It represents the matched route, that is, the path
- // template in the format used by the respective server framework.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
- // Note: MUST NOT be populated when this is not supported by the HTTP
- // server framework as the route attribute should have low-cardinality and
- // the URI path can NOT substitute it.
- // SHOULD include the [application
- // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
- HTTPRouteKey = attribute.Key("http.route")
-)
-
-var (
- // CONNECT method
- HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
- // DELETE method
- HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
- // GET method
- HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
- // HEAD method
- HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
- // OPTIONS method
- HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
- // PATCH method
- HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
- // POST method
- HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
- // PUT method
- HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
- // TRACE method
- HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
- // Any HTTP method that the instrumentation has no prior knowledge of
- HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
-)
-
-// HTTPRequestBodySize returns an attribute KeyValue conforming to the
-// "http.request.body.size" semantic conventions. It represents the size of the
-// request payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPRequestBodySize(val int) attribute.KeyValue {
- return HTTPRequestBodySizeKey.Int(val)
-}
-
-// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
-// "http.request.method_original" semantic conventions. It represents the
-// original HTTP method sent by the client in the request line.
-func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
- return HTTPRequestMethodOriginalKey.String(val)
-}
-
-// HTTPRequestResendCount returns an attribute KeyValue conforming to the
-// "http.request.resend_count" semantic conventions. It represents the ordinal
-// number of request resending attempt (for any reason, including redirects).
-func HTTPRequestResendCount(val int) attribute.KeyValue {
- return HTTPRequestResendCountKey.Int(val)
-}
-
-// HTTPResponseBodySize returns an attribute KeyValue conforming to the
-// "http.response.body.size" semantic conventions. It represents the size of
-// the response payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPResponseBodySize(val int) attribute.KeyValue {
- return HTTPResponseBodySizeKey.Int(val)
-}
-
-// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
-// "http.response.status_code" semantic conventions. It represents the [HTTP
-// response status code](https://tools.ietf.org/html/rfc7231#section-6).
-func HTTPResponseStatusCode(val int) attribute.KeyValue {
- return HTTPResponseStatusCodeKey.Int(val)
-}
-
-// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
-// semantic conventions. It represents the matched route, that is, the path
-// template in the format used by the respective server framework.
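A hedged sketch of a server-side span annotated with the stable HTTP attributes defined in this hunk; the function name, parameters, import path, and version are assumptions (the HTTPRoute helper referenced here is defined just below):

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // version assumed
	"go.opentelemetry.io/otel/trace"
)

// annotateServerSpan sets the stable HTTP attributes on an existing span.
func annotateServerSpan(span trace.Span, route string, status int) {
	span.SetAttributes(
		semconv.HTTPRequestMethodGet,           // http.request.method (enum value above)
		semconv.HTTPRoute(route),               // e.g. "/users/:userID?"
		semconv.HTTPResponseStatusCode(status), // http.response.status_code
	)
}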
-func HTTPRoute(val string) attribute.KeyValue {
- return HTTPRouteKey.String(val)
-}
-
-// Attributes describing telemetry around messaging systems and messaging
-// activities.
-const (
- // MessagingBatchMessageCountKey is the attribute Key conforming to the
- // "messaging.batch.message_count" semantic conventions. It represents the
- // number of messages sent, received, or processed in the scope of the
- // batching operation.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1, 2
- // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
- // spans that operate with a single message. When a messaging client
- // library supports both batch and single-message API for the same
- // operation, instrumentations SHOULD use `messaging.batch.message_count`
- // for batching APIs and SHOULD NOT use it for single-message APIs.
- MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
-
- // MessagingClientIDKey is the attribute Key conforming to the
- // "messaging.client_id" semantic conventions. It represents a unique
- // identifier for the client that consumes or produces a message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'client-5', 'myhost@8742@s8083jm'
- MessagingClientIDKey = attribute.Key("messaging.client_id")
-
- // MessagingDestinationAnonymousKey is the attribute Key conforming to the
- // "messaging.destination.anonymous" semantic conventions. It represents a
- // boolean that is true if the message destination is anonymous (could be
- // unnamed or have an auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
-
- // MessagingDestinationNameKey is the attribute Key conforming to the
- // "messaging.destination.name" semantic conventions. It represents the
- // message destination name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Destination name SHOULD uniquely identify a specific queue, topic
- // or other entity within the broker. If
- // the broker doesn't have such notion, the destination name SHOULD
- // uniquely identify the broker.
- MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
-
- // MessagingDestinationTemplateKey is the attribute Key conforming to the
- // "messaging.destination.template" semantic conventions. It represents the
- // low cardinality representation of the messaging destination name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/customers/{customerID}'
- // Note: Destination names could be constructed from templates. An example
- // would be a destination name involving a user name or product id.
- // Although the destination name in this case is of high cardinality, the
- // underlying template is of low cardinality and can be effectively used
- // for grouping and aggregation.
- MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
-
- // MessagingDestinationTemporaryKey is the attribute Key conforming to the
- // "messaging.destination.temporary" semantic conventions. It represents a
- // boolean that is true if the message destination is temporary and might
- // not exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
-
- // MessagingDestinationPublishAnonymousKey is the attribute Key conforming
- // to the "messaging.destination_publish.anonymous" semantic conventions.
- // It represents a boolean that is true if the publish message destination
- // is anonymous (could be unnamed or have an auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
-
- // MessagingDestinationPublishNameKey is the attribute Key conforming to
- // the "messaging.destination_publish.name" semantic conventions. It
- // represents the name of the original destination the message was
- // published to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyQueue', 'MyTopic'
- // Note: The name SHOULD uniquely identify a specific queue, topic, or
- // other entity within the broker. If
- // the broker doesn't have such notion, the original destination name
- // SHOULD uniquely identify the broker.
- MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
-
- // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
- // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
- // It represents the ordering key for a given message. If the attribute is
- // not present, the message does not have an ordering key.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ordering_key'
- MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
-
- // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
- // "messaging.kafka.consumer.group" semantic conventions. It represents the
- // name of the Kafka Consumer Group that is handling the message. Only
- // applies to consumers, not producers.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
-
- // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
- // the "messaging.kafka.destination.partition" semantic conventions. It
- // represents the partition the message is sent to.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2
- MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
-
- // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
- // "messaging.kafka.message.key" semantic conventions. It represents the
- // message key: message keys in Kafka are used for grouping alike messages
- // to ensure they're processed on the same partition. They differ from
- // `messaging.message.id` in that they're not unique. If the key is `null`,
- // the attribute MUST NOT be set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
- // Note: If the key type is not string, its string representation has to
- // be supplied for the attribute. If the key has no unambiguous, canonical
- // string form, don't include its value.
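A hedged sketch of a producer span annotated with these messaging attributes; the function name, parameters, import path, and version are assumptions, and the enum values used (MessagingSystemKafka, MessagingOperationPublish) are defined further down in this hunk:

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // version assumed
	"go.opentelemetry.io/otel/trace"
)

// annotatePublishSpan sets messaging attributes on a Kafka publish span.
func annotatePublishSpan(span trace.Span, topic, key string) {
	span.SetAttributes(
		semconv.MessagingSystemKafka,                    // messaging.system (enum defined below)
		semconv.MessagingOperationPublish,               // messaging.operation (enum defined below)
		semconv.MessagingDestinationName(topic),         // messaging.destination.name
		semconv.MessagingKafkaMessageKeyKey.String(key), // raw Key per the Note above; omit if the key is null
	)
}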
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
-
- // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
- // "messaging.kafka.message.offset" semantic conventions. It represents the
- // offset of a record in the corresponding Kafka partition.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
-
- // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
- // "messaging.kafka.message.tombstone" semantic conventions. It represents
- // a boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
-
- // MessagingMessageBodySizeKey is the attribute Key conforming to the
- // "messaging.message.body.size" semantic conventions. It represents the
- // size of the message body in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1439
- // Note: This can refer to both the compressed or uncompressed body size.
- // If both sizes are known, the uncompressed
- // body size should be used.
- MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
-
- // MessagingMessageConversationIDKey is the attribute Key conforming to the
- // "messaging.message.conversation_id" semantic conventions. It represents
- // the conversation ID identifying the conversation to which the message
- // belongs, represented as a string. Sometimes called "Correlation ID".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyConversationID'
- MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
-
- // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
- // "messaging.message.envelope.size" semantic conventions. It represents
- // the size of the message body and metadata in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2738
- // Note: This can refer to both the compressed or uncompressed size. If
- // both sizes are known, the uncompressed
- // size should be used.
- MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
-
- // MessagingMessageIDKey is the attribute Key conforming to the
- // "messaging.message.id" semantic conventions. It represents a value used
- // by the messaging system as an identifier for the message, represented as
- // a string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
- MessagingMessageIDKey = attribute.Key("messaging.message.id")
-
- // MessagingOperationKey is the attribute Key conforming to the
- // "messaging.operation" semantic conventions. It represents a string
- // identifying the kind of messaging operation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: If a custom value is used, it MUST be of low cardinality.
- MessagingOperationKey = attribute.Key("messaging.operation")
-
- // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
- // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
- // conventions. It represents the RabbitMQ message routing key.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
- MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
-
- // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_group" semantic conventions. It represents
- // the name of the RocketMQ producer/consumer group that is handling the
- // message. The client type is identified by the SpanKind.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myConsumerGroup'
- MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
-
- // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
- // the "messaging.rocketmq.consumption_model" semantic conventions. It
- // represents the model of message consumption. This only applies to
- // consumer spans.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-
- // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
- // conventions. It represents the delay time level for a delay message,
- // which determines the message delay time.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3
- MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
-
- // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delivery_timestamp"
- // semantic conventions. It represents the timestamp in milliseconds that
- // the delay message is expected to be delivered to the consumer.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1665987217045
- MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
-
- // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.group" semantic conventions. It represents
- // the message group, which is essential for FIFO messages. Messages that
- // belong to the same message group are always processed one by one within
- // the same consumer group.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myMessageGroup'
- MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
-
- // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.keys" semantic conventions. It represents
- // the key(s) of the message, another way to mark the message besides the
- // message id.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'keyA', 'keyB'
- MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
-
- // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.tag" semantic conventions. It represents the
- // secondary classifier of the message besides the topic.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources; resources in different namespaces are - // independent of one another. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents an identifier for - // the messaging system being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingSystemKey = attribute.Key("messaging.system") -) - -var ( - // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created - MessagingOperationPublish = MessagingOperationKey.String("publish") - // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios - MessagingOperationCreate = MessagingOperationKey.String("create") - // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages - MessagingOperationReceive = MessagingOperationKey.String("receive") - // One or more messages are passed to a consumer.
This operation refers to push-based scenarios, where consumers register callbacks which get called by messaging SDKs - MessagingOperationDeliver = MessagingOperationKey.String("deliver") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -var ( - // Apache ActiveMQ - MessagingSystemActivemq = MessagingSystemKey.String("activemq") - // Amazon Simple Queue Service (SQS) - MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid") - // Azure Event Hubs - MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs") - // Azure Service Bus - MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus") - // Google Cloud Pub/Sub - MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - MessagingSystemJms = MessagingSystemKey.String("jms") - // Apache Kafka - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client_id" semantic conventions. It represents a unique -// identifier for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have an auto-generated name). -func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name. -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions.
It represents the -// low-cardinality representation of the messaging destination name. -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationPublishAnonymous returns an attribute KeyValue -// conforming to the "messaging.destination_publish.anonymous" semantic -// conventions. It represents a boolean that is true if the publish message -// destination is anonymous (could be unnamed or have an auto-generated name). -func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationPublishAnonymousKey.Bool(val) -} - -// MessagingDestinationPublishName returns an attribute KeyValue conforming -// to the "messaging.destination_publish.name" semantic conventions. It -// represents the name of the original destination the message was published to. -func MessagingDestinationPublishName(val string) attribute.KeyValue { - return MessagingDestinationPublishNameKey.String(val) -} - -// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic -// conventions. It represents the ordering key for a given message. If the -// attribute is not present, the message does not have an ordering key. -func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageOrderingKeyKey.String(val) -} - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaDestinationPartition returns an attribute KeyValue -// conforming to the "messaging.kafka.destination.partition" semantic -// conventions. It represents the partition the message is sent to. -func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { - return MessagingKafkaDestinationPartitionKey.Int(val) -} - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka, which are used for grouping alike messages to ensure they're -// processed on the same partition. They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size -// of the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the conversation ID identifying the conversation to which the -// message belongs, represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to -// the "messaging.message.envelope.size" semantic conventions. It represents -// the size of the message body and metadata in bytes. -func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the RabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for a delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to the consumer.
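A minimal usage sketch of the messaging helpers defined above, attached to a Kafka producer span. The tracer variable, span name, and attribute values are assumptions for illustration; the helper names are the ones defined in this file, and tracer.Start / span.SetAttributes are the standard go.opentelemetry.io/otel/trace API:

	ctx, span := tracer.Start(ctx, "T1 publish",
		trace.WithSpanKind(trace.SpanKindProducer)) // producer side of a publish
	defer span.End()
	span.SetAttributes(
		MessagingSystemKafka,                  // messaging.system = "kafka"
		MessagingOperationPublish,             // messaging.operation = "publish"
		MessagingDestinationName("T1"),        // topic the message is sent to
		MessagingKafkaMessageKey("myKey"),     // omit entirely if the key is null
		MessagingKafkaDestinationPartition(2), // partition the message is sent to
	)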
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the message group, which is essential for FIFO messages. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of the message, another way to mark the message besides the message ID. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of the message besides the topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions. It represents the -// namespace of RocketMQ resources; resources in different namespaces are -// independent of one another. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// These attributes may be used for any network-related operation. -const ( - // NetworkCarrierIccKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier - // network. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'DE' - NetworkCarrierIccKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMccKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '310' - NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMncKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '001' - NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'sprint' - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It represents - // more details regarding the connection type. It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection.
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'LTE' - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the - // internet connection type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'wifi' - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkIoDirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network - // IO operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'transmit' - NetworkIoDirectionKey = attribute.Key("network.io.direction") - - // NetworkLocalAddressKey is the attribute Key conforming to the - // "network.local.address" semantic conventions. It represents the local - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkLocalAddressKey = attribute.Key("network.local.address") - - // NetworkLocalPortKey is the attribute Key conforming to the - // "network.local.port" semantic conventions. It represents the local port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkLocalPortKey = attribute.Key("network.local.port") - - // NetworkPeerAddressKey is the attribute Key conforming to the - // "network.peer.address" semantic conventions. It represents the peer - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkPeerAddressKey = attribute.Key("network.peer.address") - - // NetworkPeerPortKey is the attribute Key conforming to the - // "network.peer.port" semantic conventions. It represents the peer port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkPeerPortKey = attribute.Key("network.peer.port") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the [OSI - // application layer](https://osi-model.com/application-layer/) or non-OSI - // equivalent. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - // Note: The value SHOULD be normalized to lowercase. - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the - // version of the protocol specified in `network.protocol.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3.1.1' - // Note: `network.protocol.version` refers to the version of the protocol - // used and might be different from the protocol client's version. If the - // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`, - // this attribute should be set to `1.1`. 
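A minimal sketch of recording these connection-level attributes on a span; span is an assumed trace.Span, the values are placeholders, and the enum members and helper functions are the ones defined in this section:

	span.SetAttributes(
		NetworkTransportTCP,             // pair the transport with any port number
		NetworkTypeIpv4,                 // network.type = "ipv4"
		NetworkPeerAddress("10.1.2.80"), // IP address or Unix domain socket name
		NetworkPeerPort(65123),
		NetworkProtocolName("http"),     // normalized to lowercase
		NetworkProtocolVersion("1.1"),   // version on the wire, not the client's
	)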
- NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the [OSI - // transport layer](https://osi-model.com/transport-layer/) or - // [inter-process communication - // method](https://wikipedia.org/wiki/Inter-process_communication). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tcp', 'udp' - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port - // 12345. - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" - // semantic conventions. It represents the [OSI network - // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ipv4', 'ipv6' - // Note: The value SHOULD be normalized to lowercase. - NetworkTypeKey = attribute.Key("network.type") -) - -var ( - // GPRS - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -var ( - // wifi - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -var ( - // transmit - NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") - // receive - NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") -) - -var ( - // TCP - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - NetworkTransportUnix = NetworkTransportKey.String("unix") -) - -var ( - // IPv4 - NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") - // IPv6 - NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") -) - -// NetworkCarrierIcc returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierIcc(val string) attribute.KeyValue { - return NetworkCarrierIccKey.String(val) -} - -// NetworkCarrierMcc returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. -func NetworkCarrierMcc(val string) attribute.KeyValue { - return NetworkCarrierMccKey.String(val) -} - -// NetworkCarrierMnc returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMnc(val string) attribute.KeyValue { - return NetworkCarrierMncKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local -// address of the network connection - IP address or Unix domain socket name. 
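Along the same lines, a mobile client might be described with the carrier and connection helpers above (a sketch only; the carrier values echo the examples given for each attribute):

	span.SetAttributes(
		NetworkConnectionTypeCell,   // network.connection.type = "cell"
		NetworkConnectionSubtypeLte, // network.connection.subtype = "lte"
		NetworkCarrierName("sprint"),
		NetworkCarrierMcc("310"),
	)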
-func NetworkLocalAddress(val string) attribute.KeyValue { - return NetworkLocalAddressKey.String(val) -} - -// NetworkLocalPort returns an attribute KeyValue conforming to the -// "network.local.port" semantic conventions. It represents the local port -// number of the network connection. -func NetworkLocalPort(val int) attribute.KeyValue { - return NetworkLocalPortKey.Int(val) -} - -// NetworkPeerAddress returns an attribute KeyValue conforming to the -// "network.peer.address" semantic conventions. It represents the peer address -// of the network connection - IP address or Unix domain socket name. -func NetworkPeerAddress(val string) attribute.KeyValue { - return NetworkPeerAddressKey.String(val) -} - -// NetworkPeerPort returns an attribute KeyValue conforming to the -// "network.peer.port" semantic conventions. It represents the peer port number -// of the network connection. -func NetworkPeerPort(val int) attribute.KeyValue { - return NetworkPeerPortKey.Int(val) -} - -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the [OSI -// application layer](https://osi-model.com/application-layer/) or non-OSI -// equivalent. -func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) -} - -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. It represents the version -// of the protocol specified in `network.protocol.name`. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) -} - -// Attributes for remote procedure calls. -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes](https://connect.build/docs/protocol/#error-codes) of the - // Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") - - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. 
Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // doesn't specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" - // semantic conventions. It represents the name of the (logical) method - // being called, must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" - // semantic conventions. It represents the full (logical) name of the - // service being called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing - // class. The `code.namespace` attribute may be used to store the latter - // (despite the attribute name, it may include a class name; e.g., class - // with method actually executing the call on the server side, RPC client - // stub class on the client side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCSystemKey is the attribute Key conforming to the "rpc.system" - // semantic conventions. It represents a string identifying the remoting - // system. See below for a list of well-known identifiers. 
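A minimal sketch of a gRPC client span carrying these attributes, with rpc.service and rpc.method mirroring the $service and $method parts of the span name as described above; tracer is an assumed trace.Tracer, and the enum members used here are defined just below:

	ctx, span := tracer.Start(ctx, "myservice.EchoService/exampleMethod",
		trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()
	span.SetAttributes(
		RPCSystemGRPC,                       // rpc.system = "grpc"
		RPCService("myservice.EchoService"), // logical service name
		RPCMethod("exampleMethod"),          // logical method name
		RPCGRPCStatusCodeOk,                 // numeric gRPC status code 0
	)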
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCSystemKey = attribute.Key("rpc.system") -) - -var ( - // cancelled - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - RPCSystemConnectRPC = 
RPCSystemKey.String("connect_rpc") -) - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of request or response. Since protocol allows id to be int, string, -// `null` or missing (for notifications), value is expected to be cast to -// string for simplicity. Use empty string in case of `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 -// doesn't specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// These attributes may be used to describe the server in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. It represents the server domain name if available - // without reverse DNS lookup; otherwise, IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.address` SHOULD represent the server address - // behind any intermediaries, for example proxies, if it's available. 
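For example (a sketch with placeholder values), a client-side span would describe the logical server address behind any intermediaries with the helpers defined below:

	span.SetAttributes(
		ServerAddress("example.com"), // domain name, without reverse DNS lookup
		ServerPort(443),
	)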
- ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" - // semantic conventions. It represents the server port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.port` SHOULD represent the server port behind - // any intermediaries, for example proxies, if it's available. - ServerPortKey = attribute.Key("server.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the -// "server.address" semantic conventions. It represents the server domain name -// if available without reverse DNS lookup; otherwise, IP address or Unix -// domain socket name. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the server port number. -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// These attributes may be used to describe the sender of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the destination side, and when communicating - // through an intermediary, `source.address` SHOULD represent the source - // address behind any intermediaries, for example proxies, if it's - // available. - SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" - // semantic conventions. It represents the source port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceAddress returns an attribute KeyValue conforming to the -// "source.address" semantic conventions. It represents the source address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// Semantic convention attributes in the TLS namespace. -const ( - // TLSCipherKey is the attribute Key conforming to the "tls.cipher" - // semantic conventions. 
It represents the string indicating the - // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) - // used during the current connection. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', - // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' - // Note: The values allowed for `tls.cipher` MUST be one of the - // `Descriptions` of the [registered TLS Cipher - // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). - TLSCipherKey = attribute.Key("tls.cipher") - - // TLSClientCertificateKey is the attribute Key conforming to the - // "tls.client.certificate" semantic conventions. It represents the - // PEM-encoded stand-alone certificate offered by the client. This is - // usually mutually-exclusive of `client.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSClientCertificateKey = attribute.Key("tls.client.certificate") - - // TLSClientCertificateChainKey is the attribute Key conforming to the - // "tls.client.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the client. This is usually mutually-exclusive of - // `client.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") - - // TLSClientHashMd5Key is the attribute Key conforming to the - // "tls.client.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") - - // TLSClientHashSha1Key is the attribute Key conforming to the - // "tls.client.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") - - // TLSClientHashSha256Key is the attribute Key conforming to the - // "tls.client.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") - - // TLSClientIssuerKey is the attribute Key conforming to the - // "tls.client.issuer" semantic conventions.
It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSClientIssuerKey = attribute.Key("tls.client.issuer") - - // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" - // semantic conventions. It represents a hash that identifies clients based - // on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSClientJa3Key = attribute.Key("tls.client.ja3") - - // TLSClientNotAfterKey is the attribute Key conforming to the - // "tls.client.not_after" semantic conventions. It represents the date/time - // indicating when the client certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSClientNotAfterKey = attribute.Key("tls.client.not_after") - - // TLSClientNotBeforeKey is the attribute Key conforming to the - // "tls.client.not_before" semantic conventions. It represents the - // date/time indicating when the client certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") - - // TLSClientServerNameKey is the attribute Key conforming to the - // "tls.client.server_name" semantic conventions. It represents the server - // name (also called SNI) indicating the hostname to which the client - // is attempting to connect. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry.io' - TLSClientServerNameKey = attribute.Key("tls.client.server_name") - - // TLSClientSubjectKey is the attribute Key conforming to the - // "tls.client.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' - TLSClientSubjectKey = attribute.Key("tls.client.subject") - - // TLSClientSupportedCiphersKey is the attribute Key conforming to the - // "tls.client.supported_ciphers" semantic conventions. It represents the - // array of ciphers offered by the client during the client hello. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' - TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") - - // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic - // conventions. It represents the string indicating the curve used for the - // given cipher, when applicable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'secp256r1' - TLSCurveKey = attribute.Key("tls.curve") - - // TLSEstablishedKey is the attribute Key conforming to the - // "tls.established" semantic conventions.
It represents the boolean flag - // indicating if the TLS negotiation was successful and transitioned to an - // encrypted tunnel. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSEstablishedKey = attribute.Key("tls.established") - - // TLSNextProtocolKey is the attribute Key conforming to the - // "tls.next_protocol" semantic conventions. It represents the string - // indicating the protocol being tunneled. Per the values in the [IANA - // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), - // this string should be lower case. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'http/1.1' - TLSNextProtocolKey = attribute.Key("tls.next_protocol") - - // TLSProtocolNameKey is the attribute Key conforming to the - // "tls.protocol.name" semantic conventions. It represents the normalized - // lowercase protocol name parsed from the original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - TLSProtocolNameKey = attribute.Key("tls.protocol.name") - - // TLSProtocolVersionKey is the attribute Key conforming to the - // "tls.protocol.version" semantic conventions. It represents the numeric - // part of the version parsed from the original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2', '3' - TLSProtocolVersionKey = attribute.Key("tls.protocol.version") - - // TLSResumedKey is the attribute Key conforming to the "tls.resumed" - // semantic conventions. It represents the boolean flag indicating if this - // TLS connection was resumed from an existing TLS negotiation. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSResumedKey = attribute.Key("tls.resumed") - - // TLSServerCertificateKey is the attribute Key conforming to the - // "tls.server.certificate" semantic conventions. It represents the - // PEM-encoded stand-alone certificate offered by the server. This is - // usually mutually-exclusive of `server.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSServerCertificateKey = attribute.Key("tls.server.certificate") - - // TLSServerCertificateChainKey is the attribute Key conforming to the - // "tls.server.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the server. This is usually mutually-exclusive of - // `server.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") - - // TLSServerHashMd5Key is the attribute Key conforming to the - // "tls.server.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the server.
For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") - - // TLSServerHashSha1Key is the attribute Key conforming to the - // "tls.server.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") - - // TLSServerHashSha256Key is the attribute Key conforming to the - // "tls.server.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") - - // TLSServerIssuerKey is the attribute Key conforming to the - // "tls.server.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the server. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSServerIssuerKey = attribute.Key("tls.server.issuer") - - // TLSServerJa3sKey is the attribute Key conforming to the - // "tls.server.ja3s" semantic conventions. It represents a hash that - // identifies servers based on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSServerJa3sKey = attribute.Key("tls.server.ja3s") - - // TLSServerNotAfterKey is the attribute Key conforming to the - // "tls.server.not_after" semantic conventions. It represents the date/time - // indicating when the server certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSServerNotAfterKey = attribute.Key("tls.server.not_after") - - // TLSServerNotBeforeKey is the attribute Key conforming to the - // "tls.server.not_before" semantic conventions. It represents the - // date/time indicating when the server certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") - - // TLSServerSubjectKey is the attribute Key conforming to the - // "tls.server.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // server.
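A minimal sketch of recording the outcome of a completed TLS handshake with the helpers defined below; span is an assumed trace.Span, and the values are placeholders drawn from the examples above:

	span.SetAttributes(
		TLSEstablished(true),      // negotiation succeeded
		TLSResumed(false),         // fresh negotiation, not resumed
		TLSProtocolNameTLS,        // tls.protocol.name = "tls"
		TLSProtocolVersion("1.2"), // numeric part of the negotiated version
		TLSCipher("TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"),
		TLSClientServerName("opentelemetry.io"), // SNI sent by the client
	)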
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -var ( - // ssl - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the -// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used -// during the current connection. -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. It represents the PEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually-exclusive of `client.certificate_chain` since this value also -// exists in that list. -func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the client. This is usually mutually-exclusive of `client.certificate` since -// that value should be the first certificate in the chain. -func TLSClientCertificateChain(val ...string) attribute.KeyValue { - return TLSClientCertificateChainKey.StringSlice(val) -} - -// TLSClientHashMd5 returns an attribute KeyValue conforming to the -// "tls.client.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashMd5(val string) attribute.KeyValue { - return TLSClientHashMd5Key.String(val) -} - -// TLSClientHashSha1 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha1(val string) attribute.KeyValue { - return TLSClientHashSha1Key.String(val) -} - -// TLSClientHashSha256 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha256(val string) attribute.KeyValue { - return TLSClientHashSha256Key.String(val) -} - -// TLSClientIssuer returns an attribute KeyValue conforming to the -// "tls.client.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSClientIssuer(val string) attribute.KeyValue { - return TLSClientIssuerKey.String(val) -} - -// TLSClientJa3 returns an attribute KeyValue conforming to the -// "tls.client.ja3" semantic conventions.
It represents a hash that identifies -// clients based on how they perform an SSL/TLS handshake. -func TLSClientJa3(val string) attribute.KeyValue { - return TLSClientJa3Key.String(val) -} - -// TLSClientNotAfter returns an attribute KeyValue conforming to the -// "tls.client.not_after" semantic conventions. It represents the date/time -// indicating when the client certificate is no longer considered valid. -func TLSClientNotAfter(val string) attribute.KeyValue { - return TLSClientNotAfterKey.String(val) -} - -// TLSClientNotBefore returns an attribute KeyValue conforming to the -// "tls.client.not_before" semantic conventions. It represents the date/time -// indicating when the client certificate is first considered valid. -func TLSClientNotBefore(val string) attribute.KeyValue { - return TLSClientNotBeforeKey.String(val) -} - -// TLSClientServerName returns an attribute KeyValue conforming to the -// "tls.client.server_name" semantic conventions. It represents the server name -// requested by the client (also called an SNI), which tells the server which -// hostname the client is attempting to connect to. -func TLSClientServerName(val string) attribute.KeyValue { - return TLSClientServerNameKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. -func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. -func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" -// semantic conventions. It represents the string indicating the curve used for -// the given cipher, when applicable. -func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string -// indicating the protocol being tunneled. Per the values in the [IANA -// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), -// this string should be lower case. -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. It represents the numeric part -// of the version parsed from the original string of the negotiated [SSL/TLS -// protocol -// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES). -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions. 
It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. -func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the PEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually-exclusive of `server.certificate_chain` since this value also -// exists in that list. -func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the server. This is usually mutually-exclusive of `server.certificate` since -// that value should be the first certificate in the chain. -func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the server. -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/time -// indicating when the server certificate is no longer considered valid. 
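A minimal usage sketch for the TLS attribute helpers in this section, assuming an OpenTelemetry tracer is already configured; the package name, tracer name "tls-example", and every attribute value below are illustrative only, not prescribed by the conventions.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// annotateTLSHandshake records negotiated TLS details on a span.
func annotateTLSHandshake(ctx context.Context) {
	_, span := otel.Tracer("tls-example").Start(ctx, "tls.handshake")
	defer span.End()
	span.SetAttributes(
		semconv.TLSCipher("TLS_AES_128_GCM_SHA256"), // illustrative cipher
		semconv.TLSProtocolVersion("1.3"),           // numeric part only, per the docs above
		semconv.TLSEstablished(true),
	)
}
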
-func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/time -// indicating when the server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Attributes describing URL. -const ( - // URLFragmentKey is the attribute Key conforming to the "url.fragment" - // semantic conventions. It represents the [URI - // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'SemConv' - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network - // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // '//localhost' - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the - // fragment is not transmitted over HTTP, but if it is known, it SHOULD be - // included nevertheless. - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case username and - // password SHOULD be redacted and attribute's value SHOULD be - // `https://REDACTED:REDACTED@www.example.com/`. - // `url.full` SHOULD capture the absolute URL when it is available (or can - // be reconstructed) and SHOULD NOT be validated or modified except for - // sanitizing purposes. - URLFullKey = attribute.Key("url.full") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI - // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/search' - URLPathKey = attribute.Key("url.path") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI - // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'q=OpenTelemetry' - // Note: Sensitive content provided in query string SHOULD be scrubbed when - // instrumentations can identify it. - URLQueryKey = attribute.Key("url.query") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" - // semantic conventions. It represents the [URI - // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component - // identifying the used protocol. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https', 'ftp', 'telnet' - URLSchemeKey = attribute.Key("url.scheme") -) - -// URLFragment returns an attribute KeyValue conforming to the -// "url.fragment" semantic conventions. 
It represents the [URI -// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" -// semantic conventions. It represents the absolute URL describing a network -// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" -// semantic conventions. It represents the [URI -// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" -// semantic conventions. It represents the [URI -// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions. It represents the [URI -// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component -// identifying the used protocol. -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1' - UserAgentOriginalKey = attribute.Key("user_agent.original") -) - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// Session is defined as the period of time encompassing all activities -// performed by the application and the actions executed by the end user. -// Consequently, a Session is represented as a collection of Logs, Events, and -// Spans emitted by the Client Application throughout the Session's duration. -// Each Session is assigned a unique identifier, which is included as an -// attribute in the Logs, Events, and Spans generated during the Session's -// lifecycle. -// When a session reaches end of life, typically due to user inactivity or -// session timeout, a new session identifier will be assigned. The previous -// session identifier may be provided by the instrumentation so that telemetry -// backends can link the two sessions. -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" - // semantic conventions. It represents a unique id to identify a session. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go deleted file mode 100644 index cd3c716295..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// This event represents an occurrence of a lifecycle transition on the iOS -// platform. -const ( - // IosStateKey is the attribute Key conforming to the "ios.state" semantic - // conventions. It represents the state the application has transitioned - // into at the occurrence of the event. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: The iOS lifecycle states are defined in the [UIApplicationDelegate - // documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902), - // and from which the `OS terminology` column values are derived. - IosStateKey = attribute.Key("ios.state") -) - -var ( - // The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive` - IosStateActive = IosStateKey.String("active") - // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive` - IosStateInactive = IosStateKey.String("inactive") - // The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground` - IosStateBackground = IosStateKey.String("background") - // The app is now in the foreground. 
This value is associated with UIKit notification `applicationWillEnterForeground` - IosStateForeground = IosStateKey.String("foreground") - // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate` - IosStateTerminate = IosStateKey.String("terminate") -) - -// This event represents an occurrence of a lifecycle transition on the Android -// platform. -const ( - // AndroidStateKey is the attribute Key conforming to the "android.state" - // semantic conventions. It represents the state the application has - // transitioned into at the occurrence of the event. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: The Android lifecycle states are defined in [Activity lifecycle - // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), - // and from which the `OS identifiers` are derived. - AndroidStateKey = attribute.Key("android.state") -) - -var ( - // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time - AndroidStateCreated = AndroidStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state - AndroidStateBackground = AndroidStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states - AndroidStateForeground = AndroidStateKey.String("foreground") -) - -// This semantic convention defines the attributes used to represent a feature -// flag evaluation as an event. -const ( - // FeatureFlagKeyKey is the attribute Key conforming to the - // "feature_flag.key" semantic conventions. It represents the unique - // identifier of the feature flag. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'logo-color' - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider_name" semantic conventions. It represents the - // name of the service provider that performs the flag evaluation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'Flag Manager' - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") - - // FeatureFlagVariantKey is the attribute Key conforming to the - // "feature_flag.variant" semantic conventions. It represents the variant, - // which SHOULD be a semantic identifier for a value. If one is - // unavailable, a stringified version of the value can be used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'red', 'true', 'on' - // Note: A semantic identifier, commonly referred to as a variant, provides - // a means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` may be used for the value `#c05543`. - // - // A stringified version of the value can be used in situations where a - // semantic identifier is unavailable. String representation of the value - // should be determined by the implementer. 
- FeatureFlagVariantKey = attribute.Key("feature_flag.variant") -) - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the unique identifier -// of the feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider_name" semantic conventions. It represents the name of -// the service provider that performs the flag evaluation. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagVariant returns an attribute KeyValue conforming to the -// "feature_flag.variant" semantic conventions. It represents the variant, which SHOULD be a -// semantic identifier for a value. If one is unavailable, a stringified -// version of the value can be used. -func FeatureFlagVariant(val string) attribute.KeyValue { - return FeatureFlagVariantKey.String(val) -} - -// RPC received/sent message. -const ( - // MessageCompressedSizeKey is the attribute Key conforming to the - // "message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - MessageCompressedSizeKey = attribute.Key("message.compressed_size") - - // MessageIDKey is the attribute Key conforming to the "message.id" - // semantic conventions. It represents the message ID, which MUST be - // calculated as two different counters starting from `1`: one for sent - // messages and one for received messages. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Note: This way we guarantee that the values will be consistent between - // different implementations. - MessageIDKey = attribute.Key("message.id") - - // MessageTypeKey is the attribute Key conforming to the "message.type" - // semantic conventions. It represents whether this is a received or - // sent message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessageTypeKey = attribute.Key("message.type") - - // MessageUncompressedSizeKey is the attribute Key conforming to the - // "message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") -) - -var ( - // sent - MessageTypeSent = MessageTypeKey.String("SENT") - // received - MessageTypeReceived = MessageTypeKey.String("RECEIVED") -) - -// MessageCompressedSize returns an attribute KeyValue conforming to the -// "message.compressed_size" semantic conventions. It represents the compressed -// size of the message in bytes. -func MessageCompressedSize(val int) attribute.KeyValue { - return MessageCompressedSizeKey.Int(val) -} - -// MessageID returns an attribute KeyValue conforming to the "message.id" -// semantic conventions. It represents the message ID, which MUST be calculated -// as two different counters starting from `1`: one for sent messages and one -// for received messages. -func MessageID(val int) attribute.KeyValue { - return MessageIDKey.Int(val) -} - -// MessageUncompressedSize returns an attribute KeyValue conforming to the -// "message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. 
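A minimal sketch of attaching these message attributes to a span event when instrumenting an RPC library; the event name "message", the package name, and the counter handling are illustrative assumptions (see the note on `message.id` above), not a fixed API.

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// recordSentMessage adds a span event for the n-th message sent on a call.
func recordSentMessage(span trace.Span, n int, payload []byte) {
	span.AddEvent("message", trace.WithAttributes(
		semconv.MessageTypeSent,
		semconv.MessageID(n), // per-direction counter starting from 1
		semconv.MessageUncompressedSize(len(payload)),
	))
}
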
-func MessageUncompressedSize(val int) attribute.KeyValue { - return MessageUncompressedSizeKey.Int(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go deleted file mode 100644 index 69eda1959f..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go +++ /dev/null @@ -1,2556 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// A cloud environment (e.g. GCP, Azure, AWS). -const ( - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the zone - // where the resource is running. Cloud regions often have multiple, - // isolated locations known as zones to increase availability. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. - CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running in. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. 
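A minimal sketch of describing a cloud environment on an SDK resource, using the keys above together with the enum values defined just below; the package name, region, and account ID are illustrative, and a real detector would discover them at runtime.

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// newCloudResource describes a workload running on AWS Lambda.
func newCloudResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSLambda,
		semconv.CloudRegion("us-east-1"),
		semconv.CloudAccountID("111111111111"), // example value from the docs above
	)
}
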
- CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - // IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - 
CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the zone where -// the resource is running. Cloud regions often have multiple, isolated -// locations known as zones to increase availability. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running in. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on -// Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// A container instance. -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used - // to run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol' - // Note: If using embedded credentials or sensitive data, it is recommended - // to remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents all the - // command arguments (including the command/executable itself) run by the - // container. [2] - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol, --config, config.yaml' - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full - // command run by the container as a single string representing the full - // command. [2] - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol --config config.yaml' - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime - // specific image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect - // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) - // endpoint. - // K8S defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io - // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. - // The ID is assigned by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the - // repo digests of the container image as provided by the container - // runtime. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', - // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' - // Note: - // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) - // and - // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) - // report those under the `RepoDigests` field. - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image - // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). - // Should be only the `<tag>` section of the full name for example from - // `registry.example.com/my-org/my-image:<tag>`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'v1.27.1', '3.5.7-0' - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by the - // container runtime. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents all the -// command arguments (including the command/executable itself) run by the -// container. [2] -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full -// command run by the container as a single string representing the full -// command. [2] -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime -// specific image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. It represents the container -// image tags. An example can be found in [Docker Image -// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). -// Should be only the `<tag>` section of the full name for example from -// `registry.example.com/my-org/my-image:<tag>`. 
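A minimal sketch of attaching container identity to an SDK resource with the helpers in this section; the package name is an assumption, and the IDs, names, and tags simply reuse the documentation examples above.

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// newContainerResource describes the container the process runs in.
func newContainerResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ContainerName("opentelemetry-autoconf"),
		semconv.ContainerID("a3bf90e006b2"), // abbreviated UUID, as noted above
		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
		semconv.ContainerImageTags("v1.27.1"),
	)
}
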
-func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// Describes device attributes. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine-readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human-readable version of - // the device model rather than a machine-readable alternative. 
- DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// A host is defined as a computing instance. For example, physical servers, -// virtual machines, switches or disk array. -const ( - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - HostArchKey = attribute.Key("host.arch") - - // HostCPUCacheL2SizeKey is the attribute Key conforming to the - // "host.cpu.cache.l2.size" semantic conventions. It represents the amount - // of level 2 memory cache available to the processor (in Bytes). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 12288000 - HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") - - // HostCPUFamilyKey is the attribute Key conforming to the - // "host.cpu.family" semantic conventions. It represents the family or - // generation of the CPU. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', 'PA-RISC 1.1e' - HostCPUFamilyKey = attribute.Key("host.cpu.family") - - // HostCPUModelIDKey is the attribute Key conforming to the - // "host.cpu.model.id" semantic conventions. It represents the model - // identifier. It provides more granular information about the CPU, - // distinguishing it from other CPUs within the same family. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', '9000/778/B180L' - HostCPUModelIDKey = attribute.Key("host.cpu.model.id") - - // HostCPUModelNameKey is the attribute Key conforming to the - // "host.cpu.model.name" semantic conventions. It represents the model - // designation of the processor. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' - HostCPUModelNameKey = attribute.Key("host.cpu.model.name") - - // HostCPUSteppingKey is the attribute Key conforming to the - // "host.cpu.stepping" semantic conventions. It represents the stepping or - // core revisions. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - HostCPUSteppingKey = attribute.Key("host.cpu.stepping") - - // HostCPUVendorIDKey is the attribute Key conforming to the - // "host.cpu.vendor.id" semantic conventions. It represents the processor - // manufacturer identifier. A maximum 12-character string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'GenuineIntel' - // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor - // ID string in EBX, EDX and ECX registers. Writing these to memory in this - // order results in a 12-character string. - HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") - - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be - // the instance_id assigned by the cloud provider. For non-containerized - // systems, this should be the `machine-id`. See the table below for the - // sources to use to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'fdbf79e8af94cb7f9e8df36789187052' - HostIDKey = attribute.Key("host.id") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the vM image ID or host OS image ID. - // For Cloud, this value is from the provider. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageNameKey is the attribute Key conforming to the - // "host.image.name" semantic conventions. It represents the name of the VM - // image or OS install the host was instantiated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version - // string of the VM image or host OS as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") - - // HostIPKey is the attribute Key conforming to the "host.ip" semantic - // conventions. It represents the available IP addresses of the host, - // excluding loopback interfaces. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' - // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 - // addresses MUST be specified in the [RFC - // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. - HostIPKey = attribute.Key("host.ip") - - // HostMacKey is the attribute Key conforming to the "host.mac" semantic - // conventions. It represents the available MAC addresses of the host, - // excluding loopback interfaces. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal - // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): - // as hyphen-separated octets in uppercase hexadecimal form from most to - // least significant. - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). -func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. It represents the family or -// generation of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model -// identifier. It provides more granular information about the CPU, -// distinguishing it from other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. -func HostCPUModelName(val string) attribute.KeyValue { - return HostCPUModelNameKey.String(val) -} - -// HostCPUStepping returns an attribute KeyValue conforming to the -// "host.cpu.stepping" semantic conventions. It represents the stepping or core -// revisions. -func HostCPUStepping(val int) attribute.KeyValue { - return HostCPUSteppingKey.Int(val) -} - -// HostCPUVendorID returns an attribute KeyValue conforming to the -// "host.cpu.vendor.id" semantic conventions. It represents the processor -// manufacturer identifier. A maximum 12-character string. 
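A minimal sketch of populating host attributes on an SDK resource; in practice these values would be detected from the operating system or cloud metadata, and the package name and literals below reuse the documentation examples for illustration.

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// newHostResource describes the host the process runs on.
func newHostResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.HostName("opentelemetry-test"),
		semconv.HostID("fdbf79e8af94cb7f9e8df36789187052"), // machine-id style value
		semconv.HostArchAMD64,
		semconv.HostType("n1-standard-1"), // cloud machine type
	)
}
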
-func HostCPUVendorID(val string) attribute.KeyValue { - return HostCPUVendorIDKey.String(val) -} - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized systems, -// this should be the `machine-id`. See the table below for the sources to use -// to determine the `machine-id` based on operating system. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the -// "host.image.id" semantic conventions. It represents the VM image ID or host -// OS image ID. For Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM -// image or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string -// of the VM image or host OS as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic -// conventions. It represents the available IP addresses of the host, excluding -// loopback interfaces. -func HostIP(val ...string) attribute.KeyValue { - return HostIPKey.StringSlice(val) -} - -// HostMac returns an attribute KeyValue conforming to the "host.mac" -// semantic conventions. It represents the available MAC addresses of the host, -// excluding loopback interfaces. -func HostMac(val ...string) attribute.KeyValue { - return HostMacKey.StringSlice(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" -// semantic conventions. It represents the name of the host. On Unix systems, -// it may contain what the hostname command returns, or the fully qualified -// hostname, or another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" -// semantic conventions. It represents the type of host. For Cloud, this must -// be the machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// Kubernetes resource attributes. -const ( - // K8SClusterNameKey is the attribute Key conforming to the - // "k8s.cluster.name" semantic conventions. It represents the name of the - // cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") - - // K8SClusterUIDKey is the attribute Key conforming to the - // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for - // the cluster, set to the UID of the `kube-system` namespace. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' - // Note: K8S doesn't have support for obtaining a cluster ID. 
If this is - // ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. Read on for the - // rationale. - // - // Every object created in a K8S cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8S ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T - // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). - // Which states: - // - // > If generated according to one of the mechanisms defined in Rec. - // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // different from all other UUIDs generated before 3603 A.D., or is - // extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") - - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, which must be unique within a Pod. The - // container runtime usually uses a different globally unique name - // (`container.name`). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") - - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") - - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") - - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") - - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") - - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") - - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") - - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") - - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, which must be unique within a Pod. The -// container runtime usually uses a different globally unique name -// (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. 
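// Editor's sketch (not part of the vendored file): identifying a Kubernetes
// workload with the K8S* helpers in this file. Values are the documentation
// examples; the semconv import version is an assumption.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.K8SClusterName("opentelemetry-cluster"),
		semconv.K8SNamespaceName("default"),
		semconv.K8SPodName("opentelemetry-pod-autoconf"),
		semconv.K8SContainerName("redis"),
		// restart_count distinguishes container instances within one spec.
		semconv.K8SContainerRestartCount(2),
	}
	fmt.Println(attrs)
}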
-func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. -func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// An OCI image manifest. -const ( - // OciManifestDigestKey is the attribute Key conforming to the - // "oci.manifest.digest" semantic conventions. 
It represents the digest of - // the OCI image manifest. For container images specifically, this is the - // digest by which the container image is known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' - // Note: Follows [OCI Image Manifest - // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), - // and specifically the [Digest - // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). - // An example can be found in [Example Image - // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). - OciManifestDigestKey = attribute.Key("oci.manifest.digest") -) - -// OciManifestDigest returns an attribute KeyValue conforming to the -// "oci.manifest.digest" semantic conventions. It represents the digest of the -// OCI image manifest. For container images specifically, this is the digest by -// which the container image is known. -func OciManifestDigest(val string) attribute.KeyValue { - return OciManifestDigestKey.String(val) -} - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" - // semantic conventions. It represents the unique identifier for a - // particular build or compilation of the operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human-readable (not intended to - // be parsed) OS version information, e.g. as reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human-readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. It represents the human-readable (not -// intended to be parsed) OS version information, e.g. as reported by `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human-readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// An operating system process. -const ( - // ProcessCommandKey is the attribute Key conforming to the - // "process.command" semantic conventions. It represents the command used - // to launch the process (i.e. the command name). On Linux based systems, - // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can - // be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents all the - // command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, - // this would be the full argv vector passed to `main`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. 
It represents the full - // command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. - // Do not set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns - // the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PPID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") -) - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be -// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to -// the first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents all the -// command arguments (including the command/executable itself) as received by -// the process. On Linux-based systems (and some other Unixoid systems -// supporting procfs), can be set according to the list of null-delimited -// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, -// this would be the full argv vector passed to `main`. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. Do not set this -// if you have to assemble it just for monitoring; use `process.command_args` -// instead. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of -// the process executable. On Linux based systems, can be set to the `Name` in -// `proc/[pid]/status`. On Windows, can be set to the base name of -// `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. It represents the full path -// to the process executable. On Linux based systems, can be set to the target -// of `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. -func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the -// "process.owner" semantic conventions. It represents the username of the user -// that owns the process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PPID). 
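// Editor's sketch (not part of the vendored file): populating the process.*
// attributes above from the running process itself; only standard-library
// calls are used, and the semconv import version is an assumption.
package main

import (
	"fmt"
	"os"
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessParentPID(os.Getppid()),
		// os.Args mirrors the argv vector described for process.command_args.
		semconv.ProcessCommandArgs(os.Args...),
		semconv.ProcessRuntimeName("go"),
		semconv.ProcessRuntimeVersion(runtime.Version()),
	}
	fmt.Println(attrs)
}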
-func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// The Android platform on which the Android application is running. -const ( - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It uniquely - // identifies the framework API revision offered by a version - // (`os.version`) of the Android operating system. More information can be - // found - // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '33', '32' - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the Android operating system. More information can be found -// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// The web browser in which the application represented by the resource is -// running. The `browser.*` attributes MUST be used only for resources that -// represent applications running in a web browser (regardless of whether -// running on a mobile or desktop device). -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. 
It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an - // [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the task - // definition family this task definition is a member of. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for this task definition. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS -// task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
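// Editor's sketch (not part of the vendored file): the AWSECS* helpers in this
// file, using the documentation's example values. Launch-type enum members
// such as AWSECSLaunchtypeFargate take no argument; the semconv import version
// is an assumption.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
		semconv.AWSECSLaunchtypeFargate,
		semconv.AWSECSTaskFamily("opentelemetry-family"),
		semconv.AWSECSTaskRevision("8"),
	}
	fmt.Println(attrs)
}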
-func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the task -// definition family this task definition is a member of. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// this task definition. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Resources specific to Amazon Web Services. -const ( - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. 
It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") -) - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// Resource used by Google Cloud Run. -const ( - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the - // name of the Cloud Run - // [execution](https://cloud.google.com/run/docs/managing/job-executions) - // being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'job-name-xxxx', 'sample-job-mdw84' - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the - // index for a task within an execution as provided by the - // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1 - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") -) - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name -// of the Cloud Run -// [execution](https://cloud.google.com/run/docs/managing/job-executions) being -// run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index -// for a task within an execution as provided by the -// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// Resources used by Google Compute Engine (GCE). -const ( - // GCPGceInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the - // hostname of a GCE instance. This is the full value of the default or - // [custom - // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-host1234.example.com', - // 'sample-vm.us-west1-b.c.my-project.internal' - GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGceInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance - // name of a GCE instance. This is the value provided by `host.name`, the - // visible name of the instance in the Cloud Console UI, and the prefix for - // the default hostname of the instance as defined by the [default internal - // DNS - // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-1', 'my-vm-name' - GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPGceInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom -// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). -func GCPGceInstanceHostname(val string) attribute.KeyValue { - return GCPGceInstanceHostnameKey.String(val) -} - -// GCPGceInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance -// name of a GCE instance. This is the value provided by `host.name`, the -// visible name of the instance in the Cloud Console UI, and the prefix for the -// default hostname of the instance as defined by the [default internal DNS -// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). -func GCPGceInstanceName(val string) attribute.KeyValue { - return GCPGceInstanceNameKey.String(val) -} - -// Heroku dyno metadata -const ( - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' - HerokuAppIDKey = attribute.Key("heroku.app.id") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit - // hash for the current release - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. 
It represents - // the time and date the release was created - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2022-10-23T18:00:42Z' - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") -) - -// HerokuAppID returns an attribute KeyValue conforming to the -// "heroku.app.id" semantic conventions. It represents the unique identifier -// for the application -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming -// to the "heroku.release.creation_timestamp" semantic conventions. It -// represents the time and date the release was created -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// The software deployment. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'staging', 'production' - // Note: `deployment.environment` does not affect the uniqueness - // constraints defined through - // the `service.namespace`, `service.name` and `service.instance.id` - // resource attributes. - // This implies that resources carrying the following attribute - // combinations MUST be - // considered to be identifying the same service: - // - // * `service.name=frontend`, `deployment.environment=production` - // * `service.name=frontend`, `deployment.environment=staging`. - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. It represents the name of the -// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) -// (aka deployment tier). -func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// A serverless instance. -const ( - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a - // string, that will be potentially reused for other invocations to the - // same function/function version. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSMaxMemoryKey is the attribute Key conforming to the - // "faas.max_memory" semantic conventions. It represents the amount of - // memory available to the serverless function converted to Bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 134217728 - // Note: It's recommended to set this attribute since e.g. too little - // memory can easily stop a Java AWS Lambda function from working - // correctly. 
On AWS Lambda, the environment variable - // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must - // be multiplied by 1,048,576). - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") - - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. It represents the name of the single function that this - // runtime instance executes. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the - // FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The - // following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud - // providers/products: - // - // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). - FaaSNameKey = attribute.Key("faas.name") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" - // semantic conventions. It represents the immutable version of the - // function being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - // - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run (Services):** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") -) - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. It represents the execution -// environment ID as a string, that will be potentially reused for other -// invocations to the same function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" -// semantic conventions. It represents the name of the single function that -// this runtime instance executes. 
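// Editor's sketch (not part of the vendored file): deriving faas.name and
// faas.max_memory on AWS Lambda per the notes above. AWS_LAMBDA_FUNCTION_NAME
// is the Lambda runtime's variable for the function name; the semconv import
// version is an assumption.
package main

import (
	"fmt"
	"os"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.FaaSName(os.Getenv("AWS_LAMBDA_FUNCTION_NAME")),
	}
	// Per the faas.max_memory note: the env var reports MiB, so multiply by
	// 1,048,576 to record bytes.
	if mib, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE")); err == nil {
		attrs = append(attrs, semconv.FaaSMaxMemory(mib*1_048_576))
	}
	fmt.Println(attrs)
}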
-func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// A service instance. -const ( - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, - // the value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. The format is not defined by these - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0.0', 'a01dbef8a' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// A service instance. -const ( - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-k8s-pod-deployment-1', - // '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to distinguish instances of the same - // service that exist at the same time (e.g. instances of a horizontally - // scaled service). It is preferable for the ID to be persistent and stay - // the same for the lifetime of the service instance, however it is - // acceptable that the ID is ephemeral and changes during important - // lifetime events for the service (e.g. service restarts). If the service - // has no inherent unique ID that can be used as the value of this - // attribute it is recommended to generate a random Version 1 or Version 4 - // RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. 
It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'opentelemetry' - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute - // to `opentelemetry`. - // If another SDK, like a fork or a vendor-provided implementation, is - // used, this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module - // name of this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this - // case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. 
- // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. -func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of - // the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'parts-unlimited-java' - // Note: Official auto instrumentation agents and distributions SHOULD set - // the `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. It represents the - // version string of the auto instrumentation agent or distribution, if - // used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2.3' - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") -) - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. -func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. 
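// Editorial sketch, not part of the vendored file: the resource-attribute
// helpers defined in this file are typically attached to an OpenTelemetry SDK
// Resource. Assumes the go.opentelemetry.io/otel/sdk/resource package; the
// attribute values are the illustrative examples from the docs above.

package main

import (
	"context"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func newResource(ctx context.Context) (*resource.Resource, error) {
	// service.name is Required; service.version and deployment.environment
	// are Optional.
	return resource.New(ctx,
		resource.WithSchemaURL(semconv.SchemaURL),
		resource.WithAttributes(
			semconv.ServiceName("shoppingcart"),
			semconv.ServiceVersion("2.0.0"),
			semconv.DeploymentEnvironment("production"),
		),
	)
}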
-func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) -} - -// Resource describing the packaged software running the application code. Web -// engines are typically executed using process.runtime. -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OTelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. 
It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry -// Scope's concepts. -const ( - // OTelLibraryNameKey is the attribute Key conforming to the - // "otel.library.name" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'io.opentelemetry.contrib.mongodb' - // Deprecated: use the `otel.scope.name` attribute. - OTelLibraryNameKey = attribute.Key("otel.library.name") - - // OTelLibraryVersionKey is the attribute Key conforming to the - // "otel.library.version" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '1.0.0' - // Deprecated: use the `otel.scope.version` attribute. - OTelLibraryVersionKey = attribute.Key("otel.library.version") -) - -// OTelLibraryName returns an attribute KeyValue conforming to the -// "otel.library.name" semantic conventions. -// -// Deprecated: use the `otel.scope.name` attribute. -func OTelLibraryName(val string) attribute.KeyValue { - return OTelLibraryNameKey.String(val) -} - -// OTelLibraryVersion returns an attribute KeyValue conforming to the -// "otel.library.version" semantic conventions. -// -// Deprecated: use the `otel.scope.version` attribute. -func OTelLibraryVersion(val string) attribute.KeyValue { - return OTelLibraryVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go deleted file mode 100644 index 9733ce888a..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.24.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go deleted file mode 100644 index 397174818b..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go +++ /dev/null @@ -1,1334 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](/docs/resource/README.md#service) of the remote - // service. SHOULD be equal to the actual `service.name` resource attribute - // of the remote service if any. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](/docs/resource/README.md#service) of the remote service. -// SHOULD be equal to the actual `service.name` resource attribute of the -// remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// These attributes may be used for any operation with an authenticated and/or -// authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. 
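// Editorial sketch, not part of the vendored file: annotating a span with the
// peer.service and enduser.* attributes defined above. The tracer name, span
// name, and attribute values are illustrative.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func authorize(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "authorize",
		trace.WithAttributes(
			semconv.PeerService("AuthTokenCache"),
			semconv.EnduserID("username"),
			semconv.EnduserRole("admin"),
		),
	)
	defer span.End()
}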
-func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). -func EnduserScope(val string) attribute.KeyValue { - return EnduserScopeKey.String(val) -} - -// These attributes allow reporting on this unit of code and therefore provide -// more context about the span. -const ( - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeStacktraceKey is the attribute Key conforming to the - // "code.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime.
The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'at - // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). -func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). 
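// Editorial sketch, not part of the vendored file: populating the code.*
// attributes described above from the Go runtime. Splitting the
// fully-qualified function name into code.namespace and code.function is
// omitted for brevity; the helper name is hypothetical.

package main

import (
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// codeLocationAttrs captures the caller's source location as code.*
// attributes suitable for attaching to a span.
func codeLocationAttrs() []attribute.KeyValue {
	pc, file, line, ok := runtime.Caller(1)
	if !ok {
		return nil
	}
	attrs := []attribute.KeyValue{
		semconv.CodeFilepath(file),
		semconv.CodeLineNumber(line),
	}
	if f := runtime.FuncForPC(pc); f != nil {
		attrs = append(attrs, semconv.CodeFunction(f.Name()))
	}
	return attrs
}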
-func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Span attributes used by AWS Lambda (in addition to general `faas` -// attributes). -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` request, when applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` request, when applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for CloudEvents. CloudEvents is a specification on how to define -// event data in a standard way. These attributes can be attached to spans when -// performing operations with CloudEvents, regardless of the protocol being -// used. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // that uniquely identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // that identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions.
It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // that contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// that uniquely identifies the event. -func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// that identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// that contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// Semantic conventions for the OpenTracing Shim -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's -// concepts. -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// This semantic convention describes an instance of a function that runs -// without provisioning or managing of servers (also known as serverless -// functions or Function as a Service (FaaS)) with spans. -const ( - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation - // ID of the current function invocation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") -) - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. It represents the invocation ID -// of the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// Semantic Convention for FaaS triggered as a response to some data source -// operation such as a database or filesystem read/write. -const ( - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name - // of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in - // Cosmos DB to the database name. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. 
It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or - // S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It - // describes the type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string - // containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron - // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") - - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions.
It represents a string containing the function invocation - // time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") -) - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" -// semantic conventions. It represents a string containing the function -// invocation time in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// Contains additional attributes for incoming FaaS spans. -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the - // serverless function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// The `aws` conventions apply to operations using the AWS SDK. They map -// request or response parameters in AWS SDK API calls to attributes on a Span. -// The conventions have been collected over time based on feedback from AWS -// users of tracing and will continue to evolve as new interesting conventions -// are found. -// Some descriptions are also provided for populating general OpenTelemetry -// semantic conventions based on these APIs. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes that exist for multiple DynamoDB request types. -const ( - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") -) - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. 
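// Editorial sketch, not part of the vendored file: a few of the
// request-parameter attributes above applied to a hypothetical DynamoDB
// GetItem span; the table name and projection values are the illustrative
// examples from the docs.

package main

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func dynamoDBGetItemAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBProjection("Title, Price, Color"),
	}
}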
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// DynamoDB.CreateTable -const ( - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field. -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// DynamoDB.ListTables -const ( - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the - // number of items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the -// number of items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// DynamoDB.Query -const ( - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter.
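// Editorial sketch, not part of the vendored file: the DynamoDB.Query
// attributes above gathered for a span; the index name, scan direction, and
// limit are illustrative values taken from the docs.

package main

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func dynamoDBQueryAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBScanForward(false),
		semconv.AWSDynamoDBLimit(10),
	}
}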
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// DynamoDB.Scan -const ( - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") -) - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// DynamoDB.UpdateTable -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. 
It represents the JSON-serialized value of each item in the - // the `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// Attributes that exist for S3 request types. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. that require the object key as a mandatory parameter. - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). 
- AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. - // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// Semantic conventions to apply when instrumenting the GraphQL implementation. -// They map GraphQL operations to attributes on a Span. 
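For reference before the declarations that follow: enum-valued conventions such as `graphql.operation.type` are exposed as ready-made `attribute.KeyValue` variables rather than constructor functions. A short, hypothetical sketch of the span mapping (tracer setup and import path are assumptions):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // import path assumed
)

// recordGraphQLExecution maps one GraphQL operation onto span attributes.
func recordGraphQLExecution(ctx context.Context, document, name string) {
	_, span := otel.Tracer("example").Start(ctx, "graphql.execute")
	defer span.End()
	span.SetAttributes(
		semconv.GraphqlDocument(document), // may be sanitized beforehand
		semconv.GraphqlOperationName(name),
		semconv.GraphqlOperationTypeQuery, // enum member, used as-is
	)
}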
-const (
-	// GraphqlDocumentKey is the attribute Key conforming to the
-	// "graphql.document" semantic conventions. It represents the GraphQL
-	// document being executed.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'query findBookByID { bookByID(id: ?) { name } }'
-	// Note: The value may be sanitized to exclude sensitive information.
-	GraphqlDocumentKey = attribute.Key("graphql.document")
-
-	// GraphqlOperationNameKey is the attribute Key conforming to the
-	// "graphql.operation.name" semantic conventions. It represents the name of
-	// the operation being executed.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'findBookByID'
-	GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
-	// GraphqlOperationTypeKey is the attribute Key conforming to the
-	// "graphql.operation.type" semantic conventions. It represents the type of
-	// the operation being executed.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'query', 'mutation', 'subscription'
-	GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-)
-
-var (
-	// GraphQL query
-	GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
-	// GraphQL mutation
-	GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
-	// GraphQL subscription
-	GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
-	return GraphqlDocumentKey.String(val)
-}
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
-	return GraphqlOperationNameKey.String(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
new file mode 100644
index 0000000000..2de1fc3c6b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.26.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
new file mode 100644
index 0000000000..d8dc822b26
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
@@ -0,0 +1,8996 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The Android platform on which the Android application is running.
+const (
+	// AndroidOSAPILevelKey is the attribute Key conforming to the
+	// "android.os.api_level" semantic conventions. It represents a value that
+	// uniquely identifies the framework API revision offered by a version
+	// (`os.version`) of the Android operating system. More information can be
+	// found
+	// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '33', '32'
+	AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents a value that
+// uniquely identifies the framework API revision offered by a version
+// (`os.version`) of the Android operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+	return AndroidOSAPILevelKey.String(val)
+}
+
+// ASP.NET Core attributes
+const (
+	// AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
+	// "aspnetcore.rate_limiting.result" semantic conventions. It represents
+	// the rate-limiting result: it shows whether the lease was acquired or
+	// contains a rejection reason
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'acquired', 'request_canceled'
+	AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
+
+	// AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
+	// the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+	// represents the full type name of the
+	// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+	// implementation that handled the exception.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (if and only if the exception
+	// was handled by this handler.)
+	// Stability: stable
+	// Examples: 'Contoso.MyHandler'
+	AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
+
+	// AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming
+	// to the "aspnetcore.diagnostics.exception.result" semantic conventions.
+	// It represents the ASP.NET Core exception middleware handling result
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'handled', 'unhandled'
+	AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result")
+
+	// AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
+	// "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+	// the rate limiting policy name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'fixed', 'sliding', 'token'
+	AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
+
+	// AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
+	// "aspnetcore.request.is_unhandled" semantic conventions. It represents
+	// the flag indicating if request was handled by the application pipeline.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: True
+	AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
+
+	// AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
+	// "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+	// value that indicates whether the matched route is a fallback route.
+ // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") + + // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the + // "aspnetcore.routing.match_status" semantic conventions. It represents + // the match result - success or failure + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'success', 'failure' + AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +var ( + // Exception was handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") + // Exception was not handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") + // Exception handling was skipped because the response had started + AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") + // Exception handling didn't run because the request was aborted + AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") +) + +var ( + // Match succeeded + AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") + // Match failed + AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating if request was handled by the application pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a +// value that indicates whether the matched route is a fallback route. 
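Note the mix of requirement levels in this group: `aspnetcore.diagnostics.handler.type` is ConditionallyRequired, so anything recording these attributes has to branch on whether a handler actually ran. A hedged sketch of that rule, using the enum variables and constructor declared above (the helper name and its caller are assumptions):

package main

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // import path assumed
)

// exceptionAttrs applies the ConditionallyRequired rule for
// aspnetcore.diagnostics.handler.type: the handler type is emitted only
// when a handler actually handled the exception.
func exceptionAttrs(handlerType string) []attribute.KeyValue {
	if handlerType == "" {
		return []attribute.KeyValue{semconv.AspnetcoreDiagnosticsExceptionResultUnhandled}
	}
	return []attribute.KeyValue{
		semconv.AspnetcoreDiagnosticsHandlerType(handlerType),
		semconv.AspnetcoreDiagnosticsExceptionResultHandled,
	}
}

The same branch-on-condition shape applies to the other ConditionallyRequired attributes in this file.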
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// Generic attributes for AWS services. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes for AWS DynamoDB. +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. 
It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. 
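One recurring pattern above is worth spelling out: the slice-valued constructors (`AWSDynamoDBConsumedCapacity`, `AWSDynamoDBGlobalSecondaryIndexUpdates`, and similar) expect one JSON document per element, and the serialization is the caller's job. A hypothetical helper showing that shape (the function name and the `any` payload type are assumptions):

package main

import (
	"encoding/json"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // import path assumed
)

// consumedCapacityAttr serializes each ConsumedCapacity response item to
// JSON and hands the resulting strings to the slice-valued constructor.
func consumedCapacityAttr(items []any) (attribute.KeyValue, error) {
	vals := make([]string, 0, len(items))
	for _, item := range items {
		b, err := json.Marshal(item)
		if err != nil {
			return attribute.KeyValue{}, err
		}
		vals = append(vals, string(b))
	}
	return semconv.AWSDynamoDBConsumedCapacity(vals...), nil
}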
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. +func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID + // MUST be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is + // populated.) + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a + // running [ECS + // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family + // name of the [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + // used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSTaskID returns an attribute KeyValue conforming to the +// "aws.ecs.task.id" semantic conventions. It represents the ID of a running +// ECS task. The ID MUST be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. 
It represents the ARN of a running +// [ECS +// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) +// used to create the ECS task. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Attributes for AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Attributes for AWS Logs. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). 
+	// One log group can contain several log streams, so these ARNs necessarily
+	// identify both a log group and a log stream.
+	AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+	// AWSLogStreamNamesKey is the attribute Key conforming to the
+	// "aws.log.stream.names" semantic conventions. It represents the name(s)
+	// of the AWS log stream(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+)
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+	return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+	return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+	return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+	return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// Attributes for AWS Lambda.
+const (
+	// AWSLambdaInvokedARNKey is the attribute Key conforming to the
+	// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+	// invoked ARN as provided on the `Context` passed to the function
+	// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` endpoint, where applicable).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+	// Note: This may be different from `cloud.resource_id` if an alias is
+	// involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` endpoint, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for AWS S3.
+const (
+	// AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+	// semantic conventions. It represents the S3 bucket name the request
+	// refers to. Corresponds to the `--bucket` parameter of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// operations.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'some-bucket-name'
+	// Note: The `bucket` attribute is applicable to all S3 operations that
+	// reference a bucket, i.e. that require the bucket name as a mandatory
+	// parameter.
+ // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// The web browser attributes +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.address` SHOULD represent the client address
+ // behind any intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port"
+ // semantic conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.port` SHOULD represent the client port behind
+ // any intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the
+ // availability zone the resource is running in. Cloud regions often have
+ // multiple, isolated locations known as zones to increase availability.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
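+ //
+ // For illustration only (editor's sketch, not part of the generated
+ // conventions): assuming the resource package from
+ // go.opentelemetry.io/otel/sdk/resource, a service on AWS EC2 might
+ // declare `cloud.provider` and `cloud.platform` together using the
+ // enum values defined below:
+ //
+ //	res := resource.NewWithAttributes(
+ //		semconv.SchemaURL,
+ //		semconv.CloudProviderAWS,
+ //		semconv.CloudPlatformAWSEC2,
+ //	)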
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
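+ //
+ // For illustration only (editor's sketch): assuming `span` is a
+ // trace.Span from go.opentelemetry.io/otel/trace, the resolved function
+ // identifier could be attached as a span attribute per the AWS Lambda
+ // rule above, reusing the example value from this documentation:
+ //
+ //	span.SetAttributes(semconv.CloudResourceID(
+ //		"arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function"))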
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + 
// IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone the resource is running in. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running in.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
+// Attributes for CloudEvents.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+ // which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+ // which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions.
It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+ // which contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). 
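+//
+// For illustration only (editor's sketch): assuming `span` is a trace.Span
+// from go.opentelemetry.io/otel/trace, the code location attributes might be
+// recorded together, reusing the example values from this documentation:
+//
+//	span.SetAttributes(
+//		semconv.CodeFunction("serveRequest"),
+//		semconv.CodeNamespace("com.example.MyHTTPService"),
+//		semconv.CodeLineNumber(42),
+//	)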
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
+
+// A container instance.
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used
+ // to run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol'
+ // Note: If using embedded credentials or sensitive data, it is recommended
+ // to remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) run by the
+ // container.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol, --config, config.yaml'
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full
+ // command run by the container as a single string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol --config config.yaml'
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCPUStateKey is the attribute Key conforming to the
+ // "container.cpu.state" semantic conventions. It represents the CPU state
+ // for this data point.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'user', 'kernel'
+ ContainerCPUStateKey = attribute.Key("container.cpu.state")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime
+ // specific image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect
+ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+ // endpoint.
+ // K8S defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io
+ // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the
+ // repo digests of the container image as provided by the container
+ // runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+ // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+ // Note:
+ // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+ // and
+ // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+ // report those under the `RepoDigests` field.
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image
+ // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+ // Should be only the `<tag>` section of the full name for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'v1.27.1', '3.5.7-0'
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by the
+ // container runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+var (
+ // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
+ ContainerCPUStateUser = ContainerCPUStateKey.String("user")
+ // When CPU is used by the system (host OS)
+ ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
+ // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
+ ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string.
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions.
It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by the container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// This group defines the attributes used to describe telemetry in the context
+// of databases.
+const (
+ // DBClientConnectionsPoolNameKey is the attribute Key conforming to the
+ // "db.client.connections.pool.name" semantic conventions. It represents
+ // the name of the connection pool; unique within the instrumented
+ // application. In case the connection pool implementation doesn't provide
+ // a name, instrumentation should use a combination of `server.address` and
+ // `server.port` attributes formatted as `server.address:server.port`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myDataSource'
+ DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
+
+ // DBClientConnectionsStateKey is the attribute Key conforming to the
+ // "db.client.connections.state" semantic conventions. It represents the
+ // state of a connection in the pool
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'idle'
+ DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
+
+ // DBCollectionNameKey is the attribute Key conforming to the
+ // "db.collection.name" semantic conventions. It represents the name of a
+ // collection (table, container) within the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'public.users', 'customers'
+ // Note: If the collection name is parsed from the query, it SHOULD match
+ // the value provided in the query and may be qualified with the schema and
+ // database name.
+ // It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ DBCollectionNameKey = attribute.Key("db.collection.name")
+
+ // DBNamespaceKey is the attribute Key conforming to the "db.namespace"
+ // semantic conventions. It represents the name of the database, fully
+ // qualified within the server address and port.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'customers', 'test.users'
+ // Note: If a database system has multiple namespace components, they
+ // SHOULD be concatenated (potentially using database system specific
+ // conventions) from most general to most specific namespace component, and
+ // more specific namespaces SHOULD NOT be captured without the more general
+ // namespaces, to ensure that "startswith" queries for the more general
+ // namespaces will be valid.
+ // Semantic conventions for individual database systems SHOULD document + // what `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationNameKey is the attribute Key conforming to the + // "db.operation.name" semantic conventions. It represents the name of the + // operation or command being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the + // application without attempting to do any case normalization. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey + // "WuValue"' + DBQueryTextKey = attribute.Key("db.query.text") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents the database management system (DBMS) product + // as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. + // For example, when using PostgreSQL client libraries to connect to a + // CockroachDB, the `db.system` is set to `postgresql` based on the + // instrumentation's best knowledge. + DBSystemKey = attribute.Key("db.system") +) + +var ( + // idle + DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") + // used + DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to
+// the "db.client.connections.pool.name" semantic conventions. It represents
+// the name of the connection pool; unique within the instrumented application.
+// In case the connection pool implementation doesn't provide a name,
+// instrumentation should use a combination of `server.address` and
+// `server.port` attributes formatted as `server.address:server.port`.
+func DBClientConnectionsPoolName(val string) attribute.KeyValue {
+ return DBClientConnectionsPoolNameKey.String(val)
+}
+
+// DBCollectionName returns an attribute KeyValue conforming to the
+// "db.collection.name" semantic conventions. It represents the name of a
+// collection (table, container) within the database.
+func DBCollectionName(val string) attribute.KeyValue {
+ return DBCollectionNameKey.String(val)
+}
+
+// DBNamespace returns an attribute KeyValue conforming to the
+// "db.namespace" semantic conventions. It represents the name of the database,
+// fully qualified within the server address and port.
+func DBNamespace(val string) attribute.KeyValue {
+ return DBNamespaceKey.String(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+ return DBOperationNameKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the
+// "db.query.text" semantic conventions. It represents the database query being
+// executed.
+func DBQueryText(val string) attribute.KeyValue {
+ return DBQueryTextKey.String(val)
+}
+
+// This group defines attributes for Cassandra.
+const (
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e.
how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
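+//
+// For illustration only (editor's sketch): assuming `span` is a trace.Span
+// from go.opentelemetry.io/otel/trace, the Cassandra paging and speculative
+// execution attributes might be set together, reusing the example values
+// from this documentation:
+//
+//	span.SetAttributes(
+//		semconv.DBCassandraPageSize(5000),
+//		semconv.DBCassandraSpeculativeExecutionCount(2),
+//	)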
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// This group defines attributes for Azure Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // CosmosDB operation type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. It represents the RU + // consumed for that operation. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. It represents the RU +// consumed for that operation. +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes. +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// This group defines attributes for Elasticsearch. +const ( + // DBElasticsearchClusterNameKey is the attribute Key conforming to the + // "db.elasticsearch.cluster.name" semantic conventions.
It represents the + // identifier of an Elasticsearch cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") + + // DBElasticsearchNodeNameKey is the attribute Key conforming to the + // "db.elasticsearch.node.name" semantic conventions. It represents the + // human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") +) + +// DBElasticsearchClusterName returns an attribute KeyValue conforming to +// the "db.elasticsearch.cluster.name" semantic conventions. It represents the +// identifier of an Elasticsearch cluster. +func DBElasticsearchClusterName(val string) attribute.KeyValue { + return DBElasticsearchClusterNameKey.String(val) +} + +// DBElasticsearchNodeName returns an attribute KeyValue conforming to the +// "db.elasticsearch.node.name" semantic conventions. It represents the +// human-readable identifier of the node/instance to which a +// request was routed. +func DBElasticsearchNodeName(val string) attribute.KeyValue { + return DBElasticsearchNodeNameKey.String(val) +} + +// Attributes for software deployments. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: `deployment.environment` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` + // resource attributes. + // This implies that resources carrying the following attribute + // combinations MUST be + // considered to be identifying the same service: + // + // * `service.name=frontend`, `deployment.environment=production` + // * `service.name=frontend`, `deployment.environment=staging`. + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) +// (aka deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// Attributes that represent an occurrence of a lifecycle transition on the +// Android platform. +const ( + // AndroidStateKey is the attribute Key conforming to the "android.state" + // semantic conventions. It is deprecated; use the + // `device.app.lifecycle` event definition including `android.state` as a + // payload field instead. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The Android lifecycle states are defined in [Activity lifecycle + // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), + // and from which the `OS identifiers` are derived.
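// Editor's note: an illustrative sketch, not part of the vendored file. It
// shows DeploymentEnvironment (defined above) used as a resource attribute;
// resource.NewSchemaless comes from go.opentelemetry.io/otel/sdk/resource,
// ServiceName is assumed to be defined elsewhere in this package, and the
// service/environment names are placeholders.
//
//	res := resource.NewSchemaless(
//		semconv.ServiceName("frontend"),          // assumed constructor from this package
//		semconv.DeploymentEnvironment("staging"), // does not affect service identity (see note above)
//	)
//	_ = res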
+ AndroidStateKey = attribute.Key("android.state") +) + +var ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AndroidStateCreated = AndroidStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AndroidStateBackground = AndroidStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AndroidStateForeground = AndroidStateKey.String("foreground") +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// These attributes may be used for any disk related operation. 
+const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) + +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) + +// The shared attributes used to report a DNS query. +const ( + // DNSQuestionNameKey is the attribute Key conforming to the + // "dns.question.name" semantic conventions. It represents the name being + // queried. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or + // above 126), those characters should be represented as escaped base 10 + // integers (\DDD). Back slashes and quotes should be escaped. Tabs, + // carriage returns, and line feeds should be converted to \t, \r, and \n + // respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
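// Editor's note: a minimal sketch, not part of the vendored file, showing the
// enduser attribute constructors from this group applied to an active span;
// the `span` variable and the attribute values are placeholders.
//
//	span.SetAttributes(
//		semconv.EnduserID("username"),
//		semconv.EnduserRole("admin"),
//		semconv.EnduserScope("read:message write:files"),
//	)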
+func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// The shared attributes used to report an error. +const ( + // ErrorTypeKey is the attribute Key conforming to the "error.type" + // semantic conventions. It describes a class of error the + // operation ended with. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The `error.type` SHOULD be predictable, and SHOULD have low + // cardinality. + // + // When `error.type` is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be + // used. + // + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library + // SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query + // time when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT + // set `error.type`. + // + // If a specific domain defines its own set of error identifiers (such as + // HTTP or gRPC status codes), + // it's RECOMMENDED to: + // + // * Use a domain-specific attribute + // * Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) + +var ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It identifies the class / type of + // event. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as [attribute + // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md). + // Notably, event names are namespaced to avoid collisions and provide a + // clean separation of semantics for events in separate domains like + // browser, mobile, and kubernetes.
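// Editor's note: an illustrative sketch, not part of the vendored file,
// following the error.type guidance above: on failure, record a
// low-cardinality error identifier, falling back to ErrorTypeOther ("_OTHER")
// when the instrumentation has no better value; `span` and `err` are
// placeholders.
//
//	if err != nil {
//		span.RecordError(err)
//		span.SetAttributes(semconv.ErrorTypeOther)
//	}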
+ EventNameKey = attribute.Key("event.name") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It identifies the class / type of +// event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It SHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example for recording span + // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It SHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. +func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// FaaS attributes +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It + // describes the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") + + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable.
Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. 
It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// Attributes for Feature Flags. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It SHOULD be + // a semantic identifier for a value. If one is unavailable, a stringified + // version of the value can be used.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` may be used for the value `#c05543`. + // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + FeatureFlagVariantKey = attribute.Key("feature_flag.variant") +) + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. It SHOULD be a +// semantic identifier for a value. If one is unavailable, a stringified +// version of the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) +} + +// Describes file attributes. +const ( + // FileDirectoryKey is the attribute Key conforming to the "file.directory" + // semantic conventions. It represents the directory where the file is + // located. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/home/user', 'C:\\Program Files\\MyApp' + FileDirectoryKey = attribute.Key("file.directory") + + // FileExtensionKey is the attribute Key conforming to the "file.extension" + // semantic conventions. It represents the file extension, excluding the + // leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: When the file name has multiple extensions (example.tar.gz), only + // the last one should be captured ("gz", not "tar.gz"). + FileExtensionKey = attribute.Key("file.extension") + + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.png' + FileNameKey = attribute.Key("file.name") + + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/home/alice/example.png', 'C:\\Program + // Files\\MyApp\\myapp.exe' + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + FileSizeKey = attribute.Key("file.size") +) + +// FileDirectory returns an attribute KeyValue conforming to the +// "file.directory" semantic conventions. It represents the directory where the +// file is located. It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the +// "file.extension" semantic conventions. It represents the file extension, +// excluding the leading dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" +// semantic conventions. It represents the name of the file including the +// extension, without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" +// semantic conventions. It represents the full path to the file, including the +// file name. It should include the drive letter, when appropriate. +func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" +// semantic conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// Attributes for Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Attributes for Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// The attributes used to describe telemetry in the context of LLM (Large +// Language Models) requests and responses. +const ( + // GenAiCompletionKey is the attribute Key conforming to the + // "gen_ai.completion" semantic conventions. It represents the full + // response received from the LLM. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching + // [OpenAI messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiCompletionKey = attribute.Key("gen_ai.completion") + + // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" + // semantic conventions. It represents the full prompt sent to an LLM. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of + // France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI + // messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiPromptKey = attribute.Key("gen_ai.prompt") + + // GenAiRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the + // maximum number of tokens the LLM generates for a request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAiRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of + // the LLM a request is being made to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4' + GenAiRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAiRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0.0 + GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAiRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p + // sampling setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0 + GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAiResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to + // each generation received. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'stop' + GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAiResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + GenAiResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAiResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of + // the LLM a response was generated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + GenAiResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" + // semantic conventions. It represents the Generative AI product as + // identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The actual GenAI product may differ from the one identified by the + // client. 
For example, when using OpenAI client libraries to communicate + // with Mistral, the `gen_ai.system` is set to `openai` based on the + // instrumentation's best knowledge. + GenAiSystemKey = attribute.Key("gen_ai.system") + + // GenAiUsageCompletionTokensKey is the attribute Key conforming to the + // "gen_ai.usage.completion_tokens" semantic conventions. It represents the + // number of tokens used in the LLM response (completion). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 180 + GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") + + // GenAiUsagePromptTokensKey is the attribute Key conforming to the + // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the + // number of tokens used in the LLM prompt. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") +) + +var ( + // OpenAI + GenAiSystemOpenai = GenAiSystemKey.String("openai") +) + +// GenAiCompletion returns an attribute KeyValue conforming to the +// "gen_ai.completion" semantic conventions. It represents the full response +// received from the LLM. +func GenAiCompletion(val string) attribute.KeyValue { + return GenAiCompletionKey.String(val) +} + +// GenAiPrompt returns an attribute KeyValue conforming to the +// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to +// an LLM. +func GenAiPrompt(val string) attribute.KeyValue { + return GenAiPromptKey.String(val) +} + +// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the LLM generates for a request. +func GenAiRequestMaxTokens(val int) attribute.KeyValue { + return GenAiRequestMaxTokensKey.Int(val) +} + +// GenAiRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// LLM a request is being made to. +func GenAiRequestModel(val string) attribute.KeyValue { + return GenAiRequestModelKey.String(val) +} + +// GenAiRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the LLM request. +func GenAiRequestTemperature(val float64) attribute.KeyValue { + return GenAiRequestTemperatureKey.Float64(val) +} + +// GenAiRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p +// sampling setting for the LLM request. +func GenAiRequestTopP(val float64) attribute.KeyValue { + return GenAiRequestTopPKey.Float64(val) +} + +// GenAiResponseFinishReasons returns an attribute KeyValue conforming to +// the "gen_ai.response.finish_reasons" semantic conventions. It represents the +// array of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAiResponseFinishReasonsKey.StringSlice(val) +} + +// GenAiResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique +// identifier for the completion. 
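+//
+// As an illustrative aside (an editor's sketch, not part of the upstream
+// generated file): the GenAI helpers above are plain attribute.KeyValue
+// constructors, so annotating an LLM call span could look like the following,
+// assuming a trace.Span named span obtained from the
+// go.opentelemetry.io/otel/trace API:
+//
+//	span.SetAttributes(
+//		GenAiSystemOpenai,
+//		GenAiRequestModel("gpt-4"),
+//		GenAiUsagePromptTokens(100),
+//		GenAiUsageCompletionTokens(180),
+//	)
+//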
+func GenAiResponseID(val string) attribute.KeyValue {
+ return GenAiResponseIDKey.String(val)
+}
+
+// GenAiResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// LLM a response was generated from.
+func GenAiResponseModel(val string) attribute.KeyValue {
+ return GenAiResponseModelKey.String(val)
+}
+
+// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
+// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+// number of tokens used in the LLM response (completion).
+func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
+ return GenAiUsageCompletionTokensKey.Int(val)
+}
+
+// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
+// of tokens used in the LLM prompt.
+func GenAiUsagePromptTokens(val int) attribute.KeyValue {
+ return GenAiUsagePromptTokensKey.Int(val)
+}
+
+// Attributes for GraphQL.
+const (
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// Attributes for the Heroku platform on which the application is
+// running.
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions.
It represents the unique identifier for the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application.
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release.
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created.
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+ // of level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the
+ // "host.cpu.family" semantic conventions. It represents the family or
+ // generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', 'PA-RISC 1.1e'
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the
+ // "host.cpu.model.id" semantic conventions. It represents the model
+ // identifier. It provides more granular information about the CPU,
+ // distinguishing it from other CPUs within the same family.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', '9000/778/B180L'
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the
+ // "host.cpu.stepping" semantic conventions. It represents the stepping or
+ // core revisions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1', 'r1p1'
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'GenuineIntel'
+ // Note: The [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+ // ID string in EBX, EDX and ECX registers. Writing these to memory in this
+ // order results in a 12-character string.
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host,
+ // excluding loopback interfaces.
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
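+//
+// As an illustrative aside (an editor's sketch, not part of the upstream
+// generated file): host attributes normally describe a resource rather than a
+// single span. A minimal sketch, assuming the
+// go.opentelemetry.io/otel/sdk/resource package and this package's SchemaURL
+// constant:
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		HostName("opentelemetry-test"),
+//		HostArchAMD64,
+//		HostType("n1-standard-1"),
+//	)
+//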
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+ return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of
+ // the HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'active', 'idle'
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of
+ // the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the HTTP
+ // request method.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Note: HTTP request method value SHOULD be "known" to the
+ // instrumentation.
+ // By default, this convention defines "known" methods as the ones listed
+ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+ // and the PATCH method defined in
+ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set
+ // the `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+ // list of case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known methods, it is
+ // not a list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods
+ // to be case insensitive SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so MUST also set
+ // `http.request.method_original` to the original value.
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GeT', 'ACL', 'foo'
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the
+ // ordinal number of the request resending attempt (for any reason,
+ // including redirects).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of what was the cause of the resending
+ // (e.g.
redirection, authorization failure, 503 Service Unavailable,
+ // network issues, or any other cause).
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+ // HTTPRequestSizeKey is the attribute Key conforming to the
+ // "http.request.size" semantic conventions. It represents the total size
+ // of the request in bytes. This should be the total number of bytes sent
+ // over the wire, including the request line (HTTP/1.1), framing (HTTP/2
+ // and HTTP/3), headers, and request body if any.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1437
+ HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size
+ // of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+ // HTTPResponseSizeKey is the attribute Key conforming to the
+ // "http.response.size" semantic conventions. It represents the total size
+ // of the response in bytes. This should be the total number of bytes sent
+ // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
+ // HTTP/3), headers, and response body and trailers if any.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1437
+ HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status
+ // code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 200
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route, that is, the path
+ // template in the format used by the respective server framework.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and
+ // the URI path can NOT substitute it.
+ // SHOULD include the [application
+ // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
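+ //
+ // As an illustrative aside (an editor's sketch, not part of the upstream
+ // generated file), a server span annotated with the HTTP attributes from
+ // this section, assuming a trace.Span named span from
+ // go.opentelemetry.io/otel/trace:
+ //
+ //	span.SetAttributes(
+ //		HTTPRequestMethodGet,
+ //		HTTPRoute("/users/:userID?"),
+ //		HTTPResponseStatusCode(200),
+ //	)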
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+var (
+ // active state
+ HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
+ // idle state
+ HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
+)
+
+var (
+ // CONNECT method
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of the request resending attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. It represents the total size of
+// the request in bytes. This should be the total number of bytes sent over the
+// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+ return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of
+// the response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions.
It represents the total size of
+// the response in bytes. This should be the total number of bytes sent over
+// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+ return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// Java Virtual Machine related attributes.
+const (
+ // JvmBufferPoolNameKey is the attribute Key conforming to the
+ // "jvm.buffer.pool.name" semantic conventions. It represents the name of
+ // the buffer pool.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mapped', 'direct'
+ // Note: Pool names are generally obtained via
+ // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+ JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+
+ // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
+ // semantic conventions. It represents the name of the garbage collector
+ // action.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'end of minor GC', 'end of major GC'
+ // Note: Garbage collector action is generally obtained via
+ // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
+ JvmGcActionKey = attribute.Key("jvm.gc.action")
+
+ // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
+ // semantic conventions. It represents the name of the garbage collector.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'G1 Young Generation', 'G1 Old Generation'
+ // Note: Garbage collector name is generally obtained via
+ // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
+ JvmGcNameKey = attribute.Key("jvm.gc.name")
+
+ // JvmMemoryPoolNameKey is the attribute Key conforming to the
+ // "jvm.memory.pool.name" semantic conventions. It represents the name of
+ // the memory pool.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+ // Note: Pool names are generally obtained via
+ // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+ JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+ // JvmMemoryTypeKey is the attribute Key conforming to the
+ // "jvm.memory.type" semantic conventions. It represents the type of
+ // memory.
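+ //
+ // As an illustrative aside (an editor's sketch, not part of the upstream
+ // generated file), these JVM attributes are typically attached to
+ // measurements through the metric API, e.g. assuming a
+ // metric.Int64UpDownCounter named usedMemory from
+ // go.opentelemetry.io/otel/metric, with ctx and used in scope:
+ //
+ //	usedMemory.Add(ctx, used,
+ //		metric.WithAttributes(
+ //			JvmMemoryTypeHeap,
+ //			JvmMemoryPoolName("G1 Eden space"),
+ //		))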
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'heap', 'non_heap'
+ JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+
+ // JvmThreadDaemonKey is the attribute Key conforming to the
+ // "jvm.thread.daemon" semantic conventions. It represents whether the
+ // thread is a daemon or not.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
+
+ // JvmThreadStateKey is the attribute Key conforming to the
+ // "jvm.thread.state" semantic conventions. It represents the state of the
+ // thread.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'runnable', 'blocked'
+ JvmThreadStateKey = attribute.Key("jvm.thread.state")
+)
+
+var (
+ // Heap memory
+ JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+ // Non-heap memory
+ JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+var (
+ // A thread that has not yet started is in this state
+ JvmThreadStateNew = JvmThreadStateKey.String("new")
+ // A thread executing in the Java virtual machine is in this state
+ JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
+ // A thread that is blocked waiting for a monitor lock is in this state
+ JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
+ // A thread that is waiting indefinitely for another thread to perform a particular action is in this state
+ JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
+ // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
+ JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
+ // A thread that has exited is in this state
+ JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+ return JvmBufferPoolNameKey.String(val)
+}
+
+// JvmGcAction returns an attribute KeyValue conforming to the
+// "jvm.gc.action" semantic conventions. It represents the name of the garbage
+// collector action.
+func JvmGcAction(val string) attribute.KeyValue {
+ return JvmGcActionKey.String(val)
+}
+
+// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
+// semantic conventions. It represents the name of the garbage collector.
+func JvmGcName(val string) attribute.KeyValue {
+ return JvmGcNameKey.String(val)
+}
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+ return JvmMemoryPoolNameKey.String(val)
+}
+
+// JvmThreadDaemon returns an attribute KeyValue conforming to the
+// "jvm.thread.daemon" semantic conventions. It represents whether the
+// thread is a daemon or not.
+func JvmThreadDaemon(val bool) attribute.KeyValue {
+ return JvmThreadDaemonKey.Bool(val)
+}
+
+// Kubernetes resource attributes.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the
+ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+ // the cluster, set to the UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+ // Note: K8S doesn't have support for obtaining a cluster ID. If this is
+ // ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8S cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8S ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T
+ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html),
+ // which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // different from all other UUIDs generated before 3603 A.D., or is
+ // extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod.
+ // The container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SContainerStatusLastTerminatedReasonKey is the attribute Key
+ // conforming to the "k8s.container.status.last_terminated_reason" semantic
+ // conventions. It represents the last terminated reason of the Container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Evicted', 'Error'
+ K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. 
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
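+//
+// As an illustrative aside (an editor's sketch, not part of the upstream
+// generated file): like the host attributes, the k8s attributes usually
+// describe a resource. A minimal sketch, assuming
+// go.opentelemetry.io/otel/sdk/resource and this package's SchemaURL constant:
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		K8SClusterName("opentelemetry-cluster"),
+//		K8SNamespaceName("default"),
+//		K8SPodName("opentelemetry-pod-autoconf"),
+//	)
+//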
+func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. 
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// Log attributes
+const (
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream"
+ // semantic conventions. It represents the stream associated with the log.
+ // See below for a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+ // Logs from stdout stream
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Logs from stderr stream
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// Attributes for a file to which the log was emitted.
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'audit.log'
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the
+ // basename of the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'uuid.log'
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/log/mysql/audit.log'
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full
+ // path to the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/lib/docker/uuid.log'
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
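+//
+// As an illustrative aside (an editor's sketch, not part of the upstream
+// generated file), a log file could be described with a plain attribute list
+// built from these helpers:
+//
+//	attrs := []attribute.KeyValue{
+//		LogFileName("audit.log"),
+//		LogFilePath("/var/log/mysql/audit.log"),
+//	}
+//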
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
+
+// The generic attributes that may be used in any Log Record.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Attributes describing telemetry around messaging systems and messaging
+// activities.
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client.id" semantic conventions. It represents a unique
+ // identifier for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'client-5', 'myhost@8742@s8083jm'
+ MessagingClientIDKey = attribute.Key("messaging.client.id")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions.
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to + // the "messaging.destination.partition.id" semantic conventions. It + // represents the identifier of the partition messages are sent to or + // received from, unique within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1' + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationPublishAnonymousKey is the attribute Key conforming + // to the "messaging.destination_publish.anonymous" semantic conventions. + // It represents a boolean that is true if the publish message destination + // is anonymous (could be unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") + + // MessagingDestinationPublishNameKey is the attribute Key conforming to + // the "messaging.destination_publish.name" semantic conventions. 
It
+	// represents the name of the original destination the message was
+	// published to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: The name SHOULD uniquely identify a specific queue, topic, or
+	// other entity within the broker. If
+	// the broker doesn't have such a notion, the original destination name
+	// SHOULD uniquely identify the broker.
+	MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
+
+	// MessagingMessageBodySizeKey is the attribute Key conforming to the
+	// "messaging.message.body.size" semantic conventions. It represents the
+	// size of the message body in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1439
+	// Note: This can refer to either the compressed or uncompressed body
+	// size. If both sizes are known, the uncompressed
+	// body size should be used.
+	MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+	// MessagingMessageConversationIDKey is the attribute Key conforming to the
+	// "messaging.message.conversation_id" semantic conventions. It represents
+	// the conversation ID identifying the conversation to which the message
+	// belongs, represented as a string. Sometimes called "Correlation ID".
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyConversationID'
+	MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+	// MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+	// "messaging.message.envelope.size" semantic conventions. It represents
+	// the size of the message body and metadata in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2738
+	// Note: This can refer to either the compressed or uncompressed size. If
+	// both sizes are known, the uncompressed
+	// size should be used.
+	MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+	// MessagingMessageIDKey is the attribute Key conforming to the
+	// "messaging.message.id" semantic conventions. It represents a value used
+	// by the messaging system as an identifier for the message, represented as
+	// a string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+	MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+	// MessagingOperationNameKey is the attribute Key conforming to the
+	// "messaging.operation.name" semantic conventions. It represents the
+	// system-specific name of the messaging operation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ack', 'nack', 'send'
+	MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+	// MessagingOperationTypeKey is the attribute Key conforming to the
+	// "messaging.operation.type" semantic conventions. It represents a string
+	// identifying the type of the messaging operation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: If a custom value is used, it MUST be of low cardinality.
+	MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+	// MessagingSystemKey is the attribute Key conforming to the
+	// "messaging.system" semantic conventions. It represents the messaging
+	// system as identified by the client instrumentation.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate + // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on + // the instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are delivered to or processed by a consumer + MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") + // One or more messages are settled + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +var ( + // Apache ActiveMQ + MessagingSystemActivemq = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + MessagingSystemServicebus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + MessagingSystemJms = MessagingSystemKey.String("jms") + // Apache Kafka + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). 
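+//
+// A minimal usage sketch (illustrative only, not upstream text): a Kafka
+// publish span could combine the enum values above with the helpers below,
+// assuming `span` is an already-started trace.Span:
+//
+//	span.SetAttributes(
+//		MessagingSystemKafka,
+//		MessagingOperationTypePublish,
+//		MessagingDestinationName("MyTopic"),
+//	)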
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming +// to the "messaging.destination.partition.id" semantic conventions. It +// represents the identifier of the partition messages are sent to or received +// from, unique within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. 
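+//
+// A minimal usage sketch (illustrative only), assuming an uncompressed
+// 1439-byte body carried in a 2738-byte envelope and an already-started
+// trace.Span named `span`:
+//
+//	span.SetAttributes(
+//		MessagingMessageBodySize(1439),
+//		MessagingMessageEnvelopeSize(2738),
+//	)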
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+	return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+	return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+	return MessagingOperationNameKey.String(val)
+}
+
+// This group describes attributes specific to Apache Kafka.
+const (
+	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+	// "messaging.kafka.consumer.group" semantic conventions. It represents the
+	// name of the Kafka Consumer Group that is handling the message. Only
+	// applies to consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// message keys in Kafka, which are used for grouping alike messages to
+	// ensure they're processed on the same partition. They differ from
+	// `messaging.message.id` in that they're not unique. If the key is `null`,
+	// the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+	// be supplied for the attribute. If the key has no unambiguous, canonical
+	// string form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.message.offset" semantic conventions. It represents the
+	// offset of a record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents
+	// a boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+	return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka, which are used for grouping alike messages to ensure
+// they're processed on the same partition. They differ from
+// `messaging.message.id` in that they're not unique. If the key is `null`,
+// the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+	return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+	return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+	return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// This group describes attributes specific to RabbitMQ.
+const (
+	// MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+	// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+	// conventions. It represents the RabbitMQ message routing key.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myKey'
+	MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+	// MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming
+	// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions.
+	// It represents the RabbitMQ message delivery tag.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 123
+	MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+	return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic
+// conventions. It represents the RabbitMQ message delivery tag.
+func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue {
+	return MessagingRabbitmqMessageDeliveryTagKey.Int(val)
+}
+
+// This group describes attributes specific to RocketMQ.
+const (
+	// MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_group" semantic conventions. It represents
+	// the name of the RocketMQ producer/consumer group that is handling the
+	// message. The client type is identified by the SpanKind.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myConsumerGroup'
+	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+	// MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+	// the "messaging.rocketmq.consumption_model" semantic conventions. It
+	// represents the model of message consumption. This only applies to
+	// consumer spans.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+	// conventions. It represents the delay time level for delay messages,
+	// which determines the message delay time.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3
+	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
+	// semantic conventions. It represents the timestamp in milliseconds that
+	// the delay message is expected to be delivered to the consumer.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1665987217045
+	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents
+	// the message group, which is essential for FIFO messages. Messages that
+	// belong to the same message group are always processed one by one within
+	// the same consumer group.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myMessageGroup'
+	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents
+	// the key(s) of the message, another way to mark the message besides the
+	// message id.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of the message besides the topic.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents
+	// the type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources; resources in different namespaces are
+	// individual.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay messages, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark the message besides the
+// message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions.
It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// This group describes attributes specific to GCP Pub/Sub. +const ( + // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. + // It represents the ack deadline in seconds set for the modify ack + // deadline request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") + + // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the + // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It + // represents the ack id for a given message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack_id' + MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") + + // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key + // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" + // semantic conventions. It represents the delivery attempt for a given + // message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") + + // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. + // It represents the ordering key for a given message. If the attribute is + // not present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ordering_key' + MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") +) + +// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic +// conventions. It represents the ack deadline in seconds set for the modify +// ack deadline request. +func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) +} + +// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It +// represents the ack id for a given message. +func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageAckIDKey.String(val) +} + +// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic +// conventions. It represents the delivery attempt for a given message. +func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) +} + +// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic +// conventions. It represents the ordering key for a given message. If the +// attribute is not present, the message does not have an ordering key. 
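+//
+// A minimal usage sketch (illustrative only, not upstream text) for a
+// Pub/Sub receive span, assuming `span` is an already-started trace.Span:
+//
+//	span.SetAttributes(
+//		MessagingSystemGCPPubsub,
+//		MessagingGCPPubsubMessageDeliveryAttempt(2),
+//		MessagingGCPPubsubMessageOrderingKey("ordering_key"),
+//	)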
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+	return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// This group describes attributes specific to Azure Service Bus.
+const (
+	// MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
+	// conforming to the "messaging.servicebus.destination.subscription_name"
+	// semantic conventions. It represents the name of the subscription in the
+	// topic messages are received from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mySubscription'
+	MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
+
+	// MessagingServicebusDispositionStatusKey is the attribute Key conforming
+	// to the "messaging.servicebus.disposition_status" semantic conventions.
+	// It describes the [settlement
+	// type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+	// MessagingServicebusMessageDeliveryCountKey is the attribute Key
+	// conforming to the "messaging.servicebus.message.delivery_count" semantic
+	// conventions. It represents the number of deliveries that have been
+	// attempted for this message.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2
+	MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+	// MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
+	// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+	// conventions. It represents the UTC epoch seconds at which the message
+	// has been accepted and stored in the entity.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1701393730
+	MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+)
+
+var (
+	// Message is completed
+	MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
+	// Message is abandoned
+	MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
+	// Message is sent to dead letter queue
+	MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
+	// Message is deferred
+	MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
+)
+
+// MessagingServicebusDestinationSubscriptionName returns an attribute
+// KeyValue conforming to the
+// "messaging.servicebus.destination.subscription_name" semantic conventions.
+// It represents the name of the subscription in the topic messages are
+// received from.
+func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
+	return MessagingServicebusDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
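+//
+// A minimal usage sketch (illustrative only, not upstream text): a Service
+// Bus settle span could record the disposition enum together with the
+// helpers below, assuming `span` is an already-started trace.Span:
+//
+//	span.SetAttributes(
+//		MessagingSystemServicebus,
+//		MessagingServicebusDispositionStatusComplete,
+//		MessagingServicebusMessageDeliveryCount(2),
+//	)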
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServicebusMessageDeliveryCountKey.Int(val) +} + +// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServicebusMessageEnqueuedTimeKey.Int(val) +} + +// This group describes attributes specific to Azure Event Hubs. +const ( + // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to + // the "messaging.eventhubs.consumer.group" semantic conventions. It + // represents the name of the consumer group the event consumer is + // associated with. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'indexer' + MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") + + // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming + // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. + // It represents the UTC epoch seconds at which the message has been + // accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") +) + +// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming +// to the "messaging.eventhubs.consumer.group" semantic conventions. It +// represents the name of the consumer group the event consumer is associated +// with. +func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { + return MessagingEventhubsConsumerGroupKey.String(val) +} + +// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.eventhubs.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetworkCarrierIccKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier + // network. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMccKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '310' + NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMncKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '001'
+	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'sprint'
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection.type. It may be the type of cell
+	// technology connection, but it could be used for describing details
+	// about a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'LTE'
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the
+	// internet connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'wifi'
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkIoDirectionKey is the attribute Key conforming to the
+	// "network.io.direction" semantic conventions. It represents the network
+	// IO operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'transmit'
+	NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+	// NetworkLocalAddressKey is the attribute Key conforming to the
+	// "network.local.address" semantic conventions. It represents the local
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+	// NetworkLocalPortKey is the attribute Key conforming to the
+	// "network.local.port" semantic conventions. It represents the local port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkLocalPortKey = attribute.Key("network.local.port")
+
+	// NetworkPeerAddressKey is the attribute Key conforming to the
+	// "network.peer.address" semantic conventions. It represents the peer
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+	// NetworkPeerPortKey is the attribute Key conforming to the
+	// "network.peer.port" semantic conventions. It represents the peer port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+	// NetworkProtocolNameKey is the attribute Key conforming to the
+	// "network.protocol.name" semantic conventions. It represents the [OSI
+	// application layer](https://osi-model.com/application-layer/) or non-OSI
+	// equivalent.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // actual version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using + // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute + // SHOULD be set to the negotiated version. If the actual protocol version + // is not known, this attribute SHOULD NOT be set. + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // transmit + NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") + // receive + NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local +// address of the network connection - IP address or Unix domain socket name. 
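+//
+// A minimal usage sketch (illustrative only, not upstream text) describing
+// a TCP/IPv4 connection with the enum values above and the helpers below,
+// assuming `span` is an already-started trace.Span:
+//
+//	span.SetAttributes(
+//		NetworkTransportTCP,
+//		NetworkTypeIpv4,
+//		NetworkPeerAddress("10.1.2.80"),
+//		NetworkPeerPort(65123),
+//	)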
+func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port +// number of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address +// of the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the [OSI +// application layer](https://osi-model.com/application-layer/) or non-OSI +// equivalent. +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// An OCI image manifest. +const ( + // OciManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of + // the OCI image manifest. For container images specifically is the digest + // by which the container image is known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows [OCI Image Manifest + // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), + // and specifically the [Digest + // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). + // An example can be found in [Example Image + // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). + OciManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OciManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically is the digest by which +// the container image is known. +func OciManifestDigest(val string) attribute.KeyValue { + return OciManifestDigestKey.String(val) +} + +// Attributes used by the OpenTracing Shim layer. +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. 
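+	//
+	// A minimal usage sketch (illustrative only, not upstream text): a shim
+	// would typically set exactly one of the two enum values defined below,
+	// assuming `span` is an already-started trace.Span:
+	//
+	//	span.SetAttributes(OpentracingRefTypeChildOf)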
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" + // semantic conventions. It represents the unique identifier for a + // particular build or compilation of the operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. 
It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Attributes reserved for OpenTelemetry +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of
+// the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+	return OTelScopeVersionKey.String(val)
+}
+
+// Operations that access some remote service.
+const (
+	// PeerServiceKey is the attribute Key conforming to the "peer.service"
+	// semantic conventions. It represents the
+	// [`service.name`](/docs/resource/README.md#service) of the remote
+	// service. SHOULD be equal to the actual `service.name` resource attribute
+	// of the remote service if any.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'AuthTokenCache'
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+	return PeerServiceKey.String(val)
+}
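+
+// A minimal usage sketch, assuming the go.opentelemetry.io/otel and
+// go.opentelemetry.io/otel/trace packages; the span name "GetToken" is
+// illustrative, and "AuthTokenCache" is the example value from the
+// convention above.
+//
+//	ctx, span := otel.Tracer("example").Start(ctx, "GetToken",
+//		trace.WithAttributes(PeerService("AuthTokenCache")))
+//	defer span.End()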
+
+// An operating system process.
+const (
+	// ProcessCommandKey is the attribute Key conforming to the
+	// "process.command" semantic conventions. It represents the command used
+	// to launch the process (i.e. the command name). On Linux based systems,
+	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+	// be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+
+	// ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited
+	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+	// this would be the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otelcol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+	// ProcessCommandLineKey is the attribute Key conforming to the
+	// "process.command_line" semantic conventions. It represents the full
+	// command used to launch the process as a single string representing the
+	// full command. On Windows, can be set to the result of `GetCommandLineW`.
+	// Do not set this if you have to assemble it just for monitoring; use
+	// `process.command_args` instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+
+	// ProcessContextSwitchTypeKey is the attribute Key conforming to the
+	// "process.context_switch_type" semantic conventions. It specifies whether
+	// the context switches for this data point were voluntary or involuntary.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+	// ProcessCreationTimeKey is the attribute Key conforming to the
+	// "process.creation.time" semantic conventions. It represents the date and
+	// time the process was created, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2023-11-21T09:25:34.853Z'
+	ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+	// ProcessExecutableNameKey is the attribute Key conforming to the
+	// "process.executable.name" semantic conventions. It represents the name
+	// of the process executable. On Linux based systems, can be set to the
+	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+	// of `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcol'
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+	// ProcessExecutablePathKey is the attribute Key conforming to the
+	// "process.executable.path" semantic conventions. It represents the full
+	// path to the process executable. On Linux based systems, can be set to
+	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/usr/bin/cmd/otelcol'
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+	// ProcessExitCodeKey is the attribute Key conforming to the
+	// "process.exit.code" semantic conventions. It represents the exit code of
+	// the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 127
+	ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+	// ProcessExitTimeKey is the attribute Key conforming to the
+	// "process.exit.time" semantic conventions. It represents the date and
+	// time the process exited, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2023-11-21T09:26:12.315Z'
+	ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+	// ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+	// "process.group_leader.pid" semantic conventions. It represents the PID
+	// of the process's group leader. This is also the process group ID (PGID)
+	// of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 23
+	ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+	// ProcessInteractiveKey is the attribute Key conforming to the
+	// "process.interactive" semantic conventions. It represents whether
+	// the process is connected to an interactive shell.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessInteractiveKey = attribute.Key("process.interactive")
+
+	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+	// semantic conventions. It represents the username of the user that owns
+	// the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessOwnerKey = attribute.Key("process.owner")
+
+	// ProcessPagingFaultTypeKey is the attribute Key conforming to the
+	// "process.paging.fault_type" semantic conventions.
It represents the type + // of page fault for this data point. Type `major` is for major/hard page + // faults, and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user + // ID (RUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the + // username of the real user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved + // user ID (SUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the + // username of the saved user. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'operator'
+	ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+	// ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+	// "process.session_leader.pid" semantic conventions. It represents the PID
+	// of the process's session leader. This is also the session ID (SID) of
+	// the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 14
+	ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+	// ProcessUserIDKey is the attribute Key conforming to the
+	// "process.user.id" semantic conventions. It represents the effective user
+	// ID (EUID) of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1001
+	ProcessUserIDKey = attribute.Key("process.user.id")
+
+	// ProcessUserNameKey is the attribute Key conforming to the
+	// "process.user.name" semantic conventions. It represents the username of
+	// the effective user of the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessUserNameKey = attribute.Key("process.user.name")
+
+	// ProcessVpidKey is the attribute Key conforming to the "process.vpid"
+	// semantic conventions. It represents the virtual process identifier.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 12
+	// Note: The process ID within a PID namespace. This is not necessarily
+	// unique across all processes on the host but it is unique within the
+	// process namespace that the process exists within.
+	ProcessVpidKey = attribute.Key("process.vpid")
+)
+
+var (
+	// voluntary
+	ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+	// involuntary
+	ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+var (
+	// major
+	ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+	// minor
+	ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
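+
+// A minimal sketch of populating a few of these process attributes from the
+// standard library os package (ProcessPID is defined further below in this
+// file); how the attributes are then attached is left to the caller.
+//
+//	attrs := []attribute.KeyValue{
+//		ProcessPID(os.Getpid()),
+//		ProcessCommandArgs(os.Args...),
+//	}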
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and
+// time the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+	return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+	return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+	return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+	return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time
+// the process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+	return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of
+// the process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+	return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+	return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+	return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+	return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+	return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user +// ID (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username +// of the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the +// "process.vpid" semantic conventions. It represents the virtual process +// identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// Attributes for process CPU +const ( + // ProcessCPUStateKey is the attribute Key conforming to the + // "process.cpu.state" semantic conventions. It represents the CPU state of + // the process. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessCPUStateKey = attribute.Key("process.cpu.state") +) + +var ( + // system + ProcessCPUStateSystem = ProcessCPUStateKey.String("system") + // user + ProcessCPUStateUser = ProcessCPUStateKey.String("user") + // wait + ProcessCPUStateWait = ProcessCPUStateKey.String("wait") +) + +// Attributes for remote procedure calls. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. 
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+	// RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+	// semantic conventions. It MUST be calculated as two different counters
+	// starting from `1`, one for sent messages and one for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+	// RPCMessageTypeKey is the attribute Key conforming to the
+	// "rpc.message.type" semantic conventions. It represents whether this
+	// is a received or sent message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+	// RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+	// "rpc.message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+	// RPCMethodKey is the attribute Key conforming to the "rpc.method"
+	// semantic conventions. It represents the name of the (logical) method
+	// being called, must be equal to the $method part in the span name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'exampleMethod'
+	// Note: This is the logical name of the method from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// method/function. The `code.function` attribute may be used to store the
+	// latter (e.g., method actually executing the call on the server side, RPC
+	// client stub method on the client side).
+	RPCMethodKey = attribute.Key("rpc.method")
+
+	// RPCServiceKey is the attribute Key conforming to the "rpc.service"
+	// semantic conventions. It represents the full (logical) name of the
+	// service being called, including its package name, if applicable.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myservice.EchoService'
+	// Note: This is the logical name of the service from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// class. The `code.namespace` attribute may be used to store the latter
+	// (despite the attribute name, it may include a class name; e.g., class
+	// with method actually executing the call on the server side, RPC client
+	// stub class on the client side).
+	RPCServiceKey = attribute.Key("rpc.service")
+
+	// RPCSystemKey is the attribute Key conforming to the "rpc.system"
+	// semantic conventions. It represents a string identifying the remoting
+	// system. See below for a list of well-known identifiers.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCSystemKey = attribute.Key("rpc.system") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +var ( + // sent + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf")
+	// Apache Dubbo
+	RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+	// Connect RPC
+	RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+	return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+	return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since protocol allows id to be int, string,
+// `null` or missing (for notifications), value is expected to be cast to
+// string for simplicity. Use empty string in case of `null` value. Omit
+// entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+	return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+	return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+	return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the
+// "rpc.message.id" semantic conventions. It MUST be calculated as two
+// different counters starting from `1`, one for sent messages and one for
+// received messages.
+func RPCMessageID(val int) attribute.KeyValue {
+	return RPCMessageIDKey.Int(val)
+}
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
+// the "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+	return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+	return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+	return RPCServiceKey.String(val)
+}
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection).
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.address` SHOULD represent the server address + // behind any intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.port` SHOULD represent the server port behind + // any intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the server domain name +// if available without reverse DNS lookup; otherwise, IP address or Unix +// domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to + // distinguish instances of the same service that exist at the same time + // (e.g. instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random + // Version 1 or Version 4 [RFC + // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an + // inherent unique ID as the source of + // this value if stability is desirable. In that case, the ID SHOULD be + // used as source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the + // purposes of identifying a service instance is + // needed. 
Similar to what can be seen in the man page for the
+	// [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
+	// file, the underlying data, such as pod name and namespace, should be
+	// treated as confidential, being the user's choice to expose it or not via
+	// another resource attribute.
+	//
+	// For applications running behind an application server (like unicorn), we
+	// do not recommend using one identifier for all processes participating in
+	// the application. Instead, it's recommended that each division (e.g. a
+	// worker thread in unicorn) have its own instance.id.
+	//
+	// It's not recommended for a Collector to set `service.instance.id` if it
+	// can't unambiguously determine the service instance that is generating
+	// that telemetry. For instance, creating a UUID based on `pod.name` will
+	// likely be wrong, as the Collector might not know from which container
+	// within that pod the telemetry originated. However, Collectors can set
+	// the `service.instance.id` if they can unambiguously determine the
+	// service instance for that telemetry. This is typically the case for
+	// scraping receivers, as they know the target address and port.
+	ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+	// ServiceNameKey is the attribute Key conforming to the "service.name"
+	// semantic conventions. It represents the logical name of the service.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'shoppingcart'
+	// Note: MUST be the same for all instances of horizontally scaled
+	// services. If the value was not specified, SDKs MUST fallback to
+	// `unknown_service:` concatenated with
+	// [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
+	// `process.executable.name` is not available, the value MUST be set to
+	// `unknown_service`.
+	ServiceNameKey = attribute.Key("service.name")
+
+	// ServiceNamespaceKey is the attribute Key conforming to the
+	// "service.namespace" semantic conventions. It represents a namespace for
+	// `service.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Shop'
+	// Note: A string value having a meaning that helps to distinguish a group
+	// of services, for example the team name that owns a group of services.
+	// `service.name` is expected to be unique within the same namespace. If
+	// `service.namespace` is not specified in the Resource then `service.name`
+	// is expected to be unique for all services that have no explicit
+	// namespace defined (so the empty/unspecified namespace is simply one more
+	// valid namespace). Zero-length namespace string is assumed equal to
+	// unspecified namespace.
+	ServiceNamespaceKey = attribute.Key("service.namespace")
+
+	// ServiceVersionKey is the attribute Key conforming to the
+	// "service.version" semantic conventions. It represents the version string
+	// of the service API or implementation. The format is not defined by these
+	// conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2.0.0', 'a01dbef8a'
+	ServiceVersionKey = attribute.Key("service.version")
+)
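+
+// A minimal sketch of describing a service instance with these attributes,
+// assuming the go.opentelemetry.io/otel/sdk/resource package; newUUID is a
+// hypothetical helper (e.g. backed by a UUID library of your choice), and the
+// other values are the example values from the conventions above.
+//
+//	res, err := resource.New(context.Background(),
+//		resource.WithAttributes(
+//			ServiceName("shoppingcart"),
+//			ServiceNamespace("Shop"),
+//			ServiceVersion("2.0.0"),
+//			ServiceInstanceID(newUUID()), // hypothetical UUID helper
+//		),
+//	)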
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+	return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+	return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+	return ServiceNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+	return ServiceVersionKey.String(val)
+}
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+	// SessionIDKey is the attribute Key conforming to the "session.id"
+	// semantic conventions. It represents a unique id to identify a session.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionIDKey = attribute.Key("session.id")
+
+	// SessionPreviousIDKey is the attribute Key conforming to the
+	// "session.previous_id" semantic conventions. It represents the previous
+	// `session.id` for this user, when known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+	return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+	return SessionPreviousIDKey.String(val)
+}
+
+// SignalR attributes
+const (
+	// SignalrConnectionStatusKey is the attribute Key conforming to the
+	// "signalr.connection.status" semantic conventions. It represents the
+	// SignalR HTTP connection closure status.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'app_shutdown', 'timeout'
+	SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+	// SignalrTransportKey is the attribute Key conforming to the
+	// "signalr.transport" semantic conventions.
It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Describes System attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. 
It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. 
It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
It represents the filesystem
+// mode.
+func SystemFilesystemMode(val string) attribute.KeyValue {
+	return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path.
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+	return SystemFilesystemMountpointKey.String(val)
+}
+
+// Describes Network attributes
+const (
+	// SystemNetworkStateKey is the attribute Key conforming to the
+	// "system.network.state" semantic conventions. It represents the state of
+	// the network connection; a stateless protocol MUST NOT set this
+	// attribute.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'close_wait'
+	SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+	// close
+	SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+	// close_wait
+	SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+	// closing
+	SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+	// delete
+	SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+	// established
+	SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+	// fin_wait_1
+	SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+	// fin_wait_2
+	SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+	// last_ack
+	SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+	// listen
+	SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+	// syn_recv
+	SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+	// syn_sent
+	SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+	// time_wait
+	SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process attributes
+const (
+	// SystemProcessStatusKey is the attribute Key conforming to the
+	// "system.process.status" semantic conventions. It represents the process
+	// state, e.g., [Linux Process State
+	// Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'running'
+	SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+var (
+	// running
+	SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+	// sleeping
+	SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+	// stopped
+	SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+	// defunct
+	SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
+
+// Attributes for telemetry SDK.
+const (
+	// TelemetrySDKLanguageKey is the attribute Key conforming to the
+	// "telemetry.sdk.language" semantic conventions. It represents the
+	// language of the telemetry SDK.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+	// TelemetrySDKNameKey is the attribute Key conforming to the
+	// "telemetry.sdk.name" semantic conventions. It represents the name of the
+	// telemetry SDK as defined above.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	// Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+	// to `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. 
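+//
+// A minimal usage sketch (editor's illustration, not part of the generated
+// specification): these helpers are typically attached to a resource, e.g.
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.TelemetrySDKName("opentelemetry"),
+//		semconv.TelemetrySDKVersion("1.2.3"),
+//		semconv.TelemetrySDKLanguageGo,
+//	)
+//
+// where `resource` is assumed to be go.opentelemetry.io/otel/sdk/resource.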
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+	return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+	return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+	return TelemetryDistroVersionKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+	// ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+	// conventions. It represents the current "managed" thread ID (as opposed
+	// to OS thread ID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	ThreadIDKey = attribute.Key("thread.id")
+
+	// ThreadNameKey is the attribute Key conforming to the "thread.name"
+	// semantic conventions. It represents the current thread name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'main'
+	ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+	return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+	return ThreadNameKey.String(val)
+}
+
+// Semantic convention attributes in the TLS namespace.
+const (
+	// TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+	// semantic conventions. It represents the string indicating the
+	// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+	// used during the current connection.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+	// 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+	// Note: The values allowed for `tls.cipher` MUST be one of the
+	// `Descriptions` of the [registered TLS Cipher
+	// Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+	TLSCipherKey = attribute.Key("tls.cipher")
+
+	// TLSClientCertificateKey is the attribute Key conforming to the
+	// "tls.client.certificate" semantic conventions. It represents the
+	// PEM-encoded stand-alone certificate offered by the client. This is
+	// usually mutually-exclusive of `client.certificate_chain` since this
+	// value also exists in that list.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...'
+	TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+	// TLSClientCertificateChainKey is the attribute Key conforming to the
+	// "tls.client.certificate_chain" semantic conventions. It represents the
+	// array of PEM-encoded certificates that make up the certificate chain
+	// offered by the client.
This is usually mutually-exclusive of + // `client.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the + // "tls.client.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based + // on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. 
It represents the
+	// date/Time indicating when client certificate is first considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1970-01-01T00:00:00.000Z'
+	TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+	// TLSClientServerNameKey is the attribute Key conforming to the
+	// "tls.client.server_name" semantic conventions. It represents the server
+	// name indication (SNI) sent by the client, telling the server which
+	// hostname the client is attempting to connect to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry.io'
+	TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+	// TLSClientSubjectKey is the attribute Key conforming to the
+	// "tls.client.subject" semantic conventions. It represents the
+	// distinguished name of subject of the x.509 certificate presented by the
+	// client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+	TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+	// TLSClientSupportedCiphersKey is the attribute Key conforming to the
+	// "tls.client.supported_ciphers" semantic conventions. It represents the
+	// array of ciphers offered by the client during the client hello.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+	// "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+	TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+	// TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+	// conventions. It represents the string indicating the curve used for the
+	// given cipher, when applicable
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'secp256r1'
+	TLSCurveKey = attribute.Key("tls.curve")
+
+	// TLSEstablishedKey is the attribute Key conforming to the
+	// "tls.established" semantic conventions. It represents the boolean flag
+	// indicating if the TLS negotiation was successful and transitioned to an
+	// encrypted tunnel.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: True
+	TLSEstablishedKey = attribute.Key("tls.established")
+
+	// TLSNextProtocolKey is the attribute Key conforming to the
+	// "tls.next_protocol" semantic conventions. It represents the string
+	// indicating the protocol being tunneled. Per the values in the [IANA
+	// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+	// this string should be lower case.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'http/1.1'
+	TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+	// TLSProtocolNameKey is the attribute Key conforming to the
+	// "tls.protocol.name" semantic conventions. It represents the normalized
+	// lowercase protocol name parsed from original string of the negotiated
+	// [SSL/TLS protocol
+	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+	// TLSProtocolVersionKey is the attribute Key conforming to the
+	// "tls.protocol.version" semantic conventions.
It represents the numeric
+	// part of the version parsed from the original string of the negotiated
+	// [SSL/TLS protocol
+	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.2', '3'
+	TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+	// TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+	// semantic conventions. It represents the boolean flag indicating if this
+	// TLS connection was resumed from an existing TLS negotiation.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: True
+	TLSResumedKey = attribute.Key("tls.resumed")
+
+	// TLSServerCertificateKey is the attribute Key conforming to the
+	// "tls.server.certificate" semantic conventions. It represents the
+	// PEM-encoded stand-alone certificate offered by the server. This is
+	// usually mutually-exclusive of `server.certificate_chain` since this
+	// value also exists in that list.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...'
+	TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+	// TLSServerCertificateChainKey is the attribute Key conforming to the
+	// "tls.server.certificate_chain" semantic conventions. It represents the
+	// array of PEM-encoded certificates that make up the certificate chain
+	// offered by the server. This is usually mutually-exclusive of
+	// `server.certificate` since that value should be the first certificate in
+	// the chain.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...', 'MI...'
+	TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+	// TLSServerHashMd5Key is the attribute Key conforming to the
+	// "tls.server.hash.md5" semantic conventions. It represents the
+	// certificate fingerprint using the MD5 digest of DER-encoded version of
+	// certificate offered by the server. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+	TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+	// TLSServerHashSha1Key is the attribute Key conforming to the
+	// "tls.server.hash.sha1" semantic conventions. It represents the
+	// certificate fingerprint using the SHA1 digest of DER-encoded version of
+	// certificate offered by the server. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+	TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+	// TLSServerHashSha256Key is the attribute Key conforming to the
+	// "tls.server.hash.sha256" semantic conventions. It represents the
+	// certificate fingerprint using the SHA256 digest of DER-encoded version
+	// of certificate offered by the server. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+	TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+	// TLSServerIssuerKey is the attribute Key conforming to the
+	// "tls.server.issuer" semantic conventions. It represents the
+	// distinguished name of
+	// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+	// of the issuer of the x.509 certificate presented by the server.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+	// DC=com'
+	TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+	// TLSServerJa3sKey is the attribute Key conforming to the
+	// "tls.server.ja3s" semantic conventions. It represents a hash that
+	// identifies servers based on how they perform an SSL/TLS handshake.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'd4e5b18d6b55c71272893221c96ba240'
+	TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+	// TLSServerNotAfterKey is the attribute Key conforming to the
+	// "tls.server.not_after" semantic conventions. It represents the date/Time
+	// indicating when server certificate is no longer considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2021-01-01T00:00:00.000Z'
+	TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+	// TLSServerNotBeforeKey is the attribute Key conforming to the
+	// "tls.server.not_before" semantic conventions. It represents the
+	// date/Time indicating when server certificate is first considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1970-01-01T00:00:00.000Z'
+	TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+	// TLSServerSubjectKey is the attribute Key conforming to the
+	// "tls.server.subject" semantic conventions. It represents the
+	// distinguished name of subject of the x.509 certificate presented by the
+	// server.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+	TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+	// ssl
+	TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+	// tls
+	TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+	return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+	return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions.
It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually-exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+	return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+	return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+	return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+	return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+	return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+	return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/Time
+// indicating when client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+	return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/Time
+// indicating when client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+	return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the server name
+// indication (SNI) sent by the client, telling the server which hostname the
+// client is attempting to connect to.
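+//
+// A hedged example (editor's sketch): the SNI value can typically be taken
+// from the TLS handshake via crypto/tls, e.g.
+//
+//	// info is a *tls.ClientHelloInfo observed in GetConfigForClient.
+//	attr := semconv.TLSClientServerName(info.ServerName)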
+func TLSClientServerName(val string) attribute.KeyValue {
+	return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+	return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+	return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable
+func TLSCurve(val string) attribute.KeyValue {
+	return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+	return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+	return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+func TLSProtocolVersion(val string) attribute.KeyValue {
+	return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+	return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+	return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually-exclusive of `server.certificate` since
+// that value should be the first certificate in the chain.
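+//
+// Being variadic, this helper records its arguments as a single string-slice
+// attribute; a brief editor's sketch with placeholder PEM values:
+//
+//	attr := semconv.TLSServerCertificateChain("MII...", "MII...")
+//	// attr carries both certificates, in chain order, as a STRINGSLICE value.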
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+	return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+	return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+	return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+	return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+	return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+	return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/Time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+	return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/Time
+// indicating when server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+	return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+	return TLSServerSubjectKey.String(val)
+}
+
+// Attributes describing URL.
+const (
+	// URLDomainKey is the attribute Key conforming to the "url.domain"
+	// semantic conventions. It represents the domain extracted from the
+	// `url.full`, such as "opentelemetry.io".
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, + // without a domain name. In this case, the IP address would go to the + // domain field. If the URL contains a [literal IPv6 + // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by + // `[` and `]`, the `[` and `]` characters should also be captured in the + // domain field. + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from + // the `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has + // a file extension. When the file name has multiple extensions + // `example.tar.gz`, only the last one should be captured `gz`, not + // `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network + // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // '//localhost' + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the + // fragment is not transmitted over HTTP, but if it is known, it SHOULD be + // included nevertheless. + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case username and + // password SHOULD be redacted and attribute's value SHOULD be + // `https://REDACTED:REDACTED@www.example.com/`. + // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed). Sensitive content provided in `url.full` SHOULD be + // scrubbed when instrumentations can identify it. + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" + // semantic conventions. It represents the unmodified original URL as seen + // in the event source. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas + // in access logs, the URL is often just represented as a path. This field + // is meant to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the + // same. 
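+	//
+	// A hedged illustration (editor's note, hypothetical values): for the
+	// same request the two attributes may legitimately differ:
+	//
+	//	semconv.URLFull("https://REDACTED:REDACTED@www.example.com/")
+	//	semconv.URLOriginal("https://user:pass@www.example.com/")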
+	URLOriginalKey = attribute.Key("url.original")
+
+	// URLPathKey is the attribute Key conforming to the "url.path" semantic
+	// conventions. It represents the [URI
+	// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/search'
+	// Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+	// instrumentations can identify it.
+	URLPathKey = attribute.Key("url.path")
+
+	// URLPortKey is the attribute Key conforming to the "url.port" semantic
+	// conventions. It represents the port extracted from the `url.full`
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 443
+	URLPortKey = attribute.Key("url.port")
+
+	// URLQueryKey is the attribute Key conforming to the "url.query" semantic
+	// conventions. It represents the [URI
+	// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'q=OpenTelemetry'
+	// Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+	// instrumentations can identify it.
+	URLQueryKey = attribute.Key("url.query")
+
+	// URLRegisteredDomainKey is the attribute Key conforming to the
+	// "url.registered_domain" semantic conventions. It represents the highest
+	// registered url domain, stripped of the subdomain.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'example.com', 'foo.co.uk'
+	// Note: This value can be determined precisely with the [public suffix
+	// list](http://publicsuffix.org). For example, the registered domain for
+	// `foo.example.com` is `example.com`. Trying to approximate this by simply
+	// taking the last two labels will not work well for TLDs such as `co.uk`.
+	URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+	// URLSchemeKey is the attribute Key conforming to the "url.scheme"
+	// semantic conventions. It represents the [URI
+	// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+	// identifying the used protocol.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https', 'ftp', 'telnet'
+	URLSchemeKey = attribute.Key("url.scheme")
+
+	// URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+	// semantic conventions. It represents the subdomain portion of a fully
+	// qualified domain name, which includes all of the names except the host
+	// name under the registered_domain. In a partially qualified domain, or if
+	// the qualification level of the full name cannot be determined, subdomain
+	// contains all of the names below the registered domain.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'east', 'sub2.sub1'
+	// Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
+	// the domain has multiple levels of subdomain, such as
+	// `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
+	// with no trailing period.
+	URLSubdomainKey = attribute.Key("url.subdomain")
+
+	// URLTemplateKey is the attribute Key conforming to the "url.template"
+	// semantic conventions. It represents the low-cardinality template of an
+	// [absolute path
+	// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/users/{id}', '/users/:id', '/users?id={id}'
+	URLTemplateKey = attribute.Key("url.template")
+
+	// URLTopLevelDomainKey is the attribute Key conforming to the
+	// "url.top_level_domain" semantic conventions. It represents the effective
+	// top level domain (eTLD), also known as the domain suffix, which is the
+	// last part of the domain name. For example, the top level domain for
+	// example.com is `com`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'com', 'co.uk'
+	// Note: This value can be determined precisely with the [public suffix
+	// list](http://publicsuffix.org).
+	URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the
+// `url.full`, such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+	return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the
+// "url.extension" semantic conventions. It represents the file extension
+// extracted from the `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+	return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+	return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+	return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the
+// "url.original" semantic conventions. It represents the unmodified original
+// URL as seen in the event source.
+func URLOriginal(val string) attribute.KeyValue {
+	return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+	return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port"
+// semantic conventions. It represents the port extracted from the `url.full`
+func URLPort(val int) attribute.KeyValue {
+	return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+	return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered url domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+	return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions.
It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+	return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the
+// "url.subdomain" semantic conventions. It represents the subdomain portion of
+// a fully qualified domain name, which includes all of the names except the
+// host name under the registered_domain. In a partially qualified domain, or
+// if the qualification level of the full name cannot be determined, subdomain
+// contains all of the names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+	return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the
+// "url.template" semantic conventions. It represents the low-cardinality
+// template of an [absolute path
+// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+func URLTemplate(val string) attribute.KeyValue {
+	return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix, which is the last part
+// of the domain name. For example, the top level domain for example.com is
+// `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+	return URLTopLevelDomainKey.String(val)
+}
+
+// Describes user-agent attributes.
+const (
+	// UserAgentNameKey is the attribute Key conforming to the
+	// "user_agent.name" semantic conventions. It represents the name of the
+	// user-agent extracted from original. Usually refers to the browser's
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Safari', 'YourApp'
+	// Note: [Example](https://www.whatsmyua.info) of extracting browser's name
+	// from original string. In the case of using a user-agent for non-browser
+	// products, such as microservices with multiple names/versions inside the
+	// `user_agent.original`, the most significant name SHOULD be selected. In
+	// such a scenario it should align with `user_agent.version`
+	UserAgentNameKey = attribute.Key("user_agent.name")
+
+	// UserAgentOriginalKey is the attribute Key conforming to the
+	// "user_agent.original" semantic conventions. It represents the value of
+	// the [HTTP
+	// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+	// header sent by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+	// iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+	// Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
+	// grpc-java-okhttp/1.27.2'
+	UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+	// UserAgentVersionKey is the attribute Key conforming to the
+	// "user_agent.version" semantic conventions. It represents the version of
+	// the user-agent extracted from original. Usually refers to the browser's
+	// version
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '14.1.2', '1.0.0'
+	// Note: [Example](https://www.whatsmyua.info) of extracting browser's
+	// version from original string.
In the case of using a user-agent for + // non-browser products, such as microservices with multiple names/versions + // inside the `user_agent.original`, the most significant version SHOULD be + // selected. In such a scenario it should align with `user_agent.name` + UserAgentVersionKey = attribute.Key("user_agent.version") +) + +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// The attributes used to describe the packaged software running the +// application code. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. 
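+//
+// A brief editor's sketch (values taken from the examples above):
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.WebEngineName("WildFly"),
+//		semconv.WebEngineVersion("21.0.0"),
+//	}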
+func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go new file mode 100644 index 0000000000..d031bbea78 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.26.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go new file mode 100644 index 0000000000..bfaee0d56e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go new file mode 100644 index 0000000000..fcdb9f4859 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go @@ -0,0 +1,1307 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + + // ContainerCPUTime is the metric conforming to the "container.cpu.time" + // semantic conventions. It represents the total CPU time consumed. + // Instrument: counter + // Unit: s + // Stability: Experimental + ContainerCPUTimeName = "container.cpu.time" + ContainerCPUTimeUnit = "s" + ContainerCPUTimeDescription = "Total CPU time consumed" + + // ContainerMemoryUsage is the metric conforming to the + // "container.memory.usage" semantic conventions. It represents the memory + // usage of the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerMemoryUsageName = "container.memory.usage" + ContainerMemoryUsageUnit = "By" + ContainerMemoryUsageDescription = "Memory usage of the container." + + // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic + // conventions. It represents the disk bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerDiskIoName = "container.disk.io" + ContainerDiskIoUnit = "By" + ContainerDiskIoDescription = "Disk bytes for the container." + + // ContainerNetworkIo is the metric conforming to the "container.network.io" + // semantic conventions. It represents the network bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerNetworkIoName = "container.network.io" + ContainerNetworkIoUnit = "By" + ContainerNetworkIoDescription = "Network bytes for the container." + + // DBClientOperationDuration is the metric conforming to the + // "db.client.operation.duration" semantic conventions. It represents the + // duration of database client operations. 
+ // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientOperationDurationName = "db.client.operation.duration" + DBClientOperationDurationUnit = "s" + DBClientOperationDurationDescription = "Duration of database client operations." + + // DBClientConnectionCount is the metric conforming to the + // "db.client.connection.count" semantic conventions. It represents the number + // of connections that are currently in state described by the `state` + // attribute. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionCountName = "db.client.connection.count" + DBClientConnectionCountUnit = "{connection}" + DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionIdleMax is the metric conforming to the + // "db.client.connection.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMaxName = "db.client.connection.idle.max" + DBClientConnectionIdleMaxUnit = "{connection}" + DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionIdleMin is the metric conforming to the + // "db.client.connection.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMinName = "db.client.connection.idle.min" + DBClientConnectionIdleMinUnit = "{connection}" + DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionMax is the metric conforming to the + // "db.client.connection.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionMaxName = "db.client.connection.max" + DBClientConnectionMaxUnit = "{connection}" + DBClientConnectionMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionPendingRequests is the metric conforming to the + // "db.client.connection.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" + DBClientConnectionPendingRequestsUnit = "{request}" + DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionTimeouts is the metric conforming to the + // "db.client.connection.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. 
+	// Instrument: counter
+	// Unit: {timeout}
+	// Stability: Experimental
+	DBClientConnectionTimeoutsName = "db.client.connection.timeouts"
+	DBClientConnectionTimeoutsUnit = "{timeout}"
+	DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
+
+	// DBClientConnectionCreateTime is the metric conforming to the
+	// "db.client.connection.create_time" semantic conventions. It represents the
+	// time it took to create a new connection.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	DBClientConnectionCreateTimeName = "db.client.connection.create_time"
+	DBClientConnectionCreateTimeUnit = "s"
+	DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
+
+	// DBClientConnectionWaitTime is the metric conforming to the
+	// "db.client.connection.wait_time" semantic conventions. It represents the
+	// time it took to obtain an open connection from the pool.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	DBClientConnectionWaitTimeName = "db.client.connection.wait_time"
+	DBClientConnectionWaitTimeUnit = "s"
+	DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
+
+	// DBClientConnectionUseTime is the metric conforming to the
+	// "db.client.connection.use_time" semantic conventions. It represents the time
+	// between borrowing a connection and returning it to the pool.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	DBClientConnectionUseTimeName = "db.client.connection.use_time"
+	DBClientConnectionUseTimeUnit = "s"
+	DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
+
+	// DBClientConnectionsUsage is the metric conforming to the
+	// "db.client.connections.usage" semantic conventions. It is deprecated;
+	// use `db.client.connection.count` instead.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionsUsageName = "db.client.connections.usage"
+	DBClientConnectionsUsageUnit = "{connection}"
+	DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
+
+	// DBClientConnectionsIdleMax is the metric conforming to the
+	// "db.client.connections.idle.max" semantic conventions. It is deprecated;
+	// use `db.client.connection.idle.max` instead.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
+	DBClientConnectionsIdleMaxUnit = "{connection}"
+	DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
+
+	// DBClientConnectionsIdleMin is the metric conforming to the
+	// "db.client.connections.idle.min" semantic conventions. It is deprecated;
+	// use `db.client.connection.idle.min` instead.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
+	DBClientConnectionsIdleMinUnit = "{connection}"
+	DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
+
+	// DBClientConnectionsMax is the metric conforming to the
+	// "db.client.connections.max" semantic conventions. It is deprecated;
+	// use `db.client.connection.max` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsMaxName = "db.client.connections.max"
+ DBClientConnectionsMaxUnit = "{connection}"
+ DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
+
+ // DBClientConnectionsPendingRequests is the metric conforming to the
+ // "db.client.connections.pending_requests" semantic conventions. It is
+ // deprecated; use `db.client.connection.pending_requests` instead.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
+ DBClientConnectionsPendingRequestsUnit = "{request}"
+ DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
+
+ // DBClientConnectionsTimeouts is the metric conforming to the
+ // "db.client.connections.timeouts" semantic conventions. It is
+ // deprecated; use `db.client.connection.timeouts` instead.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
+ DBClientConnectionsTimeoutsUnit = "{timeout}"
+ DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
+
+ // DBClientConnectionsCreateTime is the metric conforming to the
+ // "db.client.connections.create_time" semantic conventions. It is
+ // deprecated; use `db.client.connection.create_time` instead. Note: the unit
+ // also changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
+ DBClientConnectionsCreateTimeUnit = "ms"
+ DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsWaitTime is the metric conforming to the
+ // "db.client.connections.wait_time" semantic conventions. It is
+ // deprecated; use `db.client.connection.wait_time` instead. Note: the unit
+ // also changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
+ DBClientConnectionsWaitTimeUnit = "ms"
+ DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsUseTime is the metric conforming to the
+ // "db.client.connections.use_time" semantic conventions. It is
+ // deprecated; use `db.client.connection.use_time` instead. Note: the unit also
+ // changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsUseTimeName = "db.client.connections.use_time"
+ DBClientConnectionsUseTimeUnit = "ms"
+ DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
+ // semantic conventions. It measures the time taken to perform a
+ // DNS lookup.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DNSLookupDurationName = "dns.lookup.duration"
+ DNSLookupDurationUnit = "s"
+ DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
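[Editorial note, not part of the vendored patch: the generated Name/Unit/Description triples above are intended to be passed to instrument constructors. The sketch below shows the pattern for the DNS lookup histogram, assuming the standard go.opentelemetry.io/otel and go.opentelemetry.io/otel/metric APIs; the scope name "example/dns" and the function name are illustrative.]

// Sketch: wiring a semconv metric triple into an instrument.
package example

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func recordDNSLookup(ctx context.Context, elapsed time.Duration) error {
	meter := otel.Meter("example/dns") // hypothetical instrumentation scope
	hist, err := meter.Float64Histogram(
		semconv.DNSLookupDurationName, // "dns.lookup.duration"
		metric.WithUnit(semconv.DNSLookupDurationUnit),
		metric.WithDescription(semconv.DNSLookupDurationDescription),
	)
	if err != nil {
		return err
	}
	hist.Record(ctx, elapsed.Seconds()) // the convention's unit is seconds
	return nil
}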
+ + // AspnetcoreRoutingMatchAttempts is the metric conforming to the + // "aspnetcore.routing.match_attempts" semantic conventions. It represents the + // number of requests that were attempted to be matched to an endpoint. + // Instrument: counter + // Unit: {match_attempt} + // Stability: Stable + AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" + AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" + AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." + + // AspnetcoreDiagnosticsExceptions is the metric conforming to the + // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the + // number of exceptions caught by exception handling middleware. + // Instrument: counter + // Unit: {exception} + // Stability: Stable + AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" + AspnetcoreDiagnosticsExceptionsUnit = "{exception}" + AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." + + // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the + // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It + // represents the number of requests that are currently active on the server + // that hold a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" + AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" + AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." + + // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the + // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It + // represents the duration of rate limiting lease held by requests on the + // server. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" + AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" + AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." + + // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the + // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It + // represents the time the request spent in a queue waiting to acquire a rate + // limiting lease. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" + AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" + AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the + // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It + // represents the number of requests that are currently queued, waiting to + // acquire a rate limiting lease. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" + AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" + AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingRequests is the metric conforming to the + // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the + // number of requests that tried to acquire a rate limiting lease. + // Instrument: counter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" + AspnetcoreRateLimitingRequestsUnit = "{request}" + AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." + + // KestrelActiveConnections is the metric conforming to the + // "kestrel.active_connections" semantic conventions. It represents the number + // of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelActiveConnectionsName = "kestrel.active_connections" + KestrelActiveConnectionsUnit = "{connection}" + KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // KestrelConnectionDuration is the metric conforming to the + // "kestrel.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + KestrelConnectionDurationName = "kestrel.connection.duration" + KestrelConnectionDurationUnit = "s" + KestrelConnectionDurationDescription = "The duration of connections on the server." + + // KestrelRejectedConnections is the metric conforming to the + // "kestrel.rejected_connections" semantic conventions. It represents the + // number of connections rejected by the server. + // Instrument: counter + // Unit: {connection} + // Stability: Stable + KestrelRejectedConnectionsName = "kestrel.rejected_connections" + KestrelRejectedConnectionsUnit = "{connection}" + KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." + + // KestrelQueuedConnections is the metric conforming to the + // "kestrel.queued_connections" semantic conventions. It represents the number + // of connections that are currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelQueuedConnectionsName = "kestrel.queued_connections" + KestrelQueuedConnectionsUnit = "{connection}" + KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." + + // KestrelQueuedRequests is the metric conforming to the + // "kestrel.queued_requests" semantic conventions. It represents the number of + // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are + // currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + KestrelQueuedRequestsName = "kestrel.queued_requests" + KestrelQueuedRequestsUnit = "{request}" + KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." + + // KestrelUpgradedConnections is the metric conforming to the + // "kestrel.upgraded_connections" semantic conventions. 
It represents the
+ // number of connections that are currently upgraded (WebSockets).
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+ KestrelUpgradedConnectionsUnit = "{connection}"
+ KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
+
+ // KestrelTLSHandshakeDuration is the metric conforming to the
+ // "kestrel.tls_handshake.duration" semantic conventions. It represents the
+ // duration of TLS handshakes on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+ KestrelTLSHandshakeDurationUnit = "s"
+ KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+ // KestrelActiveTLSHandshakes is the metric conforming to the
+ // "kestrel.active_tls_handshakes" semantic conventions. It represents the
+ // number of TLS handshakes that are currently in progress on the server.
+ // Instrument: updowncounter
+ // Unit: {handshake}
+ // Stability: Stable
+ KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+ KestrelActiveTLSHandshakesUnit = "{handshake}"
+ KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+ // SignalrServerConnectionDuration is the metric conforming to the
+ // "signalr.server.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+ SignalrServerConnectionDurationUnit = "s"
+ SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+
+ // SignalrServerActiveConnections is the metric conforming to the
+ // "signalr.server.active_connections" semantic conventions. It represents the
+ // number of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+ SignalrServerActiveConnectionsUnit = "{connection}"
+ SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+ // semantic conventions. It measures the duration of the
+ // function's logic execution.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInvokeDurationName = "faas.invoke_duration"
+ FaaSInvokeDurationUnit = "s"
+ FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+ // FaaSInitDuration is the metric conforming to the "faas.init_duration"
+ // semantic conventions. It measures the duration of the
+ // function's initialization, such as a cold start.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInitDurationName = "faas.init_duration"
+ FaaSInitDurationUnit = "s"
+ FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+ // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+ // conventions. It represents the number of invocation cold starts.
+ // Instrument: counter + // Unit: {coldstart} + // Stability: Experimental + FaaSColdstartsName = "faas.coldstarts" + FaaSColdstartsUnit = "{coldstart}" + FaaSColdstartsDescription = "Number of invocation cold starts" + + // FaaSErrors is the metric conforming to the "faas.errors" semantic + // conventions. It represents the number of invocation errors. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + FaaSErrorsName = "faas.errors" + FaaSErrorsUnit = "{error}" + FaaSErrorsDescription = "Number of invocation errors" + + // FaaSInvocations is the metric conforming to the "faas.invocations" semantic + // conventions. It represents the number of successful invocations. + // Instrument: counter + // Unit: {invocation} + // Stability: Experimental + FaaSInvocationsName = "faas.invocations" + FaaSInvocationsUnit = "{invocation}" + FaaSInvocationsDescription = "Number of successful invocations" + + // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic + // conventions. It represents the number of invocation timeouts. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + FaaSTimeoutsName = "faas.timeouts" + FaaSTimeoutsUnit = "{timeout}" + FaaSTimeoutsDescription = "Number of invocation timeouts" + + // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic + // conventions. It represents the distribution of max memory usage per + // invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSMemUsageName = "faas.mem_usage" + FaaSMemUsageUnit = "By" + FaaSMemUsageDescription = "Distribution of max memory usage per invocation" + + // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic + // conventions. It represents the distribution of CPU usage per invocation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSCPUUsageName = "faas.cpu_usage" + FaaSCPUUsageUnit = "s" + FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" + + // FaaSNetIo is the metric conforming to the "faas.net_io" semantic + // conventions. It represents the distribution of net I/O usage per invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSNetIoName = "faas.net_io" + FaaSNetIoUnit = "By" + FaaSNetIoDescription = "Distribution of net I/O usage per invocation" + + // HTTPServerRequestDuration is the metric conforming to the + // "http.server.request.duration" semantic conventions. It represents the + // duration of HTTP server requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPServerRequestDurationName = "http.server.request.duration" + HTTPServerRequestDurationUnit = "s" + HTTPServerRequestDurationDescription = "Duration of HTTP server requests." + + // HTTPServerActiveRequests is the metric conforming to the + // "http.server.active_requests" semantic conventions. It represents the number + // of active HTTP server requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPServerActiveRequestsName = "http.server.active_requests" + HTTPServerActiveRequestsUnit = "{request}" + HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." + + // HTTPServerRequestBodySize is the metric conforming to the + // "http.server.request.body.size" semantic conventions. It represents the size + // of HTTP server request bodies. 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerRequestBodySizeName = "http.server.request.body.size" + HTTPServerRequestBodySizeUnit = "By" + HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." + + // HTTPServerResponseBodySize is the metric conforming to the + // "http.server.response.body.size" semantic conventions. It represents the + // size of HTTP server response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerResponseBodySizeName = "http.server.response.body.size" + HTTPServerResponseBodySizeUnit = "By" + HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." + + // HTTPClientRequestDuration is the metric conforming to the + // "http.client.request.duration" semantic conventions. It represents the + // duration of HTTP client requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPClientRequestDurationName = "http.client.request.duration" + HTTPClientRequestDurationUnit = "s" + HTTPClientRequestDurationDescription = "Duration of HTTP client requests." + + // HTTPClientRequestBodySize is the metric conforming to the + // "http.client.request.body.size" semantic conventions. It represents the size + // of HTTP client request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientRequestBodySizeName = "http.client.request.body.size" + HTTPClientRequestBodySizeUnit = "By" + HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." + + // HTTPClientResponseBodySize is the metric conforming to the + // "http.client.response.body.size" semantic conventions. It represents the + // size of HTTP client response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientResponseBodySizeName = "http.client.response.body.size" + HTTPClientResponseBodySizeUnit = "By" + HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." 
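[Editorial note, not part of the vendored patch: "http.server.active_requests" above is an updowncounter, so instrumentation adds +1 when a request starts and -1 when it finishes. A minimal sketch of that pattern follows, assuming the standard otel/metric API; the middleware shape and the scope name "example/httpserver" are illustrative.]

// Sketch: the updowncounter pattern for active HTTP server requests.
package example

import (
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func activeRequests(next http.Handler) http.Handler {
	meter := otel.Meter("example/httpserver") // hypothetical scope name
	active, err := meter.Int64UpDownCounter(
		semconv.HTTPServerActiveRequestsName,
		metric.WithUnit(semconv.HTTPServerActiveRequestsUnit),
		metric.WithDescription(semconv.HTTPServerActiveRequestsDescription),
	)
	if err != nil {
		otel.Handle(err) // report via the global error handler, keep serving
		return next
	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		active.Add(r.Context(), 1)        // request started
		defer active.Add(r.Context(), -1) // request finished
		next.ServeHTTP(w, r)
	})
}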
+ + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic + // conventions. It represents the measure of initial memory requested. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmMemoryInitName = "jvm.memory.init" + JvmMemoryInitUnit = "By" + JvmMemoryInitDescription = "Measure of initial memory requested." + + // JvmSystemCPUUtilization is the metric conforming to the + // "jvm.system.cpu.utilization" semantic conventions. It represents the recent + // CPU utilization for the whole system as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" + JvmSystemCPUUtilizationUnit = "1" + JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." + + // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" + // semantic conventions. It represents the average CPU load of the whole system + // for the last minute as reported by the JVM. + // Instrument: gauge + // Unit: {run_queue_item} + // Stability: Experimental + JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" + JvmSystemCPULoad1mUnit = "{run_queue_item}" + JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." + + // JvmBufferMemoryUsage is the metric conforming to the + // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of + // memory used by buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" + JvmBufferMemoryUsageUnit = "By" + JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." + + // JvmBufferMemoryLimit is the metric conforming to the + // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of + // total memory capacity of buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" + JvmBufferMemoryLimitUnit = "By" + JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." + + // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic + // conventions. It represents the number of buffers in the pool. + // Instrument: updowncounter + // Unit: {buffer} + // Stability: Experimental + JvmBufferCountName = "jvm.buffer.count" + JvmBufferCountUnit = "{buffer}" + JvmBufferCountDescription = "Number of buffers in the pool." + + // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic + // conventions. It represents the measure of memory used. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedName = "jvm.memory.used" + JvmMemoryUsedUnit = "By" + JvmMemoryUsedDescription = "Measure of memory used." + + // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" + // semantic conventions. It represents the measure of memory committed. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryCommittedName = "jvm.memory.committed" + JvmMemoryCommittedUnit = "By" + JvmMemoryCommittedDescription = "Measure of memory committed." + + // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic + // conventions. It represents the measure of max obtainable memory. 
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryLimitName = "jvm.memory.limit"
+ JvmMemoryLimitUnit = "By"
+ JvmMemoryLimitDescription = "Measure of max obtainable memory."
+
+ // JvmMemoryUsedAfterLastGc is the metric conforming to the
+ // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
+ // measure of memory used, as measured after the most recent garbage collection
+ // event on this pool.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
+ JvmMemoryUsedAfterLastGcUnit = "By"
+ JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
+
+ // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
+ // conventions. It represents the duration of JVM garbage collection actions.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ JvmGcDurationName = "jvm.gc.duration"
+ JvmGcDurationUnit = "s"
+ JvmGcDurationDescription = "Duration of JVM garbage collection actions."
+
+ // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
+ // conventions. It represents the number of executing platform threads.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Stable
+ JvmThreadCountName = "jvm.thread.count"
+ JvmThreadCountUnit = "{thread}"
+ JvmThreadCountDescription = "Number of executing platform threads."
+
+ // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
+ // conventions. It represents the number of classes loaded since JVM start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassLoadedName = "jvm.class.loaded"
+ JvmClassLoadedUnit = "{class}"
+ JvmClassLoadedDescription = "Number of classes loaded since JVM start."
+
+ // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
+ // semantic conventions. It represents the number of classes unloaded since JVM
+ // start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassUnloadedName = "jvm.class.unloaded"
+ JvmClassUnloadedUnit = "{class}"
+ JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
+
+ // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
+ // conventions. It represents the number of classes currently loaded.
+ // Instrument: updowncounter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassCountName = "jvm.class.count"
+ JvmClassCountUnit = "{class}"
+ JvmClassCountDescription = "Number of classes currently loaded."
+
+ // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
+ // conventions. It represents the number of processors available to the Java
+ // virtual machine.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Stable
+ JvmCPUCountName = "jvm.cpu.count"
+ JvmCPUCountUnit = "{cpu}"
+ JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
+
+ // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
+ // conventions. It represents the CPU time used by the process as reported by
+ // the JVM.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Stable
+ JvmCPUTimeName = "jvm.cpu.time"
+ JvmCPUTimeUnit = "s"
+ JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
+
+ // JvmCPURecentUtilization is the metric conforming to the
+ // "jvm.cpu.recent_utilization" semantic conventions.
It represents the recent
+ // CPU utilization for the process as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Stable
+ JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
+ JvmCPURecentUtilizationUnit = "1"
+ JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
+
+ // MessagingPublishDuration is the metric conforming to the
+ // "messaging.publish.duration" semantic conventions. It measures the
+ // duration of the publish operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingPublishDurationName = "messaging.publish.duration"
+ MessagingPublishDurationUnit = "s"
+ MessagingPublishDurationDescription = "Measures the duration of publish operation."
+
+ // MessagingReceiveDuration is the metric conforming to the
+ // "messaging.receive.duration" semantic conventions. It measures the
+ // duration of the receive operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingReceiveDurationName = "messaging.receive.duration"
+ MessagingReceiveDurationUnit = "s"
+ MessagingReceiveDurationDescription = "Measures the duration of receive operation."
+
+ // MessagingProcessDuration is the metric conforming to the
+ // "messaging.process.duration" semantic conventions. It measures the
+ // duration of the process operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingProcessDurationName = "messaging.process.duration"
+ MessagingProcessDurationUnit = "s"
+ MessagingProcessDurationDescription = "Measures the duration of process operation."
+
+ // MessagingPublishMessages is the metric conforming to the
+ // "messaging.publish.messages" semantic conventions. It measures the
+ // number of published messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingPublishMessagesName = "messaging.publish.messages"
+ MessagingPublishMessagesUnit = "{message}"
+ MessagingPublishMessagesDescription = "Measures the number of published messages."
+
+ // MessagingReceiveMessages is the metric conforming to the
+ // "messaging.receive.messages" semantic conventions. It measures the
+ // number of received messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingReceiveMessagesName = "messaging.receive.messages"
+ MessagingReceiveMessagesUnit = "{message}"
+ MessagingReceiveMessagesDescription = "Measures the number of received messages."
+
+ // MessagingProcessMessages is the metric conforming to the
+ // "messaging.process.messages" semantic conventions. It measures the
+ // number of processed messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingProcessMessagesName = "messaging.process.messages"
+ MessagingProcessMessagesUnit = "{message}"
+ MessagingProcessMessagesDescription = "Measures the number of processed messages."
+
+ // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic
+ // conventions. It represents the total CPU seconds broken down by different
+ // states.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ ProcessCPUTimeName = "process.cpu.time"
+ ProcessCPUTimeUnit = "s"
+ ProcessCPUTimeDescription = "Total CPU seconds broken down by different states."
+
+ // ProcessCPUUtilization is the metric conforming to the
+ // "process.cpu.utilization" semantic conventions.
It represents the difference + // in process.cpu.time since the last measurement, divided by the elapsed time + // and number of CPUs available to the process. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + ProcessCPUUtilizationName = "process.cpu.utilization" + ProcessCPUUtilizationUnit = "1" + ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." + + // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" + // semantic conventions. It represents the amount of physical memory in use. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryUsageName = "process.memory.usage" + ProcessMemoryUsageUnit = "By" + ProcessMemoryUsageDescription = "The amount of physical memory in use." + + // ProcessMemoryVirtual is the metric conforming to the + // "process.memory.virtual" semantic conventions. It represents the amount of + // committed virtual memory. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryVirtualName = "process.memory.virtual" + ProcessMemoryVirtualUnit = "By" + ProcessMemoryVirtualDescription = "The amount of committed virtual memory." + + // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic + // conventions. It represents the disk bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessDiskIoName = "process.disk.io" + ProcessDiskIoUnit = "By" + ProcessDiskIoDescription = "Disk bytes transferred." + + // ProcessNetworkIo is the metric conforming to the "process.network.io" + // semantic conventions. It represents the network bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessNetworkIoName = "process.network.io" + ProcessNetworkIoUnit = "By" + ProcessNetworkIoDescription = "Network bytes transferred." + + // ProcessThreadCount is the metric conforming to the "process.thread.count" + // semantic conventions. It represents the process threads count. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Experimental + ProcessThreadCountName = "process.thread.count" + ProcessThreadCountUnit = "{thread}" + ProcessThreadCountDescription = "Process threads count." + + // ProcessOpenFileDescriptorCount is the metric conforming to the + // "process.open_file_descriptor.count" semantic conventions. It represents the + // number of file descriptors in use by the process. + // Instrument: updowncounter + // Unit: {count} + // Stability: Experimental + ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" + ProcessOpenFileDescriptorCountUnit = "{count}" + ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." + + // ProcessContextSwitches is the metric conforming to the + // "process.context_switches" semantic conventions. It represents the number of + // times the process has been context switched. + // Instrument: counter + // Unit: {count} + // Stability: Experimental + ProcessContextSwitchesName = "process.context_switches" + ProcessContextSwitchesUnit = "{count}" + ProcessContextSwitchesDescription = "Number of times the process has been context switched." + + // ProcessPagingFaults is the metric conforming to the "process.paging.faults" + // semantic conventions. It represents the number of page faults the process + // has made. 
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ ProcessPagingFaultsName = "process.paging.faults"
+ ProcessPagingFaultsUnit = "{fault}"
+ ProcessPagingFaultsDescription = "Number of page faults the process has made."
+
+ // RPCServerDuration is the metric conforming to the "rpc.server.duration"
+ // semantic conventions. It measures the duration of inbound
+ // RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCServerDurationName = "rpc.server.duration"
+ RPCServerDurationUnit = "ms"
+ RPCServerDurationDescription = "Measures the duration of inbound RPC."
+
+ // RPCServerRequestSize is the metric conforming to the
+ // "rpc.server.request.size" semantic conventions. It measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerRequestSizeName = "rpc.server.request.size"
+ RPCServerRequestSizeUnit = "By"
+ RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCServerResponseSize is the metric conforming to the
+ // "rpc.server.response.size" semantic conventions. It measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerResponseSizeName = "rpc.server.response.size"
+ RPCServerResponseSizeUnit = "By"
+ RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCServerRequestsPerRPC is the metric conforming to the
+ // "rpc.server.requests_per_rpc" semantic conventions. It measures the
+ // number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
+ RPCServerRequestsPerRPCUnit = "{count}"
+ RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCServerResponsesPerRPC is the metric conforming to the
+ // "rpc.server.responses_per_rpc" semantic conventions. It measures the
+ // number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
+ RPCServerResponsesPerRPCUnit = "{count}"
+ RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // RPCClientDuration is the metric conforming to the "rpc.client.duration"
+ // semantic conventions. It measures the duration of outbound
+ // RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCClientDurationName = "rpc.client.duration"
+ RPCClientDurationUnit = "ms"
+ RPCClientDurationDescription = "Measures the duration of outbound RPC."
+
+ // RPCClientRequestSize is the metric conforming to the
+ // "rpc.client.request.size" semantic conventions. It measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientRequestSizeName = "rpc.client.request.size"
+ RPCClientRequestSizeUnit = "By"
+ RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCClientResponseSize is the metric conforming to the
+ // "rpc.client.response.size" semantic conventions. It measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientResponseSizeName = "rpc.client.response.size"
+ RPCClientResponseSizeUnit = "By"
+ RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCClientRequestsPerRPC is the metric conforming to the
+ // "rpc.client.requests_per_rpc" semantic conventions. It measures the
+ // number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
+ RPCClientRequestsPerRPCUnit = "{count}"
+ RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCClientResponsesPerRPC is the metric conforming to the
+ // "rpc.client.responses_per_rpc" semantic conventions. It measures the
+ // number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
+ RPCClientResponsesPerRPCUnit = "{count}"
+ RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
+ // conventions. It represents the seconds each logical CPU spent on each mode.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemCPUTimeName = "system.cpu.time"
+ SystemCPUTimeUnit = "s"
+ SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
+
+ // SystemCPUUtilization is the metric conforming to the
+ // "system.cpu.utilization" semantic conventions. It represents the difference
+ // in system.cpu.time since the last measurement, divided by the elapsed time
+ // and number of logical CPUs.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ SystemCPUUtilizationName = "system.cpu.utilization"
+ SystemCPUUtilizationUnit = "1"
+ SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
+
+ // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
+ // semantic conventions. It reports the current frequency of the
+ // CPU in Hz.
+ // Instrument: gauge
+ // Unit: {Hz}
+ // Stability: Experimental
+ SystemCPUFrequencyName = "system.cpu.frequency"
+ SystemCPUFrequencyUnit = "{Hz}"
+ SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
+
+ // SystemCPUPhysicalCount is the metric conforming to the
+ // "system.cpu.physical.count" semantic conventions. It reports
+ // the number of actual physical processor cores on the hardware.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPUPhysicalCountName = "system.cpu.physical.count"
+ SystemCPUPhysicalCountUnit = "{cpu}"
+ SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
+
+ // SystemCPULogicalCount is the metric conforming to the
+ // "system.cpu.logical.count" semantic conventions. It reports
+ // the number of logical (virtual) processor cores created by the operating
+ // system to manage multitasking.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPULogicalCountName = "system.cpu.logical.count"
+ SystemCPULogicalCountUnit = "{cpu}"
+ SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
+
+ // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
+ // semantic conventions. It reports memory in use by state.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryUsageName = "system.memory.usage"
+ SystemMemoryUsageUnit = "By"
+ SystemMemoryUsageDescription = "Reports memory in use by state."
+
+ // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
+ // semantic conventions. It represents the total memory available in the
+ // system.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryLimitName = "system.memory.limit"
+ SystemMemoryLimitUnit = "By"
+ SystemMemoryLimitDescription = "Total memory available in the system."
+
+ // SystemMemoryShared is the metric conforming to the "system.memory.shared"
+ // semantic conventions. It represents the shared memory used (mostly by
+ // tmpfs).
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemorySharedName = "system.memory.shared"
+ SystemMemorySharedUnit = "By"
+ SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)."
+
+ // SystemMemoryUtilization is the metric conforming to the
+ // "system.memory.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemMemoryUtilizationName = "system.memory.utilization"
+ SystemMemoryUtilizationUnit = "1"
+
+ // SystemPagingUsage is the metric conforming to the "system.paging.usage"
+ // semantic conventions. It represents the Unix swap or Windows pagefile usage.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemPagingUsageName = "system.paging.usage"
+ SystemPagingUsageUnit = "By"
+ SystemPagingUsageDescription = "Unix swap or Windows pagefile usage"
+
+ // SystemPagingUtilization is the metric conforming to the
+ // "system.paging.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingUtilizationName = "system.paging.utilization"
+ SystemPagingUtilizationUnit = "1"
+
+ // SystemPagingFaults is the metric conforming to the "system.paging.faults"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingFaultsName = "system.paging.faults"
+ SystemPagingFaultsUnit = "{fault}"
+
+ // SystemPagingOperations is the metric conforming to the
+ // "system.paging.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingOperationsName = "system.paging.operations"
+ SystemPagingOperationsUnit = "{operation}"
+
+ // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
+ // conventions.
+ // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskIoName = "system.disk.io" + SystemDiskIoUnit = "By" + + // SystemDiskOperations is the metric conforming to the + // "system.disk.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskOperationsName = "system.disk.operations" + SystemDiskOperationsUnit = "{operation}" + + // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" + // semantic conventions. It represents the time disk spent activated. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskIoTimeName = "system.disk.io_time" + SystemDiskIoTimeUnit = "s" + SystemDiskIoTimeDescription = "Time disk spent activated" + + // SystemDiskOperationTime is the metric conforming to the + // "system.disk.operation_time" semantic conventions. It represents the sum of + // the time each operation took to complete. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkConnectionsName = "system.network.connections" + SystemNetworkConnectionsUnit = "{connection}" + + // SystemProcessCount is the metric conforming to the "system.process.count" + // semantic conventions. It represents the total number of processes in each + // state. + // Instrument: updowncounter + // Unit: {process} + // Stability: Experimental + SystemProcessCountName = "system.process.count" + SystemProcessCountUnit = "{process}" + SystemProcessCountDescription = "Total number of processes in each state" + + // SystemProcessCreated is the metric conforming to the + // "system.process.created" semantic conventions. It represents the total + // number of processes created over uptime of the host. + // Instrument: counter + // Unit: {process} + // Stability: Experimental + SystemProcessCreatedName = "system.process.created" + SystemProcessCreatedUnit = "{process}" + SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" + + // SystemLinuxMemoryAvailable is the metric conforming to the + // "system.linux.memory.available" semantic conventions. It represents an + // estimate of how much memory is available for starting new applications, + // without causing swapping. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemLinuxMemoryAvailableName = "system.linux.memory.available" + SystemLinuxMemoryAvailableUnit = "By" + SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go new file mode 100644 index 0000000000..4c87c7adcc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare a
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go
index caf7249de8..6836c65478 100644
--- a/vendor/go.opentelemetry.io/otel/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package otel // import "go.opentelemetry.io/otel"

diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md
new file mode 100644
index 0000000000..58ccaba69b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/README.md
@@ -0,0 +1,3 @@
+# Trace API
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace)
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
index 3aadc66cf7..273d58e001 100644
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package trace // import "go.opentelemetry.io/otel/trace"

diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go
index 76f9a083c4..5650a174b4 100644
--- a/vendor/go.opentelemetry.io/otel/trace/context.go
+++ b/vendor/go.opentelemetry.io/otel/trace/context.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -47,12 +36,12 @@ func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) conte // performs no operations is returned. func SpanFromContext(ctx context.Context) Span { if ctx == nil { - return noopSpan{} + return noopSpanInstance } if span, ok := ctx.Value(currentSpanKey).(Span); ok { return span } - return noopSpan{} + return noopSpanInstance } // SpanContextFromContext returns the current Span's SpanContext. diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index 440f3d7565..d661c5d100 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package trace provides an implementation of the tracing part of the diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md new file mode 100644 index 0000000000..7754a239ee --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md @@ -0,0 +1,3 @@ +# Trace Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded) diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go index 898db5a754..3e359a00bf 100644 --- a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package embedded provides interfaces embedded within the [OpenTelemetry // trace API]. diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go index 88fcb81611..c00221e7be 100644 --- a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index c125491cae..ca20e9997a 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -52,7 +41,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption span := SpanFromContext(ctx) if _, ok := span.(nonRecordingSpan); !ok { // span is likely already a noopSpan, but let's be sure - span = noopSpan{} + span = noopSpanInstance } return ContextWithSpan(ctx, span), span } @@ -60,7 +49,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption // noopSpan is an implementation of Span that performs no operations. type noopSpan struct{ embedded.Span } -var _ Span = noopSpan{} +var noopSpanInstance Span = noopSpan{} // SpanContext returns an empty span context. func (noopSpan) SpanContext() SpanContext { return SpanContext{} } @@ -86,6 +75,9 @@ func (noopSpan) RecordError(error, ...EventOption) {} // AddEvent does nothing. func (noopSpan) AddEvent(string, ...EventOption) {} +// AddLink does nothing. +func (noopSpan) AddLink(Link) {} + // SetName does nothing. func (noopSpan) SetName(string) {} diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/README.md b/vendor/go.opentelemetry.io/otel/trace/noop/README.md new file mode 100644 index 0000000000..cd382c82a1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop/README.md @@ -0,0 +1,3 @@ +# Trace Noop + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/noop) diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go index 7f485543c4..64a4f1b362 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package noop provides an implementation of the OpenTelemetry trace API that // produces no telemetry and minimizes used computation resources. @@ -78,11 +67,13 @@ func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) span = Span{sc: sc} } else { // No parent, return a No-Op span with an empty span context. - span = Span{} + span = noopSpanInstance } return trace.ContextWithSpan(ctx, span), span } +var noopSpanInstance trace.Span = Span{} + // Span is an OpenTelemetry No-Op Span. type Span struct { embedded.Span @@ -111,6 +102,9 @@ func (Span) RecordError(error, ...trace.EventOption) {} // AddEvent does nothing. func (Span) AddEvent(string, ...trace.EventOption) {} +// AddLink does nothing. +func (Span) AddLink(trace.Link) {} + // SetName does nothing. func (Span) SetName(string) {} diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 0000000000..ef85cb70c6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). 
Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 0000000000..d3aa476ee1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. 
If this
+	// span is not being recorded or err is nil then this method does nothing.
+	RecordError(err error, options ...EventOption)
+
+	// SpanContext returns the SpanContext of the Span. The returned SpanContext
+	// is usable even after the End method has been called for the Span.
+	SpanContext() SpanContext
+
+	// SetStatus sets the status of the Span in the form of a code and a
+	// description, provided the status hasn't already been set to a higher
+	// value before (OK > Error > Unset). The description is only included in a
+	// status when the code is for an error.
+	SetStatus(code codes.Code, description string)
+
+	// SetName sets the Span name.
+	SetName(name string)
+
+	// SetAttributes sets kv as attributes of the Span. If a key from kv
+	// already exists for an attribute of the Span it will be overwritten with
+	// the value contained in kv.
+	SetAttributes(kv ...attribute.KeyValue)
+
+	// TracerProvider returns a TracerProvider that can be used to generate
+	// additional Spans on the same telemetry pipeline as the current Span.
+	TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+//  1. Batch Processing: A batch of operations may contain operations
+//     associated with one or more traces/spans. Since there can only be one
+//     parent SpanContext, a Link is used to keep reference to the
+//     SpanContext of all operations in the batch.
+//  2. Public Endpoint: A SpanContext for an incoming client request on a
+//     public endpoint should be considered untrusted. In such a case, a new
+//     trace with its own identity and sampling decision needs to be created,
+//     but this new trace needs to be related to the original trace in some
+//     form. A Link is used to keep reference to the original SpanContext and
+//     track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided
+// ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+	return Link{
+		SpanContext: SpanContextFromContext(ctx),
+		Attributes:  attrs,
+	}
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
+	// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+	// if it is received.
+	SpanKindUnspecified SpanKind = 0
+	// SpanKindInternal is a SpanKind for a Span that represents an internal
+	// operation within an application.
+	SpanKindInternal SpanKind = 1
+	// SpanKindServer is a SpanKind for a Span that represents the operation
+	// of handling a request from a client.
+	SpanKindServer SpanKind = 2
+	// SpanKindClient is a SpanKind for a Span that represents the operation
+	// of a client making a request to a server.
+ SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. +func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. +func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 26a4b2260e..d49adf671b 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -1,28 +1,12 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" import ( "bytes" - "context" "encoding/hex" "encoding/json" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -337,241 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { Remote: sc.remote, }) } - -// Span is the individual component of a trace. It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Span interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. 
See the "API Implementations" - // section of the package documentation for more information. - embedded.Span - - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. 
-type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. 
- // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. - Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. -// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. 
- Tracer(name string, options ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 0000000000..77952d2a0b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. + Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index db936ba5b7..dc5e34cad0 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -271,6 +260,16 @@ func (ts TraceState) Get(key string) string { return "" } +// Walk walks all key value pairs in the TraceState by calling f +// Iteration stops if f returns false. +func (ts TraceState) Walk(f func(key, value string) bool) { + for _, m := range ts.list { + if !f(m.Key, m.Value) { + break + } + } +} + // Insert adds a new list-member defined by the key/value pair to the // TraceState. If a list-member already exists for the given key, that // list-member's value is updated. 
The new or updated list-member is always
diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh
index dbb61a4227..e57bf57fce 100644
--- a/vendor/go.opentelemetry.io/otel/verify_examples.sh
+++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh
@@ -1,18 +1,7 @@
 #!/bin/bash
 # Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-License-Identifier: Apache-2.0

 set -euo pipefail
diff --git a/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/vendor/go.opentelemetry.io/otel/verify_readmes.sh
new file mode 100644
index 0000000000..1e87855eea
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_readmes.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort)
+
+missingReadme=false
+for dir in $dirs; do
+  if [ ! -f "$dir/README.md" ]; then
+    echo "couldn't find README.md for $dir"
+    missingReadme=true
+  fi
+done
+
+if [ "$missingReadme" = true ] ; then
+  echo "Error: some READMEs couldn't be found."
+  exit 1
+fi
diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 0000000000..c9b7cdbbfe
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Checkout the previous version on the base branch of the changelog to tmpfolder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+  echo "Error: The released sections of the changelog file have been modified."
+  diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+  rm -rf "$TEMP_DIR"
+  false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
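Taken together, the otel/trace hunks above change more than license headers: SpanFromContext and the no-op tracers now return a single shared noopSpanInstance rather than allocating a fresh noopSpan on every call, Span gains an AddLink method, and TraceState gains Walk. A minimal caller-side sketch of the new surface (illustrative only, not part of this patch):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/noop"
)

func main() {
	// A context without a span now yields the shared no-op singleton,
	// so hot paths no longer allocate a noopSpan per lookup.
	span := trace.SpanFromContext(context.Background())
	fmt.Println("recording:", span.IsRecording()) // false

	// AddLink is new in this release; on a no-op span it does nothing.
	span.AddLink(trace.Link{SpanContext: span.SpanContext()})

	// The explicit no-op provider exercises the same code path.
	tracer := noop.NewTracerProvider().Tracer("example")
	ctx, sp := tracer.Start(context.Background(), "op")
	defer sp.End()
	_ = ctx

	// TraceState.Walk visits list-members in order until f returns false.
	if ts, err := trace.ParseTraceState("k1=v1,k2=v2"); err == nil {
		ts.Walk(func(key, value string) bool {
			fmt.Printf("%s=%s\n", key, value)
			return true
		})
	}
}
```

Per the AddLink doc comment above, links whose contexts are already known at creation time should still be passed via WithLinks at Start, since head samplers only see what is present when the span is created.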
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 7b2993a1fe..f67039ed1f 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -1,20 +1,9 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.24.0" + return "1.29.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 1b556e6782..3ba611d713 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -1,20 +1,9 @@ # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# SPDX-License-Identifier: Apache-2.0 module-sets: stable-v1: - version: v1.24.0 + version: v1.29.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -40,16 +29,20 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.46.0 + version: v0.51.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.0.1-alpha + version: v0.5.0 modules: - go.opentelemetry.io/otel/log + - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.7 + version: v0.0.8 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/go.step.sm/crypto/internal/utils/io.go b/vendor/go.step.sm/crypto/internal/utils/io.go index ba85976a45..ccccf5f94f 100644 --- a/vendor/go.step.sm/crypto/internal/utils/io.go +++ b/vendor/go.step.sm/crypto/internal/utils/io.go @@ -2,10 +2,13 @@ package utils import ( "bytes" + "io" "os" "unicode" "github.com/pkg/errors" + + "go.step.sm/crypto/internal/utils/utfbom" ) func maybeUnwrap(err error) error { @@ -15,15 +18,32 @@ func maybeUnwrap(err error) error { return err } -// ReadFile reads the file named by filename and returns the contents. -// -// It wraps os.ReadFile wrapping the errors. 
-func ReadFile(filename string) ([]byte, error) { - b, err := os.ReadFile(filename) +// stdinFilename is the name of the file that is used in many command +// line utilities to denote input is to be read from STDIN. +const stdinFilename = "-" + +// stdin points to STDIN through os.Stdin. +var stdin = os.Stdin + +// ReadFile reads the file identified by filename and returns +// the contents. If filename is equal to "-", it will read from +// STDIN. +func ReadFile(filename string) (b []byte, err error) { + if filename == stdinFilename { + filename = "/dev/stdin" + b, err = io.ReadAll(stdin) + } else { + var contents []byte + contents, err = os.ReadFile(filename) + if err != nil { + return nil, errors.Wrapf(maybeUnwrap(err), "error reading %q", filename) + } + b, err = io.ReadAll(utfbom.SkipOnly(bytes.NewReader(contents))) + } if err != nil { - return nil, errors.Wrapf(maybeUnwrap(err), "error reading %s", filename) + return nil, errors.Wrapf(maybeUnwrap(err), "error reading %q", filename) } - return b, nil + return } // ReadPasswordFromFile reads and returns the password from the given filename. diff --git a/vendor/go.step.sm/crypto/internal/utils/utfbom/LICENSE b/vendor/go.step.sm/crypto/internal/utils/utfbom/LICENSE new file mode 100644 index 0000000000..6279cb87f4 --- /dev/null +++ b/vendor/go.step.sm/crypto/internal/utils/utfbom/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2018-2020, Dmitrij Koniajev (dimchansky@gmail.com) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
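The io.go hunk in go.step.sm/crypto above is the consumer of this newly vendored package: ReadFile now treats "-" as STDIN and strips a leading BOM from regular files before the bytes reach PEM or JSON parsers. Since internal/utils cannot be imported directly, here is a rough equivalent built on the public github.com/dimchansky/utfbom module that this copy was forked from; the readInput helper is an illustrative stand-in, not the vendored function:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"

	"github.com/dimchansky/utfbom"
)

// readInput mirrors the new ReadFile behavior: "-" selects STDIN, and a
// leading byte order mark is skipped before the contents are returned.
func readInput(filename string) ([]byte, error) {
	if filename == "-" {
		return io.ReadAll(os.Stdin)
	}
	contents, err := os.ReadFile(filename)
	if err != nil {
		return nil, fmt.Errorf("error reading %q: %w", filename, err)
	}
	return io.ReadAll(utfbom.SkipOnly(bytes.NewReader(contents)))
}

func main() {
	// A UTF-8 BOM (EF BB BF) ahead of file data is silently dropped.
	_ = os.WriteFile("bom.txt", []byte("\xEF\xBB\xBFhello"), 0o600)
	b, _ := readInput("bom.txt")
	fmt.Printf("%q\n", b) // "hello"
}
```

Note that, as in the vendored code, STDIN is read verbatim; only regular files get the BOM-skipping treatment.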
diff --git a/vendor/go.step.sm/crypto/internal/utils/utfbom/README.md b/vendor/go.step.sm/crypto/internal/utils/utfbom/README.md new file mode 100644 index 0000000000..8ece280089 --- /dev/null +++ b/vendor/go.step.sm/crypto/internal/utils/utfbom/README.md @@ -0,0 +1,66 @@ +# utfbom [![Godoc](https://godoc.org/github.com/dimchansky/utfbom?status.png)](https://godoc.org/github.com/dimchansky/utfbom) [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Build Status](https://travis-ci.org/dimchansky/utfbom.svg?branch=master)](https://travis-ci.org/dimchansky/utfbom) [![Go Report Card](https://goreportcard.com/badge/github.com/dimchansky/utfbom)](https://goreportcard.com/report/github.com/dimchansky/utfbom) [![Coverage Status](https://coveralls.io/repos/github/dimchansky/utfbom/badge.svg?branch=master)](https://coveralls.io/github/dimchansky/utfbom?branch=master) + +The package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. It can also return the encoding detected by the BOM. + +## Installation + + go get -u github.com/dimchansky/utfbom + +## Example + +```go +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + + "github.com/dimchansky/utfbom" +) + +func main() { + trySkip([]byte("\xEF\xBB\xBFhello")) + trySkip([]byte("hello")) +} + +func trySkip(byteData []byte) { + fmt.Println("Input:", byteData) + + // just skip BOM + output, err := ioutil.ReadAll(utfbom.SkipOnly(bytes.NewReader(byteData))) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("ReadAll with BOM skipping", output) + + // skip BOM and detect encoding + sr, enc := utfbom.Skip(bytes.NewReader(byteData)) + fmt.Printf("Detected encoding: %s\n", enc) + output, err = ioutil.ReadAll(sr) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("ReadAll with BOM detection and skipping", output) + fmt.Println() +} +``` + +Output: + +``` +$ go run main.go +Input: [239 187 191 104 101 108 108 111] +ReadAll with BOM skipping [104 101 108 108 111] +Detected encoding: UTF8 +ReadAll with BOM detection and skipping [104 101 108 108 111] + +Input: [104 101 108 108 111] +ReadAll with BOM skipping [104 101 108 108 111] +Detected encoding: Unknown +ReadAll with BOM detection and skipping [104 101 108 108 111] +``` + + diff --git a/vendor/go.step.sm/crypto/internal/utils/utfbom/utfbom.go b/vendor/go.step.sm/crypto/internal/utils/utfbom/utfbom.go new file mode 100644 index 0000000000..93a144fd2c --- /dev/null +++ b/vendor/go.step.sm/crypto/internal/utils/utfbom/utfbom.go @@ -0,0 +1,195 @@ +// Package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. +// It wraps an io.Reader object, creating another object (Reader) that also implements the io.Reader +// interface but provides automatic BOM checking and removing as necessary. +// +// This package was copied from https://github.com/dimchansky/utfbom. Only minor changes +// were made to not depend on the io/ioutil package and to make our linters pass. +package utfbom + +import ( + "errors" + "io" +) + +// Encoding is type alias for detected UTF encoding. +type Encoding int + +// Constants to identify detected UTF encodings. 
+const ( + // Unknown encoding, returned when no BOM was detected + Unknown Encoding = iota + + // UTF8, BOM bytes: EF BB BF + UTF8 + + // UTF-16, big-endian, BOM bytes: FE FF + UTF16BigEndian + + // UTF-16, little-endian, BOM bytes: FF FE + UTF16LittleEndian + + // UTF-32, big-endian, BOM bytes: 00 00 FE FF + UTF32BigEndian + + // UTF-32, little-endian, BOM bytes: FF FE 00 00 + UTF32LittleEndian +) + +// String returns a user-friendly string representation of the encoding. Satisfies fmt.Stringer interface. +func (e Encoding) String() string { + switch e { + case UTF8: + return "UTF8" + case UTF16BigEndian: + return "UTF16BigEndian" + case UTF16LittleEndian: + return "UTF16LittleEndian" + case UTF32BigEndian: + return "UTF32BigEndian" + case UTF32LittleEndian: + return "UTF32LittleEndian" + default: + return "Unknown" + } +} + +const maxConsecutiveEmptyReads = 100 + +// Skip creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +// It also returns the encoding detected by the BOM. +// If the detected encoding is not needed, you can call the SkipOnly function. +func Skip(rd io.Reader) (*Reader, Encoding) { + // Is it already a Reader? + b, ok := rd.(*Reader) + if ok { + return b, Unknown + } + + enc, left, err := detectUtf(rd) + return &Reader{ + rd: rd, + buf: left, + err: err, + }, enc +} + +// SkipOnly creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +func SkipOnly(rd io.Reader) *Reader { + r, _ := Skip(rd) + return r +} + +// Reader implements automatic BOM (Unicode Byte Order Mark) checking and +// removing as necessary for an io.Reader object. +type Reader struct { + rd io.Reader // reader provided by the client + buf []byte // buffered data + err error // last error +} + +// Read is an implementation of io.Reader interface. +// The bytes are taken from the underlying Reader, but it checks for BOMs, removing them as necessary. 
+func (r *Reader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + + if r.buf == nil { + if r.err != nil { + return 0, r.readErr() + } + + return r.rd.Read(p) + } + + // copy as much as we can + n = copy(p, r.buf) + r.buf = nilIfEmpty(r.buf[n:]) + return n, nil +} + +func (r *Reader) readErr() error { + err := r.err + r.err = nil + return err +} + +var errNegativeRead = errors.New("utfbom: reader returned negative count from Read") + +func detectUtf(rd io.Reader) (enc Encoding, buf []byte, err error) { + buf, err = readBOM(rd) + + if len(buf) >= 4 { + if isUTF32BigEndianBOM4(buf) { + return UTF32BigEndian, nilIfEmpty(buf[4:]), err + } + if isUTF32LittleEndianBOM4(buf) { + return UTF32LittleEndian, nilIfEmpty(buf[4:]), err + } + } + + if len(buf) > 2 && isUTF8BOM3(buf) { + return UTF8, nilIfEmpty(buf[3:]), err + } + + if (err != nil && !errors.Is(err, io.EOF)) || (len(buf) < 2) { + return Unknown, nilIfEmpty(buf), err + } + + if isUTF16BigEndianBOM2(buf) { + return UTF16BigEndian, nilIfEmpty(buf[2:]), err + } + if isUTF16LittleEndianBOM2(buf) { + return UTF16LittleEndian, nilIfEmpty(buf[2:]), err + } + + return Unknown, nilIfEmpty(buf), err +} + +func readBOM(rd io.Reader) (buf []byte, err error) { + const maxBOMSize = 4 + var bom [maxBOMSize]byte // used to read BOM + + // read as many bytes as possible + for nEmpty, n := 0, 0; err == nil && len(buf) < maxBOMSize; buf = bom[:len(buf)+n] { //nolint:wastedassign // copied code + if n, err = rd.Read(bom[len(buf):]); n < 0 { + return nil, errNegativeRead + } + if n > 0 { + nEmpty = 0 + } else { + nEmpty++ + if nEmpty >= maxConsecutiveEmptyReads { + err = io.ErrNoProgress + } + } + } + return +} + +func isUTF32BigEndianBOM4(buf []byte) bool { + return buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0xFE && buf[3] == 0xFF +} + +func isUTF32LittleEndianBOM4(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE && buf[2] == 0x00 && buf[3] == 0x00 +} + +func isUTF8BOM3(buf []byte) bool { + return buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 0xBF +} + +func isUTF16BigEndianBOM2(buf []byte) bool { + return buf[0] == 0xFE && buf[1] == 0xFF +} + +func isUTF16LittleEndianBOM2(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE +} + +func nilIfEmpty(buf []byte) (res []byte) { + if len(buf) > 0 { + res = buf + } + return +} diff --git a/vendor/go.step.sm/crypto/jose/parse.go b/vendor/go.step.sm/crypto/jose/parse.go index 9807af03d6..760c4f161f 100644 --- a/vendor/go.step.sm/crypto/jose/parse.go +++ b/vendor/go.step.sm/crypto/jose/parse.go @@ -267,6 +267,9 @@ func ParseX5cInsecure(tok string, roots []*x509.Certificate) (*JSONWebToken, [][ Intermediates: interPool, // A hack so we skip validity period validation. 
CurrentTime: leaf.NotAfter.Add(-1 * time.Minute), + KeyUsages: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + }, }) if err != nil { return nil, nil, errors.Wrap(err, "error verifying x5cInsecure certificate chain") diff --git a/vendor/go.step.sm/crypto/keyutil/key.go b/vendor/go.step.sm/crypto/keyutil/key.go index 76e15ae6e4..171cdf3f6e 100644 --- a/vendor/go.step.sm/crypto/keyutil/key.go +++ b/vendor/go.step.sm/crypto/keyutil/key.go @@ -219,8 +219,8 @@ func generateECKey(crv string) (crypto.Signer, error) { } func generateRSAKey(bits int) (crypto.Signer, error) { - if min := MinRSAKeyBytes * 8; !insecureMode.isSet() && bits < min { - return nil, errors.Errorf("the size of the RSA key should be at least %d bits", min) + if minBits := MinRSAKeyBytes * 8; !insecureMode.isSet() && bits < minBits { + return nil, errors.Errorf("the size of the RSA key should be at least %d bits", minBits) } key, err := rsa.GenerateKey(rand.Reader, bits) diff --git a/vendor/go.step.sm/crypto/pemutil/pem.go b/vendor/go.step.sm/crypto/pemutil/pem.go index c1d289318d..9202510d2d 100644 --- a/vendor/go.step.sm/crypto/pemutil/pem.go +++ b/vendor/go.step.sm/crypto/pemutil/pem.go @@ -5,6 +5,7 @@ package pemutil import ( "bytes" + "crypto/ecdh" "crypto/ecdsa" "crypto/ed25519" "crypto/elliptic" @@ -16,6 +17,7 @@ import ( "fmt" "math/big" "os" + "strings" "github.com/pkg/errors" "go.step.sm/crypto/internal/utils" @@ -229,149 +231,201 @@ func ParseCertificate(pemData []byte) (*x509.Certificate, error) { return nil, errors.New("error parsing certificate: no certificate found") } -// ParseCertificateBundle extracts all the certificates in the given data. -func ParseCertificateBundle(pemData []byte) ([]*x509.Certificate, error) { - var block *pem.Block - var certs []*x509.Certificate - for len(pemData) > 0 { - block, pemData = pem.Decode(pemData) - if block == nil { - return nil, errors.New("error decoding pem block") +// ParseCertificateBundle returns a list of *x509.Certificate parsed from +// the given bytes. +// +// - supports PEM and DER certificate formats +// - If a DER-formatted file is given only one certificate will be returned. +func ParseCertificateBundle(data []byte) ([]*x509.Certificate, error) { + var err error + + // PEM format + if bytes.Contains(data, PEMBlockHeader) { + var block *pem.Block + var bundle []*x509.Certificate + for len(data) > 0 { + block, data = pem.Decode(data) + if block == nil { + break + } + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + var crt *x509.Certificate + crt, err = x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, &InvalidPEMError{ + Err: err, + Type: PEMTypeCertificate, + } + } + bundle = append(bundle, crt) } - if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { - continue + if len(bundle) == 0 { + return nil, &InvalidPEMError{ + Type: PEMTypeCertificate, + } } + return bundle, nil + } - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, errors.Wrap(err, "error parsing certificate") + // DER format (binary) + crt, err := x509.ParseCertificate(data) + if err != nil { + return nil, &InvalidPEMError{ + Message: fmt.Sprintf("error parsing certificate as DER format: %v", err), + Type: PEMTypeCertificate, } - certs = append(certs, cert) - } - if len(certs) == 0 { - return nil, errors.New("error parsing certificate: no certificate found") } - return certs, nil + return []*x509.Certificate{crt}, nil } -// ParseCertificateRequest extracts the first certificate from the given pem. 
-func ParseCertificateRequest(pemData []byte) (*x509.CertificateRequest, error) { - var block *pem.Block - for len(pemData) > 0 { - block, pemData = pem.Decode(pemData) - if block == nil { - return nil, errors.New("error decoding pem block") +// ParseCertificateRequest extracts the first *x509.CertificateRequest +// from the given data. +// +// - supports PEM and DER certificate formats +// - If a DER-formatted file is given only one certificate will be returned. +func ParseCertificateRequest(data []byte) (*x509.CertificateRequest, error) { + // PEM format + if bytes.Contains(data, PEMBlockHeader) { + var block *pem.Block + for len(data) > 0 { + block, data = pem.Decode(data) + if block == nil { + break + } + if !strings.HasSuffix(block.Type, "CERTIFICATE REQUEST") { + continue + } + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return nil, &InvalidPEMError{ + Type: PEMTypeCertificateRequest, + Err: err, + } + } + + return csr, nil } - if (block.Type != "CERTIFICATE REQUEST" && block.Type != "NEW CERTIFICATE REQUEST") || - len(block.Headers) != 0 { - continue + } + + // DER format (binary) + csr, err := x509.ParseCertificateRequest(data) + if err != nil { + return nil, &InvalidPEMError{ + Message: fmt.Sprintf("error parsing certificate request as DER format: %v", err), + Type: PEMTypeCertificateRequest, } + } + return csr, nil +} - csr, err := x509.ParseCertificateRequest(block.Bytes) - if err != nil { - return nil, errors.Wrap(err, "error parsing certificate request") +// PEMType represents a PEM block type. (e.g., CERTIFICATE, CERTIFICATE REQUEST, etc.) +type PEMType int + +func (pt PEMType) String() string { + switch pt { + case PEMTypeCertificate: + return "certificate" + case PEMTypeCertificateRequest: + return "certificate request" + default: + return "undefined" + } +} + +const ( + // PEMTypeUndefined undefined + PEMTypeUndefined = iota + // PEMTypeCertificate CERTIFICATE + PEMTypeCertificate + // PEMTypeCertificateRequest CERTIFICATE REQUEST + PEMTypeCertificateRequest +) + +// InvalidPEMError represents an error that occurs when parsing a file with +// PEM encoded data. +type InvalidPEMError struct { + Type PEMType + File string + Message string + Err error +} + +func (e *InvalidPEMError) Error() string { + switch { + case e.Message != "": + return e.Message + case e.Err != nil: + return fmt.Sprintf("error decoding PEM data: %v", e.Err) + default: + if e.Type == PEMTypeUndefined { + return "does not contain valid PEM encoded data" } - return csr, nil + return fmt.Sprintf("does not contain a valid PEM encoded %s", e.Type) } +} - return nil, errors.New("error parsing certificate request: no certificate found") +func (e *InvalidPEMError) Unwrap() error { + return e.Err } // ReadCertificate returns a *x509.Certificate from the given filename. It // supports certificates formats PEM and DER. func ReadCertificate(filename string, opts ...Options) (*x509.Certificate, error) { - b, err := utils.ReadFile(filename) - if err != nil { + // Populate options + ctx := newContext(filename) + if err := ctx.apply(opts); err != nil { return nil, err } - // PEM format - if bytes.Contains(b, PEMBlockHeader) { - var crt interface{} - crt, err = Read(filename, opts...) 
- if err != nil { - return nil, err - } - switch crt := crt.(type) { - case *x509.Certificate: - return crt, nil - default: - return nil, errors.Errorf("error decoding PEM: file '%s' does not contain a certificate", filename) - } + bundle, err := ReadCertificateBundle(filename) + switch { + case err != nil: + return nil, err + case len(bundle) == 0: + return nil, errors.Errorf("file %s does not contain a valid PEM or DER formatted certificate", filename) + case len(bundle) > 1 && !ctx.firstBlock: + return nil, errors.Errorf("error decoding %s: contains more than one PEM encoded block", filename) + default: + return bundle[0], nil } - - // DER format (binary) - crt, err := x509.ParseCertificate(b) - return crt, errors.Wrapf(err, "error parsing %s", filename) } -// ReadCertificateBundle returns a list of *x509.Certificate from the given -// filename. It supports certificates formats PEM and DER. If a DER-formatted -// file is given only one certificate will be returned. +// ReadCertificateBundle reads the given filename and returns a list of +// *x509.Certificate. +// +// - supports PEM and DER certificate formats +// - If a DER-formatted file is given only one certificate will be returned. func ReadCertificateBundle(filename string) ([]*x509.Certificate, error) { b, err := utils.ReadFile(filename) if err != nil { return nil, err } - // PEM format - if bytes.Contains(b, PEMBlockHeader) { - var block *pem.Block - var bundle []*x509.Certificate - for len(b) > 0 { - block, b = pem.Decode(b) - if block == nil { - break - } - if block.Type != "CERTIFICATE" { - return nil, errors.Errorf("error decoding PEM: file '%s' is not a certificate bundle", filename) - } - var crt *x509.Certificate - crt, err = x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, errors.Wrapf(err, "error parsing %s", filename) - } - bundle = append(bundle, crt) - } - if len(b) > 0 { - return nil, errors.Errorf("error decoding PEM: file '%s' contains unexpected data", filename) - } - return bundle, nil - } - - // DER format (binary) - crt, err := x509.ParseCertificate(b) + bundle, err := ParseCertificateBundle(b) if err != nil { - return nil, errors.Wrapf(err, "error parsing %s", filename) + return nil, fmt.Errorf("error parsing %s: %w", filename, err) } - return []*x509.Certificate{crt}, nil + return bundle, nil } -// ReadCertificateRequest returns a *x509.CertificateRequest from the given -// filename. It supports certificates formats PEM and DER. +// ReadCertificateRequest reads the given filename and returns a +// *x509.CertificateRequest. +// +// - supports PEM and DER Certificate formats. +// - supports reading from STDIN with filename `-`. func ReadCertificateRequest(filename string) (*x509.CertificateRequest, error) { b, err := utils.ReadFile(filename) if err != nil { return nil, err } - // PEM format - if bytes.Contains(b, PEMBlockHeader) { - csr, err := Parse(b, WithFilename(filename)) - if err != nil { - return nil, err - } - switch csr := csr.(type) { - case *x509.CertificateRequest: - return csr, nil - default: - return nil, errors.Errorf("error decoding PEM: file '%s' does not contain a certificate request", filename) - } + cr, err := ParseCertificateRequest(b) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %w", filename, err) } - - // DER format (binary) - csr, err := x509.ParseCertificateRequest(b) - return csr, errors.Wrapf(err, "error parsing %s", filename) + return cr, nil } // Parse returns the key or certificate PEM-encoded in the given bytes. 
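The reworked pemutil helpers above accept both PEM and DER input and surface the typed *InvalidPEMError instead of ad-hoc error strings. A minimal usage sketch follows (illustrative only, not part of the vendored code; the bundle path "ca-bundle.pem" is a hypothetical example). Because ReadCertificateBundle wraps parse failures with %w, errors.As can recover the typed error:

package main

import (
	"errors"
	"fmt"
	"log"

	"go.step.sm/crypto/pemutil"
)

func main() {
	// PEM or DER both work; a DER file yields a single-element bundle.
	bundle, err := pemutil.ReadCertificateBundle("ca-bundle.pem") // hypothetical path
	if err != nil {
		// Parse errors are wrapped, so errors.As reaches the typed error.
		var pemErr *pemutil.InvalidPEMError
		if errors.As(err, &pemErr) {
			log.Fatalf("input is not a valid %s: %v", pemErr.Type, err)
		}
		log.Fatal(err)
	}
	for _, crt := range bundle {
		fmt.Println(crt.Subject)
	}
}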
@@ -586,7 +640,6 @@ func Serialize(in interface{}, opts ...Options) (*pem.Block, error) { } } else { var err error - //nolint:staticcheck // required for legacy compatibility p, err = x509.EncryptPEMBlock(rand.Reader, p.Type, p.Bytes, password, DefaultEncCipher) if err != nil { return nil, errors.Wrap(err, "failed to serialize to PEM") @@ -673,24 +726,48 @@ func ParseSSH(b []byte) (interface{}, error) { return nil, errors.Wrap(err, "error unmarshaling key") } - key := new(ecdsa.PublicKey) + var c ecdh.Curve switch w.Name { case ssh.KeyAlgoECDSA256: - key.Curve = elliptic.P256() + c = ecdh.P256() case ssh.KeyAlgoECDSA384: - key.Curve = elliptic.P384() + c = ecdh.P384() case ssh.KeyAlgoECDSA521: - key.Curve = elliptic.P521() + c = ecdh.P521() default: return nil, errors.Errorf("unsupported ecdsa curve %s", w.Name) } - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, errors.New("invalid ecdsa curve point") + var p *ecdh.PublicKey + if p, err = c.NewPublicKey(w.KeyBytes); err != nil { + return nil, errors.Wrapf(err, "failed decoding %s key", w.Name) + } + + // convert ECDH public key to ECDSA public key to keep + // the returned type backwards compatible. + rawKey := p.Bytes() + switch p.Curve() { + case ecdh.P256(): + return &ecdsa.PublicKey{ + Curve: elliptic.P256(), + X: big.NewInt(0).SetBytes(rawKey[1:33]), + Y: big.NewInt(0).SetBytes(rawKey[33:]), + }, nil + case ecdh.P384(): + return &ecdsa.PublicKey{ + Curve: elliptic.P384(), + X: big.NewInt(0).SetBytes(rawKey[1:49]), + Y: big.NewInt(0).SetBytes(rawKey[49:]), + }, nil + case ecdh.P521(): + return &ecdsa.PublicKey{ + Curve: elliptic.P521(), + X: big.NewInt(0).SetBytes(rawKey[1:67]), + Y: big.NewInt(0).SetBytes(rawKey[67:]), + }, nil + default: + return nil, errors.New("cannot convert non-NIST *ecdh.PublicKey to *ecdsa.PublicKey") } - return key, nil - case ssh.KeyAlgoED25519: var w struct { Name string @@ -700,10 +777,8 @@ func ParseSSH(b []byte) (interface{}, error) { return nil, errors.Wrap(err, "error unmarshaling key") } return ed25519.PublicKey(w.KeyBytes), nil - case ssh.KeyAlgoDSA: - return nil, errors.Errorf("step does not support DSA keys") - + return nil, errors.Errorf("DSA keys not supported") default: return nil, errors.Errorf("unsupported key type %T", key) } diff --git a/vendor/go.step.sm/crypto/pemutil/pkcs8.go b/vendor/go.step.sm/crypto/pemutil/pkcs8.go index 35d30db93d..fb6c96c295 100644 --- a/vendor/go.step.sm/crypto/pemutil/pkcs8.go +++ b/vendor/go.step.sm/crypto/pemutil/pkcs8.go @@ -174,7 +174,6 @@ func (c rfc1423Algo) deriveKey(password, salt []byte, h func() hash.Hash) []byte // key derived using PBKDF2 over the given password. 
func DecryptPEMBlock(block *pem.Block, password []byte) ([]byte, error) { if block.Headers["Proc-Type"] == "4,ENCRYPTED" { - //nolint:staticcheck // required for legacy compatibility return x509.DecryptPEMBlock(block, password) } diff --git a/vendor/go.step.sm/crypto/pemutil/ssh.go b/vendor/go.step.sm/crypto/pemutil/ssh.go index e31258e1d4..00698dae19 100644 --- a/vendor/go.step.sm/crypto/pemutil/ssh.go +++ b/vendor/go.step.sm/crypto/pemutil/ssh.go @@ -10,7 +10,6 @@ import ( "crypto/cipher" "crypto/ecdsa" "crypto/ed25519" - "crypto/elliptic" "crypto/rand" "crypto/rsa" "encoding/binary" @@ -188,7 +187,10 @@ func SerializeOpenSSHPrivateKey(key crypto.PrivateKey, opts ...Options) (*pem.Bl return nil, errors.Errorf("error serializing key: unsupported curve %s", k.Curve.Params().Name) } - pub := elliptic.Marshal(k.Curve, k.PublicKey.X, k.PublicKey.Y) + p, err := k.PublicKey.ECDH() + if err != nil { + return nil, errors.Wrapf(err, "failed converting *ecdsa.PublicKey to *ecdh.PublicKey") + } // Marshal public key. pubKey := struct { @@ -196,7 +198,7 @@ func SerializeOpenSSHPrivateKey(key crypto.PrivateKey, opts ...Options) (*pem.Bl Curve string Pub []byte }{ - keyType, curve, pub, + keyType, curve, p.Bytes(), } w.PubKey = ssh.Marshal(pubKey) @@ -207,7 +209,7 @@ func SerializeOpenSSHPrivateKey(key crypto.PrivateKey, opts ...Options) (*pem.Bl D *big.Int Comment string }{ - curve, pub, k.D, + curve, p.Bytes(), k.D, ctx.comment, } pk1.Keytype = keyType diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 0000000000..37dc0cfdb5 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,71 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" +) + +// Do sends an HTTP request with the provided http.Client and returns +// an HTTP response. +// +// If the client is nil, http.DefaultClient is used. +// +// The provided ctx must be non-nil. If it is canceled or times out, +// ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err != nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + } + return resp, err +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. 
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/oauth2/LICENSE +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb332174..109997d77c 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. raw interface{} diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/time/LICENSE +++ b/vendor/golang.org/x/time/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
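The oauth2 hunk above only adds the raw wire-format field; per its doc comment, deriving Expiry from ExpiresIn is left to the application. A minimal sketch of that conversion (illustrative, not part of the vendored code; the helper name and the "received" timestamp are assumptions, since the field's time base is only "approximately around now"):

package tokenutil

import (
	"time"

	"golang.org/x/oauth2"
)

// withExpiry populates Expiry from the wire-format ExpiresIn, using the
// moment the token response was received as a best-effort time base.
func withExpiry(tok *oauth2.Token, received time.Time) *oauth2.Token {
	if tok.ExpiresIn > 0 && tok.Expiry.IsZero() {
		tok.Expiry = received.Add(time.Duration(tok.ExpiresIn) * time.Second)
	}
	return tok
}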
diff --git a/vendor/google.golang.org/api/idtoken/idtoken.go b/vendor/google.golang.org/api/idtoken/idtoken.go index bd29a95a3c..477164f904 100644 --- a/vendor/google.golang.org/api/idtoken/idtoken.go +++ b/vendor/google.golang.org/api/idtoken/idtoken.go @@ -16,6 +16,8 @@ import ( "golang.org/x/oauth2" "golang.org/x/oauth2/google" + newidtoken "cloud.google.com/go/auth/credentials/idtoken" + "cloud.google.com/go/auth/oauth2adapt" "google.golang.org/api/impersonate" "google.golang.org/api/internal" "google.golang.org/api/option" @@ -95,9 +97,29 @@ func NewTokenSource(ctx context.Context, audience string, opts ...ClientOption) if ds.ImpersonationConfig != nil { return nil, fmt.Errorf("idtoken: option.WithImpersonatedCredentials not supported") } + if ds.IsNewAuthLibraryEnabled() { + return newTokenSourceNewAuth(ctx, audience, &ds) + } return newTokenSource(ctx, audience, &ds) } +func newTokenSourceNewAuth(ctx context.Context, audience string, ds *internal.DialSettings) (oauth2.TokenSource, error) { + if ds.AuthCredentials != nil { + return nil, fmt.Errorf("idtoken: option.WithTokenProvider not supported") + } + creds, err := newidtoken.NewCredentials(&newidtoken.Options{ + Audience: audience, + CustomClaims: ds.CustomClaims, + CredentialsFile: ds.CredentialsFile, + CredentialsJSON: ds.CredentialsJSON, + Client: oauth2.NewClient(ctx, nil), + }) + if err != nil { + return nil, err + } + return oauth2adapt.TokenSourceFromTokenProvider(creds), nil +} + func newTokenSource(ctx context.Context, audience string, ds *internal.DialSettings) (oauth2.TokenSource, error) { creds, err := internal.Creds(ctx, ds) if err != nil { diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index b648930985..4ebeb61c1a 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -15,6 +15,8 @@ import ( "os" "time" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/oauth2adapt" "golang.org/x/oauth2" "google.golang.org/api/internal/cert" "google.golang.org/api/internal/impersonate" @@ -27,6 +29,9 @@ const quotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" // Creds returns credential information obtained from DialSettings, or if none, then // it returns default credential information. func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { + if ds.IsNewAuthLibraryEnabled() { + return credsNewAuth(ctx, ds) + } creds, err := baseCreds(ctx, ds) if err != nil { return nil, err @@ -37,6 +42,78 @@ func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { return creds, nil } +// GetOAuth2Configuration determines configurations for the OAuth2 transport, which is separate from the API transport. +// The OAuth2 transport and endpoint will be configured for mTLS if applicable. 
+func GetOAuth2Configuration(ctx context.Context, settings *DialSettings) (string, *http.Client, error) {
+	clientCertSource, err := getClientCertificateSource(settings)
+	if err != nil {
+		return "", nil, err
+	}
+	tokenURL := oAuth2Endpoint(clientCertSource)
+	var oauth2Client *http.Client
+	if clientCertSource != nil {
+		tlsConfig := &tls.Config{
+			GetClientCertificate: clientCertSource,
+		}
+		oauth2Client = customHTTPClient(tlsConfig)
+	} else {
+		oauth2Client = oauth2.NewClient(ctx, nil)
+	}
+	return tokenURL, oauth2Client, nil
+}
+
+func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credentials, error) {
+	// Preserve old options behavior
+	if settings.InternalCredentials != nil {
+		return settings.InternalCredentials, nil
+	} else if settings.Credentials != nil {
+		return settings.Credentials, nil
+	} else if settings.TokenSource != nil {
+		return &google.Credentials{TokenSource: settings.TokenSource}, nil
+	}
+
+	if settings.AuthCredentials != nil {
+		return oauth2adapt.Oauth2CredentialsFromAuthCredentials(settings.AuthCredentials), nil
+	}
+
+	var useSelfSignedJWT bool
+	var aud string
+	var scopes []string
+	// If scoped JWTs are enabled or the user provided an aud, allow self-signed JWT.
+	if settings.EnableJwtWithScope || len(settings.Audiences) > 0 {
+		useSelfSignedJWT = true
+	}
+
+	if len(settings.Scopes) > 0 {
+		scopes = make([]string, len(settings.Scopes))
+		copy(scopes, settings.Scopes)
+	}
+	if len(settings.Audiences) > 0 {
+		aud = settings.Audiences[0]
+	}
+	// Only default scopes if user did not also set an audience.
+	if len(settings.Scopes) == 0 && aud == "" && len(settings.DefaultScopes) > 0 {
+		scopes = make([]string, len(settings.DefaultScopes))
+		copy(scopes, settings.DefaultScopes)
+	}
+	if len(scopes) == 0 && aud == "" {
+		aud = settings.DefaultAudience
+	}
+
+	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
+		Scopes:           scopes,
+		Audience:         aud,
+		CredentialsFile:  settings.CredentialsFile,
+		CredentialsJSON:  settings.CredentialsJSON,
+		UseSelfSignedJWT: useSelfSignedJWT,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return oauth2adapt.Oauth2CredentialsFromAuthCredentials(creds), nil
+}
+
 func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) {
 	if ds.InternalCredentials != nil {
 		return ds.InternalCredentials, nil
@@ -44,7 +121,7 @@ func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, erro
 	if ds.Credentials != nil {
 		return ds.Credentials, nil
 	}
-	if ds.CredentialsJSON != nil {
+	if len(ds.CredentialsJSON) > 0 {
 		return credentialsFromJSON(ctx, ds.CredentialsJSON, ds)
 	}
 	if ds.CredentialsFile != "" {
@@ -89,19 +166,12 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g
 	var params google.CredentialsParams
 	params.Scopes = ds.GetScopes()
 
-	// Determine configurations for the OAuth2 transport, which is separate from the API transport.
-	// The OAuth2 transport and endpoint will be configured for mTLS if applicable.
-	clientCertSource, err := getClientCertificateSource(ds)
+	tokenURL, oauth2Client, err := GetOAuth2Configuration(ctx, ds)
 	if err != nil {
 		return nil, err
 	}
-	params.TokenURL = oAuth2Endpoint(clientCertSource)
-	if clientCertSource != nil {
-		tlsConfig := &tls.Config{
-			GetClientCertificate: clientCertSource,
-		}
-		ctx = context.WithValue(ctx, oauth2.HTTPClient, customHTTPClient(tlsConfig))
-	}
+	params.TokenURL = tokenURL
+	ctx = context.WithValue(ctx, oauth2.HTTPClient, oauth2Client)
 
 	// By default, a standard OAuth 2.0 token source is created
 	cred, err := google.CredentialsFromJSONWithParams(ctx, data, params)
@@ -226,14 +296,3 @@ func baseTransport() *http.Transport {
 		ExpectContinueTimeout: 1 * time.Second,
 	}
 }
-
-// ErrUniverseNotMatch composes an error string from the provided universe
-// domain sources (DialSettings and Credentials, respectively).
-func ErrUniverseNotMatch(settingsUD, credsUD string) error {
-	return fmt.Errorf(
-		"the configured universe domain (%q) does not match the universe "+
-		"domain found in the credentials (%q). If you haven't configured "+
-		"WithUniverseDomain explicitly, \"googleapis.com\" is the default",
-		settingsUD,
-		credsUD)
-}
diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go
index af4a038d3e..32949cccbd 100644
--- a/vendor/google.golang.org/api/internal/settings.go
+++ b/vendor/google.golang.org/api/internal/settings.go
@@ -13,6 +13,7 @@ import (
 	"strconv"
 	"time"
 
+	"cloud.google.com/go/auth"
 	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/google"
 	"google.golang.org/api/internal/impersonate"
@@ -20,8 +21,10 @@ import (
 )
 
 const (
-	newAuthLibEnVar       = "GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB"
-	universeDomainDefault = "googleapis.com"
+	newAuthLibEnvVar        = "GOOGLE_API_GO_EXPERIMENTAL_ENABLE_NEW_AUTH_LIB"
+	newAuthLibDisabledEnVar = "GOOGLE_API_GO_EXPERIMENTAL_DISABLE_NEW_AUTH_LIB"
+	universeDomainEnvVar    = "GOOGLE_CLOUD_UNIVERSE_DOMAIN"
+	defaultUniverseDomain   = "googleapis.com"
 )
 
 // DialSettings holds information needed to establish a connection with a
@@ -56,15 +59,17 @@ type DialSettings struct {
 	ImpersonationConfig           *impersonate.Config
 	EnableDirectPath              bool
 	EnableDirectPathXds           bool
-	EnableNewAuthLibrary          bool
 	AllowNonDefaultServiceAccount bool
-	UniverseDomain                string
 	DefaultUniverseDomain         string
-
+	UniverseDomain                string
 	// Google API system parameters. For more information please read:
 	// https://cloud.google.com/apis/docs/system-parameters
 	QuotaProject  string
 	RequestReason string
+
+	// New Auth library Options
+	AuthCredentials      *auth.Credentials
+	EnableNewAuthLibrary bool
 }
 
 // GetScopes returns the user-provided scopes, if set, or else falls back to the
@@ -91,10 +96,15 @@ func (ds *DialSettings) HasCustomAudience() bool {
 
 // IsNewAuthLibraryEnabled returns true if the new auth library should be used.
 func (ds *DialSettings) IsNewAuthLibraryEnabled() bool {
+	// Disabled env is for future rollouts to make sure there is a way to easily
+	// disable this behaviour once we switch it on by default.
+ if b, err := strconv.ParseBool(os.Getenv(newAuthLibDisabledEnVar)); err == nil && b { + return false + } if ds.EnableNewAuthLibrary { return true } - if b, err := strconv.ParseBool(os.Getenv(newAuthLibEnVar)); err == nil { + if b, err := strconv.ParseBool(os.Getenv(newAuthLibEnvVar)); err == nil { return b } return false @@ -116,7 +126,7 @@ func (ds *DialSettings) Validate() error { if ds.Credentials != nil { nCreds++ } - if ds.CredentialsJSON != nil { + if len(ds.CredentialsJSON) > 0 { nCreds++ } if ds.CredentialsFile != "" { @@ -165,36 +175,36 @@ func (ds *DialSettings) Validate() error { return nil } -// GetDefaultUniverseDomain returns the default service domain for a given Cloud -// universe, as configured with internaloption.WithDefaultUniverseDomain. -// The default value is "googleapis.com". +// GetDefaultUniverseDomain returns the Google default universe domain +// ("googleapis.com"). func (ds *DialSettings) GetDefaultUniverseDomain() string { - if ds.DefaultUniverseDomain == "" { - return universeDomainDefault - } - return ds.DefaultUniverseDomain + return defaultUniverseDomain } // GetUniverseDomain returns the default service domain for a given Cloud -// universe, as configured with option.WithUniverseDomain. -// The default value is the value of GetDefaultUniverseDomain, as configured -// with internaloption.WithDefaultUniverseDomain. +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". func (ds *DialSettings) GetUniverseDomain() string { - if ds.UniverseDomain == "" { - return ds.GetDefaultUniverseDomain() + if ds.UniverseDomain != "" { + return ds.UniverseDomain + } + if envUD := os.Getenv(universeDomainEnvVar); envUD != "" { + return envUD } - return ds.UniverseDomain + return defaultUniverseDomain } // IsUniverseDomainGDU returns true if the universe domain is the default Google -// universe. +// universe ("googleapis.com"). func (ds *DialSettings) IsUniverseDomainGDU() bool { - return ds.GetUniverseDomain() == ds.GetDefaultUniverseDomain() + return ds.GetUniverseDomain() == defaultUniverseDomain } // GetUniverseDomain returns the default service domain for a given Cloud -// universe, from google.Credentials, for comparison with the value returned by -// (*DialSettings).GetUniverseDomain. This wrapper function should be removed +// universe, from google.Credentials. This wrapper function should be removed // to close https://github.com/googleapis/google-api-go-client/issues/2399. func GetUniverseDomain(creds *google.Credentials) (string, error) { timer := time.NewTimer(time.Second) @@ -215,7 +225,7 @@ func GetUniverseDomain(creds *google.Credentials) (string, error) { case <-errors: // An error that is returned before the timer expires is likely to be // connection refused. Temporarily (2024-03-21) return the GDU domain. - return universeDomainDefault, nil + return defaultUniverseDomain, nil case res := <-results: return res, nil case <-timer.C: // Timer is expired. @@ -227,6 +237,6 @@ func GetUniverseDomain(creds *google.Credentials) (string, error) { // calls to creds.GetUniverseDomain() in grpc/dial.go and http/dial.go // and remove this method to close // https://github.com/googleapis/google-api-go-client/issues/2399. 
- return universeDomainDefault, nil + return defaultUniverseDomain, nil } } diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 9c5b996bb4..36e075d21f 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.172.0" +const Version = "0.196.0" diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index c882c1eb48..23aba01b62 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -9,6 +9,7 @@ import ( "crypto/tls" "net/http" + "cloud.google.com/go/auth" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/api/internal" @@ -344,6 +345,19 @@ func WithCredentials(creds *google.Credentials) ClientOption { return (*withCreds)(creds) } +// WithAuthCredentials returns a ClientOption that specifies an +// [cloud.google.com/go/auth.Credentials] to be used as the basis for +// authentication. +func WithAuthCredentials(creds *auth.Credentials) ClientOption { + return withAuthCredentials{creds} +} + +type withAuthCredentials struct{ creds *auth.Credentials } + +func (w withAuthCredentials) Apply(o *internal.DialSettings) { + o.AuthCredentials = w.creds +} + // WithUniverseDomain returns a ClientOption that sets the universe domain. // // This is an EXPERIMENTAL API and may be changed or removed in the future. diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index c4f5e0b138..2e2b15c6e0 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -15,6 +15,10 @@ import ( "net/http" "time" + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/httptransport" + "cloud.google.com/go/auth/oauth2adapt" "go.opencensus.io/plugin/ochttp" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" @@ -43,6 +47,13 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, return settings.HTTPClient, endpoint, nil } + if settings.IsNewAuthLibraryEnabled() { + client, err := newClientNewAuth(ctx, nil, settings) + if err != nil { + return nil, "", err + } + return client, endpoint, nil + } trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource, dialTLSContext), settings) if err != nil { return nil, "", err @@ -50,6 +61,82 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, return &http.Client{Transport: trans}, endpoint, nil } +// newClientNewAuth is an adapter to call new auth library. 
+func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal.DialSettings) (*http.Client, error) {
+	// Honor options if set.
+	var creds *auth.Credentials
+	if ds.InternalCredentials != nil {
+		creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.InternalCredentials)
+	} else if ds.Credentials != nil {
+		creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.Credentials)
+	} else if ds.AuthCredentials != nil {
+		creds = ds.AuthCredentials
+	} else if ds.TokenSource != nil {
+		credOpts := &auth.CredentialsOptions{
+			TokenProvider: oauth2adapt.TokenProviderFromTokenSource(ds.TokenSource),
+		}
+		if ds.QuotaProject != "" {
+			credOpts.QuotaProjectIDProvider = auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
+				return ds.QuotaProject, nil
+			})
+		}
+		creds = auth.NewCredentials(credOpts)
+	}
+
+	var skipValidation bool
+	// If our clients explicitly set up the credential, skip validation as it is
+	// assumed correct.
+	if ds.SkipValidation || ds.InternalCredentials != nil {
+		skipValidation = true
+	}
+
+	// Defaults for older clients that don't set this value yet.
+	defaultEndpointTemplate := ds.DefaultEndpointTemplate
+	if defaultEndpointTemplate == "" {
+		defaultEndpointTemplate = ds.DefaultEndpoint
+	}
+
+	var aud string
+	if len(ds.Audiences) > 0 {
+		aud = ds.Audiences[0]
+	}
+	headers := http.Header{}
+	if ds.QuotaProject != "" {
+		headers.Set("X-goog-user-project", ds.QuotaProject)
+	}
+	if ds.RequestReason != "" {
+		headers.Set("X-goog-request-reason", ds.RequestReason)
+	}
+	client, err := httptransport.NewClient(&httptransport.Options{
+		DisableTelemetry:      ds.TelemetryDisabled,
+		DisableAuthentication: ds.NoAuth,
+		Headers:               headers,
+		Endpoint:              ds.Endpoint,
+		APIKey:                ds.APIKey,
+		Credentials:           creds,
+		ClientCertProvider:    ds.ClientCertSource,
+		BaseRoundTripper:      base,
+		DetectOpts: &credentials.DetectOptions{
+			Scopes:          ds.Scopes,
+			Audience:        aud,
+			CredentialsFile: ds.CredentialsFile,
+			CredentialsJSON: ds.CredentialsJSON,
+		},
+		InternalOptions: &httptransport.InternalOptions{
+			EnableJWTWithScope:      ds.EnableJwtWithScope,
+			DefaultAudience:         ds.DefaultAudience,
+			DefaultEndpointTemplate: defaultEndpointTemplate,
+			DefaultMTLSEndpoint:     ds.DefaultMTLSEndpoint,
+			DefaultScopes:           ds.DefaultScopes,
+			SkipValidation:          skipValidation,
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	return client, nil
+}
+
 // NewTransport creates an http.RoundTripper for use communicating with a Google
 // cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base.
 func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) {
@@ -60,6 +147,13 @@ func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.Cl
 	if settings.HTTPClient != nil {
 		return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport")
 	}
+	if settings.IsNewAuthLibraryEnabled() {
+		client, err := newClientNewAuth(ctx, base, settings)
+		if err != nil {
+			return nil, err
+		}
+		return client.Transport, nil
+	}
 	return newTransport(ctx, base, settings)
 }
 
@@ -88,17 +182,6 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna
 	if err != nil {
 		return nil, err
 	}
-	if settings.TokenSource == nil {
-		// We only validate non-tokensource creds, as TokenSource-based credentials
-		// don't propagate universe.
- credsUniverseDomain, err := internal.GetUniverseDomain(creds) - if err != nil { - return nil, err - } - if settings.GetUniverseDomain() != credsUniverseDomain { - return nil, internal.ErrUniverseNotMatch(settings.GetUniverseDomain(), credsUniverseDomain) - } - } paramTransport.quotaProject = internal.GetQuotaProject(creds, settings.QuotaProject) ts := creds.TokenSource if settings.ImpersonationConfig == nil && settings.TokenSource != nil { diff --git a/vendor/google.golang.org/genproto/googleapis/api/LICENSE b/vendor/google.golang.org/genproto/googleapis/api/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go new file mode 100644 index 0000000000..8b462f3dfe --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -0,0 +1,119 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/annotations.proto + +package annotations + +import ( + reflect "reflect" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_google_api_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*HttpRule)(nil), + Field: 72295728, + Name: "google.api.http", + Tag: "bytes,72295728,opt,name=http", + Filename: "google/api/annotations.proto", + }, +} + +// Extension fields to descriptorpb.MethodOptions. +var ( + // See `HttpRule`. 
+ // + // optional google.api.HttpRule http = 72295728; + E_Http = &file_google_api_annotations_proto_extTypes[0] +) + +var File_google_api_annotations_proto protoreflect.FileDescriptor + +var file_google_api_annotations_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x3a, 0x4b, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb0, 0xca, 0xbc, 0x22, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, + 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_google_api_annotations_proto_goTypes = []interface{}{ + (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions + (*HttpRule)(nil), // 1: google.api.HttpRule +} +var file_google_api_annotations_proto_depIdxs = []int32{ + 0, // 0: google.api.http:extendee -> google.protobuf.MethodOptions + 1, // 1: google.api.http:type_name -> google.api.HttpRule + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_annotations_proto_init() } +func file_google_api_annotations_proto_init() { + if File_google_api_annotations_proto != nil { + return + } + file_google_api_http_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_annotations_proto_goTypes, + DependencyIndexes: file_google_api_annotations_proto_depIdxs, + ExtensionInfos: file_google_api_annotations_proto_extTypes, + }.Build() + File_google_api_annotations_proto = out.File + file_google_api_annotations_proto_rawDesc = nil + file_google_api_annotations_proto_goTypes = nil + 
file_google_api_annotations_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go new file mode 100644 index 0000000000..aa69fb4d50 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -0,0 +1,1940 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/client.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + api "google.golang.org/genproto/googleapis/api" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The organization for which the client libraries are being published. +// Affects the url where generated docs are published, etc. +type ClientLibraryOrganization int32 + +const ( + // Not useful. + ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED ClientLibraryOrganization = 0 + // Google Cloud Platform Org. + ClientLibraryOrganization_CLOUD ClientLibraryOrganization = 1 + // Ads (Advertising) Org. + ClientLibraryOrganization_ADS ClientLibraryOrganization = 2 + // Photos Org. + ClientLibraryOrganization_PHOTOS ClientLibraryOrganization = 3 + // Street View Org. + ClientLibraryOrganization_STREET_VIEW ClientLibraryOrganization = 4 + // Shopping Org. + ClientLibraryOrganization_SHOPPING ClientLibraryOrganization = 5 + // Geo Org. + ClientLibraryOrganization_GEO ClientLibraryOrganization = 6 + // Generative AI - https://developers.generativeai.google + ClientLibraryOrganization_GENERATIVE_AI ClientLibraryOrganization = 7 +) + +// Enum value maps for ClientLibraryOrganization. 
+var ( + ClientLibraryOrganization_name = map[int32]string{ + 0: "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED", + 1: "CLOUD", + 2: "ADS", + 3: "PHOTOS", + 4: "STREET_VIEW", + 5: "SHOPPING", + 6: "GEO", + 7: "GENERATIVE_AI", + } + ClientLibraryOrganization_value = map[string]int32{ + "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED": 0, + "CLOUD": 1, + "ADS": 2, + "PHOTOS": 3, + "STREET_VIEW": 4, + "SHOPPING": 5, + "GEO": 6, + "GENERATIVE_AI": 7, + } +) + +func (x ClientLibraryOrganization) Enum() *ClientLibraryOrganization { + p := new(ClientLibraryOrganization) + *p = x + return p +} + +func (x ClientLibraryOrganization) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryOrganization) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[0].Descriptor() +} + +func (ClientLibraryOrganization) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[0] +} + +func (x ClientLibraryOrganization) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryOrganization.Descriptor instead. +func (ClientLibraryOrganization) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +// To where should client libraries be published? +type ClientLibraryDestination int32 + +const ( + // Client libraries will neither be generated nor published to package + // managers. + ClientLibraryDestination_CLIENT_LIBRARY_DESTINATION_UNSPECIFIED ClientLibraryDestination = 0 + // Generate the client library in a repo under github.com/googleapis, + // but don't publish it to package managers. + ClientLibraryDestination_GITHUB ClientLibraryDestination = 10 + // Publish the library to package managers like nuget.org and npmjs.com. + ClientLibraryDestination_PACKAGE_MANAGER ClientLibraryDestination = 20 +) + +// Enum value maps for ClientLibraryDestination. +var ( + ClientLibraryDestination_name = map[int32]string{ + 0: "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED", + 10: "GITHUB", + 20: "PACKAGE_MANAGER", + } + ClientLibraryDestination_value = map[string]int32{ + "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED": 0, + "GITHUB": 10, + "PACKAGE_MANAGER": 20, + } +) + +func (x ClientLibraryDestination) Enum() *ClientLibraryDestination { + p := new(ClientLibraryDestination) + *p = x + return p +} + +func (x ClientLibraryDestination) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryDestination) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[1].Descriptor() +} + +func (ClientLibraryDestination) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[1] +} + +func (x ClientLibraryDestination) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryDestination.Descriptor instead. +func (ClientLibraryDestination) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +// Required information for every language. +type CommonLanguageSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Link to automatically generated reference documentation. Example: + // https://cloud.google.com/nodejs/docs/reference/asset/latest + // + // Deprecated: Do not use. 
+ ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` + // The destination where API teams want this client library to be published. + Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` +} + +func (x *CommonLanguageSettings) Reset() { + *x = CommonLanguageSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonLanguageSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonLanguageSettings) ProtoMessage() {} + +func (x *CommonLanguageSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonLanguageSettings.ProtoReflect.Descriptor instead. +func (*CommonLanguageSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: Do not use. +func (x *CommonLanguageSettings) GetReferenceDocsUri() string { + if x != nil { + return x.ReferenceDocsUri + } + return "" +} + +func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { + if x != nil { + return x.Destinations + } + return nil +} + +// Details about how and where to publish client libraries. +type ClientLibrarySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Version of the API to apply these settings to. This is the full protobuf + // package for the API, ending in the version element. + // Examples: "google.cloud.speech.v1" and "google.spanner.admin.database.v1". + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Launch stage of this version of the API. + LaunchStage api.LaunchStage `protobuf:"varint,2,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // When using transport=rest, the client request will encode enums as + // numbers rather than strings. + RestNumericEnums bool `protobuf:"varint,3,opt,name=rest_numeric_enums,json=restNumericEnums,proto3" json:"rest_numeric_enums,omitempty"` + // Settings for legacy Java features, supported in the Service YAML. + JavaSettings *JavaSettings `protobuf:"bytes,21,opt,name=java_settings,json=javaSettings,proto3" json:"java_settings,omitempty"` + // Settings for C++ client libraries. + CppSettings *CppSettings `protobuf:"bytes,22,opt,name=cpp_settings,json=cppSettings,proto3" json:"cpp_settings,omitempty"` + // Settings for PHP client libraries. + PhpSettings *PhpSettings `protobuf:"bytes,23,opt,name=php_settings,json=phpSettings,proto3" json:"php_settings,omitempty"` + // Settings for Python client libraries. + PythonSettings *PythonSettings `protobuf:"bytes,24,opt,name=python_settings,json=pythonSettings,proto3" json:"python_settings,omitempty"` + // Settings for Node client libraries. + NodeSettings *NodeSettings `protobuf:"bytes,25,opt,name=node_settings,json=nodeSettings,proto3" json:"node_settings,omitempty"` + // Settings for .NET client libraries. 
+ DotnetSettings *DotnetSettings `protobuf:"bytes,26,opt,name=dotnet_settings,json=dotnetSettings,proto3" json:"dotnet_settings,omitempty"` + // Settings for Ruby client libraries. + RubySettings *RubySettings `protobuf:"bytes,27,opt,name=ruby_settings,json=rubySettings,proto3" json:"ruby_settings,omitempty"` + // Settings for Go client libraries. + GoSettings *GoSettings `protobuf:"bytes,28,opt,name=go_settings,json=goSettings,proto3" json:"go_settings,omitempty"` +} + +func (x *ClientLibrarySettings) Reset() { + *x = ClientLibrarySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientLibrarySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientLibrarySettings) ProtoMessage() {} + +func (x *ClientLibrarySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientLibrarySettings.ProtoReflect.Descriptor instead. +func (*ClientLibrarySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientLibrarySettings) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ClientLibrarySettings) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (x *ClientLibrarySettings) GetRestNumericEnums() bool { + if x != nil { + return x.RestNumericEnums + } + return false +} + +func (x *ClientLibrarySettings) GetJavaSettings() *JavaSettings { + if x != nil { + return x.JavaSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetCppSettings() *CppSettings { + if x != nil { + return x.CppSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPhpSettings() *PhpSettings { + if x != nil { + return x.PhpSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPythonSettings() *PythonSettings { + if x != nil { + return x.PythonSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetNodeSettings() *NodeSettings { + if x != nil { + return x.NodeSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetDotnetSettings() *DotnetSettings { + if x != nil { + return x.DotnetSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetRubySettings() *RubySettings { + if x != nil { + return x.RubySettings + } + return nil +} + +func (x *ClientLibrarySettings) GetGoSettings() *GoSettings { + if x != nil { + return x.GoSettings + } + return nil +} + +// This message configures the settings for publishing [Google Cloud Client +// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) +// generated from the service config. +type Publishing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of API method settings, e.g. the behavior for methods that use the + // long-running operation pattern. + MethodSettings []*MethodSettings `protobuf:"bytes,2,rep,name=method_settings,json=methodSettings,proto3" json:"method_settings,omitempty"` + // Link to a *public* URI where users can report issues. 
Example: + // https://issuetracker.google.com/issues/new?component=190865&template=1161103 + NewIssueUri string `protobuf:"bytes,101,opt,name=new_issue_uri,json=newIssueUri,proto3" json:"new_issue_uri,omitempty"` + // Link to product home page. Example: + // https://cloud.google.com/asset-inventory/docs/overview + DocumentationUri string `protobuf:"bytes,102,opt,name=documentation_uri,json=documentationUri,proto3" json:"documentation_uri,omitempty"` + // Used as a tracking tag when collecting data about the APIs developer + // relations artifacts like docs, packages delivered to package managers, + // etc. Example: "speech". + ApiShortName string `protobuf:"bytes,103,opt,name=api_short_name,json=apiShortName,proto3" json:"api_short_name,omitempty"` + // GitHub label to apply to issues and pull requests opened for this API. + GithubLabel string `protobuf:"bytes,104,opt,name=github_label,json=githubLabel,proto3" json:"github_label,omitempty"` + // GitHub teams to be added to CODEOWNERS in the directory in GitHub + // containing source code for the client libraries for this API. + CodeownerGithubTeams []string `protobuf:"bytes,105,rep,name=codeowner_github_teams,json=codeownerGithubTeams,proto3" json:"codeowner_github_teams,omitempty"` + // A prefix used in sample code when demarking regions to be included in + // documentation. + DocTagPrefix string `protobuf:"bytes,106,opt,name=doc_tag_prefix,json=docTagPrefix,proto3" json:"doc_tag_prefix,omitempty"` + // For whom the client library is being published. + Organization ClientLibraryOrganization `protobuf:"varint,107,opt,name=organization,proto3,enum=google.api.ClientLibraryOrganization" json:"organization,omitempty"` + // Client library settings. If the same version string appears multiple + // times in this list, then the last one wins. Settings from earlier + // settings with the same version string are discarded. + LibrarySettings []*ClientLibrarySettings `protobuf:"bytes,109,rep,name=library_settings,json=librarySettings,proto3" json:"library_settings,omitempty"` + // Optional link to proto reference documentation. Example: + // https://cloud.google.com/pubsub/lite/docs/reference/rpc + ProtoReferenceDocumentationUri string `protobuf:"bytes,110,opt,name=proto_reference_documentation_uri,json=protoReferenceDocumentationUri,proto3" json:"proto_reference_documentation_uri,omitempty"` + // Optional link to REST reference documentation. Example: + // https://cloud.google.com/pubsub/lite/docs/reference/rest + RestReferenceDocumentationUri string `protobuf:"bytes,111,opt,name=rest_reference_documentation_uri,json=restReferenceDocumentationUri,proto3" json:"rest_reference_documentation_uri,omitempty"` +} + +func (x *Publishing) Reset() { + *x = Publishing{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Publishing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Publishing) ProtoMessage() {} + +func (x *Publishing) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Publishing.ProtoReflect.Descriptor instead. 
+func (*Publishing) Descriptor() ([]byte, []int) {
+	return file_google_api_client_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Publishing) GetMethodSettings() []*MethodSettings {
+	if x != nil {
+		return x.MethodSettings
+	}
+	return nil
+}
+
+func (x *Publishing) GetNewIssueUri() string {
+	if x != nil {
+		return x.NewIssueUri
+	}
+	return ""
+}
+
+func (x *Publishing) GetDocumentationUri() string {
+	if x != nil {
+		return x.DocumentationUri
+	}
+	return ""
+}
+
+func (x *Publishing) GetApiShortName() string {
+	if x != nil {
+		return x.ApiShortName
+	}
+	return ""
+}
+
+func (x *Publishing) GetGithubLabel() string {
+	if x != nil {
+		return x.GithubLabel
+	}
+	return ""
+}
+
+func (x *Publishing) GetCodeownerGithubTeams() []string {
+	if x != nil {
+		return x.CodeownerGithubTeams
+	}
+	return nil
+}
+
+func (x *Publishing) GetDocTagPrefix() string {
+	if x != nil {
+		return x.DocTagPrefix
+	}
+	return ""
+}
+
+func (x *Publishing) GetOrganization() ClientLibraryOrganization {
+	if x != nil {
+		return x.Organization
+	}
+	return ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED
+}
+
+func (x *Publishing) GetLibrarySettings() []*ClientLibrarySettings {
+	if x != nil {
+		return x.LibrarySettings
+	}
+	return nil
+}
+
+func (x *Publishing) GetProtoReferenceDocumentationUri() string {
+	if x != nil {
+		return x.ProtoReferenceDocumentationUri
+	}
+	return ""
+}
+
+func (x *Publishing) GetRestReferenceDocumentationUri() string {
+	if x != nil {
+		return x.RestReferenceDocumentationUri
+	}
+	return ""
+}
+
+// Settings for Java client libraries.
+type JavaSettings struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The package name to use in Java. Clobbers the java_package option
+	// set in the protobuf. This should be used **only** by APIs
+	// that have already set the "language_settings.java.package_name" field
+	// in gapic.yaml. API teams should use the protobuf java_package option
+	// where possible.
+	//
+	// Example of a YAML configuration::
+	//
+	//	publishing:
+	//	  java_settings:
+	//	    library_package: com.google.cloud.pubsub.v1
+	LibraryPackage string `protobuf:"bytes,1,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"`
+	// Configure the Java class name to use instead of the service's for its
+	// corresponding generated GAPIC client. Keys are fully-qualified
+	// service names as they appear in the protobuf (including the full
+	// protobuf package); values are the class names to use instead. This
+	// should be used **only** by APIs that have already set the
+	// "language_settings.java.interface_names" field in gapic.yaml. API
+	// teams should otherwise use the service name as it appears in the
+	// protobuf.
+	//
+	// Example of a YAML configuration::
+	//
+	//	publishing:
+	//	  java_settings:
+	//	    service_class_names:
+	//	      - google.pubsub.v1.Publisher: TopicAdmin
+	//	      - google.pubsub.v1.Subscriber: SubscriptionAdmin
+	ServiceClassNames map[string]string `protobuf:"bytes,2,rep,name=service_class_names,json=serviceClassNames,proto3" json:"service_class_names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Some settings.
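+	// ("Some settings" here is the shared CommonLanguageSettings message
+	// above, which carries the cross-language options such as destinations;
+	// each per-language settings message embeds it the same way.)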
+ Common *CommonLanguageSettings `protobuf:"bytes,3,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *JavaSettings) Reset() { + *x = JavaSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JavaSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JavaSettings) ProtoMessage() {} + +func (x *JavaSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JavaSettings.ProtoReflect.Descriptor instead. +func (*JavaSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{3} +} + +func (x *JavaSettings) GetLibraryPackage() string { + if x != nil { + return x.LibraryPackage + } + return "" +} + +func (x *JavaSettings) GetServiceClassNames() map[string]string { + if x != nil { + return x.ServiceClassNames + } + return nil +} + +func (x *JavaSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for C++ client libraries. +type CppSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *CppSettings) Reset() { + *x = CppSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CppSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CppSettings) ProtoMessage() {} + +func (x *CppSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CppSettings.ProtoReflect.Descriptor instead. +func (*CppSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{4} +} + +func (x *CppSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Php client libraries. +type PhpSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *PhpSettings) Reset() { + *x = PhpSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PhpSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PhpSettings) ProtoMessage() {} + +func (x *PhpSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PhpSettings.ProtoReflect.Descriptor instead. +func (*PhpSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{5} +} + +func (x *PhpSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Python client libraries. +type PythonSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Experimental features to be included during client library generation. + ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"` +} + +func (x *PythonSettings) Reset() { + *x = PythonSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings) ProtoMessage() {} + +func (x *PythonSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings.ProtoReflect.Descriptor instead. +func (*PythonSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6} +} + +func (x *PythonSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures { + if x != nil { + return x.ExperimentalFeatures + } + return nil +} + +// Settings for Node client libraries. +type NodeSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *NodeSettings) Reset() { + *x = NodeSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeSettings) ProtoMessage() {} + +func (x *NodeSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeSettings.ProtoReflect.Descriptor instead. +func (*NodeSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{7} +} + +func (x *NodeSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Dotnet client libraries. +type DotnetSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Map from original service names to renamed versions. + // This is used when the default generated types + // would cause a naming conflict. (Neither name is + // fully-qualified.) + // Example: Subscriber to SubscriberServiceApi. + RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Map from full resource types to the effective short name + // for the resource. This is used when otherwise resource + // named from different services would cause naming collisions. + // Example entry: + // "datalabeling.googleapis.com/Dataset": "DataLabelingDataset" + RenamedResources map[string]string `protobuf:"bytes,3,rep,name=renamed_resources,json=renamedResources,proto3" json:"renamed_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // List of full resource types to ignore during generation. + // This is typically used for API-specific Location resources, + // which should be handled by the generator as if they were actually + // the common Location resources. + // Example entry: "documentai.googleapis.com/Location" + IgnoredResources []string `protobuf:"bytes,4,rep,name=ignored_resources,json=ignoredResources,proto3" json:"ignored_resources,omitempty"` + // Namespaces which must be aliased in snippets due to + // a known (but non-generator-predictable) naming collision + ForcedNamespaceAliases []string `protobuf:"bytes,5,rep,name=forced_namespace_aliases,json=forcedNamespaceAliases,proto3" json:"forced_namespace_aliases,omitempty"` + // Method signatures (in the form "service.method(signature)") + // which are provided separately, so shouldn't be generated. + // Snippets *calling* these methods are still generated, however. 
+ HandwrittenSignatures []string `protobuf:"bytes,6,rep,name=handwritten_signatures,json=handwrittenSignatures,proto3" json:"handwritten_signatures,omitempty"` +} + +func (x *DotnetSettings) Reset() { + *x = DotnetSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DotnetSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DotnetSettings) ProtoMessage() {} + +func (x *DotnetSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DotnetSettings.ProtoReflect.Descriptor instead. +func (*DotnetSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{8} +} + +func (x *DotnetSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +func (x *DotnetSettings) GetRenamedServices() map[string]string { + if x != nil { + return x.RenamedServices + } + return nil +} + +func (x *DotnetSettings) GetRenamedResources() map[string]string { + if x != nil { + return x.RenamedResources + } + return nil +} + +func (x *DotnetSettings) GetIgnoredResources() []string { + if x != nil { + return x.IgnoredResources + } + return nil +} + +func (x *DotnetSettings) GetForcedNamespaceAliases() []string { + if x != nil { + return x.ForcedNamespaceAliases + } + return nil +} + +func (x *DotnetSettings) GetHandwrittenSignatures() []string { + if x != nil { + return x.HandwrittenSignatures + } + return nil +} + +// Settings for Ruby client libraries. +type RubySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *RubySettings) Reset() { + *x = RubySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RubySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RubySettings) ProtoMessage() {} + +func (x *RubySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RubySettings.ProtoReflect.Descriptor instead. +func (*RubySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{9} +} + +func (x *RubySettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Go client libraries. +type GoSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *GoSettings) Reset() { + *x = GoSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoSettings) ProtoMessage() {} + +func (x *GoSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoSettings.ProtoReflect.Descriptor instead. +func (*GoSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{10} +} + +func (x *GoSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Describes the generator configuration for a method. +type MethodSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of the method, for which the options below apply. + // This is used to find the method to apply the options. + // + // Example: + // + // publishing: + // method_settings: + // - selector: google.storage.control.v2.StorageControl.CreateFolder + // # method settings for CreateFolder... + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + // Describes settings to use for long-running operations when generating + // API methods for RPCs. Complements RPCs that use the annotations in + // google/longrunning/operations.proto. + // + // Example of a YAML configuration:: + // + // publishing: + // method_settings: + // - selector: google.cloud.speech.v2.Speech.BatchRecognize + // long_running: + // initial_poll_delay: 60s # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: 360s # 6 minutes + // total_poll_timeout: 54000s # 90 minutes + LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` + // List of top-level fields of the request message, that should be + // automatically populated by the client libraries based on their + // (google.api.field_info).format. Currently supported format: UUID4. 
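+	// (In practice: if a field listed here carries a UUID4 format annotation
+	// and the caller leaves it unset, the generated client populates it
+	// automatically, as in the request_id example below.)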
+ // + // Example of a YAML configuration: + // + // publishing: + // method_settings: + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id + AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` +} + +func (x *MethodSettings) Reset() { + *x = MethodSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings) ProtoMessage() {} + +func (x *MethodSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings.ProtoReflect.Descriptor instead. +func (*MethodSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11} +} + +func (x *MethodSettings) GetSelector() string { + if x != nil { + return x.Selector + } + return "" +} + +func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning { + if x != nil { + return x.LongRunning + } + return nil +} + +func (x *MethodSettings) GetAutoPopulatedFields() []string { + if x != nil { + return x.AutoPopulatedFields + } + return nil +} + +// Experimental features to be included during client library generation. +// These fields will be deprecated once the feature graduates and is enabled +// by default. +type PythonSettings_ExperimentalFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables generation of asynchronous REST clients if `rest` transport is + // enabled. By default, asynchronous REST clients will not be generated. + // This feature will be enabled by default 1 month after launching the + // feature in preview packages. + RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` +} + +func (x *PythonSettings_ExperimentalFeatures) Reset() { + *x = PythonSettings_ExperimentalFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings_ExperimentalFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} + +func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead. 
+func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { + if x != nil { + return x.RestAsyncIoEnabled + } + return false +} + +// Describes settings to use when generating API methods that use the +// long-running operation pattern. +// All default values below are from those used in the client library +// generators (e.g. +// [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). +type MethodSettings_LongRunning struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Initial delay after which the first poll request will be made. + // Default value: 5 seconds. + InitialPollDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=initial_poll_delay,json=initialPollDelay,proto3" json:"initial_poll_delay,omitempty"` + // Multiplier to gradually increase delay between subsequent polls until it + // reaches max_poll_delay. + // Default value: 1.5. + PollDelayMultiplier float32 `protobuf:"fixed32,2,opt,name=poll_delay_multiplier,json=pollDelayMultiplier,proto3" json:"poll_delay_multiplier,omitempty"` + // Maximum time between two subsequent poll requests. + // Default value: 45 seconds. + MaxPollDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=max_poll_delay,json=maxPollDelay,proto3" json:"max_poll_delay,omitempty"` + // Total polling timeout. + // Default value: 5 minutes. + TotalPollTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=total_poll_timeout,json=totalPollTimeout,proto3" json:"total_poll_timeout,omitempty"` +} + +func (x *MethodSettings_LongRunning) Reset() { + *x = MethodSettings_LongRunning{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings_LongRunning) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings_LongRunning) ProtoMessage() {} + +func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings_LongRunning.ProtoReflect.Descriptor instead. 
+func (*MethodSettings_LongRunning) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *MethodSettings_LongRunning) GetInitialPollDelay() *durationpb.Duration { + if x != nil { + return x.InitialPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetPollDelayMultiplier() float32 { + if x != nil { + return x.PollDelayMultiplier + } + return 0 +} + +func (x *MethodSettings_LongRunning) GetMaxPollDelay() *durationpb.Duration { + if x != nil { + return x.MaxPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetTotalPollTimeout() *durationpb.Duration { + if x != nil { + return x.TotalPollTimeout + } + return nil +} + +var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: ([]string)(nil), + Field: 1051, + Name: "google.api.method_signature", + Tag: "bytes,1051,rep,name=method_signature", + Filename: "google/api/client.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1049, + Name: "google.api.default_host", + Tag: "bytes,1049,opt,name=default_host", + Filename: "google/api/client.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1050, + Name: "google.api.oauth_scopes", + Tag: "bytes,1050,opt,name=oauth_scopes", + Filename: "google/api/client.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 525000001, + Name: "google.api.api_version", + Tag: "bytes,525000001,opt,name=api_version", + Filename: "google/api/client.proto", + }, +} + +// Extension fields to descriptorpb.MethodOptions. +var ( + // A definition of a client library method signature. + // + // In client libraries, each proto RPC corresponds to one or more methods + // which the end user is able to call, and calls the underlying RPC. + // Normally, this method receives a single argument (a struct or instance + // corresponding to the RPC request object). Defining this field will + // add one or more overloads providing flattened or simpler method signatures + // in some languages. + // + // The fields on the method signature are provided as a comma-separated + // string. + // + // For example, the proto RPC and annotation: + // + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } + // + // Would add the following Java overload (in addition to the method accepting + // the request object): + // + // public final Subscription createSubscription(String name, String topic) + // + // The following backwards-compatibility guidelines apply: + // + // - Adding this annotation to an unannotated method is backwards + // compatible. + // - Adding this annotation to a method which already has existing + // method signature annotations is backwards compatible if and only if + // the new method signature annotation is last in the sequence. + // - Modifying or removing an existing method signature annotation is + // a breaking change. + // - Re-ordering existing method signature annotations is a breaking + // change. + // + // repeated string method_signature = 1051; + E_MethodSignature = &file_google_api_client_proto_extTypes[0] +) + +// Extension fields to descriptorpb.ServiceOptions. +var ( + // The hostname for this service. + // This should be specified with no prefix or protocol. 
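+	// (Generated clients typically use this value as the default API
+	// endpoint to connect to when no endpoint is configured explicitly.)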
+ // + // Example: + // + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } + // + // optional string default_host = 1049; + E_DefaultHost = &file_google_api_client_proto_extTypes[1] + // OAuth scopes needed for the client. + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } + // + // If there is more than one scope, use a comma-separated string: + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... + // } + // + // optional string oauth_scopes = 1050; + E_OauthScopes = &file_google_api_client_proto_extTypes[2] + // The API version of this service, which should be sent by version-aware + // clients to the service. This allows services to abide by the schema and + // behavior of the service at the time this API version was deployed. + // The format of the API version must be treated as opaque by clients. + // Services may use a format with an apparent structure, but clients must + // not rely on this to determine components within an API version, or attempt + // to construct other valid API versions. Note that this is for upcoming + // functionality and may not be implemented for all services. + // + // Example: + // + // service Foo { + // option (google.api.api_version) = "v1_20230821_preview"; + // } + // + // optional string api_version = 525000001; + E_ApiVersion = &file_google_api_client_proto_extTypes[3] +) + +var File_google_api_client_proto protoreflect.FileDescriptor + +var file_google_api_client_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, + 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x73, + 0x55, 0x72, 0x69, 0x12, 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, + 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05, + 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 
0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, + 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e, + 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e, + 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a, + 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61, + 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70, + 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70, + 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72, + 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 
0x73, 0x52, 0x0c, 0x72, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, + 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69, + 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, + 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f, + 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74, + 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a, + 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, + 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, + 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72, + 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 
0x65, + 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6e, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, + 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x72, 0x65, 0x73, + 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, + 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, + 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, + 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, + 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01, + 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x3a, 0x0a, 
0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, + 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, + 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, + 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 
0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, + 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, + 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, + 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 
0x64, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, + 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, + 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, + 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, + 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, + 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, + 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, + 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, + 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, + 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, + 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, + 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 
0x47, + 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_client_proto_rawDescOnce sync.Once + file_google_api_client_proto_rawDescData = file_google_api_client_proto_rawDesc +) + +func file_google_api_client_proto_rawDescGZIP() []byte { + file_google_api_client_proto_rawDescOnce.Do(func() { + file_google_api_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_client_proto_rawDescData) + }) + return file_google_api_client_proto_rawDescData +} + +var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_google_api_client_proto_goTypes = []interface{}{ + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 
6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures + nil, // 16: google.api.DotnetSettings.RenamedServicesEntry + nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry + (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 19: google.api.LaunchStage + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions +} +var file_google_api_client_proto_depIdxs = []int32{ + 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination + 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization + 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 18, 
// 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 33, // [33:33] is the sub-list for method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 29, // [29:33] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name +} + +func init() { file_google_api_client_proto_init() } +func file_google_api_client_proto_init() { + if File_google_api_client_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonLanguageSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientLibrarySettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Publishing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JavaSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CppSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PhpSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DotnetSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil 
+ } + } + file_google_api_client_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RubySettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GoSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings_ExperimentalFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings_LongRunning); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_client_proto_rawDesc, + NumEnums: 2, + NumMessages: 17, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_google_api_client_proto_goTypes, + DependencyIndexes: file_google_api_client_proto_depIdxs, + EnumInfos: file_google_api_client_proto_enumTypes, + MessageInfos: file_google_api_client_proto_msgTypes, + ExtensionInfos: file_google_api_client_proto_extTypes, + }.Build() + File_google_api_client_proto = out.File + file_google_api_client_proto_rawDesc = nil + file_google_api_client_proto_goTypes = nil + file_google_api_client_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go new file mode 100644 index 0000000000..08505ba3fe --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -0,0 +1,266 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/field_behavior.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// An indicator of the behavior of a given field (for example, that a field +// is required in requests, or given as output but ignored as input). +// This **does not** change the behavior in protocol buffers itself; it only +// denotes the behavior and may affect how API tooling handles the field. +// +// Note: This enum **may** receive new values in the future. +type FieldBehavior int32 + +const ( + // Conventional default for enums. Do not use this. + FieldBehavior_FIELD_BEHAVIOR_UNSPECIFIED FieldBehavior = 0 + // Specifically denotes a field as optional. + // While all fields in protocol buffers are optional, this may be specified + // for emphasis if appropriate. + FieldBehavior_OPTIONAL FieldBehavior = 1 + // Denotes a field as required. + // This indicates that the field **must** be provided as part of the request, + // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + FieldBehavior_REQUIRED FieldBehavior = 2 + // Denotes a field as output only. + // This indicates that the field is provided in responses, but including the + // field in a request does nothing (the server *must* ignore it and + // *must not* throw an error as a result of the field's presence). + FieldBehavior_OUTPUT_ONLY FieldBehavior = 3 + // Denotes a field as input only. + // This indicates that the field is provided in requests, and the + // corresponding field is not included in output. + FieldBehavior_INPUT_ONLY FieldBehavior = 4 + // Denotes a field as immutable. + // This indicates that the field may be set once in a request to create a + // resource, but may not be changed thereafter. + FieldBehavior_IMMUTABLE FieldBehavior = 5 + // Denotes that a (repeated) field is an unordered list. + // This indicates that the service may provide the elements of the list + // in any arbitrary order, rather than the order the user originally + // provided. Additionally, the list's order may or may not be stable. + FieldBehavior_UNORDERED_LIST FieldBehavior = 6 + // Denotes that this field returns a non-empty default value if not set. + // This indicates that if the user provides the empty value in a request, + // a non-empty value will be returned. The user will not be aware of what + // non-empty value to expect. + FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7 + // Denotes that the field in a resource (a message annotated with + // google.api.resource) is used in the resource name to uniquely identify the + // resource. For AIP-compliant APIs, this should only be applied to the + // `name` field on the resource. + // + // This behavior should not be applied to references to other resources within + // the message. + // + // The identifier field of resources often have different field behavior + // depending on the request it is embedded in (e.g. for Create methods name + // is optional and unused, while for Update methods it is required). Instead + // of method-specific annotations, only `IDENTIFIER` is required. + FieldBehavior_IDENTIFIER FieldBehavior = 8 +) + +// Enum value maps for FieldBehavior. 
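+
+// A minimal read-side sketch, assuming fd is a protoreflect.FieldDescriptor
+// for a field of an already-built message type (illustrative only, not
+// emitted by protoc-gen-go; E_FieldBehavior is the extension declared further
+// down in this file, and proto is google.golang.org/protobuf/proto):
+//
+//	opts, ok := fd.Options().(*descriptorpb.FieldOptions)
+//	if ok && opts != nil {
+//		for _, b := range proto.GetExtension(opts, E_FieldBehavior).([]FieldBehavior) {
+//			if b == FieldBehavior_REQUIRED {
+//				// e.g. reject requests that leave this field unset
+//			}
+//		}
+//	}
+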
+var ( + FieldBehavior_name = map[int32]string{ + 0: "FIELD_BEHAVIOR_UNSPECIFIED", + 1: "OPTIONAL", + 2: "REQUIRED", + 3: "OUTPUT_ONLY", + 4: "INPUT_ONLY", + 5: "IMMUTABLE", + 6: "UNORDERED_LIST", + 7: "NON_EMPTY_DEFAULT", + 8: "IDENTIFIER", + } + FieldBehavior_value = map[string]int32{ + "FIELD_BEHAVIOR_UNSPECIFIED": 0, + "OPTIONAL": 1, + "REQUIRED": 2, + "OUTPUT_ONLY": 3, + "INPUT_ONLY": 4, + "IMMUTABLE": 5, + "UNORDERED_LIST": 6, + "NON_EMPTY_DEFAULT": 7, + "IDENTIFIER": 8, + } +) + +func (x FieldBehavior) Enum() *FieldBehavior { + p := new(FieldBehavior) + *p = x + return p +} + +func (x FieldBehavior) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldBehavior) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_field_behavior_proto_enumTypes[0].Descriptor() +} + +func (FieldBehavior) Type() protoreflect.EnumType { + return &file_google_api_field_behavior_proto_enumTypes[0] +} + +func (x FieldBehavior) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FieldBehavior.Descriptor instead. +func (FieldBehavior) EnumDescriptor() ([]byte, []int) { + return file_google_api_field_behavior_proto_rawDescGZIP(), []int{0} +} + +var file_google_api_field_behavior_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: ([]FieldBehavior)(nil), + Field: 1052, + Name: "google.api.field_behavior", + Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior", + Filename: "google/api/field_behavior.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // A designation of a specific field behavior (required, output only, etc.) + // in protobuf messages. 
+ // + // Examples: + // + // string name = 1 [(google.api.field_behavior) = REQUIRED]; + // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // google.protobuf.Duration ttl = 1 + // [(google.api.field_behavior) = INPUT_ONLY]; + // google.protobuf.Timestamp expire_time = 1 + // [(google.api.field_behavior) = OUTPUT_ONLY, + // (google.api.field_behavior) = IMMUTABLE]; + // + // repeated google.api.FieldBehavior field_behavior = 1052; + E_FieldBehavior = &file_google_api_field_behavior_proto_extTypes[0] +) + +var File_google_api_field_behavior_proto protoreflect.FileDescriptor + +var file_google_api_field_behavior_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, + 0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, + 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, + 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0f, 0x0a, + 0x0b, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x0e, + 0x0a, 0x0a, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x12, 0x0d, + 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a, + 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, + 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44, + 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e, + 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x64, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x02, 0x10, 0x00, 0x52, + 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, + 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x42, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_field_behavior_proto_rawDescOnce sync.Once + file_google_api_field_behavior_proto_rawDescData = file_google_api_field_behavior_proto_rawDesc +) + +func file_google_api_field_behavior_proto_rawDescGZIP() []byte { + file_google_api_field_behavior_proto_rawDescOnce.Do(func() { + file_google_api_field_behavior_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_behavior_proto_rawDescData) + }) + return file_google_api_field_behavior_proto_rawDescData +} + +var file_google_api_field_behavior_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_field_behavior_proto_goTypes = []interface{}{ + (FieldBehavior)(0), // 0: google.api.FieldBehavior + (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions +} +var file_google_api_field_behavior_proto_depIdxs = []int32{ + 1, // 0: google.api.field_behavior:extendee -> google.protobuf.FieldOptions + 0, // 1: google.api.field_behavior:type_name -> google.api.FieldBehavior + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_field_behavior_proto_init() } +func file_google_api_field_behavior_proto_init() { + if File_google_api_field_behavior_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_field_behavior_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_field_behavior_proto_goTypes, + DependencyIndexes: file_google_api_field_behavior_proto_depIdxs, + EnumInfos: file_google_api_field_behavior_proto_enumTypes, + ExtensionInfos: file_google_api_field_behavior_proto_extTypes, + }.Build() + File_google_api_field_behavior_proto = out.File + file_google_api_field_behavior_proto_rawDesc = nil + file_google_api_field_behavior_proto_goTypes = nil + file_google_api_field_behavior_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go new file mode 100644 index 0000000000..a462e7d013 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -0,0 +1,392 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/field_info.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The standard format of a field value. The supported formats are all backed +// by either an RFC defined by the IETF or a Google-defined AIP. +type FieldInfo_Format int32 + +const ( + // Default, unspecified value. + FieldInfo_FORMAT_UNSPECIFIED FieldInfo_Format = 0 + // Universally Unique Identifier, version 4, value as defined by + // https://datatracker.ietf.org/doc/html/rfc4122. The value may be + // normalized to entirely lowercase letters. For example, the value + // `F47AC10B-58CC-0372-8567-0E02B2C3D479` would be normalized to + // `f47ac10b-58cc-0372-8567-0e02b2c3d479`. + FieldInfo_UUID4 FieldInfo_Format = 1 + // Internet Protocol v4 value as defined by [RFC + // 791](https://datatracker.ietf.org/doc/html/rfc791). The value may be + // condensed, with leading zeros in each octet stripped. For example, + // `001.022.233.040` would be condensed to `1.22.233.40`. + FieldInfo_IPV4 FieldInfo_Format = 2 + // Internet Protocol v6 value as defined by [RFC + // 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be + // normalized to entirely lowercase letters with zeros compressed, following + // [RFC 5952](https://datatracker.ietf.org/doc/html/rfc5952). For example, + // the value `2001:0DB8:0::0` would be normalized to `2001:db8::`. + FieldInfo_IPV6 FieldInfo_Format = 3 + // An IP address in either v4 or v6 format as described by the individual + // values defined herein. See the comments on the IPV4 and IPV6 types for + // allowed normalizations of each. + FieldInfo_IPV4_OR_IPV6 FieldInfo_Format = 4 +) + +// Enum value maps for FieldInfo_Format. +var ( + FieldInfo_Format_name = map[int32]string{ + 0: "FORMAT_UNSPECIFIED", + 1: "UUID4", + 2: "IPV4", + 3: "IPV6", + 4: "IPV4_OR_IPV6", + } + FieldInfo_Format_value = map[string]int32{ + "FORMAT_UNSPECIFIED": 0, + "UUID4": 1, + "IPV4": 2, + "IPV6": 3, + "IPV4_OR_IPV6": 4, + } +) + +func (x FieldInfo_Format) Enum() *FieldInfo_Format { + p := new(FieldInfo_Format) + *p = x + return p +} + +func (x FieldInfo_Format) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldInfo_Format) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_field_info_proto_enumTypes[0].Descriptor() +} + +func (FieldInfo_Format) Type() protoreflect.EnumType { + return &file_google_api_field_info_proto_enumTypes[0] +} + +func (x FieldInfo_Format) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FieldInfo_Format.Descriptor instead. +func (FieldInfo_Format) EnumDescriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0, 0} +} + +// Rich semantic information of an API field beyond basic typing. +type FieldInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The standard format of a field value. 
This does not explicitly configure + // any API consumer, just documents the API's format for the field it is + // applied to. + Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` + // The type(s) that the annotated, generic field may represent. + // + // Currently, this must only be used on fields of type `google.protobuf.Any`. + // Supporting other generic types may be considered in the future. + ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"` +} + +func (x *FieldInfo) Reset() { + *x = FieldInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldInfo) ProtoMessage() {} + +func (x *FieldInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldInfo.ProtoReflect.Descriptor instead. +func (*FieldInfo) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldInfo) GetFormat() FieldInfo_Format { + if x != nil { + return x.Format + } + return FieldInfo_FORMAT_UNSPECIFIED +} + +func (x *FieldInfo) GetReferencedTypes() []*TypeReference { + if x != nil { + return x.ReferencedTypes + } + return nil +} + +// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo]. +type TypeReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the type that the annotated, generic field may represent. + // If the type is in the same protobuf package, the value can be the simple + // message name e.g., `"MyMessage"`. Otherwise, the value must be the + // fully-qualified message name e.g., `"google.library.v1.Book"`. + // + // If the type(s) are unknown to the service (e.g. the field accepts generic + // user input), use the wildcard `"*"` to denote this behavior. + // + // See [AIP-202](https://google.aip.dev/202#type-references) for more details. + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *TypeReference) Reset() { + *x = TypeReference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeReference) ProtoMessage() {} + +func (x *TypeReference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead. 
+func (*TypeReference) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{1} +} + +func (x *TypeReference) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldInfo)(nil), + Field: 291403980, + Name: "google.api.field_info", + Tag: "bytes,291403980,opt,name=field_info", + Filename: "google/api/field_info.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // Rich semantic descriptor of an API field beyond the basic typing. + // + // Examples: + // + // string request_id = 1 [(google.api.field_info).format = UUID4]; + // string old_ip_address = 2 [(google.api.field_info).format = IPV4]; + // string new_ip_address = 3 [(google.api.field_info).format = IPV6]; + // string actual_ip_address = 4 [ + // (google.api.field_info).format = IPV4_OR_IPV6 + // ]; + // google.protobuf.Any generic_field = 5 [ + // (google.api.field_info).referenced_types = {type_name: "ActualType"}, + // (google.api.field_info).referenced_types = {type_name: "OtherType"}, + // ]; + // google.protobuf.Any generic_user_input = 5 [ + // (google.api.field_info).referenced_types = {type_name: "*"}, + // ]; + // + // optional google.api.FieldInfo field_info = 291403980; + E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] +) + +var File_google_api_field_info_proto protoreflect.FileDescriptor + +var file_google_api_field_info_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, + 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, + 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, + 
0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_field_info_proto_rawDescOnce sync.Once + file_google_api_field_info_proto_rawDescData = file_google_api_field_info_proto_rawDesc +) + +func file_google_api_field_info_proto_rawDescGZIP() []byte { + file_google_api_field_info_proto_rawDescOnce.Do(func() { + file_google_api_field_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_info_proto_rawDescData) + }) + return file_google_api_field_info_proto_rawDescData +} + +var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_api_field_info_proto_goTypes = []interface{}{ + (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format + (*FieldInfo)(nil), // 1: google.api.FieldInfo + (*TypeReference)(nil), // 2: google.api.TypeReference + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions +} +var file_google_api_field_info_proto_depIdxs = []int32{ + 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format + 2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference + 3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 3: google.api.field_info:type_name -> google.api.FieldInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_google_api_field_info_proto_init() } +func file_google_api_field_info_proto_init() { + if File_google_api_field_info_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_field_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*TypeReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_field_info_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_field_info_proto_goTypes, + DependencyIndexes: file_google_api_field_info_proto_depIdxs, + EnumInfos: file_google_api_field_info_proto_enumTypes, + MessageInfos: file_google_api_field_info_proto_msgTypes, + ExtensionInfos: file_google_api_field_info_proto_extTypes, + }.Build() + File_google_api_field_info_proto = out.File + file_google_api_field_info_proto_rawDesc = nil + file_google_api_field_info_proto_goTypes = nil + file_google_api_field_info_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go new file mode 100644 index 0000000000..ffb5838cb1 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -0,0 +1,774 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/http.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +type Http struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. 
+ FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"` +} + +func (x *Http) Reset() { + *x = Http{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http) ProtoMessage() {} + +func (x *Http) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http.ProtoReflect.Descriptor instead. +func (*Http) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{0} +} + +func (x *Http) GetRules() []*HttpRule { + if x != nil { + return x.Rules + } + return nil +} + +func (x *Http) GetFullyDecodeReservedExpansion() bool { + if x != nil { + return x.FullyDecodeReservedExpansion + } + return false +} + +// gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } +// +// This enables an HTTP REST to gRPC mapping as below: +// +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. 
+// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +// } +// +// This enables an HTTP JSON to RPC mapping as below: +// +// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` +// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: +// SubMessage(subfield: "foo"))` +// +// Note that fields which are mapped to URL query parameters must have a +// primitive type or a repeated primitive type or a non-repeated message type. +// In the case of a repeated type, the parameter can be repeated in the URL +// as `...?param=A&param=B`. In the case of a message type, each field of the +// message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. +// +// For HTTP methods that allow a request body, the `body` field +// specifies the mapping. Consider a REST update method on the +// message resource collection: +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// The following HTTP JSON to RPC mapping is enabled: +// +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice when +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(message_id: "123456")` +// +// - HTTP: `GET /v1/users/me/messages/123456` +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping +// +// 1.
+//  1. Leaf request fields (after recursive expansion of nested messages in
+//     the request message) are classified into three categories:
+//     - Fields referred to by the path template. They are passed via the URL
+//       path.
+//     - Fields referred to by the [HttpRule.body][google.api.HttpRule.body].
+//       They are passed via the HTTP request body.
+//     - All other fields are passed via the URL query parameters, and the
+//       parameter name is the field path in the request message. A repeated
+//       field can be represented as multiple query parameters under the same
+//       name.
+//  2. If [HttpRule.body][google.api.HttpRule.body] is "*", there are no URL
+//     query parameters; all fields are passed via URL path and HTTP request
+//     body.
+//  3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no
+//     HTTP request body; all fields are passed via URL path and URL query
+//     parameters.
+//
+// # Path template syntax
+//
+//	Template = "/" Segments [ Verb ] ;
+//	Segments = Segment { "/" Segment } ;
+//	Segment  = "*" | "**" | LITERAL | Variable ;
+//	Variable = "{" FieldPath [ "=" Segments ] "}" ;
+//	FieldPath = IDENT { "." IDENT } ;
+//	Verb     = ":" LITERAL ;
+//
+// The syntax `*` matches a single URL path segment. The syntax `**` matches
+// zero or more URL path segments, which must be the last part of the URL path
+// except the `Verb`.
+//
+// The syntax `Variable` matches part of the URL path as specified by its
+// template. A variable template must not contain other variables. If a variable
+// matches a single path segment, its template may be omitted, e.g. `{var}`
+// is equivalent to `{var=*}`.
+//
+// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL`
+// contains any reserved character, such characters should be percent-encoded
+// before the matching.
+//
+// If a variable contains exactly one path segment, such as `"{var}"` or
+// `"{var=*}"`, when such a variable is expanded into a URL path on the client
+// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The
+// server side does the reverse decoding. Such variables show up in the
+// [Discovery
+// Document](https://developers.google.com/discovery/v1/reference/apis) as
+// `{var}`.
+//
+// If a variable contains multiple path segments, such as `"{var=foo/*}"`
+// or `"{var=**}"`, when such a variable is expanded into a URL path on the
+// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded.
+// The server side does the reverse decoding, except "%2F" and "%2f" are left
+// unchanged. Such variables show up in the
+// [Discovery
+// Document](https://developers.google.com/discovery/v1/reference/apis) as
+// `{+var}`.
+//
+// # Using gRPC API Service Configuration
+//
+// gRPC API Service Configuration (service config) is a configuration language
+// for configuring a gRPC service to become a user-facing product. The
+// service config is simply the YAML representation of the `google.api.Service`
+// proto message.
+//
+// As an alternative to annotating your proto file, you can configure gRPC
+// transcoding in your service config YAML files. You do this by specifying a
+// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same
+// effect as the proto annotation. This can be particularly useful if you
+// have a proto that is reused in multiple services. Note that any transcoding
+// specified in the service config will override any matching transcoding
+// configuration in the proto.
+//
+// The following example selects a gRPC method and applies an `HttpRule` to it:
+//
+//	http:
+//	  rules:
+//	    - selector: example.v1.Messaging.GetMessage
+//	      get: /v1/messages/{message_id}/{sub.subfield}
+//
+// # Special notes
+//
+// When gRPC Transcoding is used to map a gRPC method to JSON REST endpoints,
+// the proto to JSON conversion must follow the [proto3
+// specification](https://developers.google.com/protocol-buffers/docs/proto3#json).
+//
+// While the single segment variable follows the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
+// Expansion, the multi segment variable **does not** follow RFC 6570 Section
+// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion
+// does not expand special characters like `?` and `#`, which would lead
+// to invalid URLs. As a result, gRPC Transcoding uses a custom encoding
+// for multi segment variables.
+//
+// The path variables **must not** refer to any repeated or mapped field,
+// because client libraries are not capable of handling such variable expansion.
+//
+// The path variables **must not** capture the leading "/" character. The reason
+// is that the most common use case "{var}" does not capture the leading "/"
+// character. For consistency, all path variables must share the same behavior.
+//
+// Repeated message fields must not be mapped to URL query parameters, because
+// no client library can support such complicated mapping.
+//
+// If an API needs to use a JSON array for request or response body, it can map
+// the request or response body to a repeated field. However, some gRPC
+// Transcoding implementations may not support this feature.
+type HttpRule struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Selects a method to which this rule applies.
+	//
+	// Refer to [selector][google.api.DocumentationRule.selector] for syntax
+	// details.
+	Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
+	// Determines the URL pattern that is matched by this rule. This pattern can
+	// be used with any of the {get|put|post|delete|patch} methods. A custom
+	// method can be defined using the 'custom' field.
+	//
+	// Types that are assignable to Pattern:
+	//
+	//	*HttpRule_Get
+	//	*HttpRule_Put
+	//	*HttpRule_Post
+	//	*HttpRule_Delete
+	//	*HttpRule_Patch
+	//	*HttpRule_Custom
+	Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
+	// The name of the request field whose value is mapped to the HTTP request
+	// body, or `*` for mapping all request fields not captured by the path
+	// pattern to the HTTP body, or omitted for not having any HTTP request body.
+	//
+	// NOTE: the referred field must be present at the top-level of the request
+	// message type.
+	Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
+	// Optional. The name of the response field whose value is mapped to the HTTP
+	// response body. When omitted, the entire response message will be used
+	// as the HTTP response body.
+	//
+	// NOTE: The referred field must be present at the top-level of the response
+	// message type.
+	ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"`
+	// Additional HTTP bindings for the selector. Nested bindings must
+	// not contain an `additional_bindings` field themselves (that is,
+	// the nesting may only be one level deep).
+ AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` +} + +func (x *HttpRule) Reset() { + *x = HttpRule{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpRule) ProtoMessage() {} + +func (x *HttpRule) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpRule.ProtoReflect.Descriptor instead. +func (*HttpRule) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{1} +} + +func (x *HttpRule) GetSelector() string { + if x != nil { + return x.Selector + } + return "" +} + +func (m *HttpRule) GetPattern() isHttpRule_Pattern { + if m != nil { + return m.Pattern + } + return nil +} + +func (x *HttpRule) GetGet() string { + if x, ok := x.GetPattern().(*HttpRule_Get); ok { + return x.Get + } + return "" +} + +func (x *HttpRule) GetPut() string { + if x, ok := x.GetPattern().(*HttpRule_Put); ok { + return x.Put + } + return "" +} + +func (x *HttpRule) GetPost() string { + if x, ok := x.GetPattern().(*HttpRule_Post); ok { + return x.Post + } + return "" +} + +func (x *HttpRule) GetDelete() string { + if x, ok := x.GetPattern().(*HttpRule_Delete); ok { + return x.Delete + } + return "" +} + +func (x *HttpRule) GetPatch() string { + if x, ok := x.GetPattern().(*HttpRule_Patch); ok { + return x.Patch + } + return "" +} + +func (x *HttpRule) GetCustom() *CustomHttpPattern { + if x, ok := x.GetPattern().(*HttpRule_Custom); ok { + return x.Custom + } + return nil +} + +func (x *HttpRule) GetBody() string { + if x != nil { + return x.Body + } + return "" +} + +func (x *HttpRule) GetResponseBody() string { + if x != nil { + return x.ResponseBody + } + return "" +} + +func (x *HttpRule) GetAdditionalBindings() []*HttpRule { + if x != nil { + return x.AdditionalBindings + } + return nil +} + +type isHttpRule_Pattern interface { + isHttpRule_Pattern() +} + +type HttpRule_Get struct { + // Maps to HTTP GET. Used for listing and getting information about + // resources. + Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` +} + +type HttpRule_Put struct { + // Maps to HTTP PUT. Used for replacing a resource. + Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` +} + +type HttpRule_Post struct { + // Maps to HTTP POST. Used for creating a resource or performing an action. + Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` +} + +type HttpRule_Delete struct { + // Maps to HTTP DELETE. Used for deleting a resource. + Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` +} + +type HttpRule_Patch struct { + // Maps to HTTP PATCH. Used for updating a resource. + Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` +} + +type HttpRule_Custom struct { + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. 
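+	//
+	// As an illustrative sketch only (this example is not part of the upstream
+	// proto; the method and path are invented), a hypothetical HEAD binding
+	// could be declared like so:
+	//
+	//	option (google.api.http) = {
+	//	  custom: {
+	//	    kind: "HEAD"
+	//	    path: "/v1/messages/{message_id}"
+	//	  }
+	//	};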
+	Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"`
+}
+
+func (*HttpRule_Get) isHttpRule_Pattern() {}
+
+func (*HttpRule_Put) isHttpRule_Pattern() {}
+
+func (*HttpRule_Post) isHttpRule_Pattern() {}
+
+func (*HttpRule_Delete) isHttpRule_Pattern() {}
+
+func (*HttpRule_Patch) isHttpRule_Pattern() {}
+
+func (*HttpRule_Custom) isHttpRule_Pattern() {}
+
+// A custom pattern is used for defining a custom HTTP verb.
+type CustomHttpPattern struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The name of this custom HTTP verb.
+	Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
+	// The path matched by this custom verb.
+	Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+}
+
+func (x *CustomHttpPattern) Reset() {
+	*x = CustomHttpPattern{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_http_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *CustomHttpPattern) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CustomHttpPattern) ProtoMessage() {}
+
+func (x *CustomHttpPattern) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_http_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use CustomHttpPattern.ProtoReflect.Descriptor instead.
+func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
+	return file_google_api_http_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *CustomHttpPattern) GetKind() string {
+	if x != nil {
+		return x.Kind
+	}
+	return ""
+}
+
+func (x *CustomHttpPattern) GetPath() string {
+	if x != nil {
+		return x.Path
+	}
+	return ""
+}
+
+var File_google_api_http_proto protoreflect.FileDescriptor
+
+var file_google_api_http_proto_rawDesc = []byte{
+	0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74,
+	0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x61, 0x70, 0x69, 0x22, 0x79, 0x0a, 0x04, 0x48, 0x74, 0x74, 0x70, 0x12, 0x2a, 0x0a, 0x05, 0x72,
+	0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65,
+	0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x79,
+	0x5f, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+	0x5f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
+	0x52, 0x1c, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73,
+	0x65, 0x72, 0x76, 0x65, 0x64, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xda,
+	0x02, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73,
+	0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73,
+	0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x03, 0x70,
+	0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12,
+	0x14, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18,
0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x04, 0x70, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x16, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, + 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x62, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x13, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x12, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, + 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x22, 0x3b, 0x0a, 0x11, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, + 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_http_proto_rawDescOnce sync.Once + file_google_api_http_proto_rawDescData = file_google_api_http_proto_rawDesc +) + +func file_google_api_http_proto_rawDescGZIP() []byte { + file_google_api_http_proto_rawDescOnce.Do(func() { + file_google_api_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_http_proto_rawDescData) + }) + return file_google_api_http_proto_rawDescData +} + +var file_google_api_http_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_google_api_http_proto_goTypes = []interface{}{ + (*Http)(nil), // 0: google.api.Http + (*HttpRule)(nil), // 1: google.api.HttpRule + (*CustomHttpPattern)(nil), // 2: google.api.CustomHttpPattern +} +var file_google_api_http_proto_depIdxs = []int32{ + 1, // 0: google.api.Http.rules:type_name -> google.api.HttpRule + 2, // 1: google.api.HttpRule.custom:type_name -> 
google.api.CustomHttpPattern + 1, // 2: google.api.HttpRule.additional_bindings:type_name -> google.api.HttpRule + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_google_api_http_proto_init() } +func file_google_api_http_proto_init() { + if File_google_api_http_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_http_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_http_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_http_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomHttpPattern); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_api_http_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*HttpRule_Get)(nil), + (*HttpRule_Put)(nil), + (*HttpRule_Post)(nil), + (*HttpRule_Delete)(nil), + (*HttpRule_Patch)(nil), + (*HttpRule_Custom)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_http_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_http_proto_goTypes, + DependencyIndexes: file_google_api_http_proto_depIdxs, + MessageInfos: file_google_api_http_proto_msgTypes, + }.Build() + File_google_api_http_proto = out.File + file_google_api_http_proto_rawDesc = nil + file_google_api_http_proto_goTypes = nil + file_google_api_http_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go new file mode 100644 index 0000000000..b5db279aeb --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -0,0 +1,660 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/resource.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A description of the historical or future-looking state of the +// resource pattern. +type ResourceDescriptor_History int32 + +const ( + // The "unset" value. + ResourceDescriptor_HISTORY_UNSPECIFIED ResourceDescriptor_History = 0 + // The resource originally had one pattern and launched as such, and + // additional patterns were added later. + ResourceDescriptor_ORIGINALLY_SINGLE_PATTERN ResourceDescriptor_History = 1 + // The resource has one pattern, but the API owner expects to add more + // later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents + // that from being necessary once there are multiple patterns.) + ResourceDescriptor_FUTURE_MULTI_PATTERN ResourceDescriptor_History = 2 +) + +// Enum value maps for ResourceDescriptor_History. +var ( + ResourceDescriptor_History_name = map[int32]string{ + 0: "HISTORY_UNSPECIFIED", + 1: "ORIGINALLY_SINGLE_PATTERN", + 2: "FUTURE_MULTI_PATTERN", + } + ResourceDescriptor_History_value = map[string]int32{ + "HISTORY_UNSPECIFIED": 0, + "ORIGINALLY_SINGLE_PATTERN": 1, + "FUTURE_MULTI_PATTERN": 2, + } +) + +func (x ResourceDescriptor_History) Enum() *ResourceDescriptor_History { + p := new(ResourceDescriptor_History) + *p = x + return p +} + +func (x ResourceDescriptor_History) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResourceDescriptor_History) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_resource_proto_enumTypes[0].Descriptor() +} + +func (ResourceDescriptor_History) Type() protoreflect.EnumType { + return &file_google_api_resource_proto_enumTypes[0] +} + +func (x ResourceDescriptor_History) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResourceDescriptor_History.Descriptor instead. +func (ResourceDescriptor_History) EnumDescriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{0, 0} +} + +// A flag representing a specific style that a resource claims to conform to. +type ResourceDescriptor_Style int32 + +const ( + // The unspecified value. Do not use. + ResourceDescriptor_STYLE_UNSPECIFIED ResourceDescriptor_Style = 0 + // This resource is intended to be "declarative-friendly". + // + // Declarative-friendly resources must be more strictly consistent, and + // setting this to true communicates to tools that this resource should + // adhere to declarative-friendly expectations. + // + // Note: This is used by the API linter (linter.aip.dev) to enable + // additional checks. + ResourceDescriptor_DECLARATIVE_FRIENDLY ResourceDescriptor_Style = 1 +) + +// Enum value maps for ResourceDescriptor_Style. 
+var ( + ResourceDescriptor_Style_name = map[int32]string{ + 0: "STYLE_UNSPECIFIED", + 1: "DECLARATIVE_FRIENDLY", + } + ResourceDescriptor_Style_value = map[string]int32{ + "STYLE_UNSPECIFIED": 0, + "DECLARATIVE_FRIENDLY": 1, + } +) + +func (x ResourceDescriptor_Style) Enum() *ResourceDescriptor_Style { + p := new(ResourceDescriptor_Style) + *p = x + return p +} + +func (x ResourceDescriptor_Style) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResourceDescriptor_Style) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_resource_proto_enumTypes[1].Descriptor() +} + +func (ResourceDescriptor_Style) Type() protoreflect.EnumType { + return &file_google_api_resource_proto_enumTypes[1] +} + +func (x ResourceDescriptor_Style) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResourceDescriptor_Style.Descriptor instead. +func (ResourceDescriptor_Style) EnumDescriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{0, 1} +} + +// A simple descriptor of a resource type. +// +// ResourceDescriptor annotates a resource message (either by means of a +// protobuf annotation or use in the service config), and associates the +// resource's schema, the resource type, and the pattern of the resource name. +// +// Example: +// +// message Topic { +// // Indicates this message defines a resource schema. +// // Declares the resource type in the format of {service}/{kind}. +// // For Kubernetes resources, the format is {api group}/{kind}. +// option (google.api.resource) = { +// type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" +// +// Sometimes, resources have multiple patterns, typically because they can +// live under multiple parents. +// +// Example: +// +// message LogEntry { +// option (google.api.resource) = { +// type: "logging.googleapis.com/LogEntry" +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: 'logging.googleapis.com/LogEntry' +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" +type ResourceDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource type. It must be in the format of + // {service_name}/{resource_type_kind}. The `resource_type_kind` must be + // singular and must not include version numbers. + // + // Example: `storage.googleapis.com/Bucket` + // + // The value of the resource_type_kind must follow the regular expression + // /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and + // should use PascalCase (UpperCamelCase). The maximum number of + // characters allowed for the `resource_type_kind` is 100. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Optional. The relative resource name pattern associated with this resource + // type. 
The DNS prefix of the full resource name shouldn't be specified here.
+	//
+	// The path pattern must follow the syntax, which aligns with HTTP binding
+	// syntax:
+	//
+	//	Template = Segment { "/" Segment } ;
+	//	Segment = LITERAL | Variable ;
+	//	Variable = "{" LITERAL "}" ;
+	//
+	// Examples:
+	//
+	//   - "projects/{project}/topics/{topic}"
+	//   - "projects/{project}/knowledgeBases/{knowledge_base}"
+	//
+	// The components in braces correspond to the IDs for each resource in the
+	// hierarchy. It is expected that, if multiple patterns are provided,
+	// the same component name (e.g. "project") refers to IDs of the same
+	// type of resource.
+	Pattern []string `protobuf:"bytes,2,rep,name=pattern,proto3" json:"pattern,omitempty"`
+	// Optional. The field on the resource that designates the resource name
+	// field. If omitted, this is assumed to be "name".
+	NameField string `protobuf:"bytes,3,opt,name=name_field,json=nameField,proto3" json:"name_field,omitempty"`
+	// Optional. The historical or future-looking state of the resource pattern.
+	//
+	// Example:
+	//
+	//	// The InspectTemplate message originally only supported resource
+	//	// names with organization, and project was added later.
+	//	message InspectTemplate {
+	//	  option (google.api.resource) = {
+	//	    type: "dlp.googleapis.com/InspectTemplate"
+	//	    pattern:
+	//	    "organizations/{organization}/inspectTemplates/{inspect_template}"
+	//	    pattern: "projects/{project}/inspectTemplates/{inspect_template}"
+	//	    history: ORIGINALLY_SINGLE_PATTERN
+	//	  };
+	//	}
+	History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"`
+	// The plural name used in the resource name and permission names, such as
+	// 'projects' for the resource name of 'projects/{project}' and the permission
+	// name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception
+	// to this is for Nested Collections that have stuttering names, as defined
+	// in [AIP-122](https://google.aip.dev/122#nested-collections), where the
+	// collection ID in the resource name pattern does not necessarily directly
+	// match the `plural` value.
+	//
+	// It is the same concept as the `plural` field in the k8s CRD spec
+	// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
+	//
+	// Note: The plural form is required even for singleton resources. See
+	// https://aip.dev/156
+	Plural string `protobuf:"bytes,5,opt,name=plural,proto3" json:"plural,omitempty"`
+	// The same concept as the `singular` field in the k8s CRD spec
+	// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
+	// Such as "project" for the `resourcemanager.googleapis.com/Project` type.
+	Singular string `protobuf:"bytes,6,opt,name=singular,proto3" json:"singular,omitempty"`
+	// Style flag(s) for this resource.
+	// These indicate that a resource is expected to conform to a given
+	// style. See the specific style flags for additional information.
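+	//
+	// For illustration only (an invented resource type, not defined by this
+	// proto), a declarative-friendly resource might be annotated as:
+	//
+	//	option (google.api.resource) = {
+	//	  type: "example.googleapis.com/Widget"
+	//	  pattern: "projects/{project}/widgets/{widget}"
+	//	  style: DECLARATIVE_FRIENDLY
+	//	};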
+ Style []ResourceDescriptor_Style `protobuf:"varint,10,rep,packed,name=style,proto3,enum=google.api.ResourceDescriptor_Style" json:"style,omitempty"` +} + +func (x *ResourceDescriptor) Reset() { + *x = ResourceDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceDescriptor) ProtoMessage() {} + +func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_api_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead. +func (*ResourceDescriptor) Descriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceDescriptor) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *ResourceDescriptor) GetPattern() []string { + if x != nil { + return x.Pattern + } + return nil +} + +func (x *ResourceDescriptor) GetNameField() string { + if x != nil { + return x.NameField + } + return "" +} + +func (x *ResourceDescriptor) GetHistory() ResourceDescriptor_History { + if x != nil { + return x.History + } + return ResourceDescriptor_HISTORY_UNSPECIFIED +} + +func (x *ResourceDescriptor) GetPlural() string { + if x != nil { + return x.Plural + } + return "" +} + +func (x *ResourceDescriptor) GetSingular() string { + if x != nil { + return x.Singular + } + return "" +} + +func (x *ResourceDescriptor) GetStyle() []ResourceDescriptor_Style { + if x != nil { + return x.Style + } + return nil +} + +// Defines a proto annotation that describes a string field that refers to +// an API resource. +type ResourceReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource type that the annotated field references. + // + // Example: + // + // message Subscription { + // string topic = 2 [(google.api.resource_reference) = { + // type: "pubsub.googleapis.com/Topic" + // }]; + // } + // + // Occasionally, a field may reference an arbitrary resource. In this case, + // APIs use the special value * in their resource reference. + // + // Example: + // + // message GetIamPolicyRequest { + // string resource = 2 [(google.api.resource_reference) = { + // type: "*" + // }]; + // } + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The resource type of a child collection that the annotated field + // references. This is useful for annotating the `parent` field that + // doesn't have a fixed resource type. 
+	//
+	// Example:
+	//
+	//	message ListLogEntriesRequest {
+	//	  string parent = 1 [(google.api.resource_reference) = {
+	//	    child_type: "logging.googleapis.com/LogEntry"
+	//	  }];
+	//	}
+	ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"`
+}
+
+func (x *ResourceReference) Reset() {
+	*x = ResourceReference{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_resource_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ResourceReference) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceReference) ProtoMessage() {}
+
+func (x *ResourceReference) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_resource_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceReference.ProtoReflect.Descriptor instead.
+func (*ResourceReference) Descriptor() ([]byte, []int) {
+	return file_google_api_resource_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ResourceReference) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *ResourceReference) GetChildType() string {
+	if x != nil {
+		return x.ChildType
+	}
+	return ""
+}
+
+var file_google_api_resource_proto_extTypes = []protoimpl.ExtensionInfo{
+	{
+		ExtendedType:  (*descriptorpb.FieldOptions)(nil),
+		ExtensionType: (*ResourceReference)(nil),
+		Field:         1055,
+		Name:          "google.api.resource_reference",
+		Tag:           "bytes,1055,opt,name=resource_reference",
+		Filename:      "google/api/resource.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.FileOptions)(nil),
+		ExtensionType: ([]*ResourceDescriptor)(nil),
+		Field:         1053,
+		Name:          "google.api.resource_definition",
+		Tag:           "bytes,1053,rep,name=resource_definition",
+		Filename:      "google/api/resource.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.MessageOptions)(nil),
+		ExtensionType: (*ResourceDescriptor)(nil),
+		Field:         1053,
+		Name:          "google.api.resource",
+		Tag:           "bytes,1053,opt,name=resource",
+		Filename:      "google/api/resource.proto",
+	},
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+	// An annotation that describes a resource reference, see
+	// [ResourceReference][].
+	//
+	// optional google.api.ResourceReference resource_reference = 1055;
+	E_ResourceReference = &file_google_api_resource_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+	// An annotation that describes a resource definition without a corresponding
+	// message; see [ResourceDescriptor][].
+	//
+	// repeated google.api.ResourceDescriptor resource_definition = 1053;
+	E_ResourceDefinition = &file_google_api_resource_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+	// An annotation that describes a resource definition, see
+	// [ResourceDescriptor][].
+ // + // optional google.api.ResourceDescriptor resource = 1053; + E_Resource = &file_google_api_resource_proto_extTypes[2] +) + +var File_google_api_resource_proto protoreflect.FileDescriptor + +var file_google_api_resource_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaa, 0x03, 0x0a, 0x12, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1d, + 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x40, 0x0a, + 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, + 0x6c, 0x61, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, + 0x6c, 0x61, 0x72, 0x12, 0x3a, 0x0a, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x52, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x22, + 0x5b, 0x0a, 0x07, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x13, 0x48, 0x49, + 0x53, 0x54, 0x4f, 0x52, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x4c, + 0x59, 0x5f, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, + 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x46, 0x55, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x4d, 0x55, 0x4c, + 0x54, 0x49, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, 0x10, 0x02, 0x22, 0x38, 0x0a, 0x05, + 0x53, 0x74, 0x79, 0x6c, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x59, 0x4c, 0x45, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, + 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x46, 0x52, 0x49, 0x45, + 0x4e, 0x44, 0x4c, 0x59, 0x10, 0x01, 0x22, 0x46, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 
0x74, 0x79, 0x70, 0x65, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x6c, + 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x9f, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x3a, 0x6e, 0x0a, 0x13, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x9d, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x5c, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_api_resource_proto_rawDescOnce sync.Once + file_google_api_resource_proto_rawDescData = file_google_api_resource_proto_rawDesc +) + +func file_google_api_resource_proto_rawDescGZIP() []byte { + file_google_api_resource_proto_rawDescOnce.Do(func() { + file_google_api_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_resource_proto_rawDescData) + }) + return file_google_api_resource_proto_rawDescData +} + +var file_google_api_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_api_resource_proto_goTypes = 
[]interface{}{ + (ResourceDescriptor_History)(0), // 0: google.api.ResourceDescriptor.History + (ResourceDescriptor_Style)(0), // 1: google.api.ResourceDescriptor.Style + (*ResourceDescriptor)(nil), // 2: google.api.ResourceDescriptor + (*ResourceReference)(nil), // 3: google.api.ResourceReference + (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions + (*descriptorpb.FileOptions)(nil), // 5: google.protobuf.FileOptions + (*descriptorpb.MessageOptions)(nil), // 6: google.protobuf.MessageOptions +} +var file_google_api_resource_proto_depIdxs = []int32{ + 0, // 0: google.api.ResourceDescriptor.history:type_name -> google.api.ResourceDescriptor.History + 1, // 1: google.api.ResourceDescriptor.style:type_name -> google.api.ResourceDescriptor.Style + 4, // 2: google.api.resource_reference:extendee -> google.protobuf.FieldOptions + 5, // 3: google.api.resource_definition:extendee -> google.protobuf.FileOptions + 6, // 4: google.api.resource:extendee -> google.protobuf.MessageOptions + 3, // 5: google.api.resource_reference:type_name -> google.api.ResourceReference + 2, // 6: google.api.resource_definition:type_name -> google.api.ResourceDescriptor + 2, // 7: google.api.resource:type_name -> google.api.ResourceDescriptor + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 5, // [5:8] is the sub-list for extension type_name + 2, // [2:5] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_google_api_resource_proto_init() } +func file_google_api_resource_proto_init() { + if File_google_api_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_resource_proto_rawDesc, + NumEnums: 2, + NumMessages: 2, + NumExtensions: 3, + NumServices: 0, + }, + GoTypes: file_google_api_resource_proto_goTypes, + DependencyIndexes: file_google_api_resource_proto_depIdxs, + EnumInfos: file_google_api_resource_proto_enumTypes, + MessageInfos: file_google_api_resource_proto_msgTypes, + ExtensionInfos: file_google_api_resource_proto_extTypes, + }.Build() + File_google_api_resource_proto = out.File + file_google_api_resource_proto_rawDesc = nil + file_google_api_resource_proto_goTypes = nil + file_google_api_resource_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go new file mode 100644 index 0000000000..1d8397b02b --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -0,0 +1,693 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v4.24.4
+// source: google/api/routing.proto
+
+package annotations
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Specifies the routing information that should be sent along with the request
+// in the form of routing header.
+// **NOTE:** All service configuration rules follow the "last one wins" order.
+//
+// The examples below will apply to an RPC which has the following request type:
+//
+// Message Definition:
+//
+//	message Request {
+//	  // The name of the Table
+//	  // Values can be of the following formats:
+//	  // - `projects/<project>/tables/<table>`
+//	  // - `projects/<project>/instances/<instance>/tables/<table>`
+//	  // - `region/<region>/zones/<zone>/tables/<table>`
+//	  string table_name = 1;
+//
+//	  // This value specifies routing for replication.
+//	  // It can be in the following formats:
+//	  // - `profiles/<profile_id>`
+//	  // - a legacy `profile_id` that can be any string
+//	  string app_profile_id = 2;
+//	}
+//
+// Example message:
+//
+//	{
+//	  table_name: projects/proj_foo/instances/instance_bar/table/table_baz,
+//	  app_profile_id: profiles/prof_qux
+//	}
+//
+// The routing header consists of one or more key-value pairs. Every key
+// and value must be percent-encoded, and joined together in the format of
+// `key1=value1&key2=value2`.
+// The examples below skip the percent-encoding for readability.
+//
+// # Example 1
+//
+// Extracting a field from the request to put into the routing header
+// unchanged, with the key equal to the field name.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take the `app_profile_id`.
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params: app_profile_id=profiles/prof_qux
+//
+// # Example 2
+//
+// Extracting a field from the request to put into the routing header
+// unchanged, with the key different from the field name.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take the `app_profile_id`, but name it `routing_id` in the header.
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	    path_template: "{routing_id=**}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params: routing_id=profiles/prof_qux
+//
+// # Example 3
+//
+// Extracting a field from the request to put into the routing
+// header, while matching a path template syntax on the field's value.
+//
+// NB: it is more useful to send nothing than to send garbage for the purpose
+// of dynamic routing, since garbage pollutes cache. Thus the matching.
+//
+// # Sub-example 3a
+//
+// The field matches the template.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take the `table_name`, if it's well-formed (with project-based
+//	  // syntax).
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{table_name=projects/*/instances/*/**}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params:
+//	table_name=projects/proj_foo/instances/instance_bar/table/table_baz
+//
+// # Sub-example 3b
+//
+// The field does not match the template.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take the `table_name`, if it's well-formed (with region-based
+//	  // syntax).
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{table_name=regions/*/zones/*/**}"
+//	  }
+//	};
+//
+// result:
+//
+//	(no routing header is sent)
+//
+// # Sub-example 3c
+//
+// Multiple alternative conflictingly named path templates are
+// specified. The one that matches is used to construct the header.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take the `table_name`, if it's well-formed, whether
+//	  // using the region- or projects-based syntax.
+//
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{table_name=regions/*/zones/*/**}"
+//	  }
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{table_name=projects/*/instances/*/**}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params:
+//	table_name=projects/proj_foo/instances/instance_bar/table/table_baz
+//
+// # Example 4
+//
+// Extracting a single routing header key-value pair by matching a
+// template syntax on (a part of) a single request field.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // Take just the project id from the `table_name` field.
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{routing_id=projects/*}/**"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params: routing_id=projects/proj_foo
+//
+// # Example 5
+//
+// Extracting a single routing header key-value pair by matching
+// several conflictingly named path templates on (parts of) a single request
+// field. The last template to match "wins" the conflict.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // If the `table_name` does not contain instance information,
+//	  // take just the project id for routing.
+//	  // Otherwise take project + instance.
+//
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{routing_id=projects/*}/**"
+//	  }
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{routing_id=projects/*/instances/*}/**"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params:
+//	routing_id=projects/proj_foo/instances/instance_bar
+//
+// # Example 6
+//
+// Extracting multiple routing header key-value pairs by matching
+// several non-conflicting path templates on (parts of) a single request field.
+//
+// # Sub-example 6a
+//
+// Make the templates strict, so that if the `table_name` does not
+// contain instance information, nothing is sent.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // The routing code needs two keys instead of one composite
+//	  // but works only for the tables with the "project-instance" name
+//	  // syntax.
+//
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{project_id=projects/*}/instances/*/**"
+//	  }
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "projects/*/{instance_id=instances/*}/**"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params:
+//	project_id=projects/proj_foo&instance_id=instances/instance_bar
+//
+// # Sub-example 6b
+//
+// Make the templates loose, so that if the `table_name` does not
+// contain instance information, just the project id part is sent.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // The routing code wants two keys instead of one composite
+//	  // but will work with just the `project_id` for tables without
+//	  // an instance in the `table_name`.
+//
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{project_id=projects/*}/**"
+//	  }
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "projects/*/{instance_id=instances/*}/**"
+//	  }
+//	};
+//
+// result (the same as 6a for our example message, because it contains the
+// instance information):
+//
+//	x-goog-request-params:
+//	project_id=projects/proj_foo&instance_id=instances/instance_bar
+//
+// # Example 7
+//
+// Extracting multiple routing header key-value pairs by matching
+// several path templates on multiple request fields.
+//
+// NB: note that here there is no way to specify sending nothing if one of the
+// fields does not match its template. E.g. if the `table_name` is in the wrong
+// format, the `project_id` will not be sent, but the `routing_id` will be.
+// The backend routing code has to be aware of that and be prepared to not
+// receive a full complement of keys if it expects multiple.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // The routing needs both `project_id` and `routing_id`
+//	  // (from the `app_profile_id` field) for routing.
+//
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{project_id=projects/*}/**"
+//	  }
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	    path_template: "{routing_id=**}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params:
+//	project_id=projects/proj_foo&routing_id=profiles/prof_qux
+//
+// # Example 8
+//
+// Extracting a single routing header key-value pair by matching
+// several conflictingly named path templates on several request fields. The
+// last template to match "wins" the conflict.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // The `routing_id` can be a project id or a region id depending on
+//	  // the table name format, but only if the `app_profile_id` is not set.
+//	  // If `app_profile_id` is set it should be used instead.
+//
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{routing_id=projects/*}/**"
+//	  }
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{routing_id=regions/*}/**"
+//	  }
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	    path_template: "{routing_id=**}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params: routing_id=profiles/prof_qux
+//
+// # Example 9
+//
+// Bringing it all together.
+//
+// annotation:
+//
+//	option (google.api.routing) = {
+//	  // For routing both `table_location` and a `routing_id` are needed.
+//	  //
+//	  // table_location can be either an instance id or a region+zone id.
+//	  //
+//	  // For `routing_id`, take the value of `app_profile_id`
+//	  // - If it's in the format `profiles/<profile_id>`, send
+//	  // just the `<profile_id>` part.
+//	  // - If it's any other literal, send it as is.
+//	  // If the `app_profile_id` is empty, and the `table_name` starts with
+//	  // the project_id, send that instead.
+//
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "projects/*/{table_location=instances/*}/tables/*"
+//	  }
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{table_location=regions/*/zones/*}/tables/*"
+//	  }
+//	  routing_parameters {
+//	    field: "table_name"
+//	    path_template: "{routing_id=projects/*}/**"
+//	  }
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	    path_template: "{routing_id=**}"
+//	  }
+//	  routing_parameters {
+//	    field: "app_profile_id"
+//	    path_template: "profiles/{routing_id=*}"
+//	  }
+//	};
+//
+// result:
+//
+//	x-goog-request-params:
+//	table_location=instances/instance_bar&routing_id=prof_qux
+type RoutingRule struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A collection of Routing Parameter specifications.
+	// **NOTE:** If multiple Routing Parameters describe the same key
+	// (via the `path_template` field or via the `field` field when
+	// `path_template` is not provided), the "last one wins" rule
+	// determines which Parameter gets used.
+	// See the examples for more details.
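+	//
+	// As an illustrative sketch (field names borrowed from the examples
+	// above), when both of the following parameters match and both describe
+	// the key `routing_id`, the second one wins:
+	//
+	//	routing_parameters { field: "table_name"     path_template: "{routing_id=projects/*}/**" }
+	//	routing_parameters { field: "app_profile_id" path_template: "{routing_id=**}" }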
+	RoutingParameters []*RoutingParameter `protobuf:"bytes,2,rep,name=routing_parameters,json=routingParameters,proto3" json:"routing_parameters,omitempty"`
+}
+
+func (x *RoutingRule) Reset() {
+	*x = RoutingRule{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_routing_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *RoutingRule) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RoutingRule) ProtoMessage() {}
+
+func (x *RoutingRule) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_routing_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RoutingRule.ProtoReflect.Descriptor instead.
+func (*RoutingRule) Descriptor() ([]byte, []int) {
+	return file_google_api_routing_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *RoutingRule) GetRoutingParameters() []*RoutingParameter {
+	if x != nil {
+		return x.RoutingParameters
+	}
+	return nil
+}
+
+// A projection from an input message to the GRPC or REST header.
+type RoutingParameter struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A request field to extract the header key-value pair from.
+	Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
+	// A pattern matching the key-value field. Optional.
+	// If not specified, the whole field specified in the `field` field will be
+	// taken as value, and its name used as key. If specified, it MUST contain
+	// exactly one named segment (along with any number of unnamed segments).
+	// The pattern will be matched over the field specified in the `field`
+	// field, then if the match is successful:
+	//   - the name of the single named segment will be used as a header name,
+	//   - the match value of the segment will be used as a header value;
+	//     if the match is NOT successful, nothing will be sent.
+	//
+	// Example:
+	//
+	//	          -- This is a field in the request message
+	//	         |   that the header value will be extracted from.
+	//	         |
+	//	         |                     -- This is the key name in the
+	//	         |                    |   routing header.
+	//	         V                    |
+	//	field: "table_name"           v
+	//	path_template: "projects/*/{table_location=instances/*}/tables/*"
+	//	                               ^            ^
+	//	                               |            |
+	//	  In the {} brackets is the pattern that -- |
+	//	  specifies what to extract from the        |
+	//	  field as a value to be sent.              |
+	//	                                            |
+	//	 The string in the field must match the whole pattern --
+	//	 before brackets, inside brackets, after brackets.
+	//
+	// When looking at this specific example, we can see that:
+	//   - A key-value pair with the key `table_location`
+	//     and the value matching `instances/*` should be added
+	//     to the x-goog-request-params routing header.
+	//   - The value is extracted from the request message's `table_name` field
+	//     if it matches the full pattern specified:
+	//     `projects/*/instances/*/tables/*`.
+	//
+	// **NB:** If the `path_template` field is not provided, the key name is
+	// equal to the field name, and the whole field should be sent as a value.
+	// This makes the pattern for the field and the value functionally equivalent
+	// to `**`, and the configuration
+	//
+	//	{
+	//	  field: "table_name"
+	//	}
+	//
+	// is a functionally equivalent shorthand to:
+	//
+	//	{
+	//	  field: "table_name"
+	//	  path_template: "{table_name=**}"
+	//	}
+	//
+	// See Example 1 for more details.
+	PathTemplate string `protobuf:"bytes,2,opt,name=path_template,json=pathTemplate,proto3" json:"path_template,omitempty"`
+}
+
+func (x *RoutingParameter) Reset() {
+	*x = RoutingParameter{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_routing_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *RoutingParameter) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RoutingParameter) ProtoMessage() {}
+
+func (x *RoutingParameter) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_routing_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RoutingParameter.ProtoReflect.Descriptor instead.
+func (*RoutingParameter) Descriptor() ([]byte, []int) {
+	return file_google_api_routing_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *RoutingParameter) GetField() string {
+	if x != nil {
+		return x.Field
+	}
+	return ""
+}
+
+func (x *RoutingParameter) GetPathTemplate() string {
+	if x != nil {
+		return x.PathTemplate
+	}
+	return ""
+}
+
+var file_google_api_routing_proto_extTypes = []protoimpl.ExtensionInfo{
+	{
+		ExtendedType:  (*descriptorpb.MethodOptions)(nil),
+		ExtensionType: (*RoutingRule)(nil),
+		Field:         72295729,
+		Name:          "google.api.routing",
+		Tag:           "bytes,72295729,opt,name=routing",
+		Filename:      "google/api/routing.proto",
+	},
+}
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+	// See RoutingRule.
+ // + // optional google.api.RoutingRule routing = 72295729; + E_Routing = &file_google_api_routing_proto_extTypes[0] +) + +var File_google_api_routing_proto protoreflect.FileDescriptor + +var file_google_api_routing_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5a, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x52, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x22, 0x4d, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x23, + 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x3a, 0x54, 0x0a, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb1, + 0xca, 0xbc, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x52, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0c, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, + 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_routing_proto_rawDescOnce sync.Once + file_google_api_routing_proto_rawDescData = file_google_api_routing_proto_rawDesc +) + +func file_google_api_routing_proto_rawDescGZIP() []byte { + file_google_api_routing_proto_rawDescOnce.Do(func() { + file_google_api_routing_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_routing_proto_rawDescData) + }) + return file_google_api_routing_proto_rawDescData +} + +var file_google_api_routing_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_api_routing_proto_goTypes = []interface{}{ + 
(*RoutingRule)(nil), // 0: google.api.RoutingRule + (*RoutingParameter)(nil), // 1: google.api.RoutingParameter + (*descriptorpb.MethodOptions)(nil), // 2: google.protobuf.MethodOptions +} +var file_google_api_routing_proto_depIdxs = []int32{ + 1, // 0: google.api.RoutingRule.routing_parameters:type_name -> google.api.RoutingParameter + 2, // 1: google.api.routing:extendee -> google.protobuf.MethodOptions + 0, // 2: google.api.routing:type_name -> google.api.RoutingRule + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_routing_proto_init() } +func file_google_api_routing_proto_init() { + if File_google_api_routing_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_routing_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutingRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_routing_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutingParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_routing_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_routing_proto_goTypes, + DependencyIndexes: file_google_api_routing_proto_depIdxs, + MessageInfos: file_google_api_routing_proto_msgTypes, + ExtensionInfos: file_google_api_routing_proto_extTypes, + }.Build() + File_google_api_routing_proto = out.File + file_google_api_routing_proto_rawDesc = nil + file_google_api_routing_proto_goTypes = nil + file_google_api_routing_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go new file mode 100644 index 0000000000..498020e33c --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -0,0 +1,203 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/launch_stage.proto + +package api + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The launch stage as defined by [Google Cloud Platform
+// Launch Stages](https://cloud.google.com/terms/launch-stages).
+type LaunchStage int32
+
+const (
+	// Do not use this default value.
+	LaunchStage_LAUNCH_STAGE_UNSPECIFIED LaunchStage = 0
+	// The feature is not yet implemented. Users can not use it.
+	LaunchStage_UNIMPLEMENTED LaunchStage = 6
+	// Prelaunch features are hidden from users and are only visible internally.
+	LaunchStage_PRELAUNCH LaunchStage = 7
+	// Early Access features are limited to a closed group of testers. To use
+	// these features, you must sign up in advance and sign a Trusted Tester
+	// agreement (which includes confidentiality provisions). These features may
+	// be unstable, changed in backward-incompatible ways, and are not
+	// guaranteed to be released.
+	LaunchStage_EARLY_ACCESS LaunchStage = 1
+	// Alpha is a limited availability test for releases before they are cleared
+	// for widespread use. By Alpha, all significant design issues are resolved
+	// and we are in the process of verifying functionality. Alpha customers
+	// need to apply for access, agree to applicable terms, and have their
+	// projects allowlisted. Alpha releases don't have to be feature complete,
+	// no SLAs are provided, and there are no technical support obligations, but
+	// they will be far enough along that customers can actually use them in
+	// test environments or for limited-use tests -- just like they would in
+	// normal production cases.
+	LaunchStage_ALPHA LaunchStage = 2
+	// Beta is the point at which we are ready to open a release for any
+	// customer to use. There are no SLA or technical support obligations in a
+	// Beta release. Products will be complete from a feature perspective, but
+	// may have some open outstanding issues. Beta releases are suitable for
+	// limited production use cases.
+	LaunchStage_BETA LaunchStage = 3
+	// GA features are open to all developers and are considered stable and
+	// fully qualified for production use.
+	LaunchStage_GA LaunchStage = 4
+	// Deprecated features are scheduled to be shut down and removed. For more
+	// information, see the "Deprecation Policy" section of our [Terms of
+	// Service](https://cloud.google.com/terms/)
+	// and the [Google Cloud Platform Subject to the Deprecation
+	// Policy](https://cloud.google.com/terms/deprecation) documentation.
+	LaunchStage_DEPRECATED LaunchStage = 5
+)
+
+// Enum value maps for LaunchStage.
+var ( + LaunchStage_name = map[int32]string{ + 0: "LAUNCH_STAGE_UNSPECIFIED", + 6: "UNIMPLEMENTED", + 7: "PRELAUNCH", + 1: "EARLY_ACCESS", + 2: "ALPHA", + 3: "BETA", + 4: "GA", + 5: "DEPRECATED", + } + LaunchStage_value = map[string]int32{ + "LAUNCH_STAGE_UNSPECIFIED": 0, + "UNIMPLEMENTED": 6, + "PRELAUNCH": 7, + "EARLY_ACCESS": 1, + "ALPHA": 2, + "BETA": 3, + "GA": 4, + "DEPRECATED": 5, + } +) + +func (x LaunchStage) Enum() *LaunchStage { + p := new(LaunchStage) + *p = x + return p +} + +func (x LaunchStage) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LaunchStage) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_launch_stage_proto_enumTypes[0].Descriptor() +} + +func (LaunchStage) Type() protoreflect.EnumType { + return &file_google_api_launch_stage_proto_enumTypes[0] +} + +func (x LaunchStage) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LaunchStage.Descriptor instead. +func (LaunchStage) EnumDescriptor() ([]byte, []int) { + return file_google_api_launch_stage_proto_rawDescGZIP(), []int{0} +} + +var File_google_api_launch_stage_proto protoreflect.FileDescriptor + +var file_google_api_launch_stage_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, + 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2a, 0x8c, 0x01, 0x0a, 0x0b, + 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x4c, + 0x41, 0x55, 0x4e, 0x43, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, + 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, + 0x50, 0x52, 0x45, 0x4c, 0x41, 0x55, 0x4e, 0x43, 0x48, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x45, + 0x41, 0x52, 0x4c, 0x59, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x09, 0x0a, + 0x05, 0x41, 0x4c, 0x50, 0x48, 0x41, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x45, 0x54, 0x41, + 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x41, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, + 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x05, 0x42, 0x5a, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x10, 0x4c, 0x61, + 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x61, 0x70, 0x69, 0xa2, + 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_launch_stage_proto_rawDescOnce sync.Once + file_google_api_launch_stage_proto_rawDescData = file_google_api_launch_stage_proto_rawDesc +) + +func file_google_api_launch_stage_proto_rawDescGZIP() []byte { + file_google_api_launch_stage_proto_rawDescOnce.Do(func() { + file_google_api_launch_stage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_launch_stage_proto_rawDescData) + }) + return file_google_api_launch_stage_proto_rawDescData +} + +var 
file_google_api_launch_stage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_launch_stage_proto_goTypes = []interface{}{ + (LaunchStage)(0), // 0: google.api.LaunchStage +} +var file_google_api_launch_stage_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_launch_stage_proto_init() } +func file_google_api_launch_stage_proto_init() { + if File_google_api_launch_stage_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_launch_stage_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_launch_stage_proto_goTypes, + DependencyIndexes: file_google_api_launch_stage_proto_depIdxs, + EnumInfos: file_google_api_launch_stage_proto_enumTypes, + }.Build() + File_google_api_launch_stage_proto = out.File + file_google_api_launch_stage_proto_rawDesc = nil + file_google_api_launch_stage_proto_goTypes = nil + file_google_api_launch_stage_proto_depIdxs = nil +} diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go index 187eb5d8c5..559aebb59a 100644 --- a/vendor/k8s.io/utils/trace/trace.go +++ b/vendor/k8s.io/utils/trace/trace.go @@ -192,7 +192,7 @@ func (t *Trace) Log() { t.endTime = &endTime t.lock.Unlock() // an explicit logging request should dump all the steps out at the higher level - if t.parentTrace == nil { // We don't start logging until Log or LogIfLong is called on the root trace + if t.parentTrace == nil && klogV(2) { // We don't start logging until Log or LogIfLong is called on the root trace t.logTrace() } } diff --git a/vendor/modules.txt b/vendor/modules.txt index a318d07b7a..e583da7908 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,11 +1,35 @@ -# cloud.google.com/go/compute/metadata v0.3.0 -## explicit; go 1.19 -cloud.google.com/go/compute/metadata -# cuelabs.dev/go/oci/ociregistry v0.0.0-20231103182354-93e78c079a13 +# cloud.google.com/go/auth v0.9.3 +## explicit; go 1.21 +cloud.google.com/go/auth +cloud.google.com/go/auth/credentials +cloud.google.com/go/auth/credentials/idtoken +cloud.google.com/go/auth/credentials/impersonate +cloud.google.com/go/auth/credentials/internal/externalaccount +cloud.google.com/go/auth/credentials/internal/externalaccountuser +cloud.google.com/go/auth/credentials/internal/gdch +cloud.google.com/go/auth/credentials/internal/impersonate +cloud.google.com/go/auth/credentials/internal/stsexchange +cloud.google.com/go/auth/httptransport +cloud.google.com/go/auth/internal +cloud.google.com/go/auth/internal/credsfile +cloud.google.com/go/auth/internal/jwt +cloud.google.com/go/auth/internal/transport +cloud.google.com/go/auth/internal/transport/cert +# cloud.google.com/go/auth/oauth2adapt v0.2.4 ## explicit; go 1.20 -cuelabs.dev/go/oci/ociregistry -# cuelang.org/go v0.7.0 +cloud.google.com/go/auth/oauth2adapt +# cloud.google.com/go/compute/metadata v0.5.0 ## explicit; go 1.20 +cloud.google.com/go/compute/metadata +# cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2 +## explicit; go 1.21 +cuelabs.dev/go/oci/ociregistry +cuelabs.dev/go/oci/ociregistry/internal/ocirequest +cuelabs.dev/go/oci/ociregistry/ociauth 
+cuelabs.dev/go/oci/ociregistry/ociclient +cuelabs.dev/go/oci/ociregistry/ociref +# cuelang.org/go v0.9.2 +## explicit; go 1.21 cuelang.org/go/cue cuelang.org/go/cue/ast cuelang.org/go/cue/ast/astutil @@ -19,7 +43,6 @@ cuelang.org/go/cue/parser cuelang.org/go/cue/scanner cuelang.org/go/cue/stats cuelang.org/go/cue/token -cuelang.org/go/encoding/gocode/gocodec cuelang.org/go/encoding/json cuelang.org/go/encoding/jsonschema cuelang.org/go/encoding/openapi @@ -41,24 +64,37 @@ cuelang.org/go/internal/core/runtime cuelang.org/go/internal/core/subsume cuelang.org/go/internal/core/validate cuelang.org/go/internal/core/walk +cuelang.org/go/internal/cueconfig +cuelang.org/go/internal/cuedebug +cuelang.org/go/internal/cueexperiment +cuelang.org/go/internal/cueimports +cuelang.org/go/internal/cueversion cuelang.org/go/internal/encoding cuelang.org/go/internal/encoding/json cuelang.org/go/internal/encoding/yaml +cuelang.org/go/internal/envflag cuelang.org/go/internal/filetypes -cuelang.org/go/internal/mod/internal/par -cuelang.org/go/internal/mod/modfile -cuelang.org/go/internal/mod/modregistry -cuelang.org/go/internal/mod/module -cuelang.org/go/internal/mod/modzip +cuelang.org/go/internal/golangorgx/tools/robustio +cuelang.org/go/internal/mod/modimports +cuelang.org/go/internal/mod/modload +cuelang.org/go/internal/mod/modpkgload +cuelang.org/go/internal/mod/modrequirements +cuelang.org/go/internal/mod/modresolve cuelang.org/go/internal/mod/mvs cuelang.org/go/internal/mod/semver +cuelang.org/go/internal/par cuelang.org/go/internal/pkg -cuelang.org/go/internal/slices cuelang.org/go/internal/source cuelang.org/go/internal/task cuelang.org/go/internal/third_party/yaml cuelang.org/go/internal/types cuelang.org/go/internal/value +cuelang.org/go/mod/modcache +cuelang.org/go/mod/modconfig +cuelang.org/go/mod/modfile +cuelang.org/go/mod/modregistry +cuelang.org/go/mod/module +cuelang.org/go/mod/modzip cuelang.org/go/pkg cuelang.org/go/pkg/crypto/ed25519 cuelang.org/go/pkg/crypto/hmac @@ -95,9 +131,9 @@ cuelang.org/go/pkg/uuid ## explicit; go 1.20 filippo.io/edwards25519 filippo.io/edwards25519/field -# github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 -## explicit; go 1.17 -github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper +# github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 +## explicit; go 1.16 +github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider # github.com/Azure/azure-sdk-for-go v68.0.0+incompatible ## explicit github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry @@ -201,7 +237,7 @@ github.com/alibabacloud-go/tea-utils/service # github.com/alibabacloud-go/tea-xml v1.1.3 ## explicit; go 1.14 github.com/alibabacloud-go/tea-xml/service -# github.com/aliyun/credentials-go v1.3.1 +# github.com/aliyun/credentials-go v1.3.2 ## explicit; go 1.14 github.com/aliyun/credentials-go/credentials github.com/aliyun/credentials-go/credentials/request @@ -213,8 +249,8 @@ github.com/aquasecurity/libbpfgo # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go-v2 v1.26.0 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2 v1.30.5 +## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/defaults github.com/aws/aws-sdk-go-v2/aws/middleware @@ -229,8 +265,10 @@ github.com/aws/aws-sdk-go-v2/aws/signer/v4 
github.com/aws/aws-sdk-go-v2/aws/transport/http github.com/aws/aws-sdk-go-v2/internal/auth github.com/aws/aws-sdk-go-v2/internal/auth/smithy +github.com/aws/aws-sdk-go-v2/internal/context github.com/aws/aws-sdk-go-v2/internal/endpoints github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn +github.com/aws/aws-sdk-go-v2/internal/middleware github.com/aws/aws-sdk-go-v2/internal/rand github.com/aws/aws-sdk-go-v2/internal/sdk github.com/aws/aws-sdk-go-v2/internal/sdkio @@ -238,11 +276,11 @@ github.com/aws/aws-sdk-go-v2/internal/shareddefaults github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/config v1.27.9 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/config v1.27.33 +## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.17.9 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/credentials v1.17.32 +## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds github.com/aws/aws-sdk-go-v2/credentials/endpointcreds @@ -250,18 +288,18 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client github.com/aws/aws-sdk-go-v2/credentials/processcreds github.com/aws/aws-sdk-go-v2/credentials/ssocreds github.com/aws/aws-sdk-go-v2/credentials/stscreds -# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 +## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 +## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 +## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -# github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 +## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/internal/ini # github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 ## explicit; go 1.15 @@ -273,29 +311,29 @@ github.com/aws/aws-sdk-go-v2/service/ecr/types github.com/aws/aws-sdk-go-v2/service/ecrpublic github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ecrpublic/types -# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 +## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 +## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/sso v1.20.3 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 +## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sso/types -# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 +## 
explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -# github.com/aws/aws-sdk-go-v2/service/sts v1.28.5 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 +## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sts/types -# github.com/aws/smithy-go v1.20.1 -## explicit; go 1.20 +# github.com/aws/smithy-go v1.20.4 +## explicit; go 1.21 github.com/aws/smithy-go github.com/aws/smithy-go/auth github.com/aws/smithy-go/auth/bearer @@ -333,18 +371,23 @@ github.com/blang/semver # github.com/blang/semver/v4 v4.0.0 ## explicit; go 1.14 github.com/blang/semver/v4 -# github.com/buildkite/agent/v3 v3.62.0 -## explicit; go 1.21 +# github.com/buildkite/agent/v3 v3.81.0 +## explicit; go 1.22.6 github.com/buildkite/agent/v3/api github.com/buildkite/agent/v3/logger github.com/buildkite/agent/v3/version -# github.com/buildkite/go-pipeline v0.3.2 -## explicit; go 1.21 +# github.com/buildkite/go-pipeline v0.13.1 +## explicit; go 1.22.6 github.com/buildkite/go-pipeline +github.com/buildkite/go-pipeline/internal/env github.com/buildkite/go-pipeline/ordered -# github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251 -## explicit +github.com/buildkite/go-pipeline/warning +# github.com/buildkite/interpolate v0.1.3 +## explicit; go 1.22 github.com/buildkite/interpolate +# github.com/buildkite/roko v1.2.0 +## explicit; go 1.18 +github.com/buildkite/roko # github.com/cert-manager/cert-manager v1.14.5 ## explicit; go 1.21 github.com/cert-manager/cert-manager/pkg/apis/acme @@ -391,7 +434,7 @@ github.com/containerd/stargz-snapshotter/estargz/errorutil # github.com/containers/common v0.60.2 ## explicit; go 1.21.0 github.com/containers/common/pkg/seccomp -# github.com/coreos/go-oidc/v3 v3.10.0 +# github.com/coreos/go-oidc/v3 v3.11.0 ## explicit; go 1.21 github.com/coreos/go-oidc/v3/oidc # github.com/coreos/go-systemd/v22 v22.5.0 @@ -431,6 +474,9 @@ github.com/docker/distribution/registry/client/auth/challenge ## explicit; go 1.19 github.com/docker/docker-credential-helpers/client github.com/docker/docker-credential-helpers/credentials +# github.com/dustin/go-humanize v1.0.1 +## explicit; go 1.16 +github.com/dustin/go-humanize # github.com/emicklei/go-restful/v3 v3.11.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 @@ -468,11 +514,12 @@ github.com/go-jose/go-jose/v3/cipher github.com/go-jose/go-jose/v3/cryptosigner github.com/go-jose/go-jose/v3/json github.com/go-jose/go-jose/v3/jwt -# github.com/go-jose/go-jose/v4 v4.0.2 +# github.com/go-jose/go-jose/v4 v4.0.4 ## explicit; go 1.21 github.com/go-jose/go-jose/v4 github.com/go-jose/go-jose/v4/cipher github.com/go-jose/go-jose/v4/json +github.com/go-jose/go-jose/v4/jwt # github.com/go-logr/logr v1.4.2 ## explicit; go 1.18 github.com/go-logr/logr @@ -561,11 +608,16 @@ github.com/golang/protobuf/proto # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy -# github.com/google/certificate-transparency-go v1.1.8 -## explicit; go 1.21 +# github.com/google/certificate-transparency-go v1.2.1 +## explicit; go 1.21.0 github.com/google/certificate-transparency-go github.com/google/certificate-transparency-go/asn1 +github.com/google/certificate-transparency-go/client +github.com/google/certificate-transparency-go/client/configpb +github.com/google/certificate-transparency-go/ctutil 
github.com/google/certificate-transparency-go/gossip/minimal/x509ext +github.com/google/certificate-transparency-go/jsonclient +github.com/google/certificate-transparency-go/loglist3 github.com/google/certificate-transparency-go/tls github.com/google/certificate-transparency-go/x509 github.com/google/certificate-transparency-go/x509/pkix @@ -623,8 +675,8 @@ github.com/google/go-querystring/query ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# github.com/google/s2a-go v0.1.7 -## explicit; go 1.19 +# github.com/google/s2a-go v0.1.8 +## explicit; go 1.20 github.com/google/s2a-go github.com/google/s2a-go/fallback github.com/google/s2a-go/internal/authinfo @@ -649,7 +701,7 @@ github.com/google/s2a-go/stream # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.3.2 +# github.com/googleapis/enterprise-certificate-proxy v0.3.3 ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util @@ -677,6 +729,9 @@ github.com/hashicorp/hcl/json/token # github.com/imdario/mergo v0.3.16 ## explicit; go 1.13 github.com/imdario/mergo +# github.com/in-toto/attestation v1.1.0 +## explicit; go 1.20 +github.com/in-toto/attestation/go/v1 # github.com/in-toto/in-toto-golang v0.9.0 ## explicit; go 1.20 github.com/in-toto/in-toto-golang/in_toto @@ -711,8 +766,8 @@ github.com/klauspost/compress/internal/cpuinfo github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash -# github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 -## explicit; go 1.21 +# github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec +## explicit; go 1.22.0 github.com/letsencrypt/boulder/core github.com/letsencrypt/boulder/goodkey github.com/letsencrypt/boulder/identifier @@ -770,14 +825,11 @@ github.com/modern-go/reflect2 # github.com/mogensen/kubernetes-split-yaml v0.4.0 ## explicit; go 1.19 github.com/mogensen/kubernetes-split-yaml -# github.com/mozillazg/docker-credential-acr-helper v0.3.0 -## explicit; go 1.17 +# github.com/mozillazg/docker-credential-acr-helper v0.4.0 +## explicit; go 1.18 github.com/mozillazg/docker-credential-acr-helper/pkg/acr github.com/mozillazg/docker-credential-acr-helper/pkg/credhelper github.com/mozillazg/docker-credential-acr-helper/pkg/version -# github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de -## explicit -github.com/mpvl/unique # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg @@ -794,11 +846,11 @@ github.com/nxadm/tail/winfile # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid -# github.com/oleiade/reflections v1.0.1 -## explicit; go 1.15 +# github.com/oleiade/reflections v1.1.0 +## explicit; go 1.22.6 github.com/oleiade/reflections -# github.com/open-policy-agent/opa v0.61.0 -## explicit; go 1.19 +# github.com/open-policy-agent/opa v0.68.0 +## explicit; go 1.21 github.com/open-policy-agent/opa/ast github.com/open-policy-agent/opa/ast/internal/scanner github.com/open-policy-agent/opa/ast/internal/tokens @@ -819,7 +871,6 @@ github.com/open-policy-agent/opa/internal/debug github.com/open-policy-agent/opa/internal/deepcopy github.com/open-policy-agent/opa/internal/edittree github.com/open-policy-agent/opa/internal/edittree/bitvector -github.com/open-policy-agent/opa/internal/errors github.com/open-policy-agent/opa/internal/file/archive 
github.com/open-policy-agent/opa/internal/file/url github.com/open-policy-agent/opa/internal/future @@ -886,6 +937,7 @@ github.com/open-policy-agent/opa/topdown/print github.com/open-policy-agent/opa/tracing github.com/open-policy-agent/opa/types github.com/open-policy-agent/opa/util +github.com/open-policy-agent/opa/util/decoding github.com/open-policy-agent/opa/version # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 @@ -920,7 +972,7 @@ github.com/opentracing/opentracing-go/log # github.com/pborman/uuid v1.2.1 ## explicit github.com/pborman/uuid -# github.com/pelletier/go-toml/v2 v2.1.0 +# github.com/pelletier/go-toml/v2 v2.2.2 ## explicit; go 1.16 github.com/pelletier/go-toml/v2 github.com/pelletier/go-toml/v2/internal/characters @@ -969,6 +1021,13 @@ github.com/protocolbuffers/txtpbfmt/unquote # github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 ## explicit github.com/rcrowley/go-metrics +# github.com/rogpeppe/go-internal v1.12.0 +## explicit; go 1.20 +github.com/rogpeppe/go-internal/internal/syscall/windows +github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll +github.com/rogpeppe/go-internal/lockedfile +github.com/rogpeppe/go-internal/lockedfile/internal/filelock +github.com/rogpeppe/go-internal/robustio # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 @@ -997,8 +1056,8 @@ github.com/segmentio/ksuid # github.com/shibumi/go-pathspec v1.3.0 ## explicit; go 1.17 github.com/shibumi/go-pathspec -# github.com/sigstore/cosign/v2 v2.2.3 -## explicit; go 1.21 +# github.com/sigstore/cosign/v2 v2.4.1 +## explicit; go 1.22.7 github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio/fulcioverifier github.com/sigstore/cosign/v2/cmd/cosign/cli/generate @@ -1012,9 +1071,11 @@ github.com/sigstore/cosign/v2/internal/pkg/cosign github.com/sigstore/cosign/v2/internal/pkg/cosign/fulcio github.com/sigstore/cosign/v2/internal/pkg/cosign/fulcio/fulcioroots github.com/sigstore/cosign/v2/internal/pkg/cosign/payload +github.com/sigstore/cosign/v2/internal/pkg/cosign/payload/size github.com/sigstore/cosign/v2/internal/pkg/cosign/rekor github.com/sigstore/cosign/v2/internal/pkg/cosign/tsa github.com/sigstore/cosign/v2/internal/pkg/cosign/tsa/client +github.com/sigstore/cosign/v2/internal/pkg/now github.com/sigstore/cosign/v2/internal/pkg/oci/remote github.com/sigstore/cosign/v2/internal/ui github.com/sigstore/cosign/v2/pkg/blob @@ -1052,9 +1113,16 @@ github.com/sigstore/cosign/v2/pkg/providers/google github.com/sigstore/cosign/v2/pkg/providers/spiffe github.com/sigstore/cosign/v2/pkg/signature github.com/sigstore/cosign/v2/pkg/types -# github.com/sigstore/fulcio v1.4.5 -## explicit; go 1.21 +# github.com/sigstore/fulcio v1.6.3 +## explicit; go 1.22.5 github.com/sigstore/fulcio/pkg/api +# github.com/sigstore/protobuf-specs v0.3.2 +## explicit; go 1.18 +github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1 +github.com/sigstore/protobuf-specs/gen/pb-go/common/v1 +github.com/sigstore/protobuf-specs/gen/pb-go/dsse +github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1 +github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1 # github.com/sigstore/rekor v1.3.6 ## explicit; go 1.21 github.com/sigstore/rekor/pkg/client @@ -1073,6 +1141,7 @@ github.com/sigstore/rekor/pkg/pki/pkcs7 github.com/sigstore/rekor/pkg/pki/ssh github.com/sigstore/rekor/pkg/pki/tuf github.com/sigstore/rekor/pkg/pki/x509 +github.com/sigstore/rekor/pkg/tle github.com/sigstore/rekor/pkg/types 
github.com/sigstore/rekor/pkg/types/dsse github.com/sigstore/rekor/pkg/types/dsse/v0.0.1 @@ -1084,8 +1153,9 @@ github.com/sigstore/rekor/pkg/types/intoto/v0.0.2 github.com/sigstore/rekor/pkg/types/rekord github.com/sigstore/rekor/pkg/types/rekord/v0.0.1 github.com/sigstore/rekor/pkg/util -# github.com/sigstore/sigstore v1.8.4 -## explicit; go 1.21 +github.com/sigstore/rekor/pkg/verify +# github.com/sigstore/sigstore v1.8.9 +## explicit; go 1.22.5 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/fulcioroots github.com/sigstore/sigstore/pkg/oauth @@ -1096,7 +1166,16 @@ github.com/sigstore/sigstore/pkg/signature/kms github.com/sigstore/sigstore/pkg/signature/options github.com/sigstore/sigstore/pkg/signature/payload github.com/sigstore/sigstore/pkg/tuf -# github.com/sigstore/timestamp-authority v1.2.1 +# github.com/sigstore/sigstore-go v0.6.1 +## explicit; go 1.22.5 +github.com/sigstore/sigstore-go/pkg/bundle +github.com/sigstore/sigstore-go/pkg/fulcio/certificate +github.com/sigstore/sigstore-go/pkg/root +github.com/sigstore/sigstore-go/pkg/tlog +github.com/sigstore/sigstore-go/pkg/tuf +github.com/sigstore/sigstore-go/pkg/util +github.com/sigstore/sigstore-go/pkg/verify +# github.com/sigstore/timestamp-authority v1.2.2 ## explicit; go 1.21 github.com/sigstore/timestamp-authority/pkg/verification # github.com/sirupsen/logrus v1.9.3 @@ -1125,8 +1204,8 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/spf13/viper v1.18.2 -## explicit; go 1.18 +# github.com/spf13/viper v1.19.0 +## explicit; go 1.20 github.com/spf13/viper github.com/spf13/viper/internal/encoding github.com/spf13/viper/internal/encoding/dotenv @@ -1137,8 +1216,8 @@ github.com/spf13/viper/internal/encoding/json github.com/spf13/viper/internal/encoding/toml github.com/spf13/viper/internal/encoding/yaml github.com/spf13/viper/internal/features -# github.com/spiffe/go-spiffe/v2 v2.1.7 -## explicit; go 1.19 +# github.com/spiffe/go-spiffe/v2 v2.3.0 +## explicit; go 1.21 github.com/spiffe/go-spiffe/v2/bundle/jwtbundle github.com/spiffe/go-spiffe/v2/bundle/spiffebundle github.com/spiffe/go-spiffe/v2/bundle/x509bundle @@ -1195,6 +1274,13 @@ github.com/theupdateframework/go-tuf/pkg/targets github.com/theupdateframework/go-tuf/sign github.com/theupdateframework/go-tuf/util github.com/theupdateframework/go-tuf/verify +# github.com/theupdateframework/go-tuf/v2 v2.0.1 +## explicit; go 1.21 +github.com/theupdateframework/go-tuf/v2/metadata +github.com/theupdateframework/go-tuf/v2/metadata/config +github.com/theupdateframework/go-tuf/v2/metadata/fetcher +github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata +github.com/theupdateframework/go-tuf/v2/metadata/updater # github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 ## explicit github.com/titanous/rocacheck @@ -1213,7 +1299,7 @@ github.com/urfave/cli/v2 # github.com/vbatts/tar-split v0.11.5 ## explicit; go 1.17 github.com/vbatts/tar-split/archive/tar -# github.com/xanzy/go-gitlab v0.96.0 +# github.com/xanzy/go-gitlab v0.109.0 ## explicit; go 1.19 github.com/xanzy/go-gitlab # github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb @@ -1258,12 +1344,14 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 -## explicit; go 1.20 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 +## explicit; go 1.21 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.24.0 -## explicit; go 1.20 +# go.opentelemetry.io/otel v1.29.0 +## explicit; go 1.21 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage @@ -1277,30 +1365,32 @@ go.opentelemetry.io/otel/semconv/internal/v2 go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.17.0/httpconv go.opentelemetry.io/otel/semconv/v1.20.0 -go.opentelemetry.io/otel/semconv/v1.24.0 -# go.opentelemetry.io/otel/metric v1.24.0 -## explicit; go 1.20 +go.opentelemetry.io/otel/semconv/v1.26.0 +# go.opentelemetry.io/otel/metric v1.29.0 +## explicit; go 1.21 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded -# go.opentelemetry.io/otel/sdk v1.24.0 -## explicit; go 1.20 +go.opentelemetry.io/otel/metric/noop +# go.opentelemetry.io/otel/sdk v1.29.0 +## explicit; go 1.21 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation -go.opentelemetry.io/otel/sdk/internal go.opentelemetry.io/otel/sdk/internal/env +go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace -# go.opentelemetry.io/otel/trace v1.24.0 -## explicit; go 1.20 +# go.opentelemetry.io/otel/trace v1.29.0 +## explicit; go 1.21 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/noop -# go.step.sm/crypto v0.44.2 -## explicit; go 1.20 +# go.step.sm/crypto v0.51.2 +## explicit; go 1.22 go.step.sm/crypto/fingerprint go.step.sm/crypto/internal/bcrypt_pbkdf go.step.sm/crypto/internal/emoji go.step.sm/crypto/internal/utils +go.step.sm/crypto/internal/utils/utfbom go.step.sm/crypto/jose go.step.sm/crypto/keyutil go.step.sm/crypto/pemutil @@ -1371,6 +1461,7 @@ golang.org/x/mod/sumdb/note ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/context +golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack @@ -1379,7 +1470,7 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.21.0 +# golang.org/x/oauth2 v0.23.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -1424,7 +1515,7 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.5.0 +# golang.org/x/time v0.6.0 ## explicit; go 1.18 golang.org/x/time/rate # golang.org/x/tools v0.24.0 @@ -1454,8 +1545,8 @@ golang.org/x/tools/internal/versions # gomodules.xyz/jsonpatch/v2 v2.4.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/api v0.172.0 -## explicit; go 1.19 +# google.golang.org/api v0.196.0 +## explicit; go 1.21 google.golang.org/api/googleapi/transport google.golang.org/api/idtoken google.golang.org/api/impersonate @@ -1466,8 +1557,12 @@ google.golang.org/api/option google.golang.org/api/option/internaloption google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 -## explicit; go 1.20 +# google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed +## explicit; go 
1.21 +google.golang.org/genproto/googleapis/api +google.golang.org/genproto/googleapis/api/annotations +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 +## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/status # google.golang.org/grpc v1.66.2 ## explicit; go 1.21 @@ -1881,7 +1976,7 @@ k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/utils v0.0.0-20240310230437-4693a0247e57 +# k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -1992,7 +2087,7 @@ sigs.k8s.io/gateway-api/apis/v1 ## explicit; go 1.18 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json -# sigs.k8s.io/release-utils v0.8.1 +# sigs.k8s.io/release-utils v0.8.4 ## explicit; go 1.21 sigs.k8s.io/release-utils/command sigs.k8s.io/release-utils/env diff --git a/vendor/sigs.k8s.io/release-utils/command/command.go b/vendor/sigs.k8s.io/release-utils/command/command.go index bfb85d8d94..a645de6d6c 100644 --- a/vendor/sigs.k8s.io/release-utils/command/command.go +++ b/vendor/sigs.k8s.io/release-utils/command/command.go @@ -30,7 +30,7 @@ import ( "github.com/sirupsen/logrus" ) -// A generic command abstraction +// A generic command abstraction. type Command struct { cmds []*command stdErrWriters, stdOutWriters []io.Writer @@ -39,7 +39,7 @@ type Command struct { filter *filter } -// The internal command representation +// The internal command representation. type command struct { *exec.Cmd pipeWriter *io.PipeWriter @@ -51,19 +51,19 @@ type filter struct { replaceAll string } -// A generic command exit status +// A generic command exit status. type Status struct { waitStatus syscall.WaitStatus *Stream } -// Stream combines standard output and error +// Stream combines standard output and error. type Stream struct { //nolint: errname stdOut string stdErr string } -// Commands is an abstraction over multiple Command structures +// Commands is an abstraction over multiple Command structures. type Commands []*Command // New creates a new command from the provided arguments. @@ -91,7 +91,7 @@ func cmdWithDir(dir, cmd string, args ...string) *exec.Cmd { return c } -// Pipe creates a new command where the previous should be piped to +// Pipe creates a new command where the previous should be piped to. func (c *Command) Pipe(cmd string, args ...string) *Command { pipeCmd := cmdWithDir(c.cmds[0].Dir, cmd, args...) @@ -121,7 +121,7 @@ func (c *Command) Verbose() *Command { } // isVerbose returns true if the command is in verbose mode, either set locally -// or global +// or global. func (c *Command) isVerbose() bool { return GetGlobalVerbose() || c.verbose } @@ -174,7 +174,7 @@ func (c *Command) Filter(regex, replaceAll string) (*Command, error) { // Run starts the command and waits for it to finish. It returns an error if // the command execution was not possible at all, otherwise the Status. -// This method prints the commands output during execution +// This method prints the commands output during execution. func (c *Command) Run() (res *Status, err error) { return c.run(true) } @@ -199,7 +199,7 @@ func (c *Command) RunSuccess() error { return err } -// String returns a string representation of the full command +// String returns a string representation of the full command. 
func (c *Command) String() string { str := []string{} for _, x := range c.cmds { @@ -246,7 +246,7 @@ func (c *Command) RunSilentSuccess() error { return err } -// run is the internal run method +// run is the internal run method. func (c *Command) run(printOutput bool) (res *Status, err error) { var runErr error stdOutBuffer := &bytes.Buffer{} @@ -371,17 +371,17 @@ func (c *Command) run(printOutput bool) (res *Status, err error) { return status, runErr } -// Success returns if a Status was successful +// Success returns if a Status was successful. func (s *Status) Success() bool { return s.waitStatus.ExitStatus() == 0 } -// ExitCode returns the exit status of the command status +// ExitCode returns the exit status of the command status. func (s *Status) ExitCode() int { return s.waitStatus.ExitStatus() } -// Output returns stdout of the command status +// Output returns stdout of the command status. func (s *Stream) Output() string { return s.stdOut } @@ -392,7 +392,7 @@ func (s *Stream) OutputTrimNL() string { return strings.TrimSpace(s.stdOut) } -// Error returns the stderr of the command status +// Error returns the stderr of the command status. func (s *Stream) Error() string { return s.stdErr } diff --git a/vendor/sigs.k8s.io/release-utils/command/global.go b/vendor/sigs.k8s.io/release-utils/command/global.go index 687e707399..7ae559ffc8 100644 --- a/vendor/sigs.k8s.io/release-utils/command/global.go +++ b/vendor/sigs.k8s.io/release-utils/command/global.go @@ -24,7 +24,7 @@ import ( // level. It should never be used directly to avoid data races. var atomicInt int32 -// SetGlobalVerbose sets the global command verbosity to the specified value +// SetGlobalVerbose sets the global command verbosity to the specified value. func SetGlobalVerbose(to bool) { var i int32 if to { @@ -33,7 +33,7 @@ func SetGlobalVerbose(to bool) { atomic.StoreInt32(&atomicInt, i) } -// GetGlobalVerbose returns the globally set command verbosity +// GetGlobalVerbose returns the globally set command verbosity. func GetGlobalVerbose() bool { return atomic.LoadInt32(&atomicInt) != 0 } diff --git a/vendor/sigs.k8s.io/release-utils/util/common.go b/vendor/sigs.k8s.io/release-utils/util/common.go index 29b8646da0..7a860e794f 100644 --- a/vendor/sigs.k8s.io/release-utils/util/common.go +++ b/vendor/sigs.k8s.io/release-utils/util/common.go @@ -45,23 +45,23 @@ var ( regexpGitToken = regexp.MustCompile(`git:[a-f0-9]{35,40}@github\.com`) ) -// UserInputError a custom error to handle more user input info +// UserInputError a custom error to handle more user input info. type UserInputError struct { ErrorString string isCtrlC bool } -// Error return the error string +// Error return the error string. func (e UserInputError) Error() string { return e.ErrorString } -// IsCtrlC return true if the user has hit Ctrl+C +// IsCtrlC return true if the user has hit Ctrl+C. func (e UserInputError) IsCtrlC() bool { return e.isCtrlC } -// NewUserInputError creates a new UserInputError +// NewUserInputError creates a new UserInputError. func NewUserInputError(message string, ctrlC bool) UserInputError { return UserInputError{ ErrorString: message, @@ -488,7 +488,20 @@ func Exists(path string) bool { return true } -// WrapText wraps a text +// IsDir returns true if the path is a directory. +func IsDir(path string) bool { + info, err := os.Stat(path) + if err != nil { + return false + } + + if info.IsDir() { + return true + } + return false +} + +// WrapText wraps a text. 
func WrapText(originalText string, lineSize int) (wrappedText string) { words := strings.Fields(strings.TrimSpace(originalText)) wrappedText = words[0] @@ -507,7 +520,7 @@ func WrapText(originalText string, lineSize int) (wrappedText string) { } // StripControlCharacters takes a slice of bytes and removes control -// characters and bare line feeds (ported from the original bash anago) +// characters and bare line feeds (ported from the original bash anago). func StripControlCharacters(logData []byte) []byte { return regexpCRLF.ReplaceAllLiteral( regexpCtrlChar.ReplaceAllLiteral(logData, []byte{}), []byte{}, @@ -515,7 +528,7 @@ func StripControlCharacters(logData []byte) []byte { } // StripSensitiveData removes data deemed sensitive or non public -// from a byte slice (ported from the original bash anago) +// from a byte slice (ported from the original bash anago). func StripSensitiveData(logData []byte) []byte { // Remove OAuth tokens logData = regexpOauthToken.ReplaceAllLiteral(logData, []byte("__SANITIZED__:x-oauth-basic")) @@ -524,7 +537,7 @@ func StripSensitiveData(logData []byte) []byte { return logData } -// CleanLogFile cleans control characters and sensitive data from a file +// CleanLogFile cleans control characters and sensitive data from a file. func CleanLogFile(logPath string) (err error) { logrus.Debugf("Sanitizing logfile %s", logPath) diff --git a/vendor/sigs.k8s.io/release-utils/version/command.go b/vendor/sigs.k8s.io/release-utils/version/command.go index d916e93b69..d4d6bfe28e 100644 --- a/vendor/sigs.k8s.io/release-utils/version/command.go +++ b/vendor/sigs.k8s.io/release-utils/version/command.go @@ -27,7 +27,7 @@ import ( // // rootCmd.AddCommand(version.Version()) // -// ``` +// ```. func Version() *cobra.Command { return version("") } @@ -37,7 +37,7 @@ func Version() *cobra.Command { // // rootCmd.AddCommand(version.WithFont("starwars")) // -// ``` +// ```. func WithFont(fontName string) *cobra.Command { return version(fontName) } diff --git a/vendor/sigs.k8s.io/release-utils/version/version.go b/vendor/sigs.k8s.io/release-utils/version/version.go index 86de4f154e..73a3cac2fa 100644 --- a/vendor/sigs.k8s.io/release-utils/version/version.go +++ b/vendor/sigs.k8s.io/release-utils/version/version.go @@ -40,13 +40,13 @@ var ( // Output of "git describe". The prerequisite is that the // branch should be tagged using the correct versioning strategy. gitVersion = "devel" - // SHA1 from git, output of $(git rev-parse HEAD) + // SHA1 from git, output of $(git rev-parse HEAD). gitCommit = unknown - // State of git tree, either "clean" or "dirty" + // State of git tree, either "clean" or "dirty". gitTreeState = unknown - // Build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') + // Build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ'). buildDate = unknown - // flag to print the ascii name banner + // flag to print the ascii name banner. asciiName = "true" // goVersion is the used golang version. goVersion = unknown @@ -175,7 +175,7 @@ func GetVersionInfo() Info { return info } -// String returns the string representation of the version info +// String returns the string representation of the version info. func (i *Info) String() string { b := strings.Builder{} w := tabwriter.NewWriter(&b, 0, 0, 2, ' ', 0) @@ -205,7 +205,7 @@ func (i *Info) String() string { return b.String() } -// JSONString returns the JSON representation of the version info +// JSONString returns the JSON representation of the version info. 
func (i *Info) JSONString() (string, error) { b, err := json.MarshalIndent(i, "", " ") if err != nil {