diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index af24728f87b5..c16691ca580e 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -512,46 +512,23 @@ stages: artifactName: windows.release condition: always() - - job: clang_cl - timeoutInMinutes: 120 - pool: - vmImage: "windows-latest" - steps: - - task: Cache@2 - inputs: - key: '"windows.release" | ./WORKSPACE | **/*.bzl' - path: $(Build.StagingDirectory)/repository_cache - continueOnError: true - - bash: ci/run_envoy_docker.sh ci/windows_ci_steps.sh - displayName: "Run Windows clang-cl CI" - env: - CI_TARGET: "windows" - ENVOY_DOCKER_BUILD_DIR: "$(Build.StagingDirectory)" - SLACK_TOKEN: $(SLACK_TOKEN) - REPO_URI: $(Build.Repository.Uri) - BUILD_URI: $(Build.BuildUri) - ENVOY_RBE: "true" - BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote-clang-cl --jobs=$(RbeJobs) --flaky_test_attempts=2" - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - - task: PublishTestResults@2 - inputs: - testResultsFiles: "**/bazel-out/**/testlogs/**/test.xml" - testRunTitle: "clang-cl" - searchFolder: $(Build.StagingDirectory)/tmp - condition: always() - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: "$(Build.StagingDirectory)/envoy" - artifactName: windows.clang-cl - condition: always() - - job: docker + strategy: + matrix: + windows2019: + imageName: 'windows-latest' + windowsBuildType: "windows" + windowsImageBase: "mcr.microsoft.com/windows/servercore" + windowsImageTag: "ltsc2019" + windows2022: + imageName: 'windows-2022' + windowsBuildType: "windows-ltsc2022" + windowsImageBase: "mcr.microsoft.com/windows/nanoserver" + windowsImageTag: "ltsc2022" dependsOn: ["release"] timeoutInMinutes: 120 pool: - vmImage: "windows-latest" + vmImage: $(imageName) steps: - task: DownloadBuildArtifacts@0 inputs: @@ -572,6 +549,9 @@ stages: AZP_SHA1: $(Build.SourceVersion) DOCKERHUB_USERNAME: $(DockerUsername) DOCKERHUB_PASSWORD: $(DockerPassword) + WINDOWS_BUILD_TYPE: $(windowsBuildType) + WINDOWS_IMAGE_BASE: $(windowsImageBase) + WINDOWS_IMAGE_TAG: $(windowsImageTag) - task: PublishBuildArtifacts@1 inputs: pathtoPublish: "$(Build.StagingDirectory)/build_images" diff --git a/.bazelrc b/.bazelrc index 0028a084e27f..8685001b9b2c 100644 --- a/.bazelrc +++ b/.bazelrc @@ -47,6 +47,7 @@ build:sanitizer --test_tag_filters=-no_san # Common flags for Clang build:clang --action_env=BAZEL_COMPILER=clang +build:clang --action_env=CC=clang --action_env=CXX=clang++ build:clang --linkopt=-fuse-ld=lld # Flags for Clang + PCH @@ -79,11 +80,13 @@ build:clang-asan --linkopt -fuse-ld=lld build:clang-asan --linkopt --rtlib=compiler-rt build:clang-asan --linkopt --unwindlib=libgcc -# macOS ASAN/UBSAN +# macOS build:macos --cxxopt=-std=c++17 build:macos --action_env=PATH=/usr/bin:/bin:/opt/homebrew/bin:/usr/local/bin:/opt/local/bin build:macos --host_action_env=PATH=/usr/bin:/bin:/opt/homebrew/bin:/usr/local/bin:/opt/local/bin +build:macos --define tcmalloc=disabled +# macOS ASAN/UBSAN build:macos-asan --config=asan # Workaround, see https://github.com/bazelbuild/bazel/issues/6932 build:macos-asan --copt -Wno-macro-redefined @@ -158,7 +161,7 @@ build:coverage --strategy=CoverageReport=sandboxed,local build:coverage --experimental_use_llvm_covmap build:coverage --collect_code_coverage build:coverage --test_tag_filters=-nocoverage -build:coverage 
--instrumentation_filter="//source(?!/common/chromium_url|/common/quic/platform)[/:],//include[/:]" +build:coverage --instrumentation_filter="//source(?!/common/quic/platform)[/:],//include[/:]" build:test-coverage --test_arg="-l trace" build:fuzz-coverage --config=plain-fuzzer build:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh @@ -189,7 +192,7 @@ build:rbe-toolchain-asan --linkopt -fuse-ld=lld build:rbe-toolchain-asan --action_env=ENVOY_UBSAN_VPTR=1 build:rbe-toolchain-asan --copt=-fsanitize=vptr,function build:rbe-toolchain-asan --linkopt=-fsanitize=vptr,function -build:rbe-toolchain-asan --linkopt=-L/opt/llvm/lib/clang/11.0.1/lib/linux +build:rbe-toolchain-asan --linkopt=-L/opt/llvm/lib/clang/12.0.1/lib/linux build:rbe-toolchain-asan --linkopt=-l:libclang_rt.ubsan_standalone-x86_64.a build:rbe-toolchain-asan --linkopt=-l:libclang_rt.ubsan_standalone_cxx-x86_64.a @@ -265,7 +268,7 @@ build:remote-clang-cl --config=rbe-toolchain-clang-cl # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/main/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:8ca107a75ee98b255aa59db2ab40fd0800a3ce99 +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:81a93046060dbe5620d5b3aa92632090a9ee4da6 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker diff --git a/.bazelversion b/.bazelversion index ee74734aa225..fae6e3d04b2c 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -4.1.0 +4.2.1 diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 5f9ca2079ec8..8da867149806 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,4 @@ -FROM gcr.io/envoy-ci/envoy-build:8ca107a75ee98b255aa59db2ab40fd0800a3ce99 +FROM gcr.io/envoy-ci/envoy-build:81a93046060dbe5620d5b3aa92632090a9ee4da6 ARG USERNAME=vscode ARG USER_UID=501 diff --git a/.github/actions/pr_notifier/pr_notifier.py b/.github/actions/pr_notifier/pr_notifier.py index de2aba2f6c2b..98fea4fafd82 100644 --- a/.github/actions/pr_notifier/pr_notifier.py +++ b/.github/actions/pr_notifier/pr_notifier.py @@ -30,6 +30,7 @@ 'asraa': 'UKZKCFRTP', 'davinci26': 'U013608CUDV', 'rojkov': 'UH5EXLYQK', + 'RyanTheOptimist': 'U01SW3JC8GP', } # First pass reviewers who are not maintainers should get @@ -43,7 +44,6 @@ 'KBaichoo': 'U016ZPU8KBK', 'wbpcode': 'U017KF5C0Q6', 'mathetake': 'UG9TD2FSB', - 'RyanTheOptimist': 'U01SW3JC8GP', } # Only notify API reviewers who aren't maintainers. 
diff --git a/.github/actions/pr_notifier/requirements.txt b/.github/actions/pr_notifier/requirements.txt index 79b8d5a17e85..fb21f429db9f 100644 --- a/.github/actions/pr_notifier/requirements.txt +++ b/.github/actions/pr_notifier/requirements.txt @@ -111,9 +111,9 @@ six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 # via pynacl -slack_sdk==3.11.0 \ - --hash=sha256:4d9854ee158c3137cfe1ba587dc4b777b6881aee58436d8071f36bad842acbf4 \ - --hash=sha256:8dc858cd106b639191ee3dc38fb957e55ab8dd28c2cc22feafa1223ab2def646 +slack_sdk==3.11.2 \ + --hash=sha256:131bf605894525c2d66da064677eabc19f53f02ce0f82a3f2fa130d4ec3bc1b0 \ + --hash=sha256:35245ec34c8549fbb5c43ccc17101afd725b3508bb784da46530b214f496bf93 # via -r requirements.in urllib3==1.26.6 \ --hash=sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4 \ diff --git a/.github/workflows/check-deps.yml b/.github/workflows/check-deps.yml new file mode 100644 index 000000000000..9cdc3508284b --- /dev/null +++ b/.github/workflows/check-deps.yml @@ -0,0 +1,36 @@ +name: Check for latest_release of deps + +on : + schedule : + - cron : '0 8 * * *' + + workflow_dispatch : + +jobs : + build : + runs-on : ubuntu-latest + if: github.repository_owner == 'envoyproxy' + + steps : + - name : checkout + uses : actions/checkout/@v2 + with : + ref : ${{ github.head_ref }} + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install virtualenv + + - name: setting up virtualenv + run : | + export GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }} + # --create_issues flag to create issue only in github action + # and not interfere with the CI + ./tools/dependency/release_dates.sh ./bazel/repository_locations.bzl --create_issues + ./tools/dependency/release_dates.sh ./api/bazel/repository_locations.bzl --create_issues diff --git a/.github/workflows/pr_notifier.yml b/.github/workflows/pr_notifier.yml index 1ccc9695e3dd..88759f9f1414 100644 --- a/.github/workflows/pr_notifier.yml +++ b/.github/workflows/pr_notifier.yml @@ -7,6 +7,7 @@ jobs: pr_notifier: name: PR Notifier runs-on: ubuntu-latest + if: github.repository_owner == 'envoyproxy' steps: - uses: actions/checkout@v2 diff --git a/CODEOWNERS b/CODEOWNERS index 7e2d7dda5390..32cf634d3f16 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -40,6 +40,8 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/filters/http/header_to_metadata @rgs1 @zuercher # alts transport socket extension /*/extensions/transport_sockets/alts @antoniovicente @asraa @yangminzhu +# tcp_stats transport socket extension +/*/extensions/transport_sockets/tcp_stats @ggreenway @mattklein123 # tls transport socket extension /*/extensions/transport_sockets/tls @lizan @asraa @ggreenway # tls SPIFFE certificate validator extension @@ -192,6 +194,9 @@ extensions/filters/http/oauth2 @rgs1 @derekargueta @snowp /*/extensions/matching/input_matchers/ip @aguinet @snowp # Key Value store /*/extensions/key_value @alyssawilk @ryantheoptimist +# DNS Resolver +/*/extensions/network/dns_resolver/cares @junr03 @yanavlasov +/*/extensions/network/dns_resolver/apple @junr03 @yanavlasov # Contrib /contrib/exe/ @mattklein123 @lizan @@ -204,3 +209,4 @@ extensions/filters/http/oauth2 @rgs1 @derekargueta @snowp /contrib/sxg/ @cpapazian @rgs1 @alyssawilk 
/contrib/sip_proxy/ @durd07 @nearbyfly @dorisd0102 /contrib/cryptomb/ @rojkov @ipuustin +/contrib/vcl/ @florincoras @rojkov diff --git a/EXTENSION_POLICY.md b/EXTENSION_POLICY.md index 7ef47bcd6cf1..2efa3f6ddf9c 100644 --- a/EXTENSION_POLICY.md +++ b/EXTENSION_POLICY.md @@ -92,8 +92,8 @@ The `status` is one of: The extension status may be adjusted by the extension [CODEOWNERS](./CODEOWNERS) and/or Envoy maintainers based on an assessment of the above criteria. Note that the status of the extension reflects the implementation status. It is orthogonal to the API stability, for example, an extension -with configuration `envoy.foo.v3alpha.Bar` might have a `stable` implementation and -`envoy.foo.v3.Baz` can have a `wip` implementation. +API marked with `(xds.annotations.v3.file_status).work_in_progress` might have a `stable` implementation and +an extension with a stable config proto can have a `wip` implementation. The `security_posture` is one of: * `robust_to_untrusted_downstream`: The extension is hardened against untrusted downstream traffic. It diff --git a/GOVERNANCE.md b/GOVERNANCE.md index afe7242b20cb..53659efb6734 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -98,7 +98,8 @@ or you can subscribe to the iCal feed [here](webcal://kubernetes.app.opsgenie.co * From the envoy [landing page](https://github.com/envoyproxy/envoy) use the branch drop-down to create a branch from the tagged release, e.g. "release/v1.6". It will be used for the [stable releases](RELEASES.md#stable-releases). -* Monitor the AZP tag build to make sure that the final docker images get pushed along with +* Tagging will kick off another run of [AZP postsubmit](https://dev.azure.com/cncf/envoy/_build?definitionId=11). Monitor that + tag build to make sure that the final docker images get pushed along with the final docs. The final documentation will end up in the [envoyproxy.github.io repository](https://github.com/envoyproxy/envoyproxy.github.io/tree/main/docs/envoy). * Update the website ([example PR](https://github.com/envoyproxy/envoyproxy.github.io/pull/148)) for the new release. diff --git a/OWNERS.md b/OWNERS.md index f4b600c7d29d..66fb84068b2c 100644 --- a/OWNERS.md +++ b/OWNERS.md @@ -47,6 +47,8 @@ routing PRs, questions, etc. to the right place. * Windows, low level networking. * Dmitry Rozhkov ([rojkov](https://github.com/rojkov)) (dmitry.rozhkov@intel.com) * Scalability and performance. +* Ryan Hamilton ([RyanTheOptimist](https://github.com/ryantheoptimist)) (rch@google.com) + * HTTP/3, upstream connection management. # Senior extension maintainers @@ -63,7 +65,6 @@ without further review.
* All senior maintainers * Tony Allen ([tonya11en](https://github.com/tonya11en)) (tony@allen.gg) -* Dmitri Dolguikh ([dmitri-d](https://github.com/dmitri-d)) (ddolguik@redhat.com) * Yan Avlasov ([yanavlasov](https://github.com/yanavlasov)) (yavlasov@google.com) * William A Rowe Jr ([wrowe](https://github.com/wrowe)) (wrowe@vmware.com) diff --git a/README.md b/README.md index 210048e28753..f7c222723fcb 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ involved and how Envoy plays a role, read the CNCF * [Official documentation](https://www.envoyproxy.io/) * [FAQ](https://www.envoyproxy.io/docs/envoy/latest/faq/overview) -* [Unofficial Chinese documentation](https://www.servicemesher.com/envoy/) +* [Unofficial Chinese documentation](https://cloudnative.to/envoy/) * Watch [a video overview of Envoy](https://www.youtube.com/watch?v=RVZX4CwKhGE) ([transcript](https://www.microservices.com/talks/lyfts-envoy-monolith-service-mesh-matt-klein/)) to find out more about the origin story and design philosophy of Envoy diff --git a/RELEASES.md b/RELEASES.md index c72eeb63805a..1619b0d22d72 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -72,6 +72,7 @@ deadline of 3 weeks. | 1.17.0 | 2020/12/31 | 2021/01/11 | +11 days | 2022/01/11 | | 1.18.0 | 2021/03/31 | 2021/04/15 | +15 days | 2022/04/15 | | 1.19.0 | 2021/06/30 | 2021/07/13 | +13 days | 2022/07/13 | -| 1.20.0 | 2021/09/30 | | | | +| 1.20.0 | 2021/09/30 | 2021/10/05 | +5 days | 2022/10/13 | +| 1.21.0 | 2021/12/30 | | | | [repokitteh]: https://github.com/repokitteh diff --git a/VERSION b/VERSION index 734375f897d0..c6ba48dc6375 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.20.0-dev +1.21.0-dev diff --git a/api/BUILD b/api/BUILD index 341cc48a2214..f814a6d06e58 100644 --- a/api/BUILD +++ b/api/BUILD @@ -67,6 +67,7 @@ proto_library( "//contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha:pkg", "//contrib/envoy/extensions/filters/network/sip_proxy/v3alpha:pkg", "//contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha:pkg", + "//contrib/envoy/extensions/vcl/v3alpha:pkg", "//envoy/admin/v3:pkg", "//envoy/config/accesslog/v3:pkg", "//envoy/config/bootstrap/v3:pkg", @@ -97,10 +98,10 @@ proto_library( "//envoy/data/tap/v3:pkg", "//envoy/extensions/access_loggers/file/v3:pkg", "//envoy/extensions/access_loggers/grpc/v3:pkg", - "//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg", + "//envoy/extensions/access_loggers/open_telemetry/v3:pkg", "//envoy/extensions/access_loggers/stream/v3:pkg", "//envoy/extensions/access_loggers/wasm/v3:pkg", - "//envoy/extensions/cache/simple_http_cache/v3alpha:pkg", + "//envoy/extensions/cache/simple_http_cache/v3:pkg", "//envoy/extensions/clusters/aggregate/v3:pkg", "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/clusters/redis/v3:pkg", @@ -116,14 +117,14 @@ proto_library( "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/common/matcher/action/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", - "//envoy/extensions/filters/http/admission_control/v3alpha:pkg", + "//envoy/extensions/filters/http/admission_control/v3:pkg", "//envoy/extensions/filters/http/alternate_protocols_cache/v3:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", - "//envoy/extensions/filters/http/bandwidth_limit/v3alpha:pkg", + "//envoy/extensions/filters/http/bandwidth_limit/v3:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", - 
"//envoy/extensions/filters/http/cache/v3alpha:pkg", - "//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg", + "//envoy/extensions/filters/http/cache/v3:pkg", + "//envoy/extensions/filters/http/cdn_loop/v3:pkg", "//envoy/extensions/filters/http/composite/v3:pkg", "//envoy/extensions/filters/http/compressor/v3:pkg", "//envoy/extensions/filters/http/cors/v3:pkg", @@ -132,7 +133,7 @@ proto_library( "//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/filters/http/dynamo/v3:pkg", "//envoy/extensions/filters/http/ext_authz/v3:pkg", - "//envoy/extensions/filters/http/ext_proc/v3alpha:pkg", + "//envoy/extensions/filters/http/ext_proc/v3:pkg", "//envoy/extensions/filters/http/fault/v3:pkg", "//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg", "//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg", @@ -147,7 +148,7 @@ proto_library( "//envoy/extensions/filters/http/kill_request/v3:pkg", "//envoy/extensions/filters/http/local_ratelimit/v3:pkg", "//envoy/extensions/filters/http/lua/v3:pkg", - "//envoy/extensions/filters/http/oauth2/v3alpha:pkg", + "//envoy/extensions/filters/http/oauth2/v3:pkg", "//envoy/extensions/filters/http/on_demand/v3:pkg", "//envoy/extensions/filters/http/original_src/v3:pkg", "//envoy/extensions/filters/http/ratelimit/v3:pkg", @@ -175,14 +176,14 @@ proto_library( "//envoy/extensions/filters/network/rbac/v3:pkg", "//envoy/extensions/filters/network/redis_proxy/v3:pkg", "//envoy/extensions/filters/network/sni_cluster/v3:pkg", - "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", + "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3:pkg", "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/router/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", "//envoy/extensions/filters/network/wasm/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", - "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", + "//envoy/extensions/filters/udp/dns_filter/v3:pkg", "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", "//envoy/extensions/formatter/metadata/v3:pkg", "//envoy/extensions/formatter/req_without_query/v3:pkg", @@ -197,6 +198,8 @@ proto_library( "//envoy/extensions/matching/common_inputs/environment_variable/v3:pkg", "//envoy/extensions/matching/input_matchers/consistent_hashing/v3:pkg", "//envoy/extensions/matching/input_matchers/ip/v3:pkg", + "//envoy/extensions/network/dns_resolver/apple/v3:pkg", + "//envoy/extensions/network/dns_resolver/cares/v3:pkg", "//envoy/extensions/network/socket_interface/v3:pkg", "//envoy/extensions/quic/crypto_stream/v3:pkg", "//envoy/extensions/quic/proof_source/v3:pkg", @@ -215,9 +218,10 @@ proto_library( "//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg", "//envoy/extensions/transport_sockets/quic/v3:pkg", "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", - "//envoy/extensions/transport_sockets/s2a/v3alpha:pkg", + "//envoy/extensions/transport_sockets/s2a/v3:pkg", "//envoy/extensions/transport_sockets/starttls/v3:pkg", "//envoy/extensions/transport_sockets/tap/v3:pkg", + "//envoy/extensions/transport_sockets/tcp_stats/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", "//envoy/extensions/upstreams/http/generic/v3:pkg", "//envoy/extensions/upstreams/http/http/v3:pkg", @@ -225,14 +229,14 @@ proto_library( "//envoy/extensions/upstreams/http/v3:pkg", 
"//envoy/extensions/upstreams/tcp/generic/v3:pkg", "//envoy/extensions/wasm/v3:pkg", - "//envoy/extensions/watchdog/profile_action/v3alpha:pkg", + "//envoy/extensions/watchdog/profile_action/v3:pkg", "//envoy/service/accesslog/v3:pkg", "//envoy/service/auth/v3:pkg", "//envoy/service/cluster/v3:pkg", "//envoy/service/discovery/v3:pkg", "//envoy/service/endpoint/v3:pkg", "//envoy/service/event_reporting/v3:pkg", - "//envoy/service/ext_proc/v3alpha:pkg", + "//envoy/service/ext_proc/v3:pkg", "//envoy/service/extension/v3:pkg", "//envoy/service/health/v3:pkg", "//envoy/service/listener/v3:pkg", @@ -250,7 +254,7 @@ proto_library( "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", - "//envoy/watchdog/v3alpha:pkg", + "//envoy/watchdog/v3:pkg", ], ) diff --git a/api/bazel/external_deps.bzl b/api/bazel/external_deps.bzl index e8283e4fee10..d541512ce98b 100644 --- a/api/bazel/external_deps.bzl +++ b/api/bazel/external_deps.bzl @@ -71,7 +71,7 @@ USE_CATEGORIES = [ USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "other", "test_only", "api"] def _fail_missing_attribute(attr, key): - fail("The '%s' attribute must be defined for external dependecy " % attr + key) + fail("The '%s' attribute must be defined for external dependency " % attr + key) # Method for verifying content of the repository location specifications. # diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index ac80f72aeade..3adc1245a879 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -4,9 +4,9 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "bazel-skylib", project_desc = "Common useful functions and rules for Bazel", project_url = "https://github.com/bazelbuild/bazel-skylib", - version = "1.0.3", - sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c", - release_date = "2020-08-27", + version = "1.1.1", + sha256 = "c6966ec828da198c5d9adbaa94c05e3a1c7f21bd012a0b29ba8ddbccb2c93b0d", + release_date = "2021-09-27", urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz"], use_category = ["api"], ), @@ -14,9 +14,9 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "protoc-gen-validate (PGV)", project_desc = "protoc plugin to generate polyglot message validators", project_url = "https://github.com/envoyproxy/protoc-gen-validate", - version = "0.6.1", - sha256 = "c695fc5a2e5a1b52904cd8a58ce7a1c3a80f7f50719496fd606e551685c01101", - release_date = "2021-04-26", + version = "0.6.2", + sha256 = "b02da533c77023238c556982507b9a71afc850478b637a7a13ec13f311efa5c0", + release_date = "2021-10-21", strip_prefix = "protoc-gen-validate-{version}", urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/v{version}.tar.gz"], use_category = ["api"], @@ -32,9 +32,9 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Bazel build tools", project_desc = "Developer tools for working with Google's bazel buildtool.", project_url = "https://github.com/bazelbuild/buildtools", - version = "4.0.1", - sha256 = "c28eef4d30ba1a195c6837acf6c75a4034981f5b4002dda3c5aa6e48ce023cf1", - release_date = "2021-03-01", + version = "4.2.2", + sha256 = "ae34c344514e08c23e90da0e2d6cb700fcd28e80c02e23e4d5715dddcb42f7b3", + release_date = "2021-10-07", strip_prefix = "buildtools-{version}", urls = ["https://github.com/bazelbuild/buildtools/archive/{version}.tar.gz"], use_category = ["api"], @@ -44,9 +44,9 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_desc = "xDS API Working Group (xDS-WG)", 
project_url = "https://github.com/cncf/xds", # During the UDPA -> xDS migration, we aren't working with releases. - version = "25de7278fc844d392d607214f36dbedf50f167ee", - sha256 = "4107d57fbfb0d1a20a191c4eac59579ec029242928ea0c75d0e42728380cf369", - release_date = "2021-09-22", + version = "cb28da3451f158a947dfc45090fe92b07b243bc1", + sha256 = "5bc8365613fe2f8ce6cc33959b7667b13b7fe56cb9d16ba740c06e1a7c4242fc", + release_date = "2021-10-11", strip_prefix = "xds-{version}", urls = ["https://github.com/cncf/xds/archive/{version}.tar.gz"], use_category = ["api"], @@ -100,20 +100,20 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Protobuf Rules for Bazel", project_desc = "Protocol buffer rules for Bazel", project_url = "https://github.com/bazelbuild/rules_proto", - version = "f7a30f6f80006b591fa7c437fe5a951eb10bcbcf", - sha256 = "9fc210a34f0f9e7cc31598d109b5d069ef44911a82f507d5a88716db171615a8", - release_date = "2021-02-09", + version = "4.0.0", + sha256 = "66bfdf8782796239d3875d37e7de19b1d94301e8972b3cbd2446b332429b4df1", + release_date = "2021-09-15", strip_prefix = "rules_proto-{version}", - urls = ["https://github.com/bazelbuild/rules_proto/archive/{version}.tar.gz"], + urls = ["https://github.com/bazelbuild/rules_proto/archive/refs/tags/{version}.tar.gz"], use_category = ["api"], ), opentelemetry_proto = dict( project_name = "OpenTelemetry Proto", project_desc = "Language Independent Interface Types For OpenTelemetry", project_url = "https://github.com/open-telemetry/opentelemetry-proto", - version = "0.9.0", - sha256 = "9ec38ab51eedbd7601979b0eda962cf37bc8a4dc35fcef604801e463f01dcc00", - release_date = "2021-05-12", + version = "0.11.0", + sha256 = "985367f8905e91018e636cbf0d83ab3f834b665c4f5899a27d10cae9657710e2", + release_date = "2021-10-07", strip_prefix = "opentelemetry-proto-{version}", urls = ["https://github.com/open-telemetry/opentelemetry-proto/archive/v{version}.tar.gz"], use_category = ["api"], diff --git a/api/envoy/extensions/cache/simple_http_cache/v3alpha/BUILD b/api/contrib/envoy/extensions/vcl/v3alpha/BUILD similarity index 100% rename from api/envoy/extensions/cache/simple_http_cache/v3alpha/BUILD rename to api/contrib/envoy/extensions/vcl/v3alpha/BUILD diff --git a/api/contrib/envoy/extensions/vcl/v3alpha/vcl_socket_interface.proto b/api/contrib/envoy/extensions/vcl/v3alpha/vcl_socket_interface.proto new file mode 100644 index 000000000000..d8219df358f4 --- /dev/null +++ b/api/contrib/envoy/extensions/vcl/v3alpha/vcl_socket_interface.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package envoy.extensions.vcl.v3alpha; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.vcl.v3alpha"; +option java_outer_classname = "VclSocketInterfaceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Vcl Socket Interface configuration] +// VCL :ref:`configuration overview `. 
+// [#extension: envoy.bootstrap.vcl] + +// Configuration for vcl socket interface that relies on `vpp` `comms` library (VCL) +message VclSocketInterface { +} diff --git a/api/envoy/api/v2/auth/tls.proto b/api/envoy/api/v2/auth/tls.proto index 201973a2b9de..3f7255379ab7 100644 --- a/api/envoy/api/v2/auth/tls.proto +++ b/api/envoy/api/v2/auth/tls.proto @@ -81,10 +81,9 @@ message DownstreamTlsContext { bool disable_stateless_session_resumption = 7; } - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). + // If specified, ``session_timeout`` will change the maximum lifetime (in seconds) of the TLS session. + // Currently this value is used as a hint for the `TLS session ticket lifetime (for TLSv1.2) `_. + // Only seconds can be specified (fractional seconds are ignored). google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { lt {seconds: 4294967296} gte {} diff --git a/api/envoy/config/bootstrap/v3/bootstrap.proto b/api/envoy/config/bootstrap/v3/bootstrap.proto index 0e8de3663335..0cb494e16323 100644 --- a/api/envoy/config/bootstrap/v3/bootstrap.proto +++ b/api/envoy/config/bootstrap/v3/bootstrap.proto @@ -248,9 +248,6 @@ message Bootstrap { // when :ref:`dns_resolvers ` and // :ref:`use_tcp_for_dns_lookups ` are // specified. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. // This field is deprecated in favor of *dns_resolution_config* // which aggregates all of the DNS resolver configuration in a single message. bool use_tcp_for_dns_lookups = 20 @@ -260,23 +257,22 @@ message Bootstrap { // This may be overridden on a per-cluster basis in cds_config, when // :ref:`dns_resolution_config ` // is specified. - // *dns_resolution_config* will be deprecated once - // :ref:'typed_dns_resolver_config ' - // is fully supported. - core.v3.DnsResolutionConfig dns_resolution_config = 30; + // This field is deprecated in favor of + // :ref:`typed_dns_resolver_config `. + core.v3.DnsResolutionConfig dns_resolution_config = 30 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, // or any other DNS resolver types and the related parameters. - // For example, an object of :ref:`DnsResolutionConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration will replace the - // :ref:'dns_resolution_config ' - // configuration eventually. - // TODO(yanjunxiang): Investigate the deprecation plan for *dns_resolution_config*. + // For example, an object of + // :ref:`CaresDnsResolverConfig ` + // can be packed into this *typed_dns_resolver_config*. This configuration replaces the + // :ref:`dns_resolution_config ` + // configuration. // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // this configuration is optional. - // When *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. + // when *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. // When *typed_dns_resolver_config* is missing, the default behavior is in place. 
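For illustration, a minimal YAML sketch of the *typed_dns_resolver_config* bootstrap field described above; the resolver address is illustrative, and the extension name and field names are assumed from the c-ares resolver extension referenced elsewhere in this change:

```yaml
typed_dns_resolver_config:
  name: envoy.network.dns_resolver.cares
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig
    resolvers:
    - socket_address: { address: "8.8.8.8", port_value: 53 }  # illustrative resolver
    dns_resolver_options:
      use_tcp_for_dns_lookups: false
      no_default_search_domain: false
```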
- // [#not-implemented-hide:] + // [#extension-category: envoy.network.dns_resolver] core.v3.TypedExtensionConfig typed_dns_resolver_config = 31; // Specifies optional bootstrap extensions to be instantiated at startup time. diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 495d6ce39788..ef5ab33d5a2f 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -43,7 +43,7 @@ message ClusterCollection { } // Configuration for a single upstream cluster. -// [#next-free-field: 56] +// [#next-free-field: 57] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; @@ -112,9 +112,9 @@ message Cluster { // Use the new :ref:`load_balancing_policy // ` field to determine the LB policy. - // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field - // and instead using the new load_balancing_policy field as the one and only mechanism for - // configuring this.] + // This has been deprecated in favor of using the :ref:`load_balancing_policy + // ` field without + // setting any value in :ref:`lb_policy`. LOAD_BALANCING_POLICY_CONFIG = 7; } @@ -129,6 +129,8 @@ message Cluster { // If V4_PREFERRED is specified, the DNS resolver will first perform a lookup for addresses in the // IPv4 family and fallback to a lookup for addresses in the IPv6 family. i.e., the callback // target will only get v6 addresses if there were NO v4 addresses to return. + // If ALL is specified, the DNS resolver will perform a lookup for both IPv4 and IPv6 families, + // and return all resolved addresses. // For cluster types other than // :ref:`STRICT_DNS` and // :ref:`LOGICAL_DNS`, @@ -140,6 +142,7 @@ message Cluster { V4_ONLY = 1; V6_ONLY = 2; V4_PREFERRED = 3; + ALL = 4; } enum ClusterProtocolSelection { @@ -345,6 +348,35 @@ message Cluster { bool list_as_any = 7; } + // Configuration for :ref:`slow start mode `. + message SlowStartConfig { + // Represents the size of the slow start window. + // If set, the newly created host remains in slow start mode starting from its creation time + // for the duration of the slow start window. + google.protobuf.Duration slow_start_window = 1; + + // This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0, + // so that the endpoint would get a linearly increasing amount of traffic. + // When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly. + // The value of the aggression parameter should be greater than 0.0. + // By tuning the parameter, it is possible to achieve a polynomial or exponential shape of the ramp-up curve. + // + // During the slow start window, the effective weight of an endpoint is scaled with the time factor and aggression: + // `new_weight = weight * time_factor ^ (1 / aggression)`, + // where `time_factor=(time_since_start_seconds / slow_start_time_seconds)`. + // + // As time progresses, more and more traffic is sent to an endpoint that is in the slow start window. + // Once the host exits slow start, time_factor and aggression no longer affect its weight. + core.v3.RuntimeDouble aggression = 2; + } + + // Specific configuration for the RoundRobin load balancing policy. + message RoundRobinLbConfig { + // Configuration for slow start mode. + // If this configuration is not set, slow start will not be enabled. + SlowStartConfig slow_start_config = 1; + } + + // Specific configuration for the LeastRequest load balancing policy.
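To make the new slow start fields concrete, a minimal cluster sketch; the cluster name and runtime key are illustrative, and the layout follows the `RoundRobinLbConfig`/`SlowStartConfig` messages added above:

```yaml
clusters:
- name: backend                     # illustrative cluster
  type: STRICT_DNS
  lb_policy: ROUND_ROBIN
  round_robin_lb_config:
    slow_start_config:
      slow_start_window: 60s
      aggression:
        default_value: 1.0          # linear ramp-up
        runtime_key: upstream.backend.slow_start.aggression  # illustrative key
```

With these values, an endpoint halfway through the window has `time_factor = 0.5`, so `new_weight = weight * 0.5 ^ (1 / 1.0)`, i.e. half of its configured weight.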
message LeastRequestLbConfig { option (udpa.annotations.versioning).previous_message_type = @@ -378,6 +410,10 @@ message Cluster { // .. note:: // This setting only takes effect if all host weights are not equal. core.v3.RuntimeDouble active_request_bias = 2; + + // Configuration for slow start mode. + // If this configuration is not set, slow start will not be enabled. + SlowStartConfig slow_start_config = 3; } // Specific configuration for the :ref:`RingHash` @@ -867,41 +903,34 @@ message Cluster { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only allows overriding DNS resolvers via system settings. // This field is deprecated in favor of *dns_resolution_config* // which aggregates all of the DNS resolver configuration in a single message. repeated core.v3.Address dns_resolvers = 18 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // Always use TCP queries instead of UDP queries for DNS lookups. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. // This field is deprecated in favor of *dns_resolution_config* // which aggregates all of the DNS resolver configuration in a single message. bool use_tcp_for_dns_lookups = 45 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // DNS resolution configuration which includes the underlying dns resolver addresses and options. - // *dns_resolution_config* will be deprecated once - // :ref:'typed_dns_resolver_config ' - // is fully supported. - core.v3.DnsResolutionConfig dns_resolution_config = 53; + // This field is deprecated in favor of + // :ref:`typed_dns_resolver_config `. + core.v3.DnsResolutionConfig dns_resolution_config = 53 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, // or any other DNS resolver types and the related parameters. - // For example, an object of :ref:`DnsResolutionConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration will replace the - // :ref:'dns_resolution_config ' - // configuration eventually. - // TODO(yanjunxiang): Investigate the deprecation plan for *dns_resolution_config*. + // For example, an object of + // :ref:`CaresDnsResolverConfig ` + // can be packed into this *typed_dns_resolver_config*. This configuration replaces the + // :ref:`dns_resolution_config ` + // configuration. // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // this configuration is optional. - // When *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. + // when *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. // When *typed_dns_resolver_config* is missing, the default behavior is in place. - // [#not-implemented-hide:] + // [#extension-category: envoy.network.dns_resolver] core.v3.TypedExtensionConfig typed_dns_resolver_config = 55; // Optional configuration for having cluster readiness block on warm-up.
Currently, only applicable for @@ -959,6 +988,9 @@ message Cluster { // Optional configuration for the LeastRequest load balancing policy. LeastRequestLbConfig least_request_lb_config = 37; + + // Optional configuration for the RoundRobin load balancing policy. + RoundRobinLbConfig round_robin_lb_config = 56; } // Common configuration for all load balancer implementations. @@ -1015,9 +1047,8 @@ message Cluster { // servers of this cluster. repeated Filter filters = 40; - // New mechanism for LB policy configuration. Used only if the - // :ref:`lb_policy` field has the value - // :ref:`LOAD_BALANCING_POLICY_CONFIG`. + // If this field is set and is supported by the client, it will supersede the value of + // :ref:`lb_policy`. LoadBalancingPolicy load_balancing_policy = 41; // [#not-implemented-hide:] diff --git a/api/envoy/config/core/v3/base.proto b/api/envoy/config/core/v3/base.proto index dcfc660dd028..c9921f25efe3 100644 --- a/api/envoy/config/core/v3/base.proto +++ b/api/envoy/config/core/v3/base.proto @@ -296,6 +296,15 @@ message RuntimeFeatureFlag { string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } +// Query parameter name/value pair. +message QueryParameter { + // The key of the query parameter. Case sensitive. + string key = 1 [(validate.rules).string = {min_len: 1}]; + + // The value of the query parameter. + string value = 2; +} + // Header name/value pair. message HeaderValue { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderValue"; @@ -363,7 +372,7 @@ message WatchedDirectory { string path = 1 [(validate.rules).string = {min_len: 1}]; } -// Data source consisting of either a file or an inline value. +// Data source consisting of a file, an inline value, or an environment variable. message DataSource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.DataSource"; @@ -378,6 +387,9 @@ message DataSource { // String inlined in the configuration. string inline_string = 3; + + // Environment variable data source. + string environment_variable = 4 [(validate.rules).string = {min_len: 1}]; } } diff --git a/api/envoy/config/core/v3/extension.proto b/api/envoy/config/core/v3/extension.proto index ba66da6a8e36..adb7cb1ba400 100644 --- a/api/envoy/config/core/v3/extension.proto +++ b/api/envoy/config/core/v3/extension.proto @@ -24,8 +24,9 @@ message TypedExtensionConfig { string name = 1 [(validate.rules).string = {min_len: 1}]; // The typed config for the extension. The type URL will be used to identify - // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*, - // the inner type URL of *TypedStruct* will be utilized. See the + // the extension. In the case that the type URL is *xds.type.v3.TypedStruct* + // (or, for historical reasons, *udpa.type.v1.TypedStruct*), the inner type + // URL of *TypedStruct* will be utilized. See the // :ref:`extension configuration overview // ` for further details. 
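For reference, a minimal sketch of the *xds.type.v3.TypedStruct* indirection described above; the buffer filter is used purely as an illustrative payload, and the inner `type_url` is what selects the actual extension config:

```yaml
name: envoy.filters.http.buffer
typed_config:
  "@type": type.googleapis.com/xds.type.v3.TypedStruct
  type_url: type.googleapis.com/envoy.extensions.filters.http.buffer.v3.Buffer
  value:
    max_request_bytes: 1048576
```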
google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; diff --git a/api/envoy/config/core/v3/health_check.proto b/api/envoy/config/core/v3/health_check.proto index 304297e7c011..81a9b6c7f153 100644 --- a/api/envoy/config/core/v3/health_check.proto +++ b/api/envoy/config/core/v3/health_check.proto @@ -73,7 +73,7 @@ message HealthCheck { } } - // [#next-free-field: 12] + // [#next-free-field: 13] message HttpHealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck.HttpHealthCheck"; @@ -118,6 +118,18 @@ message HealthCheck { // range are required. Only statuses in the range [100, 600) are allowed. repeated type.v3.Int64Range expected_statuses = 9; + // Specifies a list of HTTP response statuses considered retriable. If provided, responses in this range + // will count towards the configured :ref:`unhealthy_threshold `, + // but will not result in the host being considered immediately unhealthy. Ranges follow half-open semantics of + // :ref:`Int64Range `. The start and end of each range are required. + // Only statuses in the range [100, 600) are allowed. The :ref:`expected_statuses ` + // field takes precedence for any range overlaps with this field i.e. if status code 200 is both retriable and expected, a 200 response will + // be considered a successful health check. By default all responses not in + // :ref:`expected_statuses ` will result in + // the host being considered immediately unhealthy i.e. if status code 200 is expected and there are no configured retriable statuses, any + // non-200 response will result in the host being marked unhealthy. + repeated type.v3.Int64Range retriable_statuses = 12; + // Use specified application protocol for health checks. type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}]; @@ -243,8 +255,10 @@ message HealthCheck { uint32 interval_jitter_percent = 18; // The number of unhealthy health checks required before a host is marked - // unhealthy. Note that for *http* health checking if a host responds with 503 - // this threshold is ignored and the host is considered unhealthy immediately. + // unhealthy. Note that for *http* health checking if a host responds with a code not in + // :ref:`expected_statuses ` + // or :ref:`retriable_statuses `, + // this threshold is ignored and the host is considered immediately unhealthy. google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}]; // The number of healthy health checks required before a host is marked diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index 4535b1666738..49ee04c3aed8 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -28,11 +28,38 @@ message TcpProtocolOptions { "envoy.api.v2.core.TcpProtocolOptions"; } +// Config for keepalive probes in a QUIC connection. +// Note that QUIC keep-alive probing packets work differently from HTTP/2 keep-alive PINGs in a sense that the probing packet +// itself doesn't timeout waiting for a probing response. Quic has a shorter idle timeout than TCP, so it doesn't rely on such probing to discover dead connections. If the peer fails to respond, the connection will idle timeout eventually. Thus, they are configured differently from :ref:`connection_keepalive `. +message QuicKeepAliveSettings { + // The max interval for a connection to send keep-alive probing packets (with PING or PATH_RESPONSE). 
The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout while not less than 1s to avoid throttling the connection or flooding the peer with probes. + // + // If :ref:`initial_interval ` is absent or zero, a client connection will use this value to start probing. + // + // If zero, disable keepalive probing. + // If absent, use the QUICHE default interval to probe. + google.protobuf.Duration max_interval = 1 [(validate.rules).duration = { + lte {} + gte {seconds: 1} + }]; + + // The interval to send the first few keep-alive probing packets to prevent connection from hitting the idle timeout. Subsequent probes will be sent, each one with an interval exponentially longer than previous one, till it reaches :ref:`max_interval `. And the probes afterwards will always use :ref:`max_interval `. + // + // The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout and smaller than max_interval to take effect. + // + // If absent or zero, disable keepalive probing for a server connection. For a client connection, if :ref:`max_interval ` is also zero, do not keepalive, otherwise use max_interval or QUICHE default to probe all the time. + google.protobuf.Duration initial_interval = 2 [(validate.rules).duration = { + lte {} + gte {seconds: 1} + }]; +} + // QUIC protocol options which apply to both downstream and upstream connections. +// [#next-free-field: 6] message QuicProtocolOptions { // Maximum number of streams that the client can negotiate per connection. 100 // if not specified. - google.protobuf.UInt32Value max_concurrent_streams = 1; + google.protobuf.UInt32Value max_concurrent_streams = 1 [(validate.rules).uint32 = {gte: 1}]; // `Initial stream-level flow-control receive window // `_ size. Valid values range from @@ -55,6 +82,20 @@ message QuicProtocolOptions { // window size now, so it's also the minimum. google.protobuf.UInt32Value initial_connection_window_size = 3 [(validate.rules).uint32 = {lte: 25165824 gte: 1}]; + + // [#not-implemented-hide:] Hiding until timeout config is supported. + // The number of timeouts that can occur before port migration is triggered for QUIC clients. + // This defaults to 1. If sets to 0, port migration will not occur. + // Timeout here refers to QUIC internal path degrading timeout mechanism, such as PTO. + // This has no effect on server sessions. + // Currently the value can only be 0 or 1. + // TODO(renjietang): Plumb through quiche to make this config able to adjust the amount of timeouts needed to trigger port migration. + google.protobuf.UInt32Value num_timeouts_to_trigger_port_migration = 4 + [(validate.rules).uint32 = {lte: 1 gte: 0}]; + + // Probes the peer at the configured interval to solicit traffic, i.e. ACK or PATH_RESPONSE, from the peer to push back connection idle timeout. + // If absent, use the default keepalive behavior of which a client connection sends PINGs every 15s, and a server connection doesn't do anything. + QuicKeepAliveSettings connection_keepalive = 5; } message UpstreamHttpProtocolOptions { @@ -157,11 +198,10 @@ message HttpProtocolOptions { // The maximum duration of a connection. The duration is defined as a period since a connection // was established. If not set, there is no max duration. When max_connection_duration is reached - // and if there are no active streams, the connection will be closed. If there are any active streams, - // the drain sequence will kick-in, and the connection will be force-closed after the drain period. 
- // See :ref:`drain_timeout + // and if there are no active streams, the connection will be closed. If the connection is a + // downstream connection and there are any active streams, the drain sequence will kick-in, + // and the connection will be force-closed after the drain period. See :ref:`drain_timeout // `. - // Note: This feature is not yet implemented for the upstream connections. google.protobuf.Duration max_connection_duration = 3; // The maximum number of headers. If unconfigured, the default @@ -290,6 +330,8 @@ message KeepaliveSettings { // If this is zero, this type of PING will not be sent. // If an interval ping is outstanding, a second ping will not be sent as the // interval ping will determine if the connection is dead. + // + // The same feature for HTTP/3 is given by inheritance from QUICHE which uses :ref:`connection idle_timeout ` and the current PTO of the connection to decide whether to probe before sending a new request. google.protobuf.Duration connection_idle_interval = 4 [(validate.rules).duration = {gte {nanos: 1000000}}]; } diff --git a/api/envoy/config/core/v3/resolver.proto b/api/envoy/config/core/v3/resolver.proto index 21d40425f7a6..2ca4eaab0e5d 100644 --- a/api/envoy/config/core/v3/resolver.proto +++ b/api/envoy/config/core/v3/resolver.proto @@ -17,9 +17,6 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Configuration of DNS resolver option flags which control the behavior of the DNS resolver. message DnsResolverOptions { // Use TCP for all DNS queries instead of the default protocol UDP. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only uses UDP for DNS resolution. bool use_tcp_for_dns_lookups = 1; // Do not use the default search domains; only query hostnames as-is or as aliases. @@ -31,9 +28,6 @@ message DnsResolutionConfig { // A list of dns resolver addresses. If specified, the DNS client library will perform resolution // via the underlying DNS resolvers. Otherwise, the default system resolvers // (e.g., /etc/resolv.conf) will be used. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only allows overriding DNS resolvers via system settings. repeated Address resolvers = 1 [(validate.rules).repeated = {min_items: 1}]; // Configuration of DNS resolver option flags which control the behavior of the DNS resolver. diff --git a/api/envoy/config/listener/v3/listener.proto b/api/envoy/config/listener/v3/listener.proto index a5cd4bfe976f..92fb7e168a75 100644 --- a/api/envoy/config/listener/v3/listener.proto +++ b/api/envoy/config/listener/v3/listener.proto @@ -153,7 +153,6 @@ message Listener { // UDP Listener filters can be specified when the protocol in the listener socket address in // :ref:`protocol ` is :ref:`UDP // `. - // UDP listeners currently support a single filter. repeated ListenerFilter listener_filters = 9; // The timeout to wait for all listener filters to complete operation. 
If the timeout is reached, diff --git a/api/envoy/config/route/v3/BUILD b/api/envoy/config/route/v3/BUILD index 81cdfdf8a93a..b82843eee7dd 100644 --- a/api/envoy/config/route/v3/BUILD +++ b/api/envoy/config/route/v3/BUILD @@ -15,5 +15,7 @@ api_proto_package( "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/annotations/v3:pkg", + "@com_github_cncf_udpa//xds/type/matcher/v3:pkg", ], ) diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index ec27e627d346..5a915eee87ca 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -17,6 +17,9 @@ import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "xds/annotations/v3/status.proto"; +import "xds/type/matcher/v3/matcher.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; @@ -37,7 +40,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // host header. This allows a single listener to service multiple top level domain path trees. Once // a virtual host is selected based on the domain, the routes are processed in order to see which // upstream cluster to route to or whether to perform a redirect. -// [#next-free-field: 21] +// [#next-free-field: 22] message VirtualHost { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.VirtualHost"; @@ -87,8 +90,15 @@ message VirtualHost { // The list of routes that will be matched, in order, for incoming requests. // The first route that matches will be used. + // Only one of this and `matcher` can be specified. repeated Route routes = 3; + // [#next-major-version: This should be included in a oneof with routes wrapped in a message.] + // The match tree to use when resolving route actions for incoming requests. Only one of this and `routes` + // can be specified. + xds.type.matcher.v3.Matcher matcher = 21 + [(xds.annotations.v3.field_status).work_in_progress = true]; + // Specifies the type of TLS enforcement the virtual host expects. If this option is not // specified, there is no TLS requirement for the virtual host. TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}]; @@ -529,6 +539,14 @@ message RouteMatch { // against all the specified query parameters. If the number of specified // query parameters is nonzero, they all must match the *path* header's // query string for a match to occur. + // + // .. note:: + // + // If query parameters are used to pass request message fields when + // `grpc_json_transcoder `_ + // is used, the transcoded message fields maybe different. The query parameters are + // url encoded, but the message fields are not. For example, if a query + // parameter is "foo%20bar", the message field will be "foo bar". repeated QueryParameterMatcher query_parameters = 7; // If specified, only gRPC requests will be matched. 
The router will check diff --git a/api/envoy/config/route/v3/scoped_route.proto b/api/envoy/config/route/v3/scoped_route.proto index eb47d7e10898..1b0904ec5723 100644 --- a/api/envoy/config/route/v3/scoped_route.proto +++ b/api/envoy/config/route/v3/scoped_route.proto @@ -2,6 +2,9 @@ syntax = "proto3"; package envoy.config.route.v3; +import "envoy/config/route/v3/route.proto"; + +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -16,7 +19,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Specifies a routing scope, which associates a // :ref:`Key` to a -// :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` (identified by its resource name). +// :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. +// The :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` can be obtained dynamically +// via RDS (:ref:`route_configuration_name`) +// or specified inline (:ref:`route_configuration`). // // The HTTP connection manager builds up a table consisting of these Key to // RouteConfiguration mappings, and looks up the RouteConfiguration to use per @@ -73,6 +79,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // would result in the routing table defined by the `route-config1` // RouteConfiguration being assigned to the HTTP request/stream. // +// [#next-free-field: 6] message ScopedRouteConfiguration { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ScopedRouteConfiguration"; @@ -113,7 +120,12 @@ message ScopedRouteConfiguration { // The resource name to use for a :ref:`envoy_v3_api_msg_service.discovery.v3.DiscoveryRequest` to an // RDS server to fetch the :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated // with this scope. - string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}]; + string route_configuration_name = 2 + [(udpa.annotations.field_migrate).oneof_promotion = "route_config"]; + + // The :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated with the scope. + RouteConfiguration route_configuration = 5 + [(udpa.annotations.field_migrate).oneof_promotion = "route_config"]; // The key to match against. 
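To illustrate the new inline option added above, a minimal `ScopedRouteConfiguration` sketch; the names, key fragment, and single route are illustrative, and `route_configuration_name` is omitted because the route table is inlined:

```yaml
name: scope_example
route_configuration:                # inlined instead of being fetched via RDS
  name: inline-routes
  virtual_hosts:
  - name: backend
    domains: ["*"]
    routes:
    - match: { prefix: "/" }
      route: { cluster: example_cluster }
key:
  fragments:
  - string_key: example.example.com
```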
Key key = 3 [(validate.rules).message = {required: true}]; diff --git a/api/envoy/extensions/access_loggers/open_telemetry/v3alpha/BUILD b/api/envoy/extensions/access_loggers/open_telemetry/v3/BUILD similarity index 100% rename from api/envoy/extensions/access_loggers/open_telemetry/v3alpha/BUILD rename to api/envoy/extensions/access_loggers/open_telemetry/v3/BUILD diff --git a/api/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto b/api/envoy/extensions/access_loggers/open_telemetry/v3/logs_service.proto similarity index 93% rename from api/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto rename to api/envoy/extensions/access_loggers/open_telemetry/v3/logs_service.proto index 1b7027133e15..cd4a63181290 100644 --- a/api/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto +++ b/api/envoy/extensions/access_loggers/open_telemetry/v3/logs_service.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package envoy.extensions.access_loggers.open_telemetry.v3alpha; +package envoy.extensions.access_loggers.open_telemetry.v3; import "envoy/extensions/access_loggers/grpc/v3/als.proto"; @@ -9,10 +9,9 @@ import "opentelemetry/proto/common/v1/common.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.open_telemetry.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.access_loggers.open_telemetry.v3"; option java_outer_classname = "LogsServiceProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: OpenTelemetry (gRPC) Access Log] diff --git a/api/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD b/api/envoy/extensions/cache/simple_http_cache/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD rename to api/envoy/extensions/cache/simple_http_cache/v3/BUILD diff --git a/api/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto b/api/envoy/extensions/cache/simple_http_cache/v3/config.proto similarity index 83% rename from api/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto rename to api/envoy/extensions/cache/simple_http_cache/v3/config.proto index 1b42e9b3f93d..e7bd7cdbdf91 100644 --- a/api/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto +++ b/api/envoy/extensions/cache/simple_http_cache/v3/config.proto @@ -1,10 +1,10 @@ syntax = "proto3"; -package envoy.extensions.cache.simple_http_cache.v3alpha; +package envoy.extensions.cache.simple_http_cache.v3; import "udpa/annotations/status.proto"; -option java_package = "io.envoyproxy.envoy.extensions.cache.simple_http_cache.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.cache.simple_http_cache.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; diff --git a/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto b/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto index c4fc8285ee59..449ffee87c15 100644 --- a/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto +++ b/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto @@ -32,4 +32,26 @@ message ClusterConfig { // in the :ref:`cluster's upstream_http_protocol_options // ` bool allow_insecure_cluster_options = 2; + + // 
[#not-implemented-hide:] + // If true, allow HTTP/2 and HTTP/3 connections to be reused for requests to different + // origins than the connection was initially created for. This will only happen when the + // resolved address for the new connection matches the peer address of the connection and + // the TLS certificate is also valid for the new hostname. For example, if a connection + // has previously been established to foo.example.com at IP 1.2.3.4 with a certificate + // that is valid for `*.example.com`, then this connection could be used for requests to + // bar.example.com if that also resolved to 1.2.3.4. + // + // .. note:: + // By design, this feature will maximize reuse of connections. This means that instead of + // opening a new connection when an existing connection reaches the maximum number of + // concurrent streams, requests will be sent to the existing connection. + // TODO(alyssawilk) implement request queueing in connections. + // + // .. note:: + // The coalesced connections might be to upstreams that would not be otherwise + // selected by Envoy. See the section `Connection Reuse in RFC 7540 + // `_ + // + bool allow_coalesced_connections = 3; } diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index e3904ae28719..f2f6db7f1332 100644 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -55,16 +55,12 @@ message DnsCacheConfig { config.cluster.v3.Cluster.DnsLookupFamily dns_lookup_family = 2 [(validate.rules).enum = {defined_only: true}]; - // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s. - // - // .. note: - // - // The returned DNS TTL is not currently used to alter the refresh rate. This feature will be - // added in a future change. - // - // .. note: + // The DNS refresh rate for unresolved DNS hosts. If not specified, defaults to 60s. // // The refresh rate is rounded to the closest millisecond, and must be at least 1ms. + // + // Once a host has been resolved, the refresh rate will be the DNS TTL, capped + // at a minimum of 5s. google.protobuf.Duration dns_refresh_rate = 3 [(validate.rules).duration = {gte {nanos: 1000000}}]; @@ -101,32 +97,28 @@ message DnsCacheConfig { DnsCacheCircuitBreakers dns_cache_circuit_breaker = 7; // Always use TCP queries instead of UDP queries for DNS lookups. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. // This field is deprecated in favor of *dns_resolution_config* // which aggregates all of the DNS resolver configuration in a single message. bool use_tcp_for_dns_lookups = 8 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // DNS resolution configuration which includes the underlying dns resolver addresses and options. - // *dns_resolution_config* will be deprecated once - // :ref:'typed_dns_resolver_config ' - // is fully supported. - config.core.v3.DnsResolutionConfig dns_resolution_config = 9; + // This field is deprecated in favor of + // :ref:`typed_dns_resolver_config `. + config.core.v3.DnsResolutionConfig dns_resolution_config = 9 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // DNS resolver type configuration extension.
This extension can be used to configure c-ares, apple, // or any other DNS resolver types and the related parameters. - // For example, an object of :ref:`DnsResolutionConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration will replace the - // :ref:'dns_resolution_config ' - // configuration eventually. - // TODO(yanjunxiang): Investigate the deprecation plan for *dns_resolution_config*. + // For example, an object of + // :ref:`CaresDnsResolverConfig ` + // can be packed into this *typed_dns_resolver_config*. This configuration replaces the + // :ref:`dns_resolution_config ` + // configuration. // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // this configuration is optional. - // When *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. + // when *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. // When *typed_dns_resolver_config* is missing, the default behavior is in place. - // [#not-implemented-hide:] + // [#extension-category: envoy.network.dns_resolver] config.core.v3.TypedExtensionConfig typed_dns_resolver_config = 12; // Hostnames that should be preresolved into the cache upon creation. This might provide a diff --git a/api/envoy/extensions/filters/http/admission_control/v3alpha/BUILD b/api/envoy/extensions/filters/http/admission_control/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/http/admission_control/v3alpha/BUILD rename to api/envoy/extensions/filters/http/admission_control/v3/BUILD diff --git a/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto b/api/envoy/extensions/filters/http/admission_control/v3/admission_control.proto similarity index 96% rename from api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto rename to api/envoy/extensions/filters/http/admission_control/v3/admission_control.proto index 9bb3603f9ebd..702f03019b1c 100644 --- a/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto +++ b/api/envoy/extensions/filters/http/admission_control/v3/admission_control.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package envoy.extensions.filters.http.admission_control.v3alpha; +package envoy.extensions.filters.http.admission_control.v3; import "envoy/config/core/v3/base.proto"; import "envoy/type/v3/range.proto"; @@ -10,10 +10,9 @@ import "google/protobuf/duration.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.http.admission_control.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.http.admission_control.v3"; option java_outer_classname = "AdmissionControlProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Admission Control] diff --git a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/BUILD b/api/envoy/extensions/filters/http/bandwidth_limit/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/BUILD rename to api/envoy/extensions/filters/http/bandwidth_limit/v3/BUILD diff --git a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto b/api/envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.proto similarity index 93% rename 
from api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto rename to api/envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.proto index 4cd5f8268b70..c512d541aaef 100644 --- a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto +++ b/api/envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package envoy.extensions.filters.http.bandwidth_limit.v3alpha; +package envoy.extensions.filters.http.bandwidth_limit.v3; import "envoy/config/core/v3/base.proto"; @@ -10,10 +10,9 @@ import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.http.bandwidth_limit.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.http.bandwidth_limit.v3"; option java_outer_classname = "BandwidthLimitProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Bandwidth limit] diff --git a/api/envoy/extensions/filters/http/cache/v3alpha/BUILD b/api/envoy/extensions/filters/http/cache/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/http/cache/v3alpha/BUILD rename to api/envoy/extensions/filters/http/cache/v3/BUILD diff --git a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v3/cache.proto similarity index 96% rename from api/envoy/extensions/filters/http/cache/v3alpha/cache.proto rename to api/envoy/extensions/filters/http/cache/v3/cache.proto index 5f0a5befa4bb..71f4a5bb73f9 100644 --- a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto +++ b/api/envoy/extensions/filters/http/cache/v3/cache.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package envoy.extensions.filters.http.cache.v3alpha; +package envoy.extensions.filters.http.cache.v3; import "envoy/config/route/v3/route_components.proto"; import "envoy/type/matcher/v3/string.proto"; @@ -11,10 +11,9 @@ import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v3"; option java_outer_classname = "CacheProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP Cache Filter] diff --git a/api/envoy/extensions/transport_sockets/s2a/v3alpha/BUILD b/api/envoy/extensions/filters/http/cdn_loop/v3/BUILD similarity index 100% rename from api/envoy/extensions/transport_sockets/s2a/v3alpha/BUILD rename to api/envoy/extensions/filters/http/cdn_loop/v3/BUILD diff --git a/api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto b/api/envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.proto similarity index 89% rename from api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto rename to api/envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.proto index 5f201026c66b..77a19511c3d4 100644 --- a/api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto +++ b/api/envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.proto @@ -1,14 +1,13 @@ syntax = "proto3"; -package envoy.extensions.filters.http.cdn_loop.v3alpha; +package 
envoy.extensions.filters.http.cdn_loop.v3; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.http.cdn_loop.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.http.cdn_loop.v3"; option java_outer_classname = "CdnLoopProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP CDN-Loop Filter] diff --git a/api/envoy/extensions/filters/http/ext_proc/v3/BUILD b/api/envoy/extensions/filters/http/ext_proc/v3/BUILD new file mode 100644 index 000000000000..e9b556d681cf --- /dev/null +++ b/api/envoy/extensions/filters/http/ext_proc/v3/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/annotations/v3:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto b/api/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto similarity index 96% rename from api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto rename to api/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto index 37560feba3c2..e688657830a0 100644 --- a/api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto +++ b/api/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto @@ -1,20 +1,22 @@ syntax = "proto3"; -package envoy.extensions.filters.http.ext_proc.v3alpha; +package envoy.extensions.filters.http.ext_proc.v3; import "envoy/config/core/v3/grpc_service.proto"; -import "envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto"; +import "envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto"; import "google/protobuf/duration.proto"; +import "xds/annotations/v3/status.proto"; + import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3"; option java_outer_classname = "ExtProcProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; +option (xds.annotations.v3.file_status).work_in_progress = true; // [#protodoc-title: External Processing Filter] // External Processing Filter @@ -88,9 +90,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // // The protocol itself is based on a bidirectional gRPC stream. Envoy will send the // server -// :ref:`ProcessingRequest ` +// :ref:`ProcessingRequest ` // messages, and the server must reply with -// :ref:`ProcessingResponse `. +// :ref:`ProcessingResponse `. 
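For orientation, a minimal filter entry targeting the newly promoted ``v3`` package might look like the sketch below; the ``grpc_service``, ``failure_mode_allow`` and ``processing_mode`` fields plus the ``ext_proc_server`` cluster are assumptions drawn from the wider ext_proc API rather than from this hunk, and only the type URL follows directly from the package rename above.

.. code-block:: yaml

  http_filters:
  - name: envoy.filters.http.ext_proc
    typed_config:
      # Type URL reflects the v3alpha -> v3 promotion in this change.
      "@type": type.googleapis.com/envoy.extensions.filters.http.ext_proc.v3.ExternalProcessor
      failure_mode_allow: false           # assumed field: fail closed if the processor is unavailable
      grpc_service:                       # assumed field: target of the bidirectional gRPC stream
        envoy_grpc:
          cluster_name: ext_proc_server   # hypothetical cluster name
      processing_mode:                    # assumed field: see processing_mode.proto later in this diff
        request_header_mode: SEND
        response_header_mode: SEND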
// [#next-free-field: 9] message ExternalProcessor { diff --git a/api/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto b/api/envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto similarity index 93% rename from api/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto rename to api/envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto index d085790d34ab..c15a5569a12c 100644 --- a/api/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto +++ b/api/envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto @@ -1,15 +1,17 @@ syntax = "proto3"; -package envoy.extensions.filters.http.ext_proc.v3alpha; +package envoy.extensions.filters.http.ext_proc.v3; + +import "xds/annotations/v3/status.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3"; option java_outer_classname = "ProcessingModeProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; +option (xds.annotations.v3.file_status).work_in_progress = true; // [#protodoc-title: External Processing Filter] // External Processing Filter Processing Mode diff --git a/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto b/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto index a4feeff31f15..7311abe8df6f 100644 --- a/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto +++ b/api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto @@ -15,7 +15,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // gRPC-JSON transcoder :ref:`configuration overview `. // [#extension: envoy.filters.http.grpc_json_transcoder] -// [#next-free-field: 12] +// [#next-free-field: 13] // GrpcJsonTranscoder filter configuration. // The filter itself can be used per route / per virtual host or on the general level. The most // specific one is being used for a given route. If the list of services is empty - filter @@ -211,12 +211,16 @@ message GrpcJsonTranscoder { bool convert_grpc_status = 9; // URL unescaping policy. - // This spec is only applied when extracting variable with multiple segments. + // This spec is only applied when extracting variable with multiple segments in the URL path. // For example, in case of `/foo/{x=*}/bar/{y=prefix/*}/{z=**}` `x` variable is single segment and `y` and `z` are multiple segments. // For a path with `/foo/first/bar/prefix/second/third/fourth`, `x=first`, `y=prefix/second`, `z=third/fourth`. // If this setting is not specified, the value defaults to :ref:`ALL_CHARACTERS_EXCEPT_RESERVED`. UrlUnescapeSpec url_unescape_spec = 10 [(validate.rules).enum = {defined_only: true}]; + // If true, unescape '+' to space when extracting variables in query parameters. + // This is to support `HTML 2.0 `_ + bool query_param_unescape_plus = 12; + // Configure the behavior when handling requests that cannot be transcoded. // // By default, the transcoder will silently pass through HTTP requests that are malformed. 
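As a rough, non-authoritative illustration of the two URL-handling knobs touched in the transcoder hunk above, a filter entry might be configured as follows; the ``proto_descriptor``/``services`` values and the surrounding ``http_filters`` scaffolding are assumptions from the wider filter config, while ``url_unescape_spec``, ``query_param_unescape_plus`` and ``convert_grpc_status`` come from the message above.

.. code-block:: yaml

  http_filters:
  - name: envoy.filters.http.grpc_json_transcoder
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder
      proto_descriptor: /etc/envoy/service_descriptor.pb    # hypothetical descriptor set path
      services: [example.BookstoreService]                  # hypothetical gRPC service name
      convert_grpc_status: true
      url_unescape_spec: ALL_CHARACTERS_EXCEPT_RESERVED     # the documented default
      # New in this change: decode '+' as a space when binding query parameter values.
      query_param_unescape_plus: true

With ``query_param_unescape_plus`` left at its proto3 default of ``false``, a literal ``+`` in a query parameter value is passed through unchanged; setting it to ``true`` restores the HTML 2.0 form-encoding behaviour referenced in the field comment.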
diff --git a/api/envoy/extensions/filters/http/oauth2/v3alpha/BUILD b/api/envoy/extensions/filters/http/oauth2/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/http/oauth2/v3alpha/BUILD rename to api/envoy/extensions/filters/http/oauth2/v3/BUILD diff --git a/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto b/api/envoy/extensions/filters/http/oauth2/v3/oauth.proto similarity index 74% rename from api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto rename to api/envoy/extensions/filters/http/oauth2/v3/oauth.proto index e5f990512ca8..15c71f3550fe 100644 --- a/api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto +++ b/api/envoy/extensions/filters/http/oauth2/v3/oauth.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package envoy.extensions.filters.http.oauth2.v3alpha; +package envoy.extensions.filters.http.oauth2.v3; import "envoy/config/core/v3/http_uri.proto"; import "envoy/config/route/v3/route_components.proto"; @@ -10,10 +10,9 @@ import "envoy/type/matcher/v3/path.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.http.oauth2.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.http.oauth2.v3"; option java_outer_classname = "OauthProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: OAuth] @@ -22,6 +21,24 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // message OAuth2Credentials { + message CookieNames { + // Cookie name to hold OAuth bearer token value. When the authentication server validates the + // client and returns an authorization token back to the OAuth filter, no matter what format + // that token is, if :ref:`forward_bearer_token ` + // is set to true the filter will send over the bearer token as a cookie with this name to the + // upstream. Defaults to ``BearerToken``. + string bearer_token = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}]; + + // Cookie name to hold OAuth HMAC value. Defaults to ``OauthHMAC``. + string oauth_hmac = 2 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}]; + + // Cookie name to hold OAuth expiry value. Defaults to ``OauthExpires``. + string oauth_expires = 3 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}]; + } + // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server. string client_id = 1 [(validate.rules).string = {min_len: 1}]; @@ -37,6 +54,9 @@ message OAuth2Credentials { transport_sockets.tls.v3.SdsSecretConfig hmac_secret = 3 [(validate.rules).message = {required: true}]; } + + // The cookie names used in OAuth filters flow. 
+ CookieNames cookie_names = 4; } // OAuth config diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 4c0cb91ad03b..53fb849361c1 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -8,7 +8,6 @@ import "envoy/config/route/v3/route_components.proto"; import "envoy/type/metadata/v3/metadata.proto"; import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 3fb4bfa09e20..93d255904fb8 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -911,7 +911,7 @@ message ScopedRoutes { // Configuration source specifier for RDS. // This config source is used to subscribe to RouteConfiguration resources specified in // ScopedRouteConfiguration messages. - config.core.v3.ConfigSource rds_config_source = 3 [(validate.rules).message = {required: true}]; + config.core.v3.ConfigSource rds_config_source = 3; oneof config_specifier { option (validate.required) = true; diff --git a/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD b/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD rename to api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/BUILD diff --git a/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto b/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.proto similarity index 92% rename from api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto rename to api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.proto index 7f7eb57d5be6..a084b0682b67 100644 --- a/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto +++ b/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.proto @@ -1,16 +1,15 @@ syntax = "proto3"; -package envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha; +package envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3; import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3"; option java_outer_classname = "SniDynamicForwardProxyProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: SNI dynamic forward proxy] diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto 
b/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto index 4916330ec5f3..01c41c77bb2b 100644 --- a/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto +++ b/api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto @@ -85,8 +85,8 @@ message ThriftProxy { repeated ThriftFilter thrift_filters = 5; // If set to true, Envoy will try to skip decode data after metadata in the Thrift message. - // This mode will only work if the upstream and downstream protocols are the same and the transport - // is the same, the transport type is framed and the protocol is not Twitter. Otherwise Envoy will + // This mode will only work if the upstream and downstream protocols are the same and the transports + // are Framed or Header, and the protocol is not Twitter. Otherwise Envoy will // fallback to decode the data. bool payload_passthrough = 6; diff --git a/api/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD b/api/envoy/extensions/filters/udp/dns_filter/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD rename to api/envoy/extensions/filters/udp/dns_filter/v3/BUILD diff --git a/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto b/api/envoy/extensions/filters/udp/dns_filter/v3/dns_filter.proto similarity index 72% rename from api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto rename to api/envoy/extensions/filters/udp/dns_filter/v3/dns_filter.proto index 39f44724c430..b46314ea196f 100644 --- a/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto +++ b/api/envoy/extensions/filters/udp/dns_filter/v3/dns_filter.proto @@ -1,9 +1,10 @@ syntax = "proto3"; -package envoy.extensions.filters.udp.dns_filter.v3alpha; +package envoy.extensions.filters.udp.dns_filter.v3; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/resolver.proto"; import "envoy/data/dns/v3/dns_table.proto"; @@ -13,10 +14,9 @@ import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v3"; option java_outer_classname = "DnsFilterProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: DNS Filter] @@ -68,7 +68,23 @@ message DnsFilterConfig { [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // DNS resolution configuration which includes the underlying dns resolver addresses and options. - config.core.v3.DnsResolutionConfig dns_resolution_config = 5; + // This field is deprecated in favor of + // :ref:`typed_dns_resolver_config `. + config.core.v3.DnsResolutionConfig dns_resolution_config = 5 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, + // or any other DNS resolver types and the related parameters. + // For example, an object of + // :ref:`CaresDnsResolverConfig ` + // can be packed into this *typed_dns_resolver_config*. This configuration replaces the + // :ref:`dns_resolution_config ` + // configuration. 
+ // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, + // when *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. + // When *typed_dns_resolver_config* is missing, the default behavior is in place. + // [#extension-category: envoy.network.dns_resolver] + config.core.v3.TypedExtensionConfig typed_dns_resolver_config = 4; // Controls how many outstanding external lookup contexts the filter tracks. // The context structure allows the filter to respond to every query even if the external diff --git a/api/envoy/extensions/watchdog/profile_action/v3alpha/BUILD b/api/envoy/extensions/network/dns_resolver/apple/v3/BUILD similarity index 100% rename from api/envoy/extensions/watchdog/profile_action/v3alpha/BUILD rename to api/envoy/extensions/network/dns_resolver/apple/v3/BUILD diff --git a/api/envoy/extensions/network/dns_resolver/apple/v3/apple_dns_resolver.proto b/api/envoy/extensions/network/dns_resolver/apple/v3/apple_dns_resolver.proto new file mode 100644 index 000000000000..cc47e69dedc0 --- /dev/null +++ b/api/envoy/extensions/network/dns_resolver/apple/v3/apple_dns_resolver.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package envoy.extensions.network.dns_resolver.apple.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.network.dns_resolver.apple.v3"; +option java_outer_classname = "AppleDnsResolverProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: apple DNS resolver] +// [#extension: envoy.network.dns_resolver.apple] + +// Configuration for apple DNS resolver. +message AppleDnsResolverConfig { +} diff --git a/api/envoy/extensions/filters/http/ext_proc/v3alpha/BUILD b/api/envoy/extensions/network/dns_resolver/cares/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/http/ext_proc/v3alpha/BUILD rename to api/envoy/extensions/network/dns_resolver/cares/v3/BUILD diff --git a/api/envoy/extensions/network/dns_resolver/cares/v3/cares_dns_resolver.proto b/api/envoy/extensions/network/dns_resolver/cares/v3/cares_dns_resolver.proto new file mode 100644 index 000000000000..05464231cbb2 --- /dev/null +++ b/api/envoy/extensions/network/dns_resolver/cares/v3/cares_dns_resolver.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.extensions.network.dns_resolver.cares.v3; + +import "envoy/config/core/v3/address.proto"; +import "envoy/config/core/v3/resolver.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.network.dns_resolver.cares.v3"; +option java_outer_classname = "CaresDnsResolverProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: c-ares DNS resolver] +// [#extension: envoy.network.dns_resolver.cares] + +// Configuration for c-ares DNS resolver. +message CaresDnsResolverConfig { + // A list of dns resolver addresses. If specified, the DNS client library will perform resolution + // via the underlying DNS resolvers. Otherwise, the default system resolvers + // (e.g., /etc/resolv.conf) will be used. + repeated config.core.v3.Address resolvers = 1 [(validate.rules).repeated = {min_items: 1}]; + + // Configuration of DNS resolver option flags which control the behavior of the DNS resolver. 
+ config.core.v3.DnsResolverOptions dns_resolver_options = 2; +} diff --git a/api/envoy/watchdog/v3alpha/BUILD b/api/envoy/extensions/transport_sockets/s2a/v3/BUILD similarity index 100% rename from api/envoy/watchdog/v3alpha/BUILD rename to api/envoy/extensions/transport_sockets/s2a/v3/BUILD diff --git a/api/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto b/api/envoy/extensions/transport_sockets/s2a/v3/s2a.proto similarity index 83% rename from api/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto rename to api/envoy/extensions/transport_sockets/s2a/v3/s2a.proto index b32b84653e69..7c77222f59d6 100644 --- a/api/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto +++ b/api/envoy/extensions/transport_sockets/s2a/v3/s2a.proto @@ -1,14 +1,13 @@ syntax = "proto3"; -package envoy.extensions.transport_sockets.s2a.v3alpha; +package envoy.extensions.transport_sockets.s2a.v3; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.s2a.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.s2a.v3"; option java_outer_classname = "S2aProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#not-implemented-hide:] diff --git a/generated_api_shadow/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/BUILD b/api/envoy/extensions/transport_sockets/tcp_stats/v3/BUILD similarity index 100% rename from generated_api_shadow/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/BUILD rename to api/envoy/extensions/transport_sockets/tcp_stats/v3/BUILD index e7414acdda46..1c1a6f6b4423 100644 --- a/generated_api_shadow/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/BUILD +++ b/api/envoy/extensions/transport_sockets/tcp_stats/v3/BUILD @@ -1,9 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - api_proto_package( deps = [ "//envoy/config/core/v3:pkg", diff --git a/api/envoy/extensions/transport_sockets/tcp_stats/v3/tcp_stats.proto b/api/envoy/extensions/transport_sockets/tcp_stats/v3/tcp_stats.proto new file mode 100644 index 000000000000..cd729e279adb --- /dev/null +++ b/api/envoy/extensions/transport_sockets/tcp_stats/v3/tcp_stats.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tcp_stats.v3; + +import "envoy/config/core/v3/base.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tcp_stats.v3"; +option java_outer_classname = "TcpStatsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: TCP Stats Transport Socket wrapper] +// [#extension: envoy.transport_sockets.tcp_stats] + +// Configuration for the TCP Stats transport socket wrapper, which wraps another transport socket for +// all communication, but emits stats about the underlying TCP connection. +// +// The stats are documented :ref:`here ` for listeners and +// :ref:`here ` for clusters. +// +// This transport socket is currently only supported on Linux. 
+message Config { + // The underlying transport socket being wrapped. + config.core.v3.TransportSocket transport_socket = 1 [(validate.rules).message = {required: true}]; + + // Period to update stats while the connection is open. If unset, updates only happen when the + // connection is closed. Stats are always updated one final time when the connection is closed. + google.protobuf.Duration update_period = 2 [(validate.rules).duration = {gte {nanos: 1000000}}]; +} diff --git a/api/envoy/extensions/transport_sockets/tls/v3/tls.proto b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto index f680207955a8..1bf15d2222d2 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/tls.proto +++ b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -109,10 +109,9 @@ message DownstreamTlsContext { bool disable_stateless_session_resumption = 7; } - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). + // If specified, ``session_timeout`` will change the maximum lifetime (in seconds) of the TLS session. + // Currently this value is used as a hint for the `TLS session ticket lifetime (for TLSv1.2) `_. + // Only seconds can be specified (fractional seconds are ignored). google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { lt {seconds: 4294967296} gte {} diff --git a/api/envoy/extensions/watchdog/profile_action/v3/BUILD b/api/envoy/extensions/watchdog/profile_action/v3/BUILD new file mode 100644 index 000000000000..ee92fb652582 --- /dev/null +++ b/api/envoy/extensions/watchdog/profile_action/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto b/api/envoy/extensions/watchdog/profile_action/v3/profile_action.proto similarity index 87% rename from api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto rename to api/envoy/extensions/watchdog/profile_action/v3/profile_action.proto index d73f0b5dfb9c..07c3907fbd61 100644 --- a/api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto +++ b/api/envoy/extensions/watchdog/profile_action/v3/profile_action.proto @@ -1,16 +1,15 @@ syntax = "proto3"; -package envoy.extensions.watchdog.profile_action.v3alpha; +package envoy.extensions.watchdog.profile_action.v3; import "google/protobuf/duration.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.watchdog.profile_action.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.watchdog.profile_action.v3"; option java_outer_classname = "ProfileActionProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Watchdog Action that does CPU profiling.] 
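For the new TCP Stats transport socket wrapper introduced above, a hedged sketch of how it might wrap an upstream cluster's socket follows; the cluster scaffolding, endpoint address and the inner ``raw_buffer`` socket are assumptions, while ``update_period`` and the nested ``transport_socket`` requirement come from the ``Config`` message in this diff.

.. code-block:: yaml

  clusters:
  - name: backend                                   # hypothetical cluster
    connect_timeout: 1s
    type: STRICT_DNS
    load_assignment:
      cluster_name: backend
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: {address: backend.internal, port_value: 8080}
    transport_socket:
      name: envoy.transport_sockets.tcp_stats
      typed_config:
        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tcp_stats.v3.Config
        # Refresh the TCP stats every 5s while the connection is open; if omitted,
        # stats are only recorded once when the connection closes.
        update_period: 5s
        # The wrapped socket is required by the validation rule on Config.transport_socket.
        transport_socket:
          name: envoy.transport_sockets.raw_buffer
          typed_config:
            "@type": type.googleapis.com/envoy.extensions.transport_sockets.raw_buffer.v3.RawBuffer

Because the inner ``transport_socket`` is required, the wrapper always delegates actual I/O to the wrapped socket (plaintext here, but a TLS socket would be wired in the same way) and only adds stats about the underlying TCP connection.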
diff --git a/api/envoy/service/auth/v3/external_auth.proto b/api/envoy/service/auth/v3/external_auth.proto index 31adbc161b88..11fc057da888 100644 --- a/api/envoy/service/auth/v3/external_auth.proto +++ b/api/envoy/service/auth/v3/external_auth.proto @@ -12,7 +12,6 @@ import "google/rpc/status.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.auth.v3"; option java_outer_classname = "ExternalAuthProto"; @@ -61,7 +60,7 @@ message DeniedHttpResponse { } // HTTP attributes for an OK response. -// [#next-free-field: 7] +// [#next-free-field: 9] message OkHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.OkHttpResponse"; @@ -103,6 +102,15 @@ message OkHttpResponse { // to the downstream client on success. Note that the :ref:`append field in HeaderValueOption ` // defaults to false when used in this message. repeated config.core.v3.HeaderValueOption response_headers_to_add = 6; + + // This field allows the authorization service to set (and overwrite) query + // string parameters on the original request before it is sent upstream. + repeated config.core.v3.QueryParameter query_parameters_to_set = 7; + + // This field allows the authorization service to specify which query parameters + // should be removed from the original request before it is sent upstream. Each + // element in this list is a case-sensitive query parameter name to be removed. + repeated string query_parameters_to_remove = 8; } // Intended for gRPC and Network Authorization servers `only`. diff --git a/api/envoy/service/ext_proc/v3alpha/BUILD b/api/envoy/service/ext_proc/v3/BUILD similarity index 76% rename from api/envoy/service/ext_proc/v3alpha/BUILD rename to api/envoy/service/ext_proc/v3/BUILD index 4f3730e2af32..d4506b16ed5d 100644 --- a/api/envoy/service/ext_proc/v3alpha/BUILD +++ b/api/envoy/service/ext_proc/v3/BUILD @@ -8,8 +8,9 @@ api_proto_package( has_services = True, deps = [ "//envoy/config/core/v3:pkg", - "//envoy/extensions/filters/http/ext_proc/v3alpha:pkg", + "//envoy/extensions/filters/http/ext_proc/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/annotations/v3:pkg", ], ) diff --git a/api/envoy/service/ext_proc/v3alpha/external_processor.proto b/api/envoy/service/ext_proc/v3/external_processor.proto similarity index 97% rename from api/envoy/service/ext_proc/v3alpha/external_processor.proto rename to api/envoy/service/ext_proc/v3/external_processor.proto index 09572331aa42..dc6b527d5bcc 100644 --- a/api/envoy/service/ext_proc/v3alpha/external_processor.proto +++ b/api/envoy/service/ext_proc/v3/external_processor.proto @@ -1,22 +1,24 @@ syntax = "proto3"; -package envoy.service.ext_proc.v3alpha; +package envoy.service.ext_proc.v3; import "envoy/config/core/v3/base.proto"; -import "envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto"; +import "envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto"; import "envoy/type/v3/http_status.proto"; import "google/protobuf/struct.proto"; +import "xds/annotations/v3/status.proto"; + import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.service.ext_proc.v3alpha"; +option java_package = "io.envoyproxy.envoy.service.ext_proc.v3"; option java_outer_classname = "ExternalProcessorProto"; option java_multiple_files 
= true; option java_generic_services = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; +option (xds.annotations.v3.file_status).work_in_progress = true; // [#protodoc-title: External Processing Service] @@ -167,7 +169,7 @@ message ProcessingResponse { // for the duration of this particular request/response only. Servers // may use this to intelligently control how requests are processed // based on the headers and other metadata that they see. - envoy.extensions.filters.http.ext_proc.v3alpha.ProcessingMode mode_override = 9; + envoy.extensions.filters.http.ext_proc.v3.ProcessingMode mode_override = 9; } // The following are messages that are sent to the server. diff --git a/api/envoy/service/ratelimit/v3/rls.proto b/api/envoy/service/ratelimit/v3/rls.proto index ab8e0ffc0eba..113998c4082d 100644 --- a/api/envoy/service/ratelimit/v3/rls.proto +++ b/api/envoy/service/ratelimit/v3/rls.proto @@ -53,7 +53,7 @@ message RateLimitRequest { } // A response from a ShouldRateLimit call. -// [#next-free-field: 7] +// [#next-free-field: 8] message RateLimitResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.ratelimit.v2.RateLimitResponse"; @@ -103,8 +103,15 @@ message RateLimitResponse { Unit unit = 2; } - // Cacheable quota for responses, see documentation for the :ref:`quota - // ` field. + // Cacheable quota for responses. + // Quota can be granted at different levels: either for each individual descriptor or for the whole descriptor set. + // This is a certain number of requests over a period of time. + // The client may cache this result and apply the effective RateLimitResponse to future matching + // requests without querying rate limit service. + // + // When quota expires due to timeout, a new RLS request will also be made. + // The implementation may choose to preemptively query the rate limit server for more quota on or + // before expiration or before the available quota runs out. // [#not-implemented-hide:] message Quota { // Number of matching requests granted in quota. Must be 1 or more. @@ -114,6 +121,15 @@ message RateLimitResponse { // Point in time at which the quota expires. google.protobuf.Timestamp valid_until = 2; } + + // The unique id that is associated with each Quota, either at the individual descriptor level or the whole descriptor set level. + // + // For a matching policy with boolean logic, for example, match: "request.headers['environment'] == 'staging' || request.headers['environment'] == 'dev'"), + // the request_headers action produces a distinct list of descriptors for each possible value of the ‘environment’ header even though the granted quota is the same. + // Thus, the client will use this id information (returned from the RLS server) to correctly correlate the multiple descriptors/descriptor sets that have been granted with the same quota (i.e., share the same quota among multiple descriptors or descriptor sets). + // + // If id is empty, this id field will be ignored. If quota for the same id changes (e.g. due to configuration update), the old quota will be overridden by the new one. Shared quotas referenced by ID will still adhere to expiration after `valid_until`. + string id = 3; } // [#next-free-field: 6] @@ -133,12 +149,9 @@ message RateLimitResponse { // Duration until reset of the current limit window. google.protobuf.Duration duration_until_reset = 4; - // Quota granted for the descriptor. This is a certain number of requests over a period of time.
- // The client may cache this result and apply the effective RateLimitResponse to future matching - // requests containing a matching descriptor without querying rate limit service. - // // Quota is available for a request if its descriptor set has cached quota available for all // descriptors. + // This is for each individual descriptor in the descriptor set. The client will perform matches for each individual descriptor against available per-descriptor quota. // // If quota is available, a RLS request will not be made and the quota will be reduced by 1 for // all matching descriptors. @@ -159,10 +172,6 @@ message RateLimitResponse { // If the server did not provide a quota, such as the quota message is empty for some of // the descriptors, then the request admission is determined by the // :ref:`overall_code `. - // - // When quota expires due to timeout, a new RLS request will also be made. - // The implementation may choose to preemptively query the rate limit server for more quota on or - // before expiration or before the available quota runs out. // [#not-implemented-hide:] Quota quota = 5; } @@ -193,4 +202,17 @@ message RateLimitResponse { // - :ref:`envoy.filters.network.ratelimit ` for network filter. // - :ref:`envoy.filters.thrift.rate_limit ` for Thrift filter. google.protobuf.Struct dynamic_metadata = 6; + + // Quota is available for a request if its entire descriptor set has cached quota available. + // This is a union of all descriptors in the descriptor set. Clients can use the quota for future matches if and only if the descriptor set matches what was sent in the request that originated this response. + // + // If quota is available, a RLS request will not be made and the quota will be reduced by 1. + // If quota is not available (i.e., a cached entry doesn't exist for a RLS descriptor set), a RLS request will be triggered. + // If the server did not provide a quota, such as when the quota message is empty, then the request admission is determined by the + // :ref:`overall_code `. + // + // If there is not sufficient quota, i.e. the cached entry for a RLS descriptor set exists but is out of quota and not expired, + // the request will be treated as OVER_LIMIT. + // [#not-implemented-hide:] + Quota quota = 7; } diff --git a/api/envoy/watchdog/v3/BUILD b/api/envoy/watchdog/v3/BUILD new file mode 100644 index 000000000000..ee92fb652582 --- /dev/null +++ b/api/envoy/watchdog/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/watchdog/v3alpha/README.md b/api/envoy/watchdog/v3/README.md similarity index 100% rename from api/envoy/watchdog/v3alpha/README.md rename to api/envoy/watchdog/v3/README.md diff --git a/api/envoy/watchdog/v3alpha/abort_action.proto b/api/envoy/watchdog/v3/abort_action.proto similarity index 85% rename from api/envoy/watchdog/v3alpha/abort_action.proto rename to api/envoy/watchdog/v3/abort_action.proto index d6f34aa892cd..325c3d3dc7a8 100644 --- a/api/envoy/watchdog/v3alpha/abort_action.proto +++ b/api/envoy/watchdog/v3/abort_action.proto @@ -1,15 +1,14 @@ syntax = "proto3"; -package envoy.watchdog.v3alpha; +package envoy.watchdog.v3; import "google/protobuf/duration.proto"; import "udpa/annotations/status.proto"; -option java_package = "io.envoyproxy.envoy.watchdog.v3alpha"; +option java_package = "io.envoyproxy.envoy.watchdog.v3"; option java_outer_classname = "AbortActionProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Watchdog Action that kills a stuck thread to kill the process.] diff --git a/api/versioning/BUILD b/api/versioning/BUILD index bf34f6f1eb43..8eeb536f66c8 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -19,6 +19,7 @@ proto_library( "//contrib/envoy/extensions/filters/network/sip_proxy/router/v3alpha:pkg", "//contrib/envoy/extensions/filters/network/sip_proxy/v3alpha:pkg", "//contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha:pkg", + "//contrib/envoy/extensions/vcl/v3alpha:pkg", "//envoy/admin/v3:pkg", "//envoy/config/accesslog/v3:pkg", "//envoy/config/bootstrap/v3:pkg", @@ -49,10 +50,10 @@ proto_library( "//envoy/data/tap/v3:pkg", "//envoy/extensions/access_loggers/file/v3:pkg", "//envoy/extensions/access_loggers/grpc/v3:pkg", - "//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg", + "//envoy/extensions/access_loggers/open_telemetry/v3:pkg", "//envoy/extensions/access_loggers/stream/v3:pkg", "//envoy/extensions/access_loggers/wasm/v3:pkg", - "//envoy/extensions/cache/simple_http_cache/v3alpha:pkg", + "//envoy/extensions/cache/simple_http_cache/v3:pkg", "//envoy/extensions/clusters/aggregate/v3:pkg", "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/clusters/redis/v3:pkg", @@ -68,14 +69,14 @@ proto_library( "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/common/matcher/action/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", - "//envoy/extensions/filters/http/admission_control/v3alpha:pkg", + "//envoy/extensions/filters/http/admission_control/v3:pkg", "//envoy/extensions/filters/http/alternate_protocols_cache/v3:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", - "//envoy/extensions/filters/http/bandwidth_limit/v3alpha:pkg", + "//envoy/extensions/filters/http/bandwidth_limit/v3:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", - "//envoy/extensions/filters/http/cache/v3alpha:pkg", - "//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg", + "//envoy/extensions/filters/http/cache/v3:pkg", + "//envoy/extensions/filters/http/cdn_loop/v3:pkg", "//envoy/extensions/filters/http/composite/v3:pkg", 
"//envoy/extensions/filters/http/compressor/v3:pkg", "//envoy/extensions/filters/http/cors/v3:pkg", @@ -84,7 +85,7 @@ proto_library( "//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/filters/http/dynamo/v3:pkg", "//envoy/extensions/filters/http/ext_authz/v3:pkg", - "//envoy/extensions/filters/http/ext_proc/v3alpha:pkg", + "//envoy/extensions/filters/http/ext_proc/v3:pkg", "//envoy/extensions/filters/http/fault/v3:pkg", "//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg", "//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg", @@ -99,7 +100,7 @@ proto_library( "//envoy/extensions/filters/http/kill_request/v3:pkg", "//envoy/extensions/filters/http/local_ratelimit/v3:pkg", "//envoy/extensions/filters/http/lua/v3:pkg", - "//envoy/extensions/filters/http/oauth2/v3alpha:pkg", + "//envoy/extensions/filters/http/oauth2/v3:pkg", "//envoy/extensions/filters/http/on_demand/v3:pkg", "//envoy/extensions/filters/http/original_src/v3:pkg", "//envoy/extensions/filters/http/ratelimit/v3:pkg", @@ -127,14 +128,14 @@ proto_library( "//envoy/extensions/filters/network/rbac/v3:pkg", "//envoy/extensions/filters/network/redis_proxy/v3:pkg", "//envoy/extensions/filters/network/sni_cluster/v3:pkg", - "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", + "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3:pkg", "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/router/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", "//envoy/extensions/filters/network/wasm/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", - "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", + "//envoy/extensions/filters/udp/dns_filter/v3:pkg", "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", "//envoy/extensions/formatter/metadata/v3:pkg", "//envoy/extensions/formatter/req_without_query/v3:pkg", @@ -149,6 +150,8 @@ proto_library( "//envoy/extensions/matching/common_inputs/environment_variable/v3:pkg", "//envoy/extensions/matching/input_matchers/consistent_hashing/v3:pkg", "//envoy/extensions/matching/input_matchers/ip/v3:pkg", + "//envoy/extensions/network/dns_resolver/apple/v3:pkg", + "//envoy/extensions/network/dns_resolver/cares/v3:pkg", "//envoy/extensions/network/socket_interface/v3:pkg", "//envoy/extensions/quic/crypto_stream/v3:pkg", "//envoy/extensions/quic/proof_source/v3:pkg", @@ -167,9 +170,10 @@ proto_library( "//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg", "//envoy/extensions/transport_sockets/quic/v3:pkg", "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", - "//envoy/extensions/transport_sockets/s2a/v3alpha:pkg", + "//envoy/extensions/transport_sockets/s2a/v3:pkg", "//envoy/extensions/transport_sockets/starttls/v3:pkg", "//envoy/extensions/transport_sockets/tap/v3:pkg", + "//envoy/extensions/transport_sockets/tcp_stats/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", "//envoy/extensions/upstreams/http/generic/v3:pkg", "//envoy/extensions/upstreams/http/http/v3:pkg", @@ -177,14 +181,14 @@ proto_library( "//envoy/extensions/upstreams/http/v3:pkg", "//envoy/extensions/upstreams/tcp/generic/v3:pkg", "//envoy/extensions/wasm/v3:pkg", - "//envoy/extensions/watchdog/profile_action/v3alpha:pkg", + "//envoy/extensions/watchdog/profile_action/v3:pkg", "//envoy/service/accesslog/v3:pkg", "//envoy/service/auth/v3:pkg", "//envoy/service/cluster/v3:pkg", 
"//envoy/service/discovery/v3:pkg", "//envoy/service/endpoint/v3:pkg", "//envoy/service/event_reporting/v3:pkg", - "//envoy/service/ext_proc/v3alpha:pkg", + "//envoy/service/ext_proc/v3:pkg", "//envoy/service/extension/v3:pkg", "//envoy/service/health/v3:pkg", "//envoy/service/listener/v3:pkg", @@ -202,7 +206,7 @@ proto_library( "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", - "//envoy/watchdog/v3alpha:pkg", + "//envoy/watchdog/v3:pkg", ], ) diff --git a/bazel/EXTERNAL_DEPS.md b/bazel/EXTERNAL_DEPS.md index 9820ff4cf993..0e1cdb0e2744 100644 --- a/bazel/EXTERNAL_DEPS.md +++ b/bazel/EXTERNAL_DEPS.md @@ -25,7 +25,7 @@ This is the preferred style of adding dependencies that use CMake for their buil 1. Define a the source Bazel repository in [`bazel/repositories.bzl`](repositories.bzl), in the `envoy_dependencies()` function. -2. Add a `cmake_external` rule to [`bazel/foreign_cc/BUILD`](foreign_cc/BUILD). This will reference +2. Add an `envoy_cmake` rule to [`bazel/foreign_cc/BUILD`](foreign_cc/BUILD). This will reference the source repository in step 1. 3. Reference your new external dependency in some `envoy_cc_library` via the name bound in step 1 `external_deps` attribute. diff --git a/bazel/envoy_binary.bzl b/bazel/envoy_binary.bzl index 6ea24b9888cf..d7a17c2ff615 100644 --- a/bazel/envoy_binary.bzl +++ b/bazel/envoy_binary.bzl @@ -22,7 +22,8 @@ def envoy_cc_binary( stamped = False, deps = [], linkopts = [], - tags = []): + tags = [], + features = []): if not linkopts: linkopts = _envoy_linkopts() if stamped: @@ -42,6 +43,7 @@ def envoy_cc_binary( stamp = 1, deps = deps, tags = tags, + features = features, ) # Select the given values if exporting is enabled in the current build. diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl index 6f9c9d83e30b..9c5130f15e4b 100644 --- a/bazel/envoy_internal.bzl +++ b/bazel/envoy_internal.bzl @@ -51,9 +51,9 @@ def envoy_copts(repository, test = False): # debugging info detailing some 1600 test binaries would be wasteful. # targets listed in order from generic to increasing specificity. # Bazel adds an implicit -DNDEBUG for opt targets. - repository + "//bazel:opt_build": [] if test else ["-ggdb3", "-gsplit-dwarf"], + repository + "//bazel:opt_build": [] if test else ["-ggdb3"], repository + "//bazel:fastbuild_build": [], - repository + "//bazel:dbg_build": ["-ggdb3", "-gsplit-dwarf"], + repository + "//bazel:dbg_build": ["-ggdb3"], repository + "//bazel:windows_opt_build": [] if test else ["-Z7"], repository + "//bazel:windows_fastbuild_build": [], repository + "//bazel:windows_dbg_build": [], diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index 799e60154afc..0cd48ba28620 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -169,10 +169,12 @@ def envoy_cc_test( linkstatic = envoy_linkstatic(), malloc = tcmalloc_external_dep(repository), deps = envoy_stdlib_deps() + deps + [envoy_external_dep_path(dep) for dep in external_deps + ["googletest"]] + [ - repository + "//test:test_pch", repository + "//test:main", repository + "//test/test_common:test_version_linkstamp", - ], + ] + select({ + repository + "//bazel:clang_pch_build": [repository + "//test:test_pch"], + "//conditions:default": [], + }), # from https://github.com/google/googletest/blob/6e1970e2376c14bf658eb88f655a054030353f9f/googlemock/src/gmock.cc#L51 # 2 - by default, mocks act as StrictMocks. 
args = args + ["--gmock_default_mock_behavior=2"], diff --git a/bazel/external/googleurl.patch b/bazel/external/googleurl.patch index e124821f9ace..8c27b8f327ce 100644 --- a/bazel/external/googleurl.patch +++ b/bazel/external/googleurl.patch @@ -2,22 +2,22 @@ # project using clang-cl. Tracked in https://github.com/envoyproxy/envoy/issues/11974. diff --git a/base/compiler_specific.h b/base/compiler_specific.h -index 0cd36dc..8c4cbd4 100644 +index 6651220..a469c19 100644 --- a/base/compiler_specific.h +++ b/base/compiler_specific.h @@ -7,10 +7,6 @@ - + #include "build/build_config.h" - + -#if defined(COMPILER_MSVC) && !defined(__clang__) -#error "Only clang-cl is supported on Windows, see https://crbug.com/988071" -#endif - - // Annotate a variable indicating it's ok if the variable is not used. - // (Typically used to silence a compiler warning when the assignment - // is important for some other reason.) -@@ -55,8 +51,12 @@ - // prevent code folding, see gurl_base::debug::Alias. + // This is a wrapper around `__has_cpp_attribute`, which can be used to test for + // the presence of an attribute. In case the compiler does not support this + // macro it will simply evaluate to 0. +@@ -75,8 +71,12 @@ + // prevent code folding, see NO_CODE_FOLDING() in base/debug/alias.h. // Use like: // void NOT_TAIL_CALLED FooBar(); -#if defined(__clang__) && __has_attribute(not_tail_called) @@ -30,10 +30,10 @@ index 0cd36dc..8c4cbd4 100644 #else #define NOT_TAIL_CALLED #endif -@@ -226,7 +226,9 @@ +@@ -273,7 +273,9 @@ #endif #endif - + -#if defined(__clang__) && __has_attribute(uninitialized) +#if defined(__clang__) +#if defined(__has_attribute) @@ -41,7 +41,7 @@ index 0cd36dc..8c4cbd4 100644 // Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for // the specified variable. // Library-wide alternative is -@@ -257,6 +259,8 @@ +@@ -304,6 +306,8 @@ // E.g. platform, bot, benchmark or test name in patch description or next to // the attribute. #define STACK_UNINITIALIZED __attribute__((uninitialized)) @@ -50,13 +50,74 @@ index 0cd36dc..8c4cbd4 100644 #else #define STACK_UNINITIALIZED #endif +@@ -365,8 +369,12 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) { + #endif // defined(__clang_analyzer__) + + // Use nomerge attribute to disable optimization of merging multiple same calls. +-#if defined(__clang__) && __has_attribute(nomerge) ++#if defined(__clang__) ++#if defined(__has_attribute) ++#if __has_attribute(nomerge) + #define NOMERGE [[clang::nomerge]] ++#endif ++#endif + #else + #define NOMERGE + #endif +@@ -392,8 +400,12 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) { + // See also: + // https://clang.llvm.org/docs/AttributeReference.html#trivial-abi + // https://libcxx.llvm.org/docs/DesignDocs/UniquePtrTrivialAbi.html +-#if defined(__clang__) && __has_attribute(trivial_abi) ++#if defined(__clang__) ++#if defined(__has_attribute) ++#if __has_attribute(trivial_abi) + #define TRIVIAL_ABI [[clang::trivial_abi]] ++#endif ++#endif + #else + #define TRIVIAL_ABI + #endif +@@ -401,8 +413,12 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) { + // Marks a member function as reinitializing a moved-from variable. 
+ // See also + // https://clang.llvm.org/extra/clang-tidy/checks/bugprone-use-after-move.html#reinitialization +-#if defined(__clang__) && __has_attribute(reinitializes) ++#if defined(__clang__) ++#if defined(__has_attribute) ++#if __has_attribute(reinitializes) + #define REINITIALIZES_AFTER_MOVE [[clang::reinitializes]] ++#endif ++#endif + #else + #define REINITIALIZES_AFTER_MOVE + #endif + +# TODO(keith): Remove once bazel supports newer NDK versions https://github.com/bazelbuild/bazel/issues/12889 + +diff --git a/base/containers/checked_iterators.h b/base/containers/checked_iterators.h +index b5fe925..31aa81e 100644 +--- a/base/containers/checked_iterators.h ++++ b/base/containers/checked_iterators.h +@@ -237,9 +237,11 @@ using CheckedContiguousConstIterator = CheckedContiguousIterator; + // [3] https://wg21.link/pointer.traits.optmem + namespace std { + ++#ifdef SUPPORTS_CPP_17_CONTIGUOUS_ITERATOR + template + struct __is_cpp17_contiguous_iterator<::gurl_base::CheckedContiguousIterator> + : true_type {}; ++#endif + + template + struct pointer_traits<::gurl_base::CheckedContiguousIterator> { # TODO(dio): Consider to remove the following patch when we have IDN-free optional build for URL # library from the upstream Chromium project. This is tracked in: # https://github.com/envoyproxy/envoy/issues/14743. diff --git a/url/BUILD b/url/BUILD -index f2ec8da..4e2d55b 100644 +index f2ec8da..df69661 100644 --- a/url/BUILD +++ b/url/BUILD @@ -52,3 +52,27 @@ cc_library( diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index 662ae2cd1ef8..18b1c2275e21 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -56,8 +56,6 @@ genrule( quiche_common_copts = [ # hpack_huffman_decoder.cc overloads operator<<. "-Wno-unused-function", - # quic_inlined_frame.h uses offsetof() to optimize memory usage in frames. 
- "-Wno-invalid-offsetof", ] quiche_copts = select({ @@ -146,7 +144,10 @@ envoy_cc_library( hdrs = ["quiche/http2/http2_constants.h"], copts = quiche_copts, repository = "@envoy", - deps = [":http2_platform"], + deps = [ + ":http2_platform", + ":quiche_common_text_utils_lib", + ], ) envoy_cc_library( @@ -839,6 +840,7 @@ envoy_cc_library( deps = [ ":quiche_common_lib", ":quiche_common_platform", + ":quiche_common_text_utils_lib", ":spdy_core_header_storage_lib", ], ) @@ -1933,6 +1935,7 @@ envoy_cc_library( deps = [ ":quic_core_clock_lib", ":quic_core_crypto_certificate_view_lib", + ":quic_core_crypto_client_proof_source_lib", ":quic_core_crypto_encryption_lib", ":quic_core_crypto_hkdf_lib", ":quic_core_crypto_proof_source_lib", @@ -2079,6 +2082,7 @@ envoy_cc_library( tags = ["nofips"], visibility = ["//visibility:public"], deps = [ + ":quic_core_crypto_certificate_view_lib", ":quic_core_packets_lib", ":quic_core_versions_lib", ":quic_platform_base", @@ -2086,6 +2090,24 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_crypto_client_proof_source_lib", + srcs = [ + "quiche/quic/core/crypto/client_proof_source.cc", + ], + hdrs = [ + "quiche/quic/core/crypto/client_proof_source.h", + ], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + deps = [ + ":quic_core_crypto_proof_source_lib", + ":quic_platform_base", + ], +) + envoy_cc_library( name = "quic_core_crypto_random_lib", srcs = ["quiche/quic/core/crypto/quic_random.cc"], @@ -2289,6 +2311,21 @@ envoy_cc_library( deps = [":quic_core_types_lib"], ) +envoy_cc_library( + name = "quic_core_http_capsule_lib", + srcs = ["quiche/quic/core/http/capsule.cc"], + hdrs = ["quiche/quic/core/http/capsule.h"], + copts = quiche_copts, + repository = "@envoy", + deps = [ + ":quic_core_buffer_allocator_lib", + ":quic_core_data_lib", + ":quic_core_http_http_frames_lib", + ":quic_core_types_lib", + ":quic_platform_base", + ], +) + envoy_cc_library( name = "quic_core_http_client_lib", srcs = [ @@ -2387,6 +2424,7 @@ envoy_cc_library( repository = "@envoy", tags = ["nofips"], deps = [ + ":quic_core_http_http_constants_lib", ":quic_core_types_lib", ":quic_platform_base", ":spdy_core_framer_lib", @@ -2430,6 +2468,7 @@ envoy_cc_library( "quiche/quic/core/http/quic_spdy_session.cc", "quiche/quic/core/http/quic_spdy_stream.cc", "quiche/quic/core/http/web_transport_http3.cc", + "quiche/quic/core/http/web_transport_stream_adapter.cc", ], hdrs = [ "quiche/quic/core/http/quic_headers_stream.h", @@ -2440,6 +2479,7 @@ envoy_cc_library( "quiche/quic/core/http/quic_spdy_session.h", "quiche/quic/core/http/quic_spdy_stream.h", "quiche/quic/core/http/web_transport_http3.h", + "quiche/quic/core/http/web_transport_stream_adapter.h", ], copts = quiche_copts, repository = "@envoy", @@ -2449,6 +2489,7 @@ envoy_cc_library( ":quic_core_connection_lib", ":quic_core_crypto_crypto_handshake_lib", ":quic_core_error_codes_lib", + ":quic_core_http_capsule_lib", ":quic_core_http_header_list_lib", ":quic_core_http_http_constants_lib", ":quic_core_http_http_decoder_lib", @@ -2467,7 +2508,6 @@ envoy_cc_library( ":quic_core_utils_lib", ":quic_core_versions_lib", ":quic_core_web_transport_interface_lib", - ":quic_core_web_transport_stream_adapter", ":quic_platform_base", ":quic_platform_mem_slice_storage", ":spdy_core_framer_lib", @@ -3150,19 +3190,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "quic_core_web_transport_stream_adapter", - srcs = ["quiche/quic/core/web_transport_stream_adapter.cc"], - hdrs = 
["quiche/quic/core/web_transport_stream_adapter.h"], - copts = quiche_copts, - repository = "@envoy", - tags = ["nofips"], - deps = [ - ":quic_core_session_lib", - ":quic_core_web_transport_interface_lib", - ], -) - envoy_cc_library( name = "quic_core_server_id_lib", srcs = ["quiche/quic/core/quic_server_id.cc"], diff --git a/bazel/external/rules_cc.patch b/bazel/external/rules_cc.patch deleted file mode 100644 index 057545c920f3..000000000000 --- a/bazel/external/rules_cc.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/cc/private/toolchain/unix_cc_toolchain_config.bzl b/cc/private/toolchain/unix_cc_toolchain_config.bzl -index c3cf3ba..d207bbd 100644 ---- a/cc/private/toolchain/unix_cc_toolchain_config.bzl -+++ b/cc/private/toolchain/unix_cc_toolchain_config.bzl -@@ -313,7 +313,7 @@ def _impl(ctx): - ], - flag_groups = [ - flag_group( -- flags = ["-gsplit-dwarf"], -+ flags = ["-gsplit-dwarf", "-g"], - expand_if_available = "per_object_debug_info_file", - ), - ], diff --git a/bazel/external/wee8.BUILD b/bazel/external/wee8.BUILD index 5bba6a5d3f78..ee56998db90f 100644 --- a/bazel/external/wee8.BUILD +++ b/bazel/external/wee8.BUILD @@ -11,9 +11,9 @@ cc_library( ], hdrs = glob([ - "wee8/include/**/*.h", - "wee8/src/**/*.h", - "wee8/third_party/wasm-api/wasm.hh", + "include/**/*.h", + "src/**/*.h", + "third_party/wasm-api/wasm.hh", ]), copts = [ "-Wno-range-loop-analysis", @@ -22,9 +22,9 @@ cc_library( "V8_ENABLE_WEBASSEMBLY", ], includes = [ - "wee8", - "wee8/include", - "wee8/third_party", + ".", + "include", + "third_party", ], tags = ["skip_on_windows"], visibility = ["//visibility:public"], @@ -33,8 +33,8 @@ cc_library( genrule( name = "build", srcs = glob( - ["wee8/**"], - exclude = ["wee8/out/**"], + ["**"], + exclude = ["out/**"], ), outs = [ "libwee8.a", diff --git a/bazel/external/wee8.genrule_cmd b/bazel/external/wee8.genrule_cmd index 7b175a1f20aa..8c92818102fe 100644 --- a/bazel/external/wee8.genrule_cmd +++ b/bazel/external/wee8.genrule_cmd @@ -21,8 +21,8 @@ MSYS_NT-*-x86_64) esac # Bazel magic. -ROOT=$$(dirname $(rootpath wee8/BUILD.gn))/.. -pushd $$ROOT/wee8 +ROOT=$$(dirname $(rootpath BUILD.gn)) +pushd $$ROOT # Clean after previous build. rm -rf out/wee8 @@ -148,4 +148,4 @@ fi # Move compiled library to the expected destinations. popd -mv $$ROOT/wee8/out/wee8/obj/libwee8.a $(execpath libwee8.a) +mv $$ROOT/out/wee8/obj/libwee8.a $(execpath libwee8.a) diff --git a/bazel/external/wee8.patch b/bazel/external/wee8.patch index 5dfce7b799e1..c15f5d867a3b 100644 --- a/bazel/external/wee8.patch +++ b/bazel/external/wee8.patch @@ -1,9 +1,10 @@ # 1. Fix linking with unbundled toolchain on macOS. # 2. Increase VSZ limit to 64 TiB (allows us to start up to 6,553 VMs). # 3. Fix linking with MSAN. ---- wee8/build/toolchain/gcc_toolchain.gni -+++ wee8/build/toolchain/gcc_toolchain.gni -@@ -376,6 +376,8 @@ template("gcc_toolchain") { +# 4. Fix build with LLVM/Clang versions older than 13.0.0. +--- build/toolchain/gcc_toolchain.gni ++++ build/toolchain/gcc_toolchain.gni +@@ -381,6 +381,8 @@ template("gcc_toolchain") { # AIX does not support either -D (deterministic output) or response # files. command = "$ar -X64 {{arflags}} -r -c -s {{output}} {{inputs}}" @@ -12,7 +13,7 @@ } else { rspfile = "{{output}}.rsp" rspfile_content = "{{inputs}}" -@@ -565,7 +567,7 @@ template("gcc_toolchain") { +@@ -595,7 +597,7 @@ template("gcc_toolchain") { start_group_flag = "" end_group_flag = "" @@ -21,9 +22,9 @@ # the "--start-group .. --end-group" feature isn't available on the aix ld. 
start_group_flag = "-Wl,--start-group" end_group_flag = "-Wl,--end-group " ---- wee8/src/objects/backing-store.cc -+++ wee8/src/objects/backing-store.cc -@@ -53,7 +53,7 @@ constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB +--- src/objects/backing-store.cc ++++ src/objects/backing-store.cc +@@ -47,7 +47,7 @@ constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB // RISC-V64 has a user space of 256GB on the Sv39 scheme. constexpr size_t kAddressSpaceLimit = 0x4000000000L; // 256 GiB #elif V8_TARGET_ARCH_64_BIT @@ -32,9 +33,9 @@ #else constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB #endif ---- wee8/build/config/sanitizers/sanitizers.gni -+++ wee8/build/config/sanitizers/sanitizers.gni -@@ -153,7 +153,7 @@ if (!is_a_target_toolchain) { +--- build/config/sanitizers/sanitizers.gni ++++ build/config/sanitizers/sanitizers.gni +@@ -158,7 +158,7 @@ if (!is_a_target_toolchain) { # standard system libraries. We have instrumented system libraries for msan, # which requires them to prevent false positives. # TODO(thakis): Maybe remove this variable. @@ -43,7 +44,7 @@ # Whether we are doing a fuzzer build. Normally this should be checked instead # of checking "use_libfuzzer || use_afl" because often developers forget to -@@ -202,8 +202,7 @@ assert(!using_sanitizer || is_clang, +@@ -207,8 +207,7 @@ assert(!using_sanitizer || is_clang, assert(!is_cfi || is_clang, "is_cfi requires setting is_clang = true in 'gn args'") @@ -53,3 +54,158 @@ if (use_libfuzzer && (is_linux || is_chromeos)) { if (is_asan) { +--- build/config/compiler/BUILD.gn ++++ build/config/compiler/BUILD.gn +@@ -1253,19 +1253,12 @@ config("compiler_deterministic") { + # different build directory like "out/feature_a" and "out/feature_b" if + # we build same files with same compile flag. + # Other paths are already given in relative, no need to normalize them. +- if (is_nacl) { +- # TODO(https://crbug.com/1231236): Use -ffile-compilation-dir= here. +- cflags += [ +- "-Xclang", +- "-fdebug-compilation-dir", +- "-Xclang", +- ".", +- ] +- } else { +- # -ffile-compilation-dir is an alias for both -fdebug-compilation-dir= +- # and -fcoverage-compilation-dir=. +- cflags += [ "-ffile-compilation-dir=." ] +- } ++ cflags += [ ++ "-Xclang", ++ "-fdebug-compilation-dir", ++ "-Xclang", ++ ".", ++ ] + if (!is_win) { + # We don't use clang -cc1as on Windows (yet? https://crbug.com/762167) + asmflags = [ "-Wa,-fdebug-compilation-dir,." 
] +--- build/config/sanitizers/BUILD.gn ++++ build/config/sanitizers/BUILD.gn +@@ -272,11 +272,11 @@ config("asan_flags") { + if (is_asan) { + cflags += [ "-fsanitize=address" ] + if (is_win) { +- if (!defined(asan_win_blocklist_path)) { +- asan_win_blocklist_path = ++ if (!defined(asan_win_blacklist_path)) { ++ asan_win_blacklist_path = + rebase_path("//tools/memory/asan/blocklist_win.txt", root_build_dir) + } +- cflags += [ "-fsanitize-ignorelist=$asan_win_blocklist_path" ] ++ cflags += [ "-fsanitize-blacklist=$asan_win_blacklist_path" ] + } + } + } +@@ -306,13 +306,13 @@ config("link_shared_library") { + config("cfi_flags") { + cflags = [] + if (is_cfi && current_toolchain == default_toolchain) { +- if (!defined(cfi_ignorelist_path)) { +- cfi_ignorelist_path = ++ if (!defined(cfi_blacklist_path)) { ++ cfi_blacklist_path = + rebase_path("//tools/cfi/ignores.txt", root_build_dir) + } + cflags += [ + "-fsanitize=cfi-vcall", +- "-fsanitize-ignorelist=$cfi_ignorelist_path", ++ "-fsanitize-blacklist=$cfi_blacklist_path", + ] + + if (use_cfi_cast) { +@@ -409,14 +409,14 @@ config("msan_flags") { + if (is_msan) { + assert(is_linux || is_chromeos, + "msan only supported on linux x86_64/ChromeOS") +- if (!defined(msan_ignorelist_path)) { +- msan_ignorelist_path = +- rebase_path("//tools/msan/ignorelist.txt", root_build_dir) ++ if (!defined(msan_blacklist_path)) { ++ msan_blacklist_path = ++ rebase_path("//tools/msan/blacklist.txt", root_build_dir) + } + cflags = [ + "-fsanitize=memory", + "-fsanitize-memory-track-origins=$msan_track_origins", +- "-fsanitize-ignorelist=$msan_ignorelist_path", ++ "-fsanitize-blacklist=$msan_blacklist_path", + ] + } + } +@@ -424,13 +424,13 @@ config("msan_flags") { + config("tsan_flags") { + if (is_tsan) { + assert(is_linux || is_chromeos, "tsan only supported on linux x86_64") +- if (!defined(tsan_ignorelist_path)) { +- tsan_ignorelist_path = ++ if (!defined(tsan_blacklist_path)) { ++ tsan_blacklist_path = + rebase_path("//tools/memory/tsan_v2/ignores.txt", root_build_dir) + } + cflags = [ + "-fsanitize=thread", +- "-fsanitize-ignorelist=$tsan_ignorelist_path", ++ "-fsanitize-blacklist=$tsan_blacklist_path", + ] + } + } +@@ -438,8 +438,8 @@ config("tsan_flags") { + config("ubsan_flags") { + cflags = [] + if (is_ubsan) { +- if (!defined(ubsan_ignorelist_path)) { +- ubsan_ignorelist_path = ++ if (!defined(ubsan_blacklist_path)) { ++ ubsan_blacklist_path = + rebase_path("//tools/ubsan/ignorelist.txt", root_build_dir) + } + cflags += [ +@@ -456,7 +456,7 @@ config("ubsan_flags") { + "-fsanitize=signed-integer-overflow", + "-fsanitize=unreachable", + "-fsanitize=vla-bound", +- "-fsanitize-ignorelist=$ubsan_ignorelist_path", ++ "-fsanitize-blacklist=$ubsan_blacklist_path", + ] + + # Chromecast ubsan builds fail to compile with these +@@ -486,8 +486,8 @@ config("ubsan_no_recover") { + + config("ubsan_security_flags") { + if (is_ubsan_security) { +- if (!defined(ubsan_security_ignorelist_path)) { +- ubsan_security_ignorelist_path = ++ if (!defined(ubsan_security_blacklist_path)) { ++ ubsan_security_blacklist_path = + rebase_path("//tools/ubsan/security_ignorelist.txt", root_build_dir) + } + cflags = [ +@@ -495,7 +495,7 @@ config("ubsan_security_flags") { + "-fsanitize=shift", + "-fsanitize=signed-integer-overflow", + "-fsanitize=vla-bound", +- "-fsanitize-ignorelist=$ubsan_security_ignorelist_path", ++ "-fsanitize-blacklist=$ubsan_security_blacklist_path", + ] + } + } +@@ -508,13 +508,13 @@ config("ubsan_null_flags") { + + config("ubsan_vptr_flags") { + if (is_ubsan_vptr) { 
+- if (!defined(ubsan_vptr_ignorelist_path)) { +- ubsan_vptr_ignorelist_path = ++ if (!defined(ubsan_vptr_blacklist_path)) { ++ ubsan_vptr_blacklist_path = + rebase_path("//tools/ubsan/vptr_ignorelist.txt", root_build_dir) + } + cflags = [ + "-fsanitize=vptr", +- "-fsanitize-ignorelist=$ubsan_vptr_ignorelist_path", ++ "-fsanitize-blacklist=$ubsan_vptr_blacklist_path", + ] + } + } diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 044f4562ee4d..c78104c34ad6 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -113,7 +113,6 @@ envoy_cmake( "SXG_WITH_CERT_CHAIN": "off", "RUN_TEST": "off", "CMAKE_INSTALL_LIBDIR": "lib", - "CMAKE_TRY_COMPILE_TARGET_TYPE": "STATIC_LIBRARY", }, lib_source = "@com_github_google_libsxg//:all", out_static_libs = ["libsxg.a"], @@ -278,45 +277,73 @@ envoy_cmake( lib_source = "@org_llvm_llvm//:all", out_static_libs = select({ "//conditions:default": [ - # Order from llvm-config --libnames asmparser core debuginfodwarf - # engine lto mcparser mirparser orcjit passes runtimedyld - # support x86asmparser x86desc + # This list must be updated when the bazel llvm version is updated + # (in `bazel/repository_locations.bzl`) + # + # The list can be regenerated by compiling the correct/updated llvm version + # from sources and running: + # + # `llvm-config --libnames` + # + "libLLVMWindowsManifest.a", + "libLLVMXRay.a", + "libLLVMLibDriver.a", + "libLLVMDlltoolDriver.a", + "libLLVMCoverage.a", + "libLLVMLineEditor.a", + "libLLVMX86Disassembler.a", + "libLLVMX86AsmParser.a", + "libLLVMX86CodeGen.a", + "libLLVMX86Desc.a", + "libLLVMX86Info.a", "libLLVMOrcJIT.a", - "libLLVMOrcError.a", + "libLLVMMCJIT.a", "libLLVMJITLink.a", - "libLLVMMIRParser.a", + "libLLVMOrcTargetProcess.a", + "libLLVMOrcShared.a", + "libLLVMInterpreter.a", + "libLLVMExecutionEngine.a", + "libLLVMRuntimeDyld.a", + "libLLVMSymbolize.a", + "libLLVMDebugInfoPDB.a", + "libLLVMDebugInfoGSYM.a", + "libLLVMOption.a", + "libLLVMObjectYAML.a", + "libLLVMMCA.a", + "libLLVMMCDisassembler.a", "libLLVMLTO.a", "libLLVMPasses.a", + "libLLVMCFGuard.a", + "libLLVMCoroutines.a", "libLLVMObjCARCOpts.a", + "libLLVMHelloNew.a", "libLLVMipo.a", - "libLLVMInstrumentation.a", "libLLVMVectorize.a", "libLLVMLinker.a", - "libLLVMIRReader.a", - "libLLVMX86Disassembler.a", - "libLLVMX86AsmParser.a", - "libLLVMX86CodeGen.a", - "libLLVMCFGuard.a", + "libLLVMInstrumentation.a", + "libLLVMFrontendOpenMP.a", + "libLLVMFrontendOpenACC.a", + "libLLVMExtensions.a", + "libLLVMDWARFLinker.a", "libLLVMGlobalISel.a", - "libLLVMSelectionDAG.a", + "libLLVMMIRParser.a", "libLLVMAsmPrinter.a", + "libLLVMDebugInfoDWARF.a", + "libLLVMSelectionDAG.a", "libLLVMCodeGen.a", + "libLLVMIRReader.a", + "libLLVMAsmParser.a", + "libLLVMInterfaceStub.a", + "libLLVMFileCheck.a", + "libLLVMFuzzMutate.a", + "libLLVMTarget.a", "libLLVMScalarOpts.a", "libLLVMInstCombine.a", "libLLVMAggressiveInstCombine.a", "libLLVMTransformUtils.a", "libLLVMBitWriter.a", - "libLLVMX86Desc.a", - "libLLVMMCDisassembler.a", - "libLLVMX86Utils.a", - "libLLVMX86Info.a", - "libLLVMMCJIT.a", - "libLLVMExecutionEngine.a", - "libLLVMTarget.a", "libLLVMAnalysis.a", "libLLVMProfileData.a", - "libLLVMRuntimeDyld.a", - "libLLVMDebugInfoDWARF.a", "libLLVMObject.a", "libLLVMTextAPI.a", "libLLVMMCParser.a", @@ -324,7 +351,6 @@ envoy_cmake( "libLLVMDebugInfoCodeView.a", "libLLVMDebugInfoMSF.a", "libLLVMBitReader.a", - "libLLVMAsmParser.a", "libLLVMCore.a", "libLLVMRemarks.a", "libLLVMBitstreamReader.a", diff --git a/bazel/foreign_cc/vpp_vcl.patch 
b/bazel/foreign_cc/vpp_vcl.patch new file mode 100644 index 000000000000..0d440fb02945 --- /dev/null +++ b/bazel/foreign_cc/vpp_vcl.patch @@ -0,0 +1,51 @@ +# Not a git repo so embed version +--- src/CMakeLists.txt ++++ src/CMakeLists.txt +@@ -42,12 +42,8 @@ include(cmake/ccache.cmake) + ############################################################################## + # VPP Version + ############################################################################## +-execute_process( +- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} +- COMMAND scripts/version +- OUTPUT_VARIABLE VPP_VERSION +- OUTPUT_STRIP_TRAILING_WHITESPACE +-) ++ ++set(VPP_VERSION 21.10-rc0~334-g596c45b22) + string(REPLACE "-" ";" VPP_LIB_VERSION ${VPP_VERSION}) + list(GET VPP_LIB_VERSION 0 VPP_LIB_VERSION) + +@@ -179,7 +179,7 @@ if(VPP_HOST_TOOLS_ONLY) + elseif("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux") + find_package(OpenSSL) + set(SUBDIRS +- vppinfra svm vlib vlibmemory vlibapi vnet vpp vat vat2 vcl plugins ++ vppinfra svm vlib vlibmemory vlibapi vnet vpp vat vat2 vcl + vpp-api tools/vppapigen tools/g2 tools/perftool cmake pkg + tools/appimage + ) + +--- src/cmake/ccache.cmake ++++ src/cmake/ccache.cmake +@@ -14,7 +14,7 @@ + ############################################################################## + # ccache + ############################################################################## +-option(VPP_USE_CCACHE "Use ccache compiler cache." ON) ++option(VPP_USE_CCACHE "Use ccache compiler cache." OFF) + if(VPP_USE_CCACHE) + find_program(CCACHE_FOUND ccache) + message(STATUS "Looking for ccache") + +--- src/cmake/library.cmake ++++ src/cmake/library.cmake +@@ -24,7 +24,7 @@ macro(add_vpp_library lib) + set_target_properties(${lo} PROPERTIES POSITION_INDEPENDENT_CODE ON) + target_compile_options(${lo} PUBLIC ${VPP_DEFAULT_MARCH_FLAGS}) + +- add_library(${lib} SHARED) ++ add_library(${lib} STATIC) + target_sources(${lib} PRIVATE $) + + if(VPP_LIB_VERSION) diff --git a/bazel/foreign_cc/zlib_ng.patch b/bazel/foreign_cc/zlib_ng.patch index 77b04ef09496..b4b73279da3f 100644 --- a/bazel/foreign_cc/zlib_ng.patch +++ b/bazel/foreign_cc/zlib_ng.patch @@ -1,12 +1,13 @@ + # Add support for compiling to WebAssembly using Emscripten. 
# https://github.com/zlib-ng/zlib-ng/pull/794 diff --git a/cmake/detect-arch.c b/cmake/detect-arch.c -index 5715535..2137691 100644 + --- a/cmake/detect-arch.c +++ b/cmake/detect-arch.c -@@ -93,6 +93,10 @@ - #elif defined(__THW_RS6000) - #error archfound rs6000 +@@ -101,6 +101,10 @@ + #error archfound riscv32 + #endif +// Emscripten (WebAssembly) +#elif defined(__EMSCRIPTEN__) @@ -16,16 +17,16 @@ index 5715535..2137691 100644 #else #error archfound unrecognized diff --git a/cmake/detect-arch.cmake b/cmake/detect-arch.cmake -index b80d666..c6cc214 100644 + --- a/cmake/detect-arch.cmake +++ b/cmake/detect-arch.cmake -@@ -85,6 +85,9 @@ elseif("${ARCH}" MATCHES "parisc") +@@ -85,6 +85,9 @@ elseif("${ARCH}" MATCHES "rs6000") set(BASEARCH "rs6000") set(BASEARCH_RS6000_FOUND TRUE) +elseif("${ARCH}" MATCHES "wasm32") + set(BASEARCH "wasm32") + set(BASEARCH_WASM32_FOUND TRUE) - else() - set(BASEARCH "x86") - set(BASEARCH_X86_FOUND TRUE) + elseif("${ARCH}" MATCHES "riscv(32|64)") + set(BASEARCH "riscv") + set(BASEARCH_RISCV_FOUND TRUE) diff --git a/bazel/genrule_repository.bzl b/bazel/genrule_repository.bzl index 28f37adfe55c..cfa9e7be44e9 100644 --- a/bazel/genrule_repository.bzl +++ b/bazel/genrule_repository.bzl @@ -13,17 +13,9 @@ def _genrule_repository(ctx): if patch_result.return_code != 0: fail("Failed to apply patch %r: %s" % (patch, patch_result.stderr)) - # https://github.com/bazelbuild/bazel/issues/3766 - genrule_cmd_file = Label("@envoy//bazel").relative(str(ctx.attr.genrule_cmd_file)) - ctx.symlink(genrule_cmd_file, "_envoy_genrule_cmd.genrule_cmd") - cat_genrule_cmd = ctx.execute(["cat", "_envoy_genrule_cmd.genrule_cmd"]) - if cat_genrule_cmd.return_code != 0: - fail("Failed to read genrule command %r: %s" % ( - genrule_cmd_file, - cat_genrule_cmd.stderr, - )) - + genrule_cmd = ctx.read(ctx.attr.genrule_cmd_file) ctx.file("WORKSPACE", "workspace(name=%r)" % (ctx.name,)) + ctx.delete("BUILD.bazel") ctx.symlink(ctx.attr.build_file, "BUILD.bazel") # Inject the genrule_cmd content into a .bzl file that can be loaded @@ -32,8 +24,8 @@ def _genrule_repository(ctx): ctx.file("genrule_cmd.bzl", """ _GENRULE_CMD = {%r: %r} def genrule_cmd(label): - return _GENRULE_CMD[label] -""" % (str(genrule_cmd_file), cat_genrule_cmd.stdout)) + return _GENRULE_CMD[Label(label)] +""" % (ctx.attr.genrule_cmd_file, genrule_cmd)) genrule_repository = repository_rule( attrs = { diff --git a/bazel/protobuf.patch b/bazel/protobuf.patch index e786c7ebe146..a8c5e959ddbe 100644 --- a/bazel/protobuf.patch +++ b/bazel/protobuf.patch @@ -1,34 +1,24 @@ -# https://github.com/protocolbuffers/protobuf/pull/6720 -diff --git a/third_party/BUILD b/third_party/BUILD -new file mode 100644 -index 0000000000..b66101a39a ---- /dev/null -+++ b/third_party/BUILD -@@ -0,0 +1 @@ -+exports_files(["six.BUILD", "zlib.BUILD"]) - -# patching for zlib binding diff --git a/BUILD b/BUILD -index efc3d8e7f..746ad4851 100644 +index 1690d4219..8a7f1bf14 100644 --- a/BUILD +++ b/BUILD -@@ -24,7 +24,7 @@ config_setting( +@@ -19,7 +19,7 @@ exports_files(["LICENSE"]) # ZLIB configuration ################################################################################ - + -ZLIB_DEPS = ["@zlib//:zlib"] +ZLIB_DEPS = ["//external:zlib"] - + ################################################################################ # Protobuf Runtime Library diff --git a/python/google/protobuf/__init__.py b/python/google/protobuf/__init__.py -index 97ac28028..8b7585d9d 100644 +index 68087e550..0a2d63e73 100644 --- a/python/google/protobuf/__init__.py +++ 
b/python/google/protobuf/__init__.py @@ -31,3 +31,9 @@ # Copyright 2007 Google Inc. All Rights Reserved. - - __version__ = '3.16.0' + + __version__ = '3.19.0' + +if __name__ != '__main__': + try: diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index c642f6e37590..0fd7212d032b 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -232,12 +232,9 @@ def envoy_dependencies(skip_targets = []): patch_args = ["-p1"], patches = ["@envoy//bazel/external:envoy_build_tools.patch"], ) - external_http_archive( - "rules_cc", - patch_args = ["-p1"], - patches = ["@envoy//bazel/external:rules_cc.patch"], - ) + external_http_archive("rules_cc") external_http_archive("rules_pkg") + _com_github_fdio_vpp_vcl() # Unconditional, since we use this only for compiler-agnostic fuzzing utils. _org_llvm_releases_compiler_rt() @@ -1107,6 +1104,13 @@ filegroup( build_file_content = BUILD_ALL_CONTENT, ) +def _com_github_fdio_vpp_vcl(): + external_http_archive( + name = "com_github_fdio_vpp_vcl", + build_file_content = BUILD_ALL_CONTENT, + patches = ["@envoy//bazel/foreign_cc:vpp_vcl.patch"], + ) + def _foreign_cc_dependencies(): external_http_archive("rules_foreign_cc") diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index bc674cb74ca1..baf290abf6bc 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -15,10 +15,10 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Gazelle", project_desc = "Bazel BUILD file generator for Go projects", project_url = "https://github.com/bazelbuild/bazel-gazelle", - version = "0.22.2", - sha256 = "b85f48fa105c4403326e9525ad2b2cc437babaa6e15a3fc0b1dbab0ab064bc7c", + version = "0.24.0", + sha256 = "de69a09dc70417580aabf20a28619bb3ef60d038470c7cf8442fafcf627c21cb", urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/v{version}/bazel-gazelle-v{version}.tar.gz"], - release_date = "2020-10-02", + release_date = "2021-10-11", use_category = ["build"], ), bazel_toolchains = dict( @@ -39,21 +39,21 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Apple Rules for Bazel", project_desc = "Bazel rules for Apple platforms", project_url = "https://github.com/bazelbuild/rules_apple", - version = "0.31.2", - sha256 = "c84962b64d9ae4472adfb01ec2cf1aa73cb2ee8308242add55fa7cc38602d882", + version = "0.31.3", + sha256 = "0052d452af7742c8f3a4e0929763388a66403de363775db7e90adecb2ba4944b", urls = ["https://github.com/bazelbuild/rules_apple/releases/download/{version}/rules_apple.{version}.tar.gz"], - release_date = "2021-05-07", + release_date = "2021-08-08", use_category = ["build"], ), rules_fuzzing = dict( project_name = "Fuzzing Rules for Bazel", project_desc = "Bazel rules for fuzz tests", project_url = "https://github.com/bazelbuild/rules_fuzzing", - version = "0.1.3", - sha256 = "ce99c277c4e9e21f77222757936bf7ffb8823911497db84bdd57a796588fcf01", + version = "0.2.0", + sha256 = "9b688a77b930e1842312d37b00fbb796b96323a2eb8362b2cfb68e7d6e74f860", strip_prefix = "rules_fuzzing-{version}", urls = ["https://github.com/bazelbuild/rules_fuzzing/archive/v{version}.tar.gz"], - release_date = "2021-04-01", + release_date = "2021-07-12", use_category = ["test_only"], implied_untracked_deps = [ # This is a repository rule generated to define an OSS-Fuzz fuzzing @@ -65,11 +65,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "envoy-build-tools", project_desc = "Common build tools shared by the Envoy/UDPA ecosystem", project_url = "https://github.com/envoyproxy/envoy-build-tools", - version = 
"a9db6cde8e8d4404621e9631bba237c45e04ceed", - sha256 = "f2c7d4d5c4b18d85321d4c43686b8c5b5473a4ad9b1f8f4245d863a10367a9c0", + version = "55a7bbe700586729bd38231a9a6f3dcd1ff85e7d", + sha256 = "11893be9f0334a7e12ffc04b3b034dffe0bb5516d36654011532136c7929ae27", strip_prefix = "envoy-build-tools-{version}", urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/{version}.tar.gz"], - release_date = "2021-09-17", + release_date = "2021-09-28", use_category = ["build"], ), boringssl = dict( @@ -141,12 +141,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "xxHash", project_desc = "Extremely fast hash algorithm", project_url = "https://github.com/Cyan4973/xxHash", - version = "0.7.3", - sha256 = "952ebbf5b11fbf59ae5d760a562d1e9112278f244340ad7714e8556cbe54f7f7", + version = "0.8.0", + sha256 = "7054c3ebd169c97b64a92d7b994ab63c70dd53a06974f1f630ab782c28db0f4f", strip_prefix = "xxHash-{version}", urls = ["https://github.com/Cyan4973/xxHash/archive/v{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - release_date = "2020-03-05", + release_date = "2020-07-27", cpe = "N/A", ), com_github_envoyproxy_sqlparser = dict( @@ -175,6 +175,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/mirror/tclap/archive/tclap-{version}-release-final.tar.gz"], release_date = "2011-04-16", use_category = ["other"], + cpe = "cpe:2.3:a:tclap_project:tclap:*", ), com_github_fmtlib_fmt = dict( project_name = "fmt", @@ -240,11 +241,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "gperftools", project_desc = "tcmalloc and profiling libraries", project_url = "https://github.com/gperftools/gperftools", - version = "2.8", - sha256 = "240deacdd628b6459671b83eb0c4db8e97baadf659f25b92e9a078d536bd513e", + version = "2.9.1", + sha256 = "ea566e528605befb830671e359118c2da718f721c27225cbbc93858c7520fee3", strip_prefix = "gperftools-{version}", urls = ["https://github.com/gperftools/gperftools/releases/download/gperftools-{version}/gperftools-{version}.tar.gz"], - release_date = "2020-07-06", + release_date = "2021-03-03", use_category = ["dataplane_core", "controlplane"], cpe = "cpe:2.3:a:gperftools_project:gperftools:*", ), @@ -264,17 +265,14 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "libipp-crypto", project_desc = "Intel® Integrated Performance Primitives Cryptography", project_url = "https://github.com/intel/ipp-crypto", - # The required BoringSSL compatibility patches are present in the - # "development" branch, but not yet in the release branch. The target - # release for the patches is 2021.4. 
- version = "4048dac1617bf33ff85d37a4b8f68f21342263b7", - sha256 = "4316589a7c0afa5788b84b04510283dab0979bf6d3b0aa0e4ef0fe540675af5e", - strip_prefix = "ipp-crypto-{version}", - urls = ["https://github.com/intel/ipp-crypto/archive/{version}.tar.gz"], - release_date = "2021-07-07", + version = "2021.4", + sha256 = "23e250dcf281aa00d186be8dc4e34fa8fc5c95a0895694cd00b33f18af5d60c7", + strip_prefix = "ipp-crypto-ippcp_{version}", + urls = ["https://github.com/intel/ipp-crypto/archive/ippcp_{version}.tar.gz"], + release_date = "2021-10-01", use_category = ["dataplane_ext"], extensions = ["envoy.tls.key_providers.cryptomb"], - cpe = "N/A", + cpe = "cpe:2.3:a:intel:cryptography_for_intel_integrated_performance_primitives:*", ), com_github_luajit_luajit = dict( project_name = "LuaJIT", @@ -308,12 +306,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Nghttp2", project_desc = "Implementation of HTTP/2 and its header compression algorithm HPACK in Cimplementation of HTTP/2 and its header compression algorithm HPACK in C", project_url = "https://nghttp2.org", - version = "1.42.0", - sha256 = "884d18a0158908125d58b1b61d475c0325e5a004e3d61a56b5fcc55d5f4b7af5", + version = "1.46.0", + sha256 = "4b6d11c85f2638531d1327fe1ed28c1e386144e8841176c04153ed32a4878208", strip_prefix = "nghttp2-{version}", urls = ["https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], - release_date = "2020-11-23", + release_date = "2021-10-19", cpe = "cpe:2.3:a:nghttp2:nghttp2:*", ), io_opentracing_cpp = dict( @@ -358,7 +356,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( use_category = ["observability_ext"], extensions = ["envoy.tracers.skywalking"], release_date = "2021-06-07", - cpe = "N/A", + cpe = "cpe:2.3:a:apache:skywalking:*", ), com_github_skyapm_cpp2sky = dict( project_name = "cpp2sky", @@ -457,12 +455,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "zlib-ng", project_desc = "zlib fork (higher performance)", project_url = "https://github.com/zlib-ng/zlib-ng", - version = "b802a303ce8b6c86fbe3f93d59e0a82333768c0c", - sha256 = "e051eade607ecbbfa2c7ed3087fe53e5d3a58325375e1e28209594138e4aa93d", + version = "2.0.5", + sha256 = "eca3fe72aea7036c31d00ca120493923c4d5b99fe02e6d3322f7c88dbdcd0085", strip_prefix = "zlib-ng-{version}", urls = ["https://github.com/zlib-ng/zlib-ng/archive/{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], - release_date = "2020-10-18", + release_date = "2021-06-25", cpe = "N/A", ), com_github_jbeder_yaml_cpp = dict( @@ -550,15 +548,15 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "nlohmann JSON", project_desc = "Fast JSON parser/generator for C++", project_url = "https://nlohmann.github.io/json", - version = "3.9.1", - sha256 = "4cf0df69731494668bdd6460ed8cb269b68de9c19ad8c27abc24cd72605b2d5b", + version = "3.10.4", + sha256 = "1155fd1a83049767360e9a120c43c578145db3204d2b309eba49fbbedd0f4ed3", strip_prefix = "json-{version}", urls = ["https://github.com/nlohmann/json/archive/v{version}.tar.gz"], # This will be a replacement for rapidJSON used in extensions and may also be a fast # replacement for protobuf JSON. use_category = ["controlplane", "dataplane_core"], - release_date = "2020-08-06", - cpe = "cpe:2.3:a:json_project:json:*", + release_date = "2021-10-16", + cpe = "cpe:2.3:a:json-for-modern-cpp_project:json-for-modern-cpp:*", ), # This is an external dependency needed while running the # envoy docker image. 
A bazel target has been created since @@ -622,30 +620,31 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/google/googletest/archive/{version}.tar.gz"], release_date = "2020-09-10", use_category = ["test_only"], + cpe = "cpe:2.3:a:google:google_test:*", ), com_google_protobuf = dict( project_name = "Protocol Buffers", project_desc = "Language-neutral, platform-neutral extensible mechanism for serializing structured data", project_url = "https://developers.google.com/protocol-buffers", - version = "3.16.0", - sha256 = "d7371dc2d46fddac1af8cb27c0394554b068768fc79ecaf5be1a1863e8ff3392", + version = "3.19.0", + sha256 = "7b8d3ac3d6591ce9d25f90faba80da78d0ef620fda711702367f61a40ba98429", strip_prefix = "protobuf-{version}", urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v{version}/protobuf-all-{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - release_date = "2021-05-07", + release_date = "2021-10-20", cpe = "cpe:2.3:a:google:protobuf:*", ), grpc_httpjson_transcoding = dict( project_name = "grpc-httpjson-transcoding", project_desc = "Library that supports transcoding so that HTTP/JSON can be converted to gRPC", project_url = "https://github.com/grpc-ecosystem/grpc-httpjson-transcoding", - version = "f1591a41318104b7e27a26be12f502b106a16256", - sha256 = "440baf465096ce1a7152c6d1090a70e871e5ca93b23c6cf9f8cd79f028bf5bb8", + version = "3127eeaf889d48b5d2cd870fd910f1ae3e7abca4", + sha256 = "f98da3fe9b2539c9fc9b3884e01baa8d2e19ed016bc5f41bed2998781c96ac63", strip_prefix = "grpc-httpjson-transcoding-{version}", urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = ["envoy.filters.http.grpc_json_transcoder"], - release_date = "2021-05-08", + release_date = "2021-09-22", cpe = "N/A", ), io_bazel_rules_go = dict( @@ -668,12 +667,10 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "C++ rules for Bazel", project_desc = "Bazel rules for the C++ language", project_url = "https://github.com/bazelbuild/rules_cc", - # TODO(lizan): pin to a point releases when there's a released version. 
- version = "dd2758b96dc8f9f4add81eaa4154b7e3d8be6873", - sha256 = "67571de4070cff615f7232281d8b12d8400976d21c19d8274386ab02799269fb", - strip_prefix = "rules_cc-{version}", - urls = ["https://github.com/bazelbuild/rules_cc/archive/{version}.tar.gz"], - release_date = "2021-09-17", + version = "0.0.1", + sha256 = "4dccbfd22c0def164c8f47458bd50e0c7148f3d92002cdb459c2a96a68498241", + urls = ["https://github.com/bazelbuild/rules_cc/releases/download/{version}/rules_cc-{version}.tar.gz"], + release_date = "2021-10-07", use_category = ["build"], ), rules_foreign_cc = dict( @@ -691,22 +688,22 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Python rules for Bazel", project_desc = "Bazel rules for the Python language", project_url = "https://github.com/bazelbuild/rules_python", - version = "9f597623ccfbe430b0d81c82498e33b80b7aec88", - sha256 = "8d61fed6974f1e69e09243ca78c9ecf82f50fa3de64bb5df6b0b9061f9c9639b", - release_date = "2021-09-07", - strip_prefix = "rules_python-{version}", - urls = ["https://github.com/bazelbuild/rules_python/archive/{version}.tar.gz"], + version = "0.4.0", + sha256 = "954aa89b491be4a083304a2cb838019c8b8c3720a7abb9c4cb81ac7a24230cea", + release_date = "2021-09-12", + urls = ["https://github.com/bazelbuild/rules_python/releases/download/{version}/rules_python-{version}.tar.gz"], use_category = ["build"], ), rules_pkg = dict( project_name = "Packaging rules for Bazel", project_desc = "Bazel rules for the packaging distributions", project_url = "https://github.com/bazelbuild/rules_pkg", - version = "0.5.1", - sha256 = "a89e203d3cf264e564fcb96b6e06dd70bc0557356eb48400ce4b5d97c2c3720d", - urls = ["https://github.com/bazelbuild/rules_pkg/releases/download/{version}/rules_pkg-{version}.tar.gz"], + version = "ad57589abb069baa48f982778de408ea02d714fd", + sha256 = "ec14799a45f1d3b6c3e61c4d04513001bddac9208f09077b1f8c91ab47d234d2", + strip_prefix = "rules_pkg-{version}/pkg", + urls = ["https://github.com/bazelbuild/rules_pkg/archive/{version}.tar.gz"], use_category = ["build"], - release_date = "2021-08-18", + release_date = "2021-10-22", ), six = dict( project_name = "Six", @@ -719,14 +716,16 @@ REPOSITORY_LOCATIONS_SPEC = dict( use_category = ["other"], ), org_llvm_llvm = dict( + # When changing this, you must re-generate the list of llvm libs + # see `bazel/foreign_cc/BUILD` for further information. 
project_name = "LLVM", project_desc = "LLVM Compiler Infrastructure", project_url = "https://llvm.org", - version = "10.0.0", - sha256 = "df83a44b3a9a71029049ec101fb0077ecbbdf5fe41e395215025779099a98fdf", + version = "12.0.1", + sha256 = "7d9a8405f557cefc5a21bf5672af73903b64749d9bc3a50322239f56f34ffddf", strip_prefix = "llvm-{version}.src", urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/llvm-{version}.src.tar.xz"], - release_date = "2020-03-24", + release_date = "2021-07-09", use_category = ["dataplane_ext"], extensions = [ "envoy.wasm.runtime.wamr", @@ -738,11 +737,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Webassembly Micro Runtime", project_desc = "A standalone runtime with a small footprint for WebAssembly", project_url = "https://github.com/bytecodealliance/wasm-micro-runtime", - version = "b554a9d05d89bb4ef28068b4ae4d0ee6c99bc9db", - sha256 = "de6b68118c5d4b0d37c9049fa08fae6a850304522ec307f087f0eca4ad8fff57", + version = "WAMR-08-10-2021", + sha256 = "4016f8330b2ed4fb5d9541ecd5bc4298f324097803a1f270fdbe691389cedfd9", strip_prefix = "wasm-micro-runtime-{version}", urls = ["https://github.com/bytecodealliance/wasm-micro-runtime/archive/{version}.tar.gz"], - release_date = "2021-07-06", + release_date = "2021-08-10", use_category = ["dataplane_ext"], extensions = ["envoy.wasm.runtime.wamr"], cpe = "N/A", @@ -751,11 +750,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "WAVM", project_desc = "WebAssembly Virtual Machine", project_url = "https://wavm.github.io", - version = "79c3aa29366615d9b1593cd527e5b4b94cc6072a", - sha256 = "ce899269516313b400005a8cc9bc3bcd8329663f43f7b4baae211ea0cd456a39", + version = "9ffd3e2f8dcbbe4e965825c32195bd70d6ebc95d", + sha256 = "e4d2d1f53deda4313209b6edceddfc59eb93f367cf3ca41b590ac2e54bb7daf3", strip_prefix = "WAVM-{version}", urls = ["https://github.com/WAVM/WAVM/archive/{version}.tar.gz"], - release_date = "2021-03-31", + release_date = "2021-10-16", use_category = ["dataplane_ext"], extensions = ["envoy.wasm.runtime.wavm"], cpe = "cpe:2.3:a:webassembly_virtual_machine_project:webassembly_virtual_machine:*", @@ -764,14 +763,14 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "wasmtime", project_desc = "A standalone runtime for WebAssembly", project_url = "https://github.com/bytecodealliance/wasmtime", - version = "0.26.0", - sha256 = "e95d274822ac72bf06355bdfbeddcacae60d7e98fec8ee4b2e21740636fb5c2c", + version = "0.30.0", + sha256 = "78eccfd8c8d63c30e85762bf36cf032409b7c34ac34f329b7e228ea6cc7aebca", strip_prefix = "wasmtime-{version}", urls = ["https://github.com/bytecodealliance/wasmtime/archive/v{version}.tar.gz"], - release_date = "2021-04-05", + release_date = "2021-09-17", use_category = ["dataplane_ext"], extensions = ["envoy.wasm.runtime.wasmtime"], - cpe = "N/A", + cpe = "cpe:2.3:a:bytecodealliance:wasmtime:*", ), com_github_wasm_c_api = dict( project_name = "wasm-c-api", @@ -806,8 +805,8 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "curl", project_desc = "Library for transferring data with URLs", project_url = "https://curl.haxx.se", - version = "7.79.0", - sha256 = "aff0c7c4a526d7ecc429d2f96263a85fa73e709877054d593d8af3d136858074", + version = "7.79.1", + sha256 = "370b11201349816287fb0ccc995e420277fbfcaf76206e309b3f60f0eda090c2", strip_prefix = "curl-{version}", urls = ["https://github.com/curl/curl/releases/download/curl-{underscore_version}/curl-{version}.tar.gz"], use_category = ["dataplane_ext", "observability_ext"], @@ -817,56 +816,57 @@ REPOSITORY_LOCATIONS_SPEC = dict( 
"envoy.grpc_credentials.aws_iam", "envoy.tracers.opencensus", ], - release_date = "2021-09-15", + release_date = "2021-09-22", cpe = "cpe:2.3:a:haxx:libcurl:*", ), com_googlesource_chromium_v8 = dict( project_name = "V8", project_desc = "Google’s open source high-performance JavaScript and WebAssembly engine, written in C++", project_url = "https://v8.dev", - version = "9.2.230.13", + version = "9.5.172.21", # This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh # and contains complete checkout of V8 with all dependencies necessary to build wee8. - sha256 = "77b4d6aaabe1dc60bf6bd2523a187d82292c27a2073ec48610dd098e3d4f80ce", + sha256 = "cd19ab73840031b65f246ebf35a59b224fb043656d772b675b72d12215ec2fd0", urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-{version}.tar.gz"], + strip_prefix = "wee8", use_category = ["dataplane_ext"], extensions = ["envoy.wasm.runtime.v8"], - release_date = "2021-06-25", + release_date = "2021-10-12", cpe = "cpe:2.3:a:google:v8:*", ), com_github_google_quiche = dict( project_name = "QUICHE", project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google‘s implementation of QUIC and related protocols", project_url = "https://github.com/google/quiche", - version = "4c1d482ab708b6e945afc53816e1bf5dc342f11e", - sha256 = "454002442133ed5dfff59686a8569effc48aabb21a756b92e1ab2173f632cc14", + version = "4c6ad6445246da3c6d3e7db920003321880048f8", + sha256 = "2a9823044b97b6055c2e4d84f6bdff5c4f66b9f18333ff58e270d23091a2b4ca", urls = ["https://github.com/google/quiche/archive/{version}.tar.gz"], strip_prefix = "quiche-{version}", use_category = ["dataplane_core"], - release_date = "2021-09-27", + release_date = "2021-10-26", cpe = "N/A", ), com_googlesource_googleurl = dict( project_name = "Chrome URL parsing library", project_desc = "Chrome URL parsing library", project_url = "https://quiche.googlesource.com/googleurl", - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/ef0d23689e240e6c8de4c3a5296b209128c87373.tar.gz. - version = "ef0d23689e240e6c8de4c3a5296b209128c87373", - sha256 = "d769283fed1319bca68bae8bdd47fbc3a7933999329eee850eff1f1ea61ce176", + # Static snapshot of https://quiche.googlesource.com/googleurl/+archive/561705e0066ff11e6cb97b8092f1547835beeb92.tar.gz. 
+ version = "561705e0066ff11e6cb97b8092f1547835beeb92", + sha256 = "7ce00768fea1fa4c7bf658942f13e41c9ba30e9cff931a6cda2f9fd02289f673", urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], extensions = [], - release_date = "2020-07-30", + release_date = "2021-08-31", cpe = "N/A", ), com_google_cel_cpp = dict( project_name = "Common Expression Language (CEL) C++ library", project_desc = "Common Expression Language (CEL) C++ library", project_url = "https://opensource.google/projects/cel", - version = "0.6.1", - sha256 = "d001494f1aa7d88172af944233fac3d7f83d9183d66590aa787aa2a35aab0440", + version = "89d81b2d2c24943b6e4fd5e8fc321099c2ab6d3f", + sha256 = "1408ef31e77ed847b420ff108da9652ad1702401008f2a75b671fba860a9707d", strip_prefix = "cel-cpp-{version}", - urls = ["https://github.com/google/cel-cpp/archive/v{version}.tar.gz"], + urls = ["https://github.com/google/cel-cpp/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", @@ -879,17 +879,17 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.stat_sinks.wasm", "envoy.rbac.matchers.upstream_ip_port", ], - release_date = "2021-06-28", + release_date = "2021-10-07", cpe = "N/A", ), com_github_google_flatbuffers = dict( project_name = "FlatBuffers", project_desc = "Cross platform serialization library architected for maximum memory efficiency", project_url = "https://github.com/google/flatbuffers", - version = "a83caf5910644ba1c421c002ef68e42f21c15f9f", - sha256 = "b8efbc25721e76780752bad775a97c3f77a0250271e2db37fc747b20e8b0f24a", + version = "2.0.0", + sha256 = "9ddb9031798f4f8754d00fca2f1a68ecf9d0f83dfac7239af1311e4fd9a565c4", strip_prefix = "flatbuffers-{version}", - urls = ["https://github.com/google/flatbuffers/archive/{version}.tar.gz"], + urls = ["https://github.com/google/flatbuffers/archive/v{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", @@ -902,19 +902,19 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.stat_sinks.wasm", "envoy.rbac.matchers.upstream_ip_port", ], - release_date = "2020-04-02", - cpe = "N/A", + release_date = "2021-05-10", + cpe = "cpe:2.3:a:google:flatbuffers:*", ), com_googlesource_code_re2 = dict( project_name = "RE2", project_desc = "RE2, a regular expression library", project_url = "https://github.com/google/re2", - version = "2020-07-06", - sha256 = "2e9489a31ae007c81e90e8ec8a15d62d58a9c18d4fd1603f6441ef248556b41f", + version = "2021-09-01", + sha256 = "42a2e1d56b5de252f5d418dc1cc0848e9e52ca22b056453988b18c6195ec7f8d", strip_prefix = "re2-{version}", urls = ["https://github.com/google/re2/archive/{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], - release_date = "2020-07-06", + release_date = "2021-09-01", cpe = "N/A", ), # Included to access FuzzedDataProvider.h. 
This is compiler agnostic but @@ -931,6 +931,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/compiler-rt-{version}.src.tar.xz"], release_date = "2021-07-09", use_category = ["test_only"], + cpe = "cpe:2.3:a:llvm:compiler-rt:*", ), upb = dict( project_name = "upb", @@ -1020,8 +1021,8 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "WebAssembly for Proxies (C++ host implementation)", project_desc = "WebAssembly for Proxies (C++ host implementation)", project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-host", - version = "03185974ef574233a5f6383311eb74a380146fe2", - sha256 = "34948e3ba239cc721af8d0a0a5b678325f363cbd542bddecf2267d24780d5b4d", + version = "9ec1f94005071a9a57ec04fe64031e8b5456253b", + sha256 = "74c7e73c0f60f2d1af2457d293d9faa461d5c7efc1664c15df74bbf678460251", strip_prefix = "proxy-wasm-cpp-host-{version}", urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], @@ -1037,7 +1038,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.wasm.runtime.wavm", "envoy.wasm.runtime.wasmtime", ], - release_date = "2021-08-12", + release_date = "2021-10-18", cpe = "N/A", ), proxy_wasm_rust_sdk = dict( @@ -1067,13 +1068,13 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Bazel rust rules", project_desc = "Bazel rust rules (used by Wasm)", project_url = "https://github.com/bazelbuild/rules_rust", - version = "7e7246f6c48a5d4e69744cd79b9ccb8886966ee2", - sha256 = "d54b379559f3fe6ff0cd251be216a5e35acf241451eec8144455482e8f4748f8", + version = "82b650d5d0709ae4c0ee8584f4ed92112ba11d67", + sha256 = "d087851b76204935f7f23c172eb0d136c09720b8484d8151019523652ce77004", strip_prefix = "rules_rust-{version}", urls = ["https://github.com/bazelbuild/rules_rust/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = ["envoy.wasm.runtime.wasmtime"], - release_date = "2021-06-29", + release_date = "2021-10-19", cpe = "N/A", ), rules_antlr = dict( @@ -1117,4 +1118,17 @@ REPOSITORY_LOCATIONS_SPEC = dict( release_date = "2018-12-18", cpe = "N/A", ), + com_github_fdio_vpp_vcl = dict( + project_name = "VPP Comms Library", + project_desc = "FD.io Vector Packet Processor (VPP) Comms Library", + project_url = "https://fd.io/", + version = "596c45b22211c9af243b624dc037f58c0aa1c302", + sha256 = "e4c3fad7e1a6952e5c081cfe25f1f091d97fae8e75c5f03205def37d34c27741", + strip_prefix = "vpp-{version}", + urls = ["https://github.com/FDio/vpp/archive/{version}.tar.gz"], + use_category = ["other"], + extensions = ["envoy.bootstrap.vcl"], + release_date = "2021-09-13", + cpe = "N/A", + ), ) diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh index 9eb97f75afe6..77cef1e83ecf 100755 --- a/ci/docker_ci.sh +++ b/ci/docker_ci.sh @@ -38,8 +38,13 @@ build_args() { TYPE=$1 FILE_SUFFIX="${TYPE/-debug/}" FILE_SUFFIX="${FILE_SUFFIX/-contrib/}" + FILE_SUFFIX="${FILE_SUFFIX/-ltsc2022/}" printf ' -f ci/Dockerfile-envoy%s' "${FILE_SUFFIX}" + if [[ "${TYPE}" == *-windows* ]]; then + printf ' --build-arg BUILD_OS=%s --build-arg BUILD_TAG=%s' "${WINDOWS_IMAGE_BASE}" "${WINDOWS_IMAGE_TAG}" + fi + if [[ "${TYPE}" == *-contrib* ]]; then printf ' --build-arg ENVOY_BINARY=envoy-contrib' fi @@ -103,7 +108,7 @@ push_images() { PLATFORM="$(build_platforms "${TYPE}")" # docker buildx doesn't do push with default builder docker "${BUILD_COMMAND[@]}" --platform "${PLATFORM}" "${args[@]}" -t "${BUILD_TAG}" . 
--push || \ - docker push "${BUILD_TAG}" + docker push "${BUILD_TAG}" } MAIN_BRANCH="refs/heads/main" @@ -125,7 +130,7 @@ DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/envoy}" if is_windows; then - BUILD_TYPES=("-windows") + BUILD_TYPES=("-${WINDOWS_BUILD_TYPE}") # BuildKit is not available for Windows images, use standard build command BUILD_COMMAND=("build") else diff --git a/ci/filter_example_setup.sh b/ci/filter_example_setup.sh index 5ef74fa49119..2447a79e41d4 100644 --- a/ci/filter_example_setup.sh +++ b/ci/filter_example_setup.sh @@ -25,6 +25,8 @@ sed -e "s|{ENVOY_SRCDIR}|${ENVOY_SRCDIR}|" "${ENVOY_SRCDIR}"/ci/WORKSPACE.filter mkdir -p "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/bazel ln -sf "${ENVOY_SRCDIR}"/bazel/get_workspace_status "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/bazel/ cp -f "${ENVOY_SRCDIR}"/.bazelrc "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/ +rm -f "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/.bazelversion +cp -f "${ENVOY_SRCDIR}"/.bazelversion "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/ cp -f "$(bazel info workspace)"/*.bazelrc "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/ export FILTER_WORKSPACE_SET=1 diff --git a/ci/mac_ci_steps.sh b/ci/mac_ci_steps.sh index 844351d51d91..7b3938965d95 100755 --- a/ci/mac_ci_steps.sh +++ b/ci/mac_ci_steps.sh @@ -60,4 +60,4 @@ fi bazel test "${BAZEL_BUILD_OPTIONS[@]}" "${TEST_TARGETS[@]}" # Additionally run macOS specific test suites -bazel test "${BAZEL_BUILD_OPTIONS[@]}" //test/common/network:apple_dns_impl_test +bazel test "${BAZEL_BUILD_OPTIONS[@]}" //test/extensions/network/dns_resolver/apple:apple_dns_impl_test diff --git a/ci/osx-build-config/extensions_build_config.bzl b/ci/osx-build-config/extensions_build_config.bzl index 379d6748e5a9..1c96ee887c44 100644 --- a/ci/osx-build-config/extensions_build_config.bzl +++ b/ci/osx-build-config/extensions_build_config.bzl @@ -10,6 +10,8 @@ EXTENSIONS = { "envoy.stat_sinks.metrics_service": "//source/extensions/stat_sinks/metrics_service:config", "envoy.transport_sockets.raw_buffer": "//source/extensions/transport_sockets/raw_buffer:config", "envoy.transport_sockets.tls": "//source/extensions/transport_sockets/tls:config", + "envoy.network.dns_resolver.cares": "//source/extensions/network/dns_resolver/cares:config", + "envoy.network.dns_resolver.apple": "//source/extensions/network/dns_resolver/apple:config", } WINDOWS_EXTENSIONS = {} EXTENSION_CONFIG_VISIBILITY = ["//:extension_config"] diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh index 0aca2629f3c4..1665334447f4 100755 --- a/ci/run_clang_tidy.sh +++ b/ci/run_clang_tidy.sh @@ -38,7 +38,7 @@ function exclude_win32_impl() { # Do not run clang-tidy against macOS impl # TODO: We should run clang-tidy against macOS impl for completeness function exclude_macos_impl() { - grep -v source/common/filesystem/kqueue/ | grep -v source/common/network/apple_dns_impl | grep -v test/common/network/apple_dns_impl_test + grep -v source/common/filesystem/kqueue/ | grep -v source/extensions/network/dns_resolver/apple/apple_dns_impl | grep -v test/extensions/network/dns_resolver/apple/apple_dns_impl_test } # Do not run incremental clang-tidy on check_format testdata files. @@ -46,12 +46,6 @@ function exclude_check_format_testdata() { grep -v tools/testdata/check_format/ } -# Do not run clang-tidy against Chromium URL import, this needs to largely -# reflect the upstream structure. -function exclude_chromium_url() { - grep -v source/common/chromium_url/ -} - # Exclude files in third_party which are temporary forks from other OSS projects. 
function exclude_third_party() { grep -v third_party/ @@ -83,7 +77,7 @@ function exclude_wasm_examples() { } function filter_excludes() { - exclude_check_format_testdata | exclude_chromium_url | exclude_win32_impl | exclude_macos_impl | exclude_third_party | exclude_wasm_emscripten | exclude_wasm_sdk | exclude_wasm_host | exclude_wasm_test_data | exclude_wasm_examples + exclude_check_format_testdata | exclude_win32_impl | exclude_macos_impl | exclude_third_party | exclude_wasm_emscripten | exclude_wasm_sdk | exclude_wasm_host | exclude_wasm_test_data | exclude_wasm_examples } function run_clang_tidy() { @@ -95,7 +89,13 @@ function run_clang_tidy() { } function run_clang_tidy_diff() { - git diff "$1" | filter_excludes | \ + local diff + diff="$(git diff "${1}")" + if [[ -z "$diff" ]]; then + echo "No changes detected, skipping clang_tidy_diff" + return 0 + fi + echo "$diff" | filter_excludes | \ python3 "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" \ -clang-tidy-binary="${CLANG_TIDY}" \ -export-fixes="${FIX_YAML}" -j "${NUM_CPUS:-0}" -p 1 -quiet diff --git a/ci/windows_ci_steps.sh b/ci/windows_ci_steps.sh index eed32c121886..110535e63576 100755 --- a/ci/windows_ci_steps.sh +++ b/ci/windows_ci_steps.sh @@ -91,11 +91,13 @@ fi if [[ $BUILD_ENVOY_STATIC -eq 1 ]]; then bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //source/exe:envoy-static - # Copy binary to delivery directory + # Copy binary and pdb to delivery directory cp -f bazel-bin/source/exe/envoy-static.exe "${ENVOY_DELIVERY_DIR}/envoy.exe" + cp -f bazel-bin/source/exe/envoy-static.pdb "${ENVOY_DELIVERY_DIR}/envoy.pdb" # Copy for azp, creating a tar archive tar czf "${ENVOY_BUILD_DIR}"/envoy_binary.tar.gz -C "${ENVOY_DELIVERY_DIR}" envoy.exe + tar czf "${ENVOY_BUILD_DIR}"/envoy_binary_debug.tar.gz -C "${ENVOY_DELIVERY_DIR}" envoy.exe envoy.pdb fi # Test invocations of known-working tests on Windows diff --git a/configs/google_com_auto_http3_upstream_proxy.yaml b/configs/google_com_auto_http3_upstream_proxy.yaml new file mode 100644 index 000000000000..8767f87a59ef --- /dev/null +++ b/configs/google_com_auto_http3_upstream_proxy.yaml @@ -0,0 +1,72 @@ +# An example config which accepts HTTP/1 requests over TCP and forwards them to google using HTTP/3 +admin: + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 9901 +static_resources: + listeners: + - name: listener_0 + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 10000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + scheme_header_transformation: + scheme_to_overwrite: https + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: + prefix: "/" + route: + host_rewrite_literal: www.google.com + cluster: service_google + http_filters: + - name: alternate_protocols_cache + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.alternate_protocols_cache.v3.FilterConfig + alternate_protocols_cache_options: + name: default_alternate_protocols_cache + - name: envoy.filters.http.router + clusters: + - name: service_google + connect_timeout: 30s + type: LOGICAL_DNS + # Comment out the following line to test on v6 networks + dns_lookup_family: V4_ONLY + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: service_google + endpoints: + - 
lb_endpoints: + - endpoint: + address: + socket_address: + address: www.google.com + port_value: 443 + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + auto_config: + http3_protocol_options: {} + alternate_protocols_cache_options: + name: default_alternate_protocols_cache + common_http_protocol_options: + idle_timeout: 1s + transport_socket: + name: envoy.transport_sockets.quic + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.quic.v3.QuicUpstreamTransport + upstream_tls_context: + sni: www.google.com diff --git a/configs/requirements.txt b/configs/requirements.txt index 1cd69909b996..7e65450464ab 100644 --- a/configs/requirements.txt +++ b/configs/requirements.txt @@ -1,6 +1,6 @@ -Jinja2==3.0.1 \ - --hash=sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4 \ - --hash=sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4 +Jinja2==3.0.2 \ + --hash=sha256:8569982d3f0889eed11dd620c706d39b60c36d6d25843961f33f77fb6bc6b20c \ + --hash=sha256:827a0e32839ab1600d4eb1c4c33ec5a8edfbc5cb42dafa13b81f182f97784b45 MarkupSafe==2.0.1 \ --hash=sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51 \ --hash=sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff \ diff --git a/contrib/contrib_build_config.bzl b/contrib/contrib_build_config.bzl index 3a9987a910e4..79708cc65db0 100644 --- a/contrib/contrib_build_config.bzl +++ b/contrib/contrib_build_config.bzl @@ -29,4 +29,10 @@ CONTRIB_EXTENSIONS = { # "envoy.tls.key_providers.cryptomb": "//contrib/cryptomb/private_key_providers/source:config", + + # + # Socket interface extensions + # + + "envoy.bootstrap.vcl": "//contrib/vcl/source:config", } diff --git a/contrib/extensions_metadata.yaml b/contrib/extensions_metadata.yaml index 215a7936f060..392df4fea682 100644 --- a/contrib/extensions_metadata.yaml +++ b/contrib/extensions_metadata.yaml @@ -48,3 +48,8 @@ envoy.tls.key_providers.cryptomb: - envoy.tls.key_providers security_posture: robust_to_untrusted_downstream status: alpha +envoy.bootstrap.vcl: + categories: + - envoy.bootstrap + security_posture: requires_trusted_downstream_and_upstream + status: alpha diff --git a/contrib/rocketmq_proxy/filters/network/source/BUILD b/contrib/rocketmq_proxy/filters/network/source/BUILD index b15d7db7e41b..d274adf6c6d0 100644 --- a/contrib/rocketmq_proxy/filters/network/source/BUILD +++ b/contrib/rocketmq_proxy/filters/network/source/BUILD @@ -10,8 +10,8 @@ licenses(["notice"]) # Apache 2 envoy_contrib_package() envoy_cc_library( - name = "well_known_names", - hdrs = ["well_known_names.h"], + name = "constant", + hdrs = ["constant.h"], deps = ["//source/common/singleton:const_singleton"], ) @@ -58,8 +58,8 @@ envoy_cc_library( name = "protocol_lib", srcs = ["protocol.cc"], deps = [ + ":constant", ":protocol_interface", - ":well_known_names", "//source/common/common:enum_to_int", ], ) @@ -91,10 +91,10 @@ envoy_cc_library( ], deps = [ ":codec_lib", + ":constant", ":protocol_lib", ":rocketmq_lib", ":stats_interface", - ":well_known_names", "//contrib/rocketmq_proxy/filters/network/source/router:router_interface", "//envoy/buffer:buffer_interface", "//envoy/event:dispatcher_interface", diff --git a/contrib/rocketmq_proxy/filters/network/source/active_message.cc b/contrib/rocketmq_proxy/filters/network/source/active_message.cc index 15e0f505e3f4..f960a343d810 100644 --- 
a/contrib/rocketmq_proxy/filters/network/source/active_message.cc +++ b/contrib/rocketmq_proxy/filters/network/source/active_message.cc @@ -9,8 +9,8 @@ #include "absl/strings/match.h" #include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/constant.h" #include "contrib/rocketmq_proxy/filters/network/source/topic_route.h" -#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" using Envoy::Tcp::ConnectionPool::ConnectionDataPtr; diff --git a/contrib/rocketmq_proxy/filters/network/source/well_known_names.h b/contrib/rocketmq_proxy/filters/network/source/constant.h similarity index 100% rename from contrib/rocketmq_proxy/filters/network/source/well_known_names.h rename to contrib/rocketmq_proxy/filters/network/source/constant.h diff --git a/contrib/rocketmq_proxy/filters/network/source/protocol.cc b/contrib/rocketmq_proxy/filters/network/source/protocol.cc index 7b9ff954798e..cd0481710ba1 100644 --- a/contrib/rocketmq_proxy/filters/network/source/protocol.cc +++ b/contrib/rocketmq_proxy/filters/network/source/protocol.cc @@ -3,7 +3,7 @@ #include "source/common/common/assert.h" #include "source/common/common/enum_to_int.h" -#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" +#include "contrib/rocketmq_proxy/filters/network/source/constant.h" namespace Envoy { namespace Extensions { diff --git a/contrib/rocketmq_proxy/filters/network/source/router/router_impl.cc b/contrib/rocketmq_proxy/filters/network/source/router/router_impl.cc index 138ff56bd747..b5b5a59d62d2 100644 --- a/contrib/rocketmq_proxy/filters/network/source/router/router_impl.cc +++ b/contrib/rocketmq_proxy/filters/network/source/router/router_impl.cc @@ -6,8 +6,8 @@ #include "contrib/rocketmq_proxy/filters/network/source/active_message.h" #include "contrib/rocketmq_proxy/filters/network/source/codec.h" #include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/constant.h" #include "contrib/rocketmq_proxy/filters/network/source/protocol.h" -#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" namespace Envoy { namespace Extensions { diff --git a/contrib/rocketmq_proxy/filters/network/test/active_message_test.cc b/contrib/rocketmq_proxy/filters/network/test/active_message_test.cc index 8ac6645efc7b..a471349d05d0 100644 --- a/contrib/rocketmq_proxy/filters/network/test/active_message_test.cc +++ b/contrib/rocketmq_proxy/filters/network/test/active_message_test.cc @@ -6,8 +6,8 @@ #include "contrib/rocketmq_proxy/filters/network/source/active_message.h" #include "contrib/rocketmq_proxy/filters/network/source/config.h" #include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/constant.h" #include "contrib/rocketmq_proxy/filters/network/source/protocol.h" -#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" #include "contrib/rocketmq_proxy/filters/network/test/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/contrib/rocketmq_proxy/filters/network/test/conn_manager_test.cc b/contrib/rocketmq_proxy/filters/network/test/conn_manager_test.cc index e2c7d835c745..4c4a1d302013 100644 --- a/contrib/rocketmq_proxy/filters/network/test/conn_manager_test.cc +++ b/contrib/rocketmq_proxy/filters/network/test/conn_manager_test.cc @@ -9,7 +9,7 @@ #include "contrib/rocketmq_proxy/filters/network/source/config.h" #include 
"contrib/rocketmq_proxy/filters/network/source/conn_manager.h" -#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" +#include "contrib/rocketmq_proxy/filters/network/source/constant.h" #include "contrib/rocketmq_proxy/filters/network/test/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/contrib/rocketmq_proxy/filters/network/test/router_test.cc b/contrib/rocketmq_proxy/filters/network/test/router_test.cc index 681439d9212a..e4128ee0d36c 100644 --- a/contrib/rocketmq_proxy/filters/network/test/router_test.cc +++ b/contrib/rocketmq_proxy/filters/network/test/router_test.cc @@ -2,8 +2,8 @@ #include "contrib/rocketmq_proxy/filters/network/source/config.h" #include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/constant.h" #include "contrib/rocketmq_proxy/filters/network/source/router/router.h" -#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" #include "contrib/rocketmq_proxy/filters/network/test/mocks.h" #include "contrib/rocketmq_proxy/filters/network/test/utility.h" #include "gtest/gtest.h" diff --git a/contrib/vcl/source/BUILD b/contrib/vcl/source/BUILD new file mode 100644 index 000000000000..a75f027b068c --- /dev/null +++ b/contrib/vcl/source/BUILD @@ -0,0 +1,99 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_contrib_extension", + "envoy_cc_library", + "envoy_contrib_package", +) +load("@rules_cc//cc:defs.bzl", "cc_library") +load("@rules_python//python:defs.bzl", "py_binary") +load("@base_pip3//:requirements.bzl", "requirement") + +licenses(["notice"]) # Apache 2 + +# VPP Comms Lib (VCL) adaptor. + +envoy_contrib_package() + +cc_library( + name = "vpp_vcl", + srcs = [ + "external/libsvm.a", + "external/libvlibmemoryclient.a", + "external/libvppcom.a", + "external/libvppinfra.a", + ], + hdrs = ["external/vppcom.h"], + defines = ["VPP_VCL"], + includes = ["external/"], + tags = ["skip_on_windows"], + visibility = ["//visibility:public"], +) + +genrule( + name = "build", + srcs = [ + "@com_github_fdio_vpp_vcl//:all", + ], + outs = [ + "external/libvppcom.a", + "external/libvppinfra.a", + "external/libsvm.a", + "external/libvlibmemoryclient.a", + "external/vppcom.h", + ], + cmd = """ + ./$(location :vcl_build_launcher) vpp_vcl_build.sh $(location external/libvppcom.a) + """, + tools = [ + ":vcl_build_launcher", + ], +) + +py_binary( + name = "vcl_build_launcher", + srcs = ["vcl_build_launcher.py"], + data = [ + "vpp_vcl_build.sh", + ], + main = "vcl_build_launcher.py", + deps = [requirement("ply")], +) + +envoy_cc_library( + name = "vcl_interface_lib", + srcs = [ + "vcl_event.cc", + "vcl_interface.cc", + "vcl_io_handle.cc", + ], + hdrs = [ + "vcl_event.h", + "vcl_interface.h", + "vcl_io_handle.h", + ], + visibility = ["//visibility:public"], + deps = [ + ":vpp_vcl", + "//envoy/event:dispatcher_interface", + "//envoy/network:socket_interface", + "//source/common/common:minimal_logger_lib", + "//source/common/event:dispatcher_includes", + "//source/common/event:dispatcher_lib", + "//source/common/event:libevent_scheduler_lib", + "//source/common/network:address_lib", + "//source/common/network:io_socket_error_lib", + "//source/common/network:socket_interface_lib", + "//source/common/network:socket_lib", + ], +) + +envoy_cc_contrib_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + visibility = ["//visibility:public"], + deps = [ + ":vcl_interface_lib", + 
"@envoy_api//contrib/envoy/extensions/vcl/v3alpha:pkg_cc_proto", + ], +) diff --git a/contrib/vcl/source/config.cc b/contrib/vcl/source/config.cc new file mode 100644 index 000000000000..89c918f8fd2f --- /dev/null +++ b/contrib/vcl/source/config.cc @@ -0,0 +1,56 @@ +#include "contrib/vcl/source/config.h" + +#include "contrib/envoy/extensions/vcl/v3alpha/vcl_socket_interface.pb.h" +#include "contrib/vcl/source/vcl_interface.h" +#include "contrib/vcl/source/vcl_io_handle.h" + +namespace Envoy { +namespace Extensions { +namespace Network { +namespace Vcl { + +Server::BootstrapExtensionPtr +VclSocketInterface::createBootstrapExtension(const Protobuf::Message&, + Server::Configuration::ServerFactoryContext& ctx) { + + vclInterfaceInit(ctx.mainThreadDispatcher(), ctx.options().concurrency()); + return std::make_unique(*this); +} + +ProtobufTypes::MessagePtr VclSocketInterface::createEmptyConfigProto() { + return std::make_unique(); +} + +Envoy::Network::IoHandlePtr VclSocketInterface::socket(Envoy::Network::Socket::Type socket_type, + Envoy::Network::Address::Type addr_type, + Envoy::Network::Address::IpVersion, + bool) const { + if (vppcom_worker_index() == -1) { + vclInterfaceWorkerRegister(); + } + VCL_LOG("trying to create socket1 epoll fd {}", vppcom_mq_epoll_fd()); + if (addr_type == Envoy::Network::Address::Type::Pipe) { + return nullptr; + } + uint32_t sh = vppcom_session_create( + socket_type == Envoy::Network::Socket::Type::Stream ? VPPCOM_PROTO_TCP : VPPCOM_PROTO_UDP, 1); + if (!VCL_SH_VALID(sh)) { + return nullptr; + } + return std::make_unique(sh, VclInvalidFd); +} + +Envoy::Network::IoHandlePtr +VclSocketInterface::socket(Envoy::Network::Socket::Type socket_type, + const Envoy::Network::Address::InstanceConstSharedPtr addr) const { + return socket(socket_type, addr->type(), Envoy::Network::Address::IpVersion::v4, false); +} + +bool VclSocketInterface::ipFamilySupported(int) { return true; }; + +REGISTER_FACTORY(VclSocketInterface, Server::Configuration::BootstrapExtensionFactory); + +} // namespace Vcl +} // namespace Network +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/vcl/source/config.h b/contrib/vcl/source/config.h new file mode 100644 index 000000000000..65c9c53bfbcb --- /dev/null +++ b/contrib/vcl/source/config.h @@ -0,0 +1,41 @@ +#pragma once + +#include "source/common/network/socket_interface.h" + +namespace Envoy { +namespace Extensions { +namespace Network { +namespace Vcl { + +class VclSocketInterfaceExtension : public Envoy::Network::SocketInterfaceExtension { +public: + VclSocketInterfaceExtension(Envoy::Network::SocketInterface& sock_interface) + : Envoy::Network::SocketInterfaceExtension(sock_interface) {} +}; + +class VclSocketInterface : public Envoy::Network::SocketInterfaceBase { +public: + // Network::SocketInterface + Envoy::Network::IoHandlePtr socket(Envoy::Network::Socket::Type socket_type, + Envoy::Network::Address::Type addr_type, + Envoy::Network::Address::IpVersion version, + bool socket_v6only) const override; + Envoy::Network::IoHandlePtr + socket(Envoy::Network::Socket::Type socket_type, + const Envoy::Network::Address::InstanceConstSharedPtr addr) const override; + bool ipFamilySupported(int domain) override; + + // Server::Configuration::BootstrapExtensionFactory + Server::BootstrapExtensionPtr + createBootstrapExtension(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& context) override; + ProtobufTypes::MessagePtr createEmptyConfigProto() override; + std::string name() const override { return 
"envoy.extensions.vcl.vcl_socket_interface"; }; +}; + +DECLARE_FACTORY(VclSocketInterface); + +} // namespace Vcl +} // namespace Network +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/vcl/source/vcl_build_launcher.py b/contrib/vcl/source/vcl_build_launcher.py new file mode 100644 index 000000000000..28f5cd219b73 --- /dev/null +++ b/contrib/vcl/source/vcl_build_launcher.py @@ -0,0 +1,29 @@ +#!/usr/bin/python + +# Launcher for building vcl + +import os +import subprocess +import sys + + +def main(): + """ VCL builder script """ + + # find path to helper script + script_path = os.path.dirname(os.path.abspath(sys.argv[0])) + vcl_build = f"{script_path}/{sys.argv[1]}" + + # find path to vpp/vcl source code + base_path = os.path.dirname(os.path.abspath(sys.argv[1])) + vpp_path = f"{base_path}/external/com_github_fdio_vpp_vcl" + + # find path to dst folder + dst_path = os.path.dirname(os.path.abspath(sys.argv[2])) + + # build vcl + subprocess.run([vcl_build, vpp_path, dst_path]) + + +if __name__ == "__main__": + main() diff --git a/contrib/vcl/source/vcl_event.cc b/contrib/vcl/source/vcl_event.cc new file mode 100644 index 000000000000..b37ecbb415b3 --- /dev/null +++ b/contrib/vcl/source/vcl_event.cc @@ -0,0 +1,69 @@ +#include "contrib/vcl/source/vcl_event.h" + +#include "source/common/runtime/runtime_features.h" + +namespace Envoy { +namespace Extensions { +namespace Network { +namespace Vcl { + +VclEvent::VclEvent(Event::Dispatcher& dispatcher, VclIoHandle& io_handle, Event::FileReadyCb cb) + : cb_(cb), io_handle_(io_handle), activation_cb_(dispatcher.createSchedulableCallback([this]() { + ASSERT(injected_activation_events_ != 0); + mergeInjectedEventsAndRunCb(); + })) {} + +VclEvent::~VclEvent() { + // Worker listeners are valid only as long as the event is valid + if (io_handle_.isWrkListener()) { + VclIoHandle* parentListener = io_handle_.getParentListener(); + if (parentListener) { + parentListener->clearChildWrkListener(); + } + if (VCL_SH_VALID(io_handle_.sh())) { + io_handle_.close(); + } + } +} + +void VclEvent::activate(uint32_t events) { + // events is not empty. + ASSERT(events != 0); + // Only supported event types are set. + ASSERT((events & (Event::FileReadyType::Read | Event::FileReadyType::Write | + Event::FileReadyType::Closed)) == events); + + cb_(events); + + // Schedule the activation callback so it runs as part of the next loop iteration if it is not + // already scheduled. + if (injected_activation_events_ == 0) { + ASSERT(!activation_cb_->enabled()); + activation_cb_->scheduleCallbackNextIteration(); + } + ASSERT(activation_cb_->enabled()); + + // Merge new events with pending injected events. 
+ injected_activation_events_ |= events; +} + +void VclEvent::setEnabled(uint32_t events) { io_handle_.updateEvents(events); } + +void VclEvent::mergeInjectedEventsAndRunCb() { + uint32_t events = 0; + if (injected_activation_events_ != 0) { + events |= injected_activation_events_; + injected_activation_events_ = 0; + activation_cb_->cancel(); + } + cb_(events); +} + +void VclEvent::unregisterEventIfEmulatedEdge(uint32_t) {} + +void VclEvent::registerEventIfEmulatedEdge(uint32_t) {} + +} // namespace Vcl +} // namespace Network +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/vcl/source/vcl_event.h b/contrib/vcl/source/vcl_event.h new file mode 100644 index 000000000000..eefb3ae09e83 --- /dev/null +++ b/contrib/vcl/source/vcl_event.h @@ -0,0 +1,44 @@ +#pragma once + +#include + +#include "envoy/event/file_event.h" + +#include "source/common/event/dispatcher_impl.h" +#include "source/common/event/event_impl_base.h" + +#include "contrib/vcl/source/vcl_io_handle.h" + +namespace Envoy { +namespace Extensions { +namespace Network { +namespace Vcl { + +class VclEvent : public Envoy::Event::FileEvent { +public: + VclEvent(Event::Dispatcher& dispatcher, VclIoHandle& io_handle, Event::FileReadyCb cb); + ~VclEvent() override; + + // Event::FileEvent + void activate(uint32_t events) override; + void setEnabled(uint32_t events) override; + void unregisterEventIfEmulatedEdge(uint32_t event) override; + void registerEventIfEmulatedEdge(uint32_t event) override; + +private: + void mergeInjectedEventsAndRunCb(); + + Event::FileReadyCb cb_; + VclIoHandle& io_handle_; + + // Injected FileReadyType events that were scheduled by recent calls to activate() and are pending + // delivery. + uint32_t injected_activation_events_{}; + // Used to schedule delayed event activation. Armed iff pending_activation_events_ != 0. + Event::SchedulableCallbackPtr activation_cb_; +}; + +} // namespace Vcl +} // namespace Network +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/vcl/source/vcl_interface.cc b/contrib/vcl/source/vcl_interface.cc new file mode 100644 index 000000000000..3b877fa00c4e --- /dev/null +++ b/contrib/vcl/source/vcl_interface.cc @@ -0,0 +1,139 @@ +#include "contrib/vcl/source/vcl_interface.h" + +#include "source/common/network/address_impl.h" + +#include "contrib/vcl/source/vcl_io_handle.h" +#include "vppcom.h" + +namespace Envoy { +namespace Extensions { +namespace Network { +namespace Vcl { + +namespace { + +/** + * Max number of epoll events to drain from VCL per `vppcom_epoll_wait` call + */ +const int MaxNumEpollEvents = 128; + +/** + * Envoy worker epoll session handles by VCL worker index, i.e., `vppcom_worker_index()`. Each + * worker uses its respective handle to retrieve session events from VCL via `vppcom_epoll_wait()`. 
+ */ +std::vector& epollHandles() { MUTABLE_CONSTRUCT_ON_FIRST_USE(std::vector); } + +/** + * Mutex only used during VCL worker registration + */ +ABSL_CONST_INIT absl::Mutex wrk_lock(absl::kConstInit); + +/** + * Map of VCL workers to message queue eventfd file events + */ +using MqFileEventsMap = absl::flat_hash_map; +MqFileEventsMap& mqFileEventsMap() { MUTABLE_CONSTRUCT_ON_FIRST_USE(MqFileEventsMap); } + +void onMqSocketEvents(uint32_t flags) { + ASSERT((flags & (Event::FileReadyType::Read | Event::FileReadyType::Write))); + int wrk_index = vppcom_worker_index(); + VCL_LOG("events on worker {}", wrk_index); + struct epoll_event events[MaxNumEpollEvents]; + int max_events = MaxNumEpollEvents; + uint32_t epoll_fd = vclEpollHandle(wrk_index); + + while (max_events > 0) { + int n_events = vppcom_epoll_wait(epoll_fd, events, max_events, 0); + if (n_events <= 0) { + break; + } + max_events -= n_events; + VCL_LOG("had {} events", n_events); + + for (int i = 0; i < n_events; i++) { + auto vcl_handle = reinterpret_cast(events[i].data.u64); + if (vcl_handle->isWrkListener()) { + vcl_handle = vcl_handle->getParentListener(); + } + + // session closed due to some recently processed event + if (!vcl_handle->isOpen()) { + continue; + } + + uint32_t evts = 0; + if (events[i].events & EPOLLIN) { + evts |= Event::FileReadyType::Read; + } + if (events[i].events & EPOLLOUT) { + evts |= Event::FileReadyType::Write; + } + if (events[i].events & (EPOLLERR | EPOLLHUP)) { + evts |= Event::FileReadyType::Closed; + } + + VCL_LOG("got event on vcl handle fd {} sh {:x} events {}", vcl_handle->fdDoNotUse(), + vcl_handle->sh(), evts); + vcl_handle->cb(evts); + } + } +} + +} // namespace + +uint32_t vclEpollHandle(uint32_t wrk_index) { + std::vector& epoll_handles = epollHandles(); + RELEASE_ASSERT(wrk_index < epoll_handles.size(), "epoll handles worker index"); + return epoll_handles[wrk_index]; +} + +void vclInterfaceWorkerRegister() { + { + absl::MutexLock lk(&wrk_lock); + RELEASE_ASSERT(vppcom_worker_register() == VPPCOM_OK, "failed to register VCL worker"); + } + const int wrk_index = vppcom_worker_index(); + int epoll_handle = vppcom_epoll_create(); + std::vector& epoll_handles = epollHandles(); + RELEASE_ASSERT(static_cast(wrk_index) < epoll_handles.size(), + "epoll handles worker index"); + epoll_handles[wrk_index] = epoll_handle; + VCL_LOG("registered worker {} and epoll handle {:x} mq fd {}", wrk_index, epoll_handle, + vppcom_mq_epoll_fd()); +} + +void vclInterfaceRegisterEpollEvent(Envoy::Event::Dispatcher& dispatcher) { + MqFileEventsMap& mq_fevts_map = mqFileEventsMap(); + const int wrk_index = vppcom_worker_index(); + RELEASE_ASSERT(wrk_index != -1, ""); + if (mq_fevts_map.find(wrk_index) != mq_fevts_map.end()) { + return; + } + mq_fevts_map[wrk_index] = dispatcher.createFileEvent( + vppcom_mq_epoll_fd(), [](uint32_t events) -> void { onMqSocketEvents(events); }, + Event::FileTriggerType::Edge, Event::FileReadyType::Read | Event::FileReadyType::Write); +} + +void vclInterfaceInit(Event::Dispatcher& dispatcher, uint32_t concurrency) { + MqFileEventsMap& mq_fevts_map = mqFileEventsMap(); + vppcom_app_create("envoy"); + const int wrk_index = vppcom_worker_index(); + std::vector& epoll_handles = epollHandles(); + // Assume we may have additional threads that request network access + epoll_handles.resize(std::max(concurrency, static_cast(1)) * 2); + epoll_handles[wrk_index] = vppcom_epoll_create(); + mq_fevts_map[wrk_index] = dispatcher.createFileEvent( + vppcom_mq_epoll_fd(), [](uint32_t events) -> void { 
onMqSocketEvents(events); }, + Event::FileTriggerType::Edge, Event::FileReadyType::Read | Event::FileReadyType::Write); +} + +void vclInterfaceDrainEvents() { + MqFileEventsMap& mq_fevts_map = mqFileEventsMap(); + const int wrk_index = vppcom_worker_index(); + mq_fevts_map[wrk_index]->activate(Event::FileReadyType::Read | Event::FileReadyType::Write); +} + +} // namespace Vcl +} // namespace Network +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/vcl/source/vcl_interface.h b/contrib/vcl/source/vcl_interface.h new file mode 100644 index 000000000000..cb3c8c4e2da8 --- /dev/null +++ b/contrib/vcl/source/vcl_interface.h @@ -0,0 +1,52 @@ +#pragma once + +#include "envoy/network/socket.h" + +#include "source/common/network/socket_interface.h" + +#include "vppcom.h" + +namespace Envoy { +namespace Extensions { +namespace Network { +namespace Vcl { + +#define VCL_RX_ZC (0) +#define VCL_LOG(fmt, _args...) ENVOY_LOG_MISC(debug, "[{}] " fmt, vppcom_worker_index(), ##_args) + +/** + * VclIoHandle does not rely on linux fds. Constant lower used as invalid fd. + */ +constexpr int VclInvalidFd = 1 << 23; + +/** + * Used to initialize VCL interface when VclSocketInterface extension is loaded. + */ +void vclInterfaceInit(Event::Dispatcher& dispatcher, uint32_t concurrency); + +/** + * Register Envoy worker with VCL and allocate epoll session handle to be used to retrieve per + * worker session events. + */ +void vclInterfaceWorkerRegister(); + +/** + * Create FileEvent for VCL worker message queue `eventfd` if one does not exist. Used to signal + * main dispatch loop that VCL has session events. + */ +void vclInterfaceRegisterEpollEvent(Envoy::Event::Dispatcher& dispatcher); + +/** + * Retrieve epoll session handle for VCL worker. + */ +uint32_t vclEpollHandle(uint32_t wrk_index); + +/** + * Force drain of events on current worker + */ +void vclInterfaceDrainEvents(); + +} // namespace Vcl +} // namespace Network +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/vcl/source/vcl_io_handle.cc b/contrib/vcl/source/vcl_io_handle.cc new file mode 100644 index 000000000000..f3b5114f38eb --- /dev/null +++ b/contrib/vcl/source/vcl_io_handle.cc @@ -0,0 +1,771 @@ +#include "contrib/vcl/source/vcl_io_handle.h" + +#include "source/common/buffer/buffer_impl.h" +#include "source/common/network/address_impl.h" + +#include "contrib/vcl/source/vcl_event.h" +#include "contrib/vcl/source/vcl_interface.h" + +namespace Envoy { +namespace Extensions { +namespace Network { +namespace Vcl { + +namespace { + +int vclWrkIndexOrRegister() { + int wrk_index = vppcom_worker_index(); + if (wrk_index == -1) { + vclInterfaceWorkerRegister(); + wrk_index = vppcom_worker_index(); + } + return wrk_index; +} + +bool peekVclSession(vcl_session_handle_t sh, vppcom_endpt_t* ep, uint32_t* proto) { + int current_wrk = vppcom_worker_index(); + int sh_wrk = vppcom_session_worker(sh); + uint32_t eplen = sizeof(*ep); + + // should NOT be used while system is loaded + vppcom_worker_index_set(sh_wrk); + + if (vppcom_session_attr(sh, VPPCOM_ATTR_GET_LCL_ADDR, ep, &eplen) != VPPCOM_OK) { + return true; + } + + uint32_t buflen = sizeof(uint32_t); + if (vppcom_session_attr(sh, VPPCOM_ATTR_GET_PROTOCOL, proto, &buflen) != VPPCOM_OK) { + return true; + } + + vppcom_worker_index_set(current_wrk); + + return false; +} + +void vclEndptCopy(sockaddr* addr, socklen_t* addrlen, const vppcom_endpt_t& ep) { + if (ep.is_ip4) { + sockaddr_in* addr4 = reinterpret_cast(addr); + addr4->sin_family = AF_INET; + *addrlen = 
std::min(static_cast(sizeof(struct sockaddr_in)), *addrlen); + memcpy(&addr4->sin_addr, ep.ip, *addrlen); // NOLINT(safe-memcpy) + addr4->sin_port = ep.port; + } else { + sockaddr_in6* addr6 = reinterpret_cast(addr); + addr6->sin6_family = AF_INET6; + *addrlen = std::min(static_cast(sizeof(struct sockaddr_in6)), *addrlen); + memcpy(&addr6->sin6_addr, ep.ip, *addrlen); // NOLINT(safe-memcpy) + addr6->sin6_port = ep.port; + } +} + +Envoy::Network::Address::InstanceConstSharedPtr vclEndptToAddress(const vppcom_endpt_t& ep, + uint32_t sh) { + sockaddr_storage addr; + int len; + + if (ep.is_ip4) { + addr.ss_family = AF_INET; + len = sizeof(struct sockaddr_in); + auto in4 = reinterpret_cast(&addr); + memcpy(&in4->sin_addr, ep.ip, len); // NOLINT(safe-memcpy) + in4->sin_port = ep.port; + } else { + addr.ss_family = AF_INET6; + len = sizeof(struct sockaddr_in6); + auto in6 = reinterpret_cast(&addr); + memcpy(&in6->sin6_addr, ep.ip, len); // NOLINT(safe-memcpy) + in6->sin6_port = ep.port; + } + + try { + // Set v6only to false so that mapped-v6 address can be normalize to v4 + // address. Though dual stack may be disabled, it's still okay to assume the + // address is from a dual stack socket. This is because mapped-v6 address + // must come from a dual stack socket. An actual v6 address can come from + // both dual stack socket and v6 only socket. If |peer_addr| is an actual v6 + // address and the socket is actually v6 only, the returned address will be + // regarded as a v6 address from dual stack socket. However, this address is not going to be + // used to create socket. Wrong knowledge of dual stack support won't hurt. + return *Envoy::Network::Address::addressFromSockAddr(addr, len, /*v6only=*/false); + } catch (const EnvoyException& e) { + PANIC(fmt::format("Invalid remote address for fd: {}, error: {}", sh, e.what())); + } +} + +void vclEndptFromAddress(vppcom_endpt_t& endpt, + Envoy::Network::Address::InstanceConstSharedPtr address) { + endpt.is_cut_thru = 0; + if (address->ip()->version() == Envoy::Network::Address::IpVersion::v4) { + const sockaddr_in* in = reinterpret_cast(address->sockAddr()); + endpt.is_ip4 = 1; + endpt.ip = const_cast(reinterpret_cast(&in->sin_addr)); + endpt.port = static_cast(in->sin_port); + } else { + const sockaddr_in6* in6 = reinterpret_cast(address->sockAddr()); + endpt.is_ip4 = 0; + endpt.ip = const_cast(reinterpret_cast(&in6->sin6_addr)); + endpt.port = static_cast(in6->sin6_port); + } +} + +// Converts a VCL return types to IoCallUint64Result. +Api::IoCallUint64Result vclCallResultToIoCallResult(const int32_t result) { + if (result >= 0) { + // Return nullptr as IoError upon success. + return Api::IoCallUint64Result( + result, Api::IoErrorPtr(nullptr, Envoy::Network::IoSocketError::deleteIoError)); + } + RELEASE_ASSERT(result != VPPCOM_EINVAL, "Invalid argument passed in."); + return Api::IoCallUint64Result( + /*rc=*/0, (result == VPPCOM_EAGAIN + // EAGAIN is frequent enough that its memory allocation should be avoided. + ? 
Api::IoErrorPtr(Envoy::Network::IoSocketError::getIoSocketEagainInstance(), + Envoy::Network::IoSocketError::deleteIoError) + : Api::IoErrorPtr(new Envoy::Network::IoSocketError(-result), + Envoy::Network::IoSocketError::deleteIoError))); +} + +} // namespace + +VclIoHandle::~VclIoHandle() { + if (VCL_SH_VALID(sh_)) { + VclIoHandle::close(); + } +} + +Api::IoCallUint64Result VclIoHandle::close() { + VCL_LOG("closing sh {:x}", sh_); + RELEASE_ASSERT(VCL_SH_VALID(sh_), "sh must be valid"); + int rc = 0; + + int wrk_index = vclWrkIndexOrRegister(); + + if (is_listener_) { + if (wrk_index) { + uint32_t sh = wrk_listener_->sh(); + RELEASE_ASSERT(wrk_index == vppcom_session_worker(sh), "listener close on wrong thread"); + clearChildWrkListener(); + // sh_ not invalidated yet, waiting for destructor on main to call `vppcom_session_close` + } else { + clearChildWrkListener(); + rc = vppcom_session_close(sh_); + VCL_SET_SH_INVALID(sh_); + } + } else { + rc = vppcom_session_close(sh_); + VCL_SET_SH_INVALID(sh_); + } + + return Api::IoCallUint64Result( + rc, Api::IoErrorPtr(nullptr, Envoy::Network::IoSocketError::deleteIoError)); +} + +bool VclIoHandle::isOpen() const { return VCL_SH_VALID(sh_); } + +Api::IoCallUint64Result VclIoHandle::readv(uint64_t max_length, Buffer::RawSlice* slices, + uint64_t num_slice) { + if (!VCL_SH_VALID(sh_)) { + return vclCallResultToIoCallResult(VPPCOM_EBADFD); + } + + VCL_LOG("reading on sh {:x}", sh_); + + uint64_t num_bytes_read = 0; + int32_t result = 0, rv = 0; + size_t slice_length; + + for (uint64_t i = 0; i < num_slice; i++) { + slice_length = std::min(slices[i].len_, static_cast(max_length - num_bytes_read)); + rv = vppcom_session_read(sh_, slices[i].mem_, slice_length); + if (rv < 0) { + break; + } + num_bytes_read += rv; + if (num_bytes_read == max_length) { + break; + } + } + result = (num_bytes_read == 0) ? rv : num_bytes_read; + VCL_LOG("done reading on sh {:x} bytes {} result {}", sh_, num_bytes_read, result); + return vclCallResultToIoCallResult(result); +} + +#if VCL_RX_ZC +Api::IoCallUint64Result VclIoHandle::read(Buffer::Instance& buffer, absl::optional) { + vppcom_data_segment_t ds[16]; + int32_t rv; + + rv = vppcom_session_read_segments(sh_, ds, 16, ~0); + if (rv < 0) { + return vclCallResultToIoCallResult(rv); + } + + uint32_t ds_index = 0, sh = sh_, len; + int32_t n_bytes = 0; + while (n_bytes < rv) { + len = ds[ds_index].len; + auto fragment = new Envoy::Buffer::BufferFragmentImpl( + ds[ds_index].data, len, + [&, sh](const void*, size_t data_len, + const Envoy::Buffer::BufferFragmentImpl* this_fragment) { + vppcom_session_free_segments(sh, data_len); + delete this_fragment; + }); + + buffer.addBufferFragment(*fragment); + n_bytes += len; + ds_index += 1; + } + + return vclCallResultToIoCallResult(rv); +} +#else +Api::IoCallUint64Result VclIoHandle::read(Buffer::Instance& buffer, + absl::optional max_length_opt) { + uint64_t max_length = max_length_opt.value_or(UINT64_MAX); + if (max_length == 0) { + return Api::ioCallUint64ResultNoError(); + } + + Buffer::Reservation reservation = buffer.reserveForRead(); + Api::IoCallUint64Result result = readv(std::min(reservation.length(), max_length), + reservation.slices(), reservation.numSlices()); + uint64_t bytes_to_commit = result.ok() ? 
result.return_value_ : 0; + ASSERT(bytes_to_commit <= max_length); + reservation.commit(bytes_to_commit); + return result; +} +#endif + +Api::IoCallUint64Result VclIoHandle::writev(const Buffer::RawSlice* slices, uint64_t num_slice) { + if (!VCL_SH_VALID(sh_)) { + return vclCallResultToIoCallResult(VPPCOM_EBADFD); + } + + VCL_LOG("writing on sh {:x}", sh_); + + uint64_t num_bytes_written = 0; + int32_t result = 0, rv = 0; + + for (uint64_t i = 0; i < num_slice; i++) { + rv = vppcom_session_write(sh_, slices[i].mem_, slices[i].len_); + if (rv < 0) { + break; + } + num_bytes_written += rv; + } + result = (num_bytes_written == 0) ? rv : num_bytes_written; + + return vclCallResultToIoCallResult(result); +} + +Api::IoCallUint64Result VclIoHandle::write(Buffer::Instance& buffer) { + constexpr uint64_t MaxSlices = 16; + Buffer::RawSliceVector slices = buffer.getRawSlices(MaxSlices); + Api::IoCallUint64Result result = writev(slices.begin(), slices.size()); + if (result.ok() && result.return_value_ > 0) { + buffer.drain(static_cast(result.return_value_)); + } + return result; +} + +Api::IoCallUint64Result VclIoHandle::recv(void* buffer, size_t length, int flags) { + VCL_LOG("recv on sh {:x}", sh_); + int rv = vppcom_session_recvfrom(sh_, buffer, length, flags, nullptr); + return vclCallResultToIoCallResult(rv); +} + +Api::IoCallUint64Result VclIoHandle::sendmsg(const Buffer::RawSlice* slices, uint64_t num_slice, + int, const Envoy::Network::Address::Ip*, + const Envoy::Network::Address::Instance&) { + if (!VCL_SH_VALID(sh_)) { + return vclCallResultToIoCallResult(VPPCOM_EBADFD); + } + VCL_LOG("sendmsg called on {:x}", sh_); + + absl::FixedArray iov(num_slice); + uint64_t num_slices_to_write = 0; + uint64_t num_bytes_written = 0; + + for (uint64_t i = 0; i < num_slice; i++) { + if (slices[i].mem_ != nullptr && slices[i].len_ != 0) { + iov[num_slices_to_write].iov_base = slices[i].mem_; + iov[num_slices_to_write].iov_len = slices[i].len_; + num_slices_to_write++; + } + } + if (num_slices_to_write == 0) { + return Api::ioCallUint64ResultNoError(); + } + + // VCL has no sendmsg semantics- Treat as a session write followed by a flush + int result = 0; + for (uint64_t i = 0; i < num_slices_to_write; i++) { + int n; + if (i < (num_slices_to_write - 1)) { + n = vppcom_session_write(sh_, iov[i].iov_base, iov[i].iov_len); + if (n < 0) { + result = (num_bytes_written == 0) ? n : num_bytes_written; + break; + } + } else { + // Flush after the last segment is written + n = vppcom_session_write_msg(sh_, iov[i].iov_base, iov[i].iov_len); + if (n < 0) { + result = (num_bytes_written == 0) ? n : num_bytes_written; + break; + } + } + num_bytes_written += n; + } + + return vclCallResultToIoCallResult(result); +} + +Api::IoCallUint64Result VclIoHandle::recvmsg(Buffer::RawSlice* slices, const uint64_t num_slice, + uint32_t self_port, RecvMsgOutput& output) { + if (!VCL_SH_VALID(sh_)) { + return vclCallResultToIoCallResult(VPPCOM_EBADFD); + } + + absl::FixedArray iov(num_slice); + uint64_t num_slices_for_read = 0; + uint64_t num_bytes_recvd = 0; + for (uint64_t i = 0; i < num_slice; i++) { + if (slices[i].mem_ != nullptr && slices[i].len_ != 0) { + iov[num_slices_for_read].iov_base = slices[i].mem_; + iov[num_slices_for_read].iov_len = slices[i].len_; + ++num_slices_for_read; + } + } + + // VCL has no recvmsg semantics- treat as a read into each slice, which is not + // as cumbersome as it sounds, since VCL will simply copy from shared mem buffers + // if the data is available. 
+ uint8_t ipaddr[sizeof(absl::uint128)]; + vppcom_endpt_t endpt; + endpt.ip = ipaddr; + endpt.port = static_cast(self_port); + int result = 0; + + for (uint64_t i = 0; i < num_slices_for_read; i++) { + int n = vppcom_session_recvfrom(sh_, iov[i].iov_base, iov[i].iov_len, 0, &endpt); + if (n < 0) { + result = (num_bytes_recvd == 0) ? n : num_bytes_recvd; + break; + } + if (i == 0) { + output.msg_[0].peer_address_ = vclEndptToAddress(endpt, sh_); + } + num_bytes_recvd += n; + } + + if (result < 0) { + return vclCallResultToIoCallResult(result); + } + + output.dropped_packets_ = nullptr; + + return vclCallResultToIoCallResult(result); +} + +Api::IoCallUint64Result VclIoHandle::recvmmsg(RawSliceArrays&, uint32_t, RecvMsgOutput&) { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; +} + +bool VclIoHandle::supportsMmsg() const { return false; } + +Api::SysCallIntResult VclIoHandle::bind(Envoy::Network::Address::InstanceConstSharedPtr address) { + if (!VCL_SH_VALID(sh_)) { + return {-1, VPPCOM_EBADFD}; + } + + int wrk_index = vclWrkIndexOrRegister(); + RELEASE_ASSERT(wrk_index != -1, "should be initialized"); + + vppcom_endpt_t endpt; + vclEndptFromAddress(endpt, address); + int32_t rv = vppcom_session_bind(sh_, &endpt); + return {rv < 0 ? -1 : 0, -rv}; +} + +Api::SysCallIntResult VclIoHandle::listen(int) { + int wrk_index = vclWrkIndexOrRegister(); + RELEASE_ASSERT(wrk_index != -1, "should be initialized"); + + VCL_LOG("trying to listen sh {}", sh_); + RELEASE_ASSERT(is_listener_ == false, ""); + RELEASE_ASSERT(vppcom_session_worker(sh_) == wrk_index, ""); + + is_listener_ = true; + + if (!wrk_index) { + not_listened_ = true; + } + + return {0, 0}; +} + +Envoy::Network::IoHandlePtr VclIoHandle::accept(sockaddr* addr, socklen_t* addrlen) { + int wrk_index = vclWrkIndexOrRegister(); + RELEASE_ASSERT(wrk_index != -1 && is_listener_, "must have worker and must be listener"); + + uint32_t sh = sh_; + if (wrk_index) { + sh = wrk_listener_->sh(); + VCL_LOG("trying to accept fd {} sh {:x}", fd_, sh); + } + + vppcom_endpt_t endpt; + sockaddr_storage ss; + endpt.ip = reinterpret_cast(&ss); + int new_sh = vppcom_session_accept(sh, &endpt, O_NONBLOCK); + if (new_sh >= 0) { + vclEndptCopy(addr, addrlen, endpt); + return std::make_unique(new_sh, VclInvalidFd); + } + return nullptr; +} + +Api::SysCallIntResult +VclIoHandle::connect(Envoy::Network::Address::InstanceConstSharedPtr address) { + if (!VCL_SH_VALID(sh_)) { + return {-1, VPPCOM_EBADFD}; + } + vppcom_endpt_t endpt; + uint8_t ipaddr[sizeof(absl::uint128)]; + endpt.ip = ipaddr; + vclEndptFromAddress(endpt, address); + int32_t rv = vppcom_session_connect(sh_, &endpt); + return {rv < 0 ? 
-1 : 0, -rv}; +} + +Api::SysCallIntResult VclIoHandle::setOption(int level, int optname, const void* optval, + socklen_t optlen) { + if (!VCL_SH_VALID(sh_)) { + return {-1, VPPCOM_EBADFD}; + } + int32_t rv = 0; + + switch (level) { + case SOL_TCP: + switch (optname) { + case TCP_NODELAY: + rv = + vppcom_session_attr(sh_, VPPCOM_ATTR_SET_TCP_NODELAY, const_cast(optval), &optlen); + break; + case TCP_MAXSEG: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_SET_TCP_USER_MSS, const_cast(optval), + &optlen); + break; + case TCP_KEEPIDLE: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_SET_TCP_KEEPIDLE, const_cast(optval), + &optlen); + break; + case TCP_KEEPINTVL: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_SET_TCP_KEEPINTVL, const_cast(optval), + &optlen); + break; + case TCP_CONGESTION: + case TCP_CORK: + /* Ignore */ + rv = 0; + break; + default: + ENVOY_LOG(error, "setOption() SOL_TCP: sh {} optname {} unsupported!", sh_, optname); + break; + } + break; + case SOL_IPV6: + switch (optname) { + case IPV6_V6ONLY: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_SET_V6ONLY, const_cast(optval), &optlen); + break; + default: + ENVOY_LOG(error, "setOption() SOL_IPV6: sh {} optname {} unsupported!", sh_, optname); + break; + } + break; + case SOL_SOCKET: + switch (optname) { + case SO_KEEPALIVE: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_SET_KEEPALIVE, const_cast(optval), &optlen); + break; + case SO_REUSEADDR: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_SET_REUSEADDR, const_cast(optval), &optlen); + break; + case SO_BROADCAST: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_SET_BROADCAST, const_cast(optval), &optlen); + break; + default: + ENVOY_LOG(error, "setOption() SOL_SOCKET: sh {} optname {} unsupported!", sh_, optname); + break; + } + break; + default: + break; + } + + return {rv < 0 ? 
-1 : 0, -rv}; +} + +Api::SysCallIntResult VclIoHandle::getOption(int level, int optname, void* optval, + socklen_t* optlen) { + VCL_LOG("trying to get option"); + if (!VCL_SH_VALID(sh_)) { + return {-1, VPPCOM_EBADFD}; + } + int32_t rv = 0; + + switch (level) { + case SOL_TCP: + switch (optname) { + case TCP_NODELAY: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_GET_TCP_NODELAY, optval, optlen); + break; + case TCP_MAXSEG: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_GET_TCP_USER_MSS, optval, optlen); + break; + case TCP_KEEPIDLE: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_GET_TCP_KEEPIDLE, optval, optlen); + break; + case TCP_KEEPINTVL: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_GET_TCP_KEEPINTVL, optval, optlen); + break; + case TCP_INFO: + if (optval && optlen && (*optlen == sizeof(struct tcp_info))) { + ENVOY_LOG(error, "getOption() TCP_INFO: sh %u optname %d unsupported!", sh_, optname); + memset(optval, 0, *optlen); + rv = VPPCOM_OK; + } else { + rv = -EFAULT; + } + break; + case TCP_CONGESTION: + *optlen = strlen("cubic"); + strncpy(static_cast(optval), "cubic", *optlen + 1); + rv = 0; + break; + default: + ENVOY_LOG(error, "getOption() SOL_TCP: sh %u optname %d unsupported!", sh_, optname); + break; + } + break; + case SOL_IPV6: + switch (optname) { + case IPV6_V6ONLY: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_GET_V6ONLY, optval, optlen); + break; + default: + ENVOY_LOG(error, "getOption() SOL_IPV6: sh %u optname %d unsupported!", sh_, optname); + break; + } + break; + case SOL_SOCKET: + switch (optname) { + case SO_ACCEPTCONN: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_GET_LISTEN, optval, optlen); + break; + case SO_KEEPALIVE: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_GET_KEEPALIVE, optval, optlen); + break; + case SO_PROTOCOL: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_GET_PROTOCOL, optval, optlen); + *static_cast(optval) = *static_cast(optval) ? SOCK_DGRAM : SOCK_STREAM; + break; + case SO_SNDBUF: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_GET_TX_FIFO_LEN, optval, optlen); + break; + case SO_RCVBUF: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_GET_RX_FIFO_LEN, optval, optlen); + break; + case SO_REUSEADDR: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_GET_REUSEADDR, optval, optlen); + break; + case SO_BROADCAST: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_GET_BROADCAST, optval, optlen); + break; + case SO_ERROR: + rv = vppcom_session_attr(sh_, VPPCOM_ATTR_GET_ERROR, optval, optlen); + break; + default: + ENVOY_LOG(error, "getOption() SOL_SOCKET: sh %u optname %d unsupported!", sh_, optname); + ; + break; + } + break; + default: + break; + } + return {rv < 0 ? -1 : 0, -rv}; +} + +Api::SysCallIntResult VclIoHandle::ioctl(unsigned long, void*, unsigned long, void*, unsigned long, + unsigned long*) { + return {0, 0}; +} + +Api::SysCallIntResult VclIoHandle::setBlocking(bool) { + uint32_t flags = O_NONBLOCK; + uint32_t buflen = sizeof(flags); + int32_t rv = vppcom_session_attr(sh_, VPPCOM_ATTR_SET_FLAGS, &flags, &buflen); + return {rv < 0 ? 
-1 : 0, -rv}; +} + +absl::optional VclIoHandle::domain() { + VCL_LOG("grabbing domain sh {:x}", sh_); + return {AF_INET}; +}; + +Envoy::Network::Address::InstanceConstSharedPtr VclIoHandle::localAddress() { + vppcom_endpt_t ep; + uint32_t eplen = sizeof(ep); + uint8_t addr_buf[sizeof(struct sockaddr_in6)]; + ep.ip = addr_buf; + if (vppcom_session_attr(sh_, VPPCOM_ATTR_GET_LCL_ADDR, &ep, &eplen)) { + return nullptr; + } + return vclEndptToAddress(ep, sh_); +} + +Envoy::Network::Address::InstanceConstSharedPtr VclIoHandle::peerAddress() { + VCL_LOG("grabbing peer address sh {:x}", sh_); + vppcom_endpt_t ep; + uint32_t eplen = sizeof(ep); + uint8_t addr_buf[sizeof(struct sockaddr_in6)]; + ep.ip = addr_buf; + if (vppcom_session_attr(sh_, VPPCOM_ATTR_GET_PEER_ADDR, &ep, &eplen)) { + return nullptr; + } + return vclEndptToAddress(ep, sh_); +} + +void VclIoHandle::updateEvents(uint32_t events) { + int wrk_index = vclWrkIndexOrRegister(); + VclIoHandle* vcl_handle = this; + + if (wrk_index && is_listener_) { + vcl_handle = wrk_listener_.get(); + } + + struct epoll_event ev; + ev.events = EPOLLET; + + if (events & Event::FileReadyType::Read) { + ev.events |= EPOLLIN; + } + if (events & Event::FileReadyType::Write) { + ev.events |= EPOLLOUT; + } + if (events & Event::FileReadyType::Closed) { + ev.events |= EPOLLERR | EPOLLHUP; + } + + ev.data.u64 = reinterpret_cast(vcl_handle); + + vppcom_epoll_ctl(vclEpollHandle(wrk_index), EPOLL_CTL_MOD, vcl_handle->sh(), &ev); + vclInterfaceDrainEvents(); +} + +void VclIoHandle::initializeFileEvent(Event::Dispatcher& dispatcher, Event::FileReadyCb cb, + Event::FileTriggerType, uint32_t events) { + VCL_LOG("adding events for sh {:x} fd {} isListener {}", sh_, fd_, is_listener_); + + int wrk_index = vclWrkIndexOrRegister(); + vclInterfaceRegisterEpollEvent(dispatcher); + + VclIoHandle* vcl_handle = this; + + if (is_listener_) { + if (wrk_index) { + // If this is not the main worker, make sure a worker listener exists + if (!wrk_listener_) { + vppcom_endpt_t ep; + uint8_t addr_buf[sizeof(struct sockaddr_in6)]; + ep.ip = addr_buf; + uint32_t proto; + + RELEASE_ASSERT(peekVclSession(sh_, &ep, &proto) == false, "peek returned"); + + Address::InstanceConstSharedPtr address = vclEndptToAddress(ep, -1); + uint32_t sh = vppcom_session_create(proto, 1); + wrk_listener_ = std::make_unique(sh, VclInvalidFd); + wrk_listener_->bind(address); + uint32_t rv = vppcom_session_listen(sh, 0 /* ignored */); + if (rv) { + VCL_LOG("listen failed sh {:x}", sh); + return; + } + wrk_listener_->setParentListener(this); + VCL_LOG("add worker listener sh {:x} wrk_index {} new sh {:x}", sh_, wrk_index, sh); + } + vcl_handle = wrk_listener_.get(); + } else if (not_listened_) { + // On main worker, no need to create worker listeners + vppcom_session_listen(sh_, 0 /* ignored */); + not_listened_ = false; + } + } + + struct epoll_event ev; + ev.events = EPOLLET; + + if (events & Event::FileReadyType::Read) { + ev.events |= EPOLLIN; + } + if (events & Event::FileReadyType::Write) { + ev.events |= EPOLLOUT; + } + if (events & Event::FileReadyType::Closed) { + ev.events |= EPOLLERR | EPOLLHUP; + } + + cb_ = cb; + ev.data.u64 = reinterpret_cast(vcl_handle); + vppcom_epoll_ctl(vclEpollHandle(wrk_index), EPOLL_CTL_ADD, vcl_handle->sh(), &ev); + + file_event_ = Event::FileEventPtr{new VclEvent(dispatcher, *vcl_handle, cb)}; + vclInterfaceDrainEvents(); +} + +void VclIoHandle::resetFileEvents() { + if (!file_event_) { + return; + } + // Remove session from epoll fd. 
This makes sure that when the even is recreated events already + // consumed are regenerated. + int wrk_index = vclWrkIndexOrRegister(); + if (VCL_SH_VALID(sh_) && wrk_index == vppcom_session_worker(sh_)) { + vppcom_epoll_ctl(vclEpollHandle(wrk_index), EPOLL_CTL_DEL, sh_, nullptr); + } + file_event_.reset(); +} + +IoHandlePtr VclIoHandle::duplicate() { + VCL_LOG("duplicate called"); + + // Find what must be duplicated. Assume this is ONLY called for listeners + vppcom_endpt_t ep; + uint8_t addr_buf[sizeof(struct sockaddr_in6)]; + ep.ip = addr_buf; + uint32_t proto; + + RELEASE_ASSERT(peekVclSession(sh_, &ep, &proto) == false, "peek returned"); + + Address::InstanceConstSharedPtr address = vclEndptToAddress(ep, -1); + uint32_t sh = vppcom_session_create(proto, 1); + IoHandlePtr io_handle = std::make_unique(sh, VclInvalidFd); + + io_handle->bind(address); + + return io_handle; +} + +absl::optional VclIoHandle::lastRoundTripTime() { return {}; } + +} // namespace Vcl +} // namespace Network +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/vcl/source/vcl_io_handle.h b/contrib/vcl/source/vcl_io_handle.h new file mode 100644 index 000000000000..dd1e69690112 --- /dev/null +++ b/contrib/vcl/source/vcl_io_handle.h @@ -0,0 +1,102 @@ +#pragma once + +#include + +#include "envoy/api/io_error.h" +#include "envoy/network/io_handle.h" + +#include "source/common/common/logger.h" +#include "source/common/network/io_socket_error_impl.h" + +namespace Envoy { +namespace Extensions { +namespace Network { +namespace Vcl { + +using namespace Envoy::Network; + +#define VCL_INVALID_SH uint32_t(~0) +#define VCL_SH_VALID(_sh) (_sh != static_cast(~0)) +#define VCL_SET_SH_INVALID(_sh) (_sh = static_cast(~0)) + +class VclIoHandle : public Envoy::Network::IoHandle, + Logger::Loggable, + NonCopyable { +public: + VclIoHandle(uint32_t sh, os_fd_t fd) : sh_(sh), fd_(fd) {} + ~VclIoHandle() override; + + // Network::IoHandle + os_fd_t fdDoNotUse() const override { return fd_; } + Api::IoCallUint64Result close() override; + bool isOpen() const override; + Api::IoCallUint64Result readv(uint64_t max_length, Buffer::RawSlice* slices, + uint64_t num_slice) override; + Api::IoCallUint64Result read(Buffer::Instance& buffer, + absl::optional max_length) override; + Api::IoCallUint64Result writev(const Buffer::RawSlice* slices, uint64_t num_slice) override; + Api::IoCallUint64Result write(Buffer::Instance& buffer) override; + Api::IoCallUint64Result recv(void* buffer, size_t length, int flags) override; + Api::IoCallUint64Result sendmsg(const Buffer::RawSlice* slices, uint64_t num_slice, int flags, + const Envoy::Network::Address::Ip* self_ip, + const Envoy::Network::Address::Instance& peer_address) override; + Api::IoCallUint64Result recvmsg(Buffer::RawSlice* slices, const uint64_t num_slice, + uint32_t self_port, RecvMsgOutput& output) override; + Api::IoCallUint64Result recvmmsg(RawSliceArrays& slices, uint32_t self_port, + RecvMsgOutput& output) override; + absl::optional lastRoundTripTime() override; + + bool supportsMmsg() const override; + bool supportsUdpGro() const override { return false; } + + Api::SysCallIntResult bind(Envoy::Network::Address::InstanceConstSharedPtr address) override; + Api::SysCallIntResult listen(int backlog) override; + Envoy::Network::IoHandlePtr accept(struct sockaddr* addr, socklen_t* addrlen) override; + Api::SysCallIntResult connect(Envoy::Network::Address::InstanceConstSharedPtr address) override; + Api::SysCallIntResult setOption(int level, int optname, const void* optval, + 
socklen_t optlen) override; + Api::SysCallIntResult getOption(int level, int optname, void* optval, socklen_t* optlen) override; + Api::SysCallIntResult ioctl(unsigned long control_code, void* in_buffer, + unsigned long in_buffer_len, void* out_buffer, + unsigned long out_buffer_len, unsigned long* bytes_returned) override; + Api::SysCallIntResult setBlocking(bool blocking) override; + absl::optional domain() override; + Envoy::Network::Address::InstanceConstSharedPtr localAddress() override; + Envoy::Network::Address::InstanceConstSharedPtr peerAddress() override; + Api::SysCallIntResult shutdown(int) override { return {0, 0}; } + + void initializeFileEvent(Event::Dispatcher& dispatcher, Event::FileReadyCb cb, + Event::FileTriggerType trigger, uint32_t events) override; + void activateFileEvents(uint32_t events) override { file_event_->activate(events); } + void enableFileEvents(uint32_t events) override { file_event_->setEnabled(events); } + void resetFileEvents() override; + IoHandlePtr duplicate() override; + + void cb(uint32_t events) { cb_(events); } + void setCb(Event::FileReadyCb cb) { cb_ = cb; } + void updateEvents(uint32_t events); + uint32_t sh() const { return sh_; } + void clearChildWrkListener() { wrk_listener_ = nullptr; } + VclIoHandle* getParentListener() { return parent_listener_; } + bool isWrkListener() { return parent_listener_ != nullptr; } + +private: + void setParentListener(VclIoHandle* parent_listener) { parent_listener_ = parent_listener; } + + uint32_t sh_{VCL_INVALID_SH}; + os_fd_t fd_; + Event::FileEventPtr file_event_{nullptr}; + bool is_listener_{false}; + bool not_listened_{false}; + // Listener allocated on main thread and shared with worker. VCL listen not called on it. + VclIoHandle* parent_listener_{nullptr}; + // Listener allocated on worker and associated to main thread (parent) listener. VCL listen called + // on it. + std::unique_ptr wrk_listener_{nullptr}; + Event::FileReadyCb cb_; +}; + +} // namespace Vcl +} // namespace Network +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/vcl/source/vpp_vcl_build.sh b/contrib/vcl/source/vpp_vcl_build.sh new file mode 100755 index 000000000000..84577a6388ad --- /dev/null +++ b/contrib/vcl/source/vpp_vcl_build.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +set -e + +VPP_PATH=$1 +DST_PATH=$2 + +# This works only on Linux. +if [[ $(uname) != "Linux" ]]; then + echo "ERROR: VPP VCL is currently supported only on Linux" + exit 1 +fi + +# Log cmake and ninja versions for later debugging +echo "Building VCL" +echo "running $(cmake --version | head -1)" +echo "ninja version $(ninja --version)" + +# Build +pushd "${VPP_PATH}" +mkdir _vcl +cd _vcl +cmake -G Ninja ../src -DCMAKE_BUILD_TYPE:STRING=release +ninja -C . 
vppcom + +mv CMakeFiles/vcl/libvppcom.a "${DST_PATH}" +mv CMakeFiles/vppinfra/libvppinfra.a "${DST_PATH}" +mv CMakeFiles/svm/libsvm.a "${DST_PATH}" +mv CMakeFiles/vlibmemory/libvlibmemoryclient.a "${DST_PATH}" +cp ../src/vcl/vppcom.h "${DST_PATH}" + +popd diff --git a/docs/conf.py b/docs/conf.py index fb1b0afb1140..5b3a225281ea 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -312,47 +312,19 @@ def _config(key): rediraffe_redirects = "envoy-redirects.txt" intersphinx_mapping = { - 'v1.5.0': ('https://www.envoyproxy.io/docs/envoy/v1.5.0', None), - 'v1.6.0': ('https://www.envoyproxy.io/docs/envoy/v1.6.0', None), - 'v1.7.0': ('https://www.envoyproxy.io/docs/envoy/v1.7.1', None), - 'v1.8.0': ('https://www.envoyproxy.io/docs/envoy/v1.8.0', None), - 'v1.9.0': ('https://www.envoyproxy.io/docs/envoy/v1.9.0', None), - 'v1.9.1': ('https://www.envoyproxy.io/docs/envoy/v1.9.1', None), - 'v1.10.0': ('https://www.envoyproxy.io/docs/envoy/v1.10.0', None), - 'v1.11.0': ('https://www.envoyproxy.io/docs/envoy/v1.11.0', None), - 'v1.11.1': ('https://www.envoyproxy.io/docs/envoy/v1.11.1', None), - 'v1.11.2': ('https://www.envoyproxy.io/docs/envoy/v1.11.2', None), - 'v1.12.0': ('https://www.envoyproxy.io/docs/envoy/v1.12.0', None), - 'v1.12.2': ('https://www.envoyproxy.io/docs/envoy/v1.12.2', None), - 'v1.12.3': ('https://www.envoyproxy.io/docs/envoy/v1.12.3', None), - 'v1.12.4': ('https://www.envoyproxy.io/docs/envoy/v1.12.4', None), - 'v1.12.5': ('https://www.envoyproxy.io/docs/envoy/v1.12.5', None), - 'v1.12.6': ('https://www.envoyproxy.io/docs/envoy/v1.12.6', None), - 'v1.13.0': ('https://www.envoyproxy.io/docs/envoy/v1.13.0', None), - 'v1.13.1': ('https://www.envoyproxy.io/docs/envoy/v1.13.1', None), - 'v1.13.2': ('https://www.envoyproxy.io/docs/envoy/v1.13.2', None), - 'v1.13.3': ('https://www.envoyproxy.io/docs/envoy/v1.13.3', None), - 'v1.14.0': ('https://www.envoyproxy.io/docs/envoy/v1.14.0', None), - 'v1.14.2': ('https://www.envoyproxy.io/docs/envoy/v1.14.2', None), - 'v1.14.3': ('https://www.envoyproxy.io/docs/envoy/v1.14.3', None), - 'v1.14.7': ('https://www.envoyproxy.io/docs/envoy/v1.14.7', None), - 'v1.15.0': ('https://www.envoyproxy.io/docs/envoy/v1.15.0', None), - 'v1.15.4': ('https://www.envoyproxy.io/docs/envoy/v1.15.4', None), - 'v1.15.5': ('https://www.envoyproxy.io/docs/envoy/v1.15.5', None), - 'v1.16.0': ('https://www.envoyproxy.io/docs/envoy/v1.16.0', None), - 'v1.16.1': ('https://www.envoyproxy.io/docs/envoy/v1.16.1', None), - 'v1.16.2': ('https://www.envoyproxy.io/docs/envoy/v1.16.2', None), - 'v1.16.3': ('https://www.envoyproxy.io/docs/envoy/v1.16.3', None), - 'v1.16.4': ('https://www.envoyproxy.io/docs/envoy/v1.16.4', None), - 'v1.16.5': ('https://www.envoyproxy.io/docs/envoy/v1.16.5', None), - 'v1.17.0': ('https://www.envoyproxy.io/docs/envoy/v1.17.0', None), - 'v1.17.1': ('https://www.envoyproxy.io/docs/envoy/v1.17.1', None), - 'v1.17.2': ('https://www.envoyproxy.io/docs/envoy/v1.17.2', None), - 'v1.17.3': ('https://www.envoyproxy.io/docs/envoy/v1.17.3', None), - 'v1.17.4': ('https://www.envoyproxy.io/docs/envoy/v1.17.4', None), - 'v1.18.0': ('https://www.envoyproxy.io/docs/envoy/v1.18.2', None), - 'v1.18.3': ('https://www.envoyproxy.io/docs/envoy/v1.18.3', None), - 'v1.18.4': ('https://www.envoyproxy.io/docs/envoy/v1.18.4', None), - 'v1.19.0': ('https://www.envoyproxy.io/docs/envoy/v1.19.0', None), - 'v1.19.1': ('https://www.envoyproxy.io/docs/envoy/v1.19.1', None), + 'v1.5': ('https://www.envoyproxy.io/docs/envoy/v1.5.0', None), + 'v1.6': 
('https://www.envoyproxy.io/docs/envoy/v1.6.0', None),
+    'v1.7': ('https://www.envoyproxy.io/docs/envoy/v1.7.1', None),
+    'v1.8': ('https://www.envoyproxy.io/docs/envoy/v1.8.0', None),
+    'v1.9': ('https://www.envoyproxy.io/docs/envoy/v1.9.1', None),
+    'v1.10': ('https://www.envoyproxy.io/docs/envoy/v1.10.0', None),
+    'v1.11': ('https://www.envoyproxy.io/docs/envoy/v1.11.2', None),
+    'v1.12': ('https://www.envoyproxy.io/docs/envoy/v1.12.6', None),
+    'v1.13': ('https://www.envoyproxy.io/docs/envoy/v1.13.3', None),
+    'v1.14': ('https://www.envoyproxy.io/docs/envoy/v1.14.7', None),
+    'v1.15': ('https://www.envoyproxy.io/docs/envoy/v1.15.5', None),
+    'v1.16': ('https://www.envoyproxy.io/docs/envoy/v1.16.5', None),
+    'v1.17': ('https://www.envoyproxy.io/docs/envoy/v1.17.4', None),
+    'v1.18': ('https://www.envoyproxy.io/docs/envoy/v1.18.4', None),
+    'v1.19': ('https://www.envoyproxy.io/docs/envoy/v1.19.1', None),
 }
diff --git a/docs/root/_include/tcp_stats.rst b/docs/root/_include/tcp_stats.rst
new file mode 100644
index 000000000000..9d38f4996daf
--- /dev/null
+++ b/docs/root/_include/tcp_stats.rst
@@ -0,0 +1,19 @@
+.. note::
+  These metrics are provided by the operating system. Due to differences in operating system metrics available and the methodology
+  used to take measurements, the values may not be consistent across different operating systems or versions of the same operating
+  system.
+
+.. csv-table::
+  :header: Name, Type, Description
+  :widths: 1, 1, 2
+
+  cx_tx_segments, Counter, Total TCP segments transmitted
+  cx_rx_segments, Counter, Total TCP segments received
+  cx_tx_data_segments, Counter, Total TCP segments with a non-zero data length transmitted
+  cx_rx_data_segments, Counter, Total TCP segments with a non-zero data length received
+  cx_tx_retransmitted_segments, Counter, Total TCP segments retransmitted
+  cx_tx_unsent_bytes, Gauge, Bytes which Envoy has sent to the operating system which have not yet been sent
+  cx_tx_unacked_segments, Gauge, Segments which have been transmitted that have not yet been acknowledged
+  cx_tx_percent_retransmitted_segments, Histogram, Percent of segments on a connection which were retransmitted
+  cx_rtt_us, Histogram, Smoothed round trip time estimate in microseconds
+  cx_rtt_variance_us, Histogram, Estimated variance in microseconds of the round trip time. Higher values indicate more variability.
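To give a sense of how the TCP statistics above get produced, the sketch below wraps a listener's transport socket with the TCP stats transport socket referenced elsewhere in this change. This is a hedged illustration only, not text from this diff: the extension name, the ``envoy.extensions.transport_sockets.tcp_stats.v3.Config`` type URL, the ``update_period`` field and the ``raw_buffer`` inner socket are assumptions.

.. code-block:: yaml

    transport_socket:
      name: envoy.transport_sockets.tcp_stats
      typed_config:
        "@type": type.googleapis.com/envoy.extensions.transport_sockets.tcp_stats.v3.Config
        # Without a periodic update the gauges/histograms would only be refreshed on close.
        update_period: 5s
        # Inner transport socket that actually carries the connection bytes.
        transport_socket:
          name: envoy.transport_sockets.raw_buffer
          typed_config:
            "@type": type.googleapis.com/envoy.extensions.transport_sockets.raw_buffer.v3.RawBuffer

The same wrapper pattern would apply on the upstream (cluster) side, with the stats then rooted in the cluster's stats tree rather than the listener's.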
diff --git a/docs/root/_static/slow_start_aggression.svg b/docs/root/_static/slow_start_aggression.svg
new file mode 100644
index 000000000000..aac119a0b335
--- /dev/null
+++ b/docs/root/_static/slow_start_aggression.svg
@@ -0,0 +1,2049 @@
[New Matplotlib v3.4.1 SVG plot (2021-04-26) illustrating how the slow start aggression parameter shapes traffic ramp-up; vector markup omitted.]
diff --git a/docs/root/_static/slow_start_example.svg b/docs/root/_static/slow_start_example.svg
new file mode 100644
index 000000000000..9bd88ce1401f
--- /dev/null
+++ b/docs/root/_static/slow_start_example.svg
@@ -0,0 +1,1053 @@
[New Matplotlib v3.4.1 SVG plot (2021-09-10) showing an example slow start traffic ramp-up; vector markup omitted.]
diff --git a/docs/root/api-v3/bootstrap/bootstrap.rst b/docs/root/api-v3/bootstrap/bootstrap.rst
index 51d7b817c66d..4a8381a0c90f 100644
--- a/docs/root/api-v3/bootstrap/bootstrap.rst
+++ b/docs/root/api-v3/bootstrap/bootstrap.rst
@@ -10,4 +10,5 @@ Bootstrap
   ../config/metrics/v3/metrics_service.proto
   ../config/overload/v3/overload.proto
   ../config/ratelimit/v3/rls.proto
+  ../extensions/vcl/v3alpha/vcl_socket_interface.proto
   ../extensions/wasm/v3/wasm.proto
diff --git
a/docs/root/api-v3/config/accesslog/accesslog.rst b/docs/root/api-v3/config/accesslog/accesslog.rst index ecd70c2f80d2..dae49773f788 100644 --- a/docs/root/api-v3/config/accesslog/accesslog.rst +++ b/docs/root/api-v3/config/accesslog/accesslog.rst @@ -9,4 +9,3 @@ Access loggers v3/* ../../extensions/access_loggers/*/v3/* - ../../extensions/access_loggers/*/v3alpha/* diff --git a/docs/root/api-v3/config/config.rst b/docs/root/api-v3/config/config.rst index d79d3cd79c17..6d4034ff5b8d 100644 --- a/docs/root/api-v3/config/config.rst +++ b/docs/root/api-v3/config/config.rst @@ -12,6 +12,7 @@ Extensions rbac/rbac health_checker/health_checker transport_socket/transport_socket + dns_resolver/dns_resolver.rst resource_monitor/resource_monitor common/common compression/compression diff --git a/docs/root/api-v3/config/dns_resolver/dns_resolver.rst b/docs/root/api-v3/config/dns_resolver/dns_resolver.rst new file mode 100644 index 000000000000..3756a5109ca2 --- /dev/null +++ b/docs/root/api-v3/config/dns_resolver/dns_resolver.rst @@ -0,0 +1,8 @@ +DNS Resolver +================= + +.. toctree:: + :glob: + :maxdepth: 2 + + ../../extensions/network/dns_resolver/*/v3/* diff --git a/docs/root/api-v3/config/filter/http/http.rst b/docs/root/api-v3/config/filter/http/http.rst index 861a920b5a8e..51b746e7edb6 100644 --- a/docs/root/api-v3/config/filter/http/http.rst +++ b/docs/root/api-v3/config/filter/http/http.rst @@ -7,3 +7,5 @@ HTTP filters */empty/* ../../../extensions/filters/http/*/v3*/* + ../../../extensions/cache/*/v3*/* + diff --git a/docs/root/api-v3/config/filter/udp/udp.rst b/docs/root/api-v3/config/filter/udp/udp.rst index c430280ca06a..880e066946e2 100644 --- a/docs/root/api-v3/config/filter/udp/udp.rst +++ b/docs/root/api-v3/config/filter/udp/udp.rst @@ -6,4 +6,3 @@ UDP listener filters :maxdepth: 2 ../../../extensions/filters/udp/*/v3/* - ../../../extensions/filters/udp/*/v3alpha/* diff --git a/docs/root/api-v3/config/watchdog/watchdog.rst b/docs/root/api-v3/config/watchdog/watchdog.rst index 8a8ab843cb0e..439698b3a943 100644 --- a/docs/root/api-v3/config/watchdog/watchdog.rst +++ b/docs/root/api-v3/config/watchdog/watchdog.rst @@ -11,5 +11,5 @@ Watchdog :glob: :maxdepth: 2 - ../../extensions/watchdog/profile_action/v3alpha/* - ../../watchdog/v3alpha/* + ../../extensions/watchdog/profile_action/v3/* + ../../watchdog/v3/* diff --git a/docs/root/api-v3/service/service.rst b/docs/root/api-v3/service/service.rst index d651856c678b..a65686099df1 100644 --- a/docs/root/api-v3/service/service.rst +++ b/docs/root/api-v3/service/service.rst @@ -17,3 +17,4 @@ Services ../config/tap/v3/* trace/v3/* extension/v3/* + ext_proc/v3/* diff --git a/docs/root/api/client_features.rst b/docs/root/api/client_features.rst index 67e73283b7eb..3d923fba96e2 100644 --- a/docs/root/api/client_features.rst +++ b/docs/root/api/client_features.rst @@ -14,7 +14,7 @@ Currently Defined Client Features - **envoy.config.require-any-fields-contain-struct**: This feature indicates that xDS client requires that the configuration entries of type *google.protobuf.Any* contain messages of type - *udpa.type.v1.TypedStruct* only. + *xds.type.v3.TypedStruct* (or, for historical reasons, *udpa.type.v1.TypedStruct*) only. 
- **envoy.lb.does_not_support_overprovisioning**: This feature indicates that the client does not support overprovisioning for priority failover and locality weighting as configured by the :ref:`overprovisioning_factor ` diff --git a/docs/root/configuration/http/http_conn_man/response_code_details.rst b/docs/root/configuration/http/http_conn_man/response_code_details.rst index 58acfe7b9c51..642b186557eb 100644 --- a/docs/root/configuration/http/http_conn_man/response_code_details.rst +++ b/docs/root/configuration/http/http_conn_man/response_code_details.rst @@ -120,4 +120,5 @@ All http3 details are rooted at *http3.* http3.too_many_trailers, Either incoming request or response trailers contained too many entries. http3.remote_refuse, The peer refused the stream. http3.remote_reset, The peer reset the stream. + http3.inconsistent_content_length, The payload size is different from what was indicated by the content-length header. diff --git a/docs/root/configuration/http/http_conn_man/stats.rst b/docs/root/configuration/http/http_conn_man/stats.rst index a12c2adcc2af..efb77be6f17e 100644 --- a/docs/root/configuration/http/http_conn_man/stats.rst +++ b/docs/root/configuration/http/http_conn_man/stats.rst @@ -196,6 +196,7 @@ On the upstream side all http3 statistics are rooted at *cluster..http3.* rx_reset, Counter, Total number of reset stream frames received by Envoy tx_reset, Counter, Total number of reset stream frames transmitted by Envoy metadata_not_supported_error, Counter, Total number of metadata dropped during HTTP/3 encoding + quic_version_h3_29, Counter, Total number of quic connections that use transport version h3-29. QUIC h3-29 is unsupported by default and this counter will be removed when h3-29 support is completely removed. quic_version_rfc_v1, Counter, Total number of quic connections that use transport version rfc-v1. 
diff --git a/docs/root/configuration/http/http_filters/_include/bandwidth-limit-filter.yaml b/docs/root/configuration/http/http_filters/_include/bandwidth-limit-filter.yaml index 00d3415149f5..05674eb5dd91 100644 --- a/docs/root/configuration/http/http_filters/_include/bandwidth-limit-filter.yaml +++ b/docs/root/configuration/http/http_filters/_include/bandwidth-limit-filter.yaml @@ -21,7 +21,7 @@ static_resources: route: {cluster: service_protected_by_bandwidth_limit} typed_per_filter_config: envoy.filters.http.bandwidth_limit: - "@type": type.googleapis.com/envoy.extensions.filters.http.bandwidth_limit.v3alpha.BandwidthLimit + "@type": type.googleapis.com/envoy.extensions.filters.http.bandwidth_limit.v3.BandwidthLimit stat_prefix: bandwidth_limiter_custom_route enable_mode: REQUEST_AND_RESPONSE limit_kbps: 500 @@ -31,7 +31,7 @@ static_resources: http_filters: - name: envoy.filters.http.bandwidth_limit typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.bandwidth_limit.v3alpha.BandwidthLimit + "@type": type.googleapis.com/envoy.extensions.filters.http.bandwidth_limit.v3.BandwidthLimit stat_prefix: bandwidth_limiter_default - name: envoy.filters.http.router clusters: diff --git a/docs/root/configuration/http/http_filters/_include/dns-cache-circuit-breaker-apple.yaml b/docs/root/configuration/http/http_filters/_include/dns-cache-circuit-breaker-apple.yaml new file mode 100644 index 000000000000..3f12850ba949 --- /dev/null +++ b/docs/root/configuration/http/http_filters/_include/dns-cache-circuit-breaker-apple.yaml @@ -0,0 +1,4 @@ +typed_dns_resolver_config: + name: envoy.network.dns_resolver.apple + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.apple.v3.AppleDnsResolverConfig diff --git a/docs/root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml b/docs/root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml index 427600e10239..bb078c9040a6 100644 --- a/docs/root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml +++ b/docs/root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml @@ -43,14 +43,17 @@ static_resources: dns_cache_config: name: dynamic_forward_proxy_cache_config dns_lookup_family: V4_ONLY - dns_resolution_config: - resolvers: - - socket_address: - address: "8.8.8.8" - port_value: 53 - dns_resolver_options: - use_tcp_for_dns_lookups: true - no_default_search_domain: true + typed_dns_resolver_config: + name: envoy.network.dns_resolver.cares + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig + resolvers: + - socket_address: + address: "8.8.8.8" + port_value: 53 + dns_resolver_options: + use_tcp_for_dns_lookups: true + no_default_search_domain: true - name: envoy.filters.http.router typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router @@ -64,14 +67,17 @@ static_resources: dns_cache_config: name: dynamic_forward_proxy_cache_config dns_lookup_family: V4_ONLY - dns_resolution_config: - resolvers: - - socket_address: - address: "8.8.8.8" - port_value: 53 - dns_resolver_options: - use_tcp_for_dns_lookups: true - no_default_search_domain: true + typed_dns_resolver_config: + name: envoy.network.dns_resolver.cares + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig + resolvers: + - socket_address: + address: "8.8.8.8" + port_value: 53 + dns_resolver_options: + 
use_tcp_for_dns_lookups: true + no_default_search_domain: true transport_socket: name: envoy.transport_sockets.tls typed_config: diff --git a/docs/root/configuration/http/http_filters/admission_control_filter.rst b/docs/root/configuration/http/http_filters/admission_control_filter.rst index ac974b2f4067..bc08bc402b81 100644 --- a/docs/root/configuration/http/http_filters/admission_control_filter.rst +++ b/docs/root/configuration/http/http_filters/admission_control_filter.rst @@ -7,7 +7,7 @@ Admission Control The admission control filter is experimental and is currently under active development. -See the :ref:`v3 API reference ` for details on each configuration parameter. +See the :ref:`v3 API reference ` for details on each configuration parameter. Overview -------- @@ -56,11 +56,11 @@ Note that there are additional parameters that affect the rejection probability: Health check traffic does not count towards any of the filter's measurements. See the :ref:`v3 API reference -` for more +` for more details on this parameter. The definition of a successful request is a :ref:`configurable parameter -` +` for both HTTP and gRPC requests. Aggression @@ -85,7 +85,7 @@ fields can be overridden via runtime settings. name: envoy.filters.http.admission_control typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl + "@type": type.googleapis.com/envoy.extensions.filters.http.admission_control.v3.AdmissionControl enabled: default_value: true runtime_key: "admission_control.enabled" diff --git a/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst b/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst index 4576e9d3ac36..fa0b6d27b96c 100644 --- a/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst +++ b/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst @@ -4,12 +4,12 @@ Bandwidth limit ==================== * Bandwidth limiting :ref:`architecture overview ` -* :ref:`v3 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name ``envoy.filters.http.bandwidth_limit``. The HTTP Bandwidth limit filter limits the size of data flow to the max bandwidth set in the ``limit_kbps`` when the request's route, virtual host or filter chain has a -:ref:`bandwidth limit configuration `. +:ref:`bandwidth limit configuration `. If the bandwidth limit has been exhausted the filter stops further transfer until more bandwidth gets allocated according to the ``fill_interval`` (default is 50 milliseconds). If the connection buffer fills up with accumulated @@ -60,5 +60,5 @@ Runtime The HTTP bandwidth limit filter supports the following runtime settings: The bandwidth limit filter can be runtime feature flagged via the :ref:`enabled -` +` configuration field. diff --git a/docs/root/configuration/http/http_filters/cdn_loop_filter.rst b/docs/root/configuration/http/http_filters/cdn_loop_filter.rst index e3016f972d62..c89ea52c59f5 100644 --- a/docs/root/configuration/http/http_filters/cdn_loop_filter.rst +++ b/docs/root/configuration/http/http_filters/cdn_loop_filter.rst @@ -26,7 +26,7 @@ Configuration The filter is configured with the name *envoy.filters.http.cdn_loop*. -The :ref:`filter config ` has two fields. +The :ref:`filter config ` has two fields. * The *cdn_id* field sets the identifier that the filter will look for within and append to the CDN-Loop header. 
RFC 8586 calls this field the "cdn-id"; "cdn-id" can either be a pseudonym or a diff --git a/docs/root/configuration/http/http_filters/compressor_filter.rst b/docs/root/configuration/http/http_filters/compressor_filter.rst index f39bab62c676..31a87282c865 100644 --- a/docs/root/configuration/http/http_filters/compressor_filter.rst +++ b/docs/root/configuration/http/http_filters/compressor_filter.rst @@ -49,8 +49,8 @@ An example configuration of the filter may look like the following: "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip memory_level: 3 window_bits: 10 - compression_level: best_compression - compression_strategy: default_strategy + compression_level: BEST_COMPRESSION + compression_strategy: DEFAULT_STRATEGY By *default* request compression is disabled, but when enabled it will be *skipped* if: @@ -134,8 +134,8 @@ multiple compressor filters enabled only for requests or responses. For instance "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip memory_level: 3 window_bits: 10 - compression_level: best_compression - compression_strategy: default_strategy + compression_level: BEST_COMPRESSION + compression_strategy: DEFAULT_STRATEGY # This filter is only enabled for requests. - name: envoy.filters.http.compressor typed_config: @@ -156,8 +156,8 @@ multiple compressor filters enabled only for requests or responses. For instance "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip memory_level: 9 window_bits: 15 - compression_level: best_speed - compression_strategy: default_strategy + compression_level: BEST_SPEED + compression_strategy: DEFAULT_STRATEGY .. _compressor-statistics: diff --git a/docs/root/configuration/http/http_filters/cors_filter.rst b/docs/root/configuration/http/http_filters/cors_filter.rst index f7109ef6eaa9..5a49769fed29 100644 --- a/docs/root/configuration/http/http_filters/cors_filter.rst +++ b/docs/root/configuration/http/http_filters/cors_filter.rst @@ -8,7 +8,7 @@ For the meaning of the headers please refer to the pages below. * https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS * https://www.w3.org/TR/cors/ -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.cors*. .. _cors-runtime: diff --git a/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst b/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst index d2c9530207ee..7d7a4148ee21 100644 --- a/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst +++ b/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst @@ -36,6 +36,12 @@ host when forwarding. See the example below within the configured routes. .. literalinclude:: _include/dns-cache-circuit-breaker.yaml :language: yaml +Above example is using typed config :ref:`CaresDnsResolverConfig`. +To use :ref:`AppleDnsResolverConfig` (iOS/macOS only), follow below example: + +.. 
literalinclude:: _include/dns-cache-circuit-breaker-apple.yaml + :language: yaml + Statistics ---------- diff --git a/docs/root/configuration/http/http_filters/ext_proc_filter.rst b/docs/root/configuration/http/http_filters/ext_proc_filter.rst index 989413a87817..d0b5544f4e24 100644 --- a/docs/root/configuration/http/http_filters/ext_proc_filter.rst +++ b/docs/root/configuration/http/http_filters/ext_proc_filter.rst @@ -2,7 +2,7 @@ External Processing =================== -* :ref:`Http filter v3 API reference ` +* :ref:`Http filter v3 API reference ` * This filter should be configured with the name *envoy.filters.http.ext_proc* The external processing filter connects an external service, called an "external processor," @@ -12,9 +12,9 @@ and modifying the headers, body, and trailers of each message, or by returning a The protocol itself is based on a bidirectional gRPC stream. Envoy will send the external processor -:ref:`ProcessingRequest ` +:ref:`ProcessingRequest ` messages, and the processor must reply with -:ref:`ProcessingResponse ` +:ref:`ProcessingResponse ` messages. Configuration options are provided to control which events are sent to the processor. @@ -26,7 +26,7 @@ stream requests from the proxy. This filter is a work in progress. Most of the major bits of functionality are complete. The updated list of supported features and implementation status may -be found on the :ref:`reference page `. +be found on the :ref:`reference page `. Statistics ---------- diff --git a/docs/root/configuration/http/http_filters/http_filters.rst b/docs/root/configuration/http/http_filters/http_filters.rst index cb77f65eed62..dea25d116ed7 100644 --- a/docs/root/configuration/http/http_filters/http_filters.rst +++ b/docs/root/configuration/http/http_filters/http_filters.rst @@ -46,16 +46,3 @@ HTTP filters sxg_filter tap_filter wasm_filter - -.. TODO(toddmgreer): Remove this hack and add user-visible CacheFilter docs when CacheFilter is production-ready. -.. toctree:: - :hidden: - - ../../../api-v3/extensions/filters/http/admission_control/v3alpha/admission_control.proto - ../../../api-v3/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto - ../../../api-v3/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto - ../../../api-v3/service/ext_proc/v3alpha/external_processor.proto - ../../../api-v3/extensions/filters/http/oauth2/v3alpha/oauth.proto - ../../../api-v3/extensions/filters/http/cache/v3alpha/cache.proto - ../../../api-v3/extensions/cache/simple_http_cache/v3alpha/config.proto - ../../../api-v3/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto diff --git a/docs/root/configuration/http/http_filters/oauth2_filter.rst b/docs/root/configuration/http/http_filters/oauth2_filter.rst index 0ea1b97c95d0..d28789ca8889 100644 --- a/docs/root/configuration/http/http_filters/oauth2_filter.rst +++ b/docs/root/configuration/http/http_filters/oauth2_filter.rst @@ -4,38 +4,44 @@ OAuth2 ====== -* :ref:`v3 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.oauth2*. The OAuth filter's flow involves: * An unauthenticated user arrives at myapp.com, and the oauth filter redirects them to the - :ref:`authorization_endpoint ` - for login. The :ref:`client_id ` - and the :ref:`redirect_uri ` + :ref:`authorization_endpoint ` + for login. The :ref:`client_id ` + and the :ref:`redirect_uri ` are sent as query string parameters in this first redirect. 
* After a successful login, the authn server should be configured to redirect the user back to the - :ref:`redirect_uri ` + :ref:`redirect_uri ` provided in the query string in the first step. In the below code example, we choose /callback as the configured match path. An "authorization grant" is included in the query string for this second redirect. -* Using this new grant and the :ref:`token_secret `, +* Using this new grant and the :ref:`token_secret `, the filter then attempts to retrieve an access token from - the :ref:`token_endpoint `. The filter knows it has to do this + the :ref:`token_endpoint `. The filter knows it has to do this instead of reinitiating another login because the incoming request has a path that matches the - :ref:`redirect_path_matcher ` criteria. + :ref:`redirect_path_matcher ` criteria. * Upon receiving an access token, the filter sets cookies so that subseqeuent requests can skip the full flow. These cookies are calculated using the - :ref:`hmac_secret ` + :ref:`hmac_secret ` to assist in encoding. * The filter calls continueDecoding() to unblock the filter chain. When the authn server validates the client and returns an authorization token back to the OAuth filter, no matter what format that token is, if -:ref:`forward_bearer_token ` +:ref:`forward_bearer_token ` is set to true the filter will send over a cookie named ``BearerToken`` to the upstream. Additionally, the ``Authorization`` header will be populated with the same value. +.. note:: + By default, OAuth2 filter sets some cookies with the following names: + ``BearerToken``, ``OauthHMAC``, and ``OauthExpires``. These cookie names can be customized by + setting + :ref:`cookie_names `. + .. attention:: The OAuth2 filter is currently under active development. @@ -46,7 +52,7 @@ Example configuration The following is an example configuring the filter. .. validated-code-block:: yaml - :type-name: envoy.extensions.filters.http.oauth2.v3alpha.OAuth2 + :type-name: envoy.extensions.filters.http.oauth2.v3.OAuth2 config: token_endpoint: @@ -99,7 +105,7 @@ Below is a complete code example of how we employ the filter as one of http_filters: - name: envoy.filters.http.oauth2 typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3alpha.OAuth2 + "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2 config: token_endpoint: cluster: oauth @@ -178,8 +184,8 @@ Below is a complete code example of how we employ the filter as one of sni: auth.example.com Finally, the following code block illustrates sample contents inside a yaml file containing both credential secrets. -Both the :ref:`token_secret ` -and the :ref:`hmac_secret ` +Both the :ref:`token_secret ` +and the :ref:`hmac_secret ` can be defined in one shared file. .. code-block:: yaml @@ -203,14 +209,14 @@ It is recommended to pair this filter with the :ref:`CSRF Filter ` +:ref:`authorization_endpoint ` provider will likely reject the incoming request, and your access cookies will not be cached to bypass future logins. The signout path will redirect the current user to '/', and clear all authentication cookies related to the HMAC validation. Consequently, the OAuth filter will then restart the full OAuth flow at the root path, sending the user to the configured auth endpoint. -:ref:`pass_through_matcher ` provides +:ref:`pass_through_matcher ` provides an interface for users to provide specific header matching criteria such that, when applicable, the OAuth flow is entirely skipped. 
When this occurs, the ``oauth_success`` metric is still incremented.
diff --git a/docs/root/configuration/listeners/network_filters/_include/sni-dynamic-forward-proxy-filter.yaml b/docs/root/configuration/listeners/network_filters/_include/sni-dynamic-forward-proxy-filter.yaml
index 2e30456d03d0..ec2306721083 100644
--- a/docs/root/configuration/listeners/network_filters/_include/sni-dynamic-forward-proxy-filter.yaml
+++ b/docs/root/configuration/listeners/network_filters/_include/sni-dynamic-forward-proxy-filter.yaml
@@ -18,7 +18,7 @@ static_resources:
     - filters:
       - name: envoy.filters.network.sni_dynamic_forward_proxy
         typed_config:
-          "@type": type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig
+          "@type": type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3.FilterConfig
           port_value: 443
           dns_cache_config:
             name: dynamic_forward_proxy_cache_config
diff --git a/docs/root/configuration/listeners/stats.rst b/docs/root/configuration/listeners/stats.rst
index 4ad0a29a9c14..4b3ed840237f 100644
--- a/docs/root/configuration/listeners/stats.rst
+++ b/docs/root/configuration/listeners/stats.rst
@@ -36,6 +36,16 @@ The following TLS statistics are rooted at *listener.<address>.ssl.*:
 .. include:: ../../_include/ssl_stats.rst
+.. _config_listener_stats_tcp:
+
+TCP statistics
+--------------
+
+The following TCP statistics, which are available when using the :ref:`TCP stats transport socket `,
+are rooted at *listener.<address>
.tcp_stats.*: + +.. include:: ../../_include/tcp_stats.rst + .. _config_listener_stats_udp: UDP statistics diff --git a/docs/root/configuration/listeners/udp_filters/dns_filter.rst b/docs/root/configuration/listeners/udp_filters/dns_filter.rst index 1c2cab456f23..92724400ecae 100644 --- a/docs/root/configuration/listeners/udp_filters/dns_filter.rst +++ b/docs/root/configuration/listeners/udp_filters/dns_filter.rst @@ -7,7 +7,7 @@ DNS Filter DNS Filter is under active development and should be considered alpha and not production ready. -* :ref:`v3 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.udp_listener.dns_filter* Overview @@ -24,7 +24,7 @@ will use for external resolution. Users can disable external DNS resolution by o client configuration object. The filter supports :ref:`per-filter configuration -`. +`. An Example configuration follows that illustrates how the filter can be used. Example Configuration @@ -35,7 +35,7 @@ Example Configuration listener_filters: name: envoy.filters.udp.dns_filter typed_config: - "@type": "type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig" + "@type": "type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3.DnsFilterConfig" stat_prefix: "dns_filter_prefix" client_config: resolution_timeout: 5s @@ -131,7 +131,7 @@ Example External DnsTable Configuration listener_filters: name: "envoy.filters.udp.dns_filter" typed_config: - '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig' + '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3.DnsFilterConfig' stat_prefix: "my_prefix" server_config: external_dns_table: diff --git a/docs/root/configuration/observability/access_log/usage.rst b/docs/root/configuration/observability/access_log/usage.rst index 722ad1beb399..383e6e12b1a8 100644 --- a/docs/root/configuration/observability/access_log/usage.rst +++ b/docs/root/configuration/observability/access_log/usage.rst @@ -241,6 +241,62 @@ The following command operators are supported: TCP Downstream bytes sent on connection. +%UPSTREAM_WIRE_BYTES_SENT% + HTTP + Total number of bytes sent to the upstream by the http stream. + + TCP + Not implemented (0). + +%UPSTREAM_WIRE_BYTES_RECEIVED% + HTTP + Total number of bytes received from the upstream by the http stream. + + TCP + Not implemented (0). + +%UPSTREAM_HEADER_BYTES_SENT% + HTTP + Number of header bytes sent to the upstream by the http stream. + + TCP + Not implemented (0). + +%UPSTREAM_HEADER_BYTES_RECEIVED% + HTTP + Number of header bytes received from the upstream by the http stream. + + TCP + Not implemented (0). + +%DOWNSTREAM_WIRE_BYTES_SENT% + HTTP + Total number of bytes sent to the downstream by the http stream. + + TCP + Not implemented (0). + +%DOWNSTREAM_WIRE_BYTES_RECEIVED% + HTTP + Total number of bytes received from the downstream by the http stream. Envoy over counts sizes of received HTTP/1.1 pipelined requests by adding up bytes of requests in the pipeline to the one currently being processed. + + TCP + Not implemented (0). + +%DOWNSTREAM_HEADER_BYTES_SENT% + HTTP + Number of header bytes sent to the downstream by the http stream. + + TCP + Not implemented (0). + +%DOWNSTREAM_HEADER_BYTES_RECEIVED% + HTTP + Number of header bytes received from the downstream by the http stream. + + TCP + Not implemented (0). + Renders a numeric value in typed JSON logs. 
%DURATION% diff --git a/docs/root/configuration/observability/statistics.rst b/docs/root/configuration/observability/statistics.rst index 57188dfde65c..930414a92958 100644 --- a/docs/root/configuration/observability/statistics.rst +++ b/docs/root/configuration/observability/statistics.rst @@ -33,6 +33,7 @@ Server related statistics are rooted at *server.* with following statistics: envoy_bug_failures, Counter, Number of envoy bug failures detected in a release build. File or report the issue if this increments as this may be serious. static_unknown_fields, Counter, Number of messages in static configuration with unknown fields dynamic_unknown_fields, Counter, Number of messages in dynamic configuration with unknown fields + wip_protos, Counter, Number of messages and fields marked as work-in-progress being used .. _server_compilation_settings_statistics: diff --git a/docs/root/configuration/other_features/other_features.rst b/docs/root/configuration/other_features/other_features.rst index c6ecf4629840..21414104fedd 100644 --- a/docs/root/configuration/other_features/other_features.rst +++ b/docs/root/configuration/other_features/other_features.rst @@ -5,5 +5,6 @@ Other features :maxdepth: 2 rate_limit + vcl wasm wasm_service diff --git a/docs/root/configuration/other_features/vcl.rst b/docs/root/configuration/other_features/vcl.rst new file mode 100644 index 000000000000..e7a38dba925f --- /dev/null +++ b/docs/root/configuration/other_features/vcl.rst @@ -0,0 +1,122 @@ +.. _config_sock_interface_vcl: + +VCL Socket Interface +==================== + +* :ref:`v3 API reference ` + +.. attention:: + + The VCL socket interface extension is experimental and is currently under active development. + +This socket interface extension provides Envoy with high speed L2-L7 user space networking by integrating with `fd.io VPP `_ through VPP's ``Comms`` Library (VCL). + +The VCL socket interface is only included in :ref:`contrib images ` + +Example configuration +--------------------- + +.. code-block:: yaml + + bootstrap_extensions: + - name: envoy.extensions.vcl.vcl_socket_interface + typed_config: + "@type": type.googleapis.com/envoy.extensions.vcl.v3alpha.VclSocketInterface + default_socket_interface: "envoy.extensions.vcl.vcl_socket_interface" + +How it works +------------ + +If enabled, the extension attaches through a VCL interface (``vcl_interface.h``) to VCL, and consequently to the external VPP process, when it is initialized during Envoy bootstrap. This registers a main VCL worker, while subsequent Envoy workers are registered whenever the socket interface extension detects that its code is being executed by a pthread that has not yet been registered with VCL. + +Because both libevent and VCL want to handle the async polling and the dispatching of ``IoHandles``, the VCL interface delegates control to libevent by registering with it, for each Envoy worker, the eventfd associated to the VCL worker's VPP message queue. +These shared memory message queues are used by VPP to convey io/ctrl events to VCL and the eventfds are used to signal message queue transitions from empty to non-empty state. +This ultimately means that VPP generated events force libevent to hand over control to the VCL interface which, for each Envoy worker, uses an internally maintained epoll fd to poll/pull events from VCL and subsequently dispatch them. 
+To support all of these indirect interactions, the socket interface makes use of custom ``IoHandle`` and ``FileEvent`` implementations that convert between Envoy and VCL API calls. + +Installing and running VPP/VCL +------------------------------ + +For information on how to build and/or install VPP see the getting started guide `here `_. Assuming the use of DPDK interfaces, a minimal `startup.conf` file that also configures the host stack would consist of: + +.. code-block:: text + + unix { + # Run in interactive mode and not as a daemon + nodaemon + interactive + + # Cli socket to be used by vppctl + cli-listen /run/vpp/cli.sock + + # Group id is an example + gid vpp + } + + cpu { + # Avoid using core 0 and run vpp's main thread on core 1 + skip-cores 0 + main-core 1 + + # Set logical CPU core(s) where worker threads are running. For performance testing make + # sure the cores are on the same numa as the NIC(s). Use lscpu to determine the numa of + # a cpu and "sh hardware" in vpp cli to determine the numa of a NIC. To configure multiple + # workers lists are also possible, e.g., corelist-workers 2-4,6 + corelist-workers 2 + } + + buffers { + # Default is 16384 (8192 if running unpriviledged) + buffers-per-numa 16384 + } + + dpdk { + # Notes: + # - Assuming only one NIC is used + # - The PCI address is an example, the actual one should be found using something like dpdk_devbind + # https://github.com/DPDK/dpdk/blob/main/usertools/dpdk-devbind.py + # - Number of rx queues (num-rx-queus) should be number of workers + dev 0000:18:00.0 { + num-tx-desc 256 + num-rx-desc 256 + num-rx-queues 1 + } + } + + session { + # Use session layer socket api for VCL attachments + use-app-socket-api + + # VPP worker's message queues lengths + event-queue-length 100000 + } + +Manually start VPP, once a binary is obtained: `./vpp -c startup.conf` + +VCL can be configured by either adding a configuration file to `/etc/vpp/vcl.conf` or by pointing the `VCL_CONFIG` environment variable to a configuration file. A minimal example that can be used for RPS load testing can be found lower: + +.. code-block:: text + + vcl { + # Max rx/tx session buffers sizes in bytes. Increase for high throughput traffic. + rx-fifo-size 400000 + tx-fifo-size 400000 + + # Size of shared memory segments between VPP and VCL in bytes + segment-size 1000000000 + + # App has access to global routing table + app-scope-global + + # Allow inter-app shared-memory cut-through sessions + app-scope-local + + # Pointer to session layer's socket api socket + app-socket-api /var/run/vpp/app_ns_sockets/default + + # Message queues use eventfds for notifications + use-mq-eventfd + + # VCL worker incoming message queue size + event-queue-size 40000 + } diff --git a/docs/root/configuration/other_protocols/dubbo_filters/router_filter.rst b/docs/root/configuration/other_protocols/dubbo_filters/router_filter.rst index 02e3a1fe264f..3ef39e135f73 100644 --- a/docs/root/configuration/other_protocols/dubbo_filters/router_filter.rst +++ b/docs/root/configuration/other_protocols/dubbo_filters/router_filter.rst @@ -7,5 +7,5 @@ The router filter implements Dubbo forwarding. It will be used in almost all Dub scenarios. The filter's main job is to follow the instructions specified in the configured :ref:`route table `. -* :ref:`v3 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.dubbo.router*. 
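For orientation, the Dubbo router documented above is normally the terminal entry of a ``dubbo_proxy`` network filter's ``dubbo_filters`` list. The snippet below is a minimal, hedged sketch rather than content from this change: the ``DubboProxy`` type URL, ``stat_prefix`` and list layout are assumptions, and a real configuration would also carry a ``route_config``.

.. code-block:: yaml

    filters:
      - name: envoy.filters.network.dubbo_proxy
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy
          stat_prefix: dubbo_ingress
          dubbo_filters:
            # The router performs the actual forwarding and should come last in the chain.
            - name: envoy.filters.dubbo.router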
diff --git a/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst b/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst index 97c4eb416129..e7f5b017a4d3 100644 --- a/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst +++ b/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst @@ -40,11 +40,25 @@ Since these stats utilize the underlying cluster scope, we prefix with the ``thr thrift.upstream_resp_success, Counter, Total Replies that are considered "Successes". thrift.upstream_resp_error, Counter, Total Replies that are considered "Errors". thrift.upstream_resp_exception, Counter, Total responses with the "Exception" message type. + thrift.upstream_resp_exception_local, Counter, Total responses with the "Exception" message type generated locally. + thrift.upstream_resp_exception_remote, Counter, Total responses with the "Exception" message type received from remote. thrift.upstream_resp_invalid_type, Counter, Total responses with an unsupported message type. + thrift.upstream_resp_decoding_error, Counter, Total responses with an error during decoding. thrift.upstream_rq_time, Histogram, total rq time from rq complete to resp complete; includes oneway messages. thrift.upstream_rq_size, Histogram, Request message size in bytes per upstream thrift.upstream_resp_size, Histogram, Response message size in bytes per upstream +If the service zone is available for both the local service (via :option:`--service-zone`) +and the :ref:`upstream cluster `, +Envoy will track the following statistics in *cluster..zone...* namespace. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + thrift.upstream_resp_<\*>, Counter, "Total responses of each type (e.g., reply, success, etc.)" + thrift.upstream_rq_time, Histogram, Request time milliseconds + .. note:: The request and response size histograms include what's sent and received during protocol upgrade. diff --git a/docs/root/configuration/overview/extension.rst b/docs/root/configuration/overview/extension.rst index a9685edce5b9..792f384f0f57 100644 --- a/docs/root/configuration/overview/extension.rst +++ b/docs/root/configuration/overview/extension.rst @@ -35,7 +35,7 @@ filter configuration snippet is permitted: dynamic_stats: true In case the control plane lacks the schema definitions for an extension, -``udpa.type.v1.TypedStruct`` should be used as a generic container. The type URL +``xds.type.v3.TypedStruct`` should be used as a generic container. The type URL inside it is then used by a client to convert the contents to a typed configuration resource. For example, the above example could be written as follows: @@ -44,7 +44,7 @@ follows: name: front-http-proxy typed_config: - "@type": type.googleapis.com/udpa.type.v1.TypedStruct + "@type": type.googleapis.com/xds.type.v3.TypedStruct type_url: type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager value: stat_prefix: ingress_http @@ -62,7 +62,7 @@ follows: http_filters: - name: front-router typed_config: - "@type": type.googleapis.com/udpa.type.v1.TypedStruct + "@type": type.googleapis.com/xds.type.v3.TypedStruct type_url: type.googleapis.com/envoy.extensions.filters.http.router.v3Router .. 
_config_overview_extension_discovery: diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst index a7914866c29d..23775aaa8b52 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst @@ -42,6 +42,7 @@ Every cluster has a statistics tree rooted at *cluster..* with the followi upstream_cx_connect_fail, Counter, Total connection failures upstream_cx_connect_timeout, Counter, Total connection connect timeouts upstream_cx_idle_timeout, Counter, Total connection idle timeouts + upstream_cx_max_duration_reached, Counter, Total connections closed due to max duration reached upstream_cx_connect_attempts_exceeded, Counter, Total consecutive connection failures exceeding configured connection attempts upstream_cx_overflow, Counter, Total times that the cluster's connection circuit breaker overflowed upstream_cx_connect_ms, Histogram, Connection establishment milliseconds @@ -244,6 +245,16 @@ If TLS is used by the cluster the following statistics are rooted at *cluster.`, +are rooted at *cluster..tcp_stats.*: + +.. include:: ../../../_include/tcp_stats.rst + .. _config_cluster_manager_cluster_stats_alt_tree: Alternate tree dynamic HTTP statistics diff --git a/docs/root/faq/configuration/timeouts.rst b/docs/root/faq/configuration/timeouts.rst index 89177db38b7f..2cabbd0991f1 100644 --- a/docs/root/faq/configuration/timeouts.rst +++ b/docs/root/faq/configuration/timeouts.rst @@ -32,18 +32,20 @@ Connection timeouts apply to the entire HTTP connection and all streams the conn * The HTTP protocol :ref:`max_connection_duration ` is defined in a generic message used by both the HTTP connection manager as well as upstream cluster - HTTP connections but is currently only implemented for the downstream connections. The maximum - connection duration is the time after which a downstream connection will be drained and/or closed, - starting from when it first got established. If there are no active streams, the connection will be - closed. If there are any active streams, the drain sequence will kick-in, and the connection will be - force-closed after the drain period. The default value of max connection duration is *0* or unlimited, - which means that the connections will never be closed due to aging. It could be helpful in scenarios - when you are running a pool of Envoy edge-proxies and would want to close a downstream connection after - some time to prevent sticky-ness. It could also help to better load balance the overall traffic among - this pool, especially if the size of this pool is dynamically changing. To modify the max connection - duration for downstream connections use the + HTTP connections. The maximum connection duration is the time after which a downstream or upstream + connection will be drained and/or closed, starting from when it was first established. If there are no + active streams, the connection will be closed. If there are any active streams, the drain sequence will + kick-in, and the connection will be force-closed after the drain period. The default value of max connection + duration is *0* or unlimited, which means that the connections will never be closed due to aging. It could + be helpful in scenarios when you are running a pool of Envoy edge-proxies and would want to close a + downstream connection after some time to prevent stickiness. 
It could also help to better load balance the + overall traffic among this pool, especially if the size of this pool is dynamically changing. Finally, it + may help with upstream connections when using a DNS name whose resolved addresses may change even if the + upstreams stay healthly. Forcing a maximum upstream lifetime in this scenario prevents holding onto healthy + connections even after they would otherwise be undiscoverable. To modify the max connection duration for downstream connections use the :ref:`common_http_protocol_options ` - field in the HTTP connection manager configuration. + field in the HTTP connection manager configuration. To modify the max connection duration for upstream connections use the + :ref:`common_http_protocol_options ` field in the cluster configuration. See :ref:`below ` for other connection timeouts. diff --git a/docs/root/faq/windows/win_not_supported_features.rst b/docs/root/faq/windows/win_not_supported_features.rst index 8e002a1f182c..76c1b8da8467 100644 --- a/docs/root/faq/windows/win_not_supported_features.rst +++ b/docs/root/faq/windows/win_not_supported_features.rst @@ -9,6 +9,7 @@ The most notable features that are not supported on Windows are: * :ref:`Original Src HTTP Filter `. * :ref:`Hot restart ` * :ref:`Signed Exchange Filter ` +* :ref:`VCL Socket Interface ` There are certain Envoy features that require newer versions of Windows. These features explicitly document the required version. diff --git a/docs/root/intro/arch_overview/advanced/matching/matching_api.rst b/docs/root/intro/arch_overview/advanced/matching/matching_api.rst index 1b56eb354d28..9ae4b1c68585 100644 --- a/docs/root/intro/arch_overview/advanced/matching/matching_api.rst +++ b/docs/root/intro/arch_overview/advanced/matching/matching_api.rst @@ -16,6 +16,9 @@ better performance than the linear list matching as seen in Envoy's HTTP routing use of extension points to make it easy to extend to different inputs based on protocol or environment data as well as custom sublinear matchers and direct matchers. +Filter Integration +################## + Within supported environments (currently only HTTP filters), a wrapper proto can be used to instantiate a matching filter associated with the wrapped structure: @@ -28,7 +31,7 @@ The above example wraps a HTTP filter (the allowing us to define a match tree to be evaluated in conjunction with evaluation of the wrapped filter. Prior to data being made available to the filter, it will be provided to the match tree, which will then attempt to evaluate the matching rules with the provided data, triggering an -action if match evaluation completes in an action. +action if match evaluation results in an action. In the above example, we are specifying that we want to match on the incoming request header ``some-header`` by setting the ``input`` to @@ -54,7 +57,7 @@ the filter if ``some-header: skip_filter`` is present and ``second-header`` is s .. _arch_overview_matching_api_iteration_impact: HTTP Filter Iteration Impact -============================ +**************************** The above example only demonstrates matching on request headers, which ends up being the simplest case due to it happening before the associated filter receives any data. Matching on other HTTP @@ -80,8 +83,15 @@ client will receive an invalid response back from Envoy. 
If the skip action was trailers, the same gRPC-Web filter would consume all the data but never write it back out (as this happens when it sees the trailers), resulting in a gRPC-Web response with an empty body. +HTTP Routing Integration +######################## + +The matching API can be used with HTTP routing, by specifying a match tree as part of the virtual host +and specifying a Route as the resulting action. See examples in the above sections for how the match +tree can be configured. + Match Tree Validation -===================== +##################### As the match tree structure is very flexible, some filters might need to impose additional restrictions on what kind of match trees can be used. This system is somewhat inflexible at the moment, only supporting @@ -91,7 +101,7 @@ will fail during configuration load, reporting back which data input was invalid This is done for example to limit the issues talked about in :ref:`the above section ` or to help users understand in what -context a match tree can be used for a specific filter. Due to the limitations of the validations framework +context a match tree can be used for a specific filter. Due to the limitations of the validation framework at the current time, it is not used for all filters. For HTTP filters, the restrictions are specified by the filter implementation, so consult the individual diff --git a/docs/root/intro/arch_overview/http/http_routing.rst b/docs/root/intro/arch_overview/http/http_routing.rst index 816218721590..2c20dd9428f9 100644 --- a/docs/root/intro/arch_overview/http/http_routing.rst +++ b/docs/root/intro/arch_overview/http/http_routing.rst @@ -192,3 +192,63 @@ upon configuration load and cache the contents. If **response_headers_to_add** has been set for the Route or the enclosing Virtual Host, Envoy will include the specified headers in the direct HTTP response. + +Routing Via Generic Matching +---------------------------- + +Envoy recently added support for utilzing a :ref:`generic match tree ` to +specify the route table. This is a more expressive matching engine than the original one, allowing +for sublinear matching on arbitrary headers (unlike the original matching engine which could only +do this for :authority in some cases). + +To use the generic matching tree, specify a matcher on a virtual host with a RouteAction action: + +.. code-block:: yaml + + matcher: + "@type": type.googleapis.com/xds.type.matcher.v3.Matcher + matcher_tree: + input: + name: request-headers + typed_config: + "@type": type.googleapis.com/envoy.type.matcher.v3.HttpRequestHeaderMatchInput + header_name: :path + exact_match_map: + map: + "/new_endpoint/foo": + action: + name: route + typed_config: + "@type": type.googleapis.com/envoy.config.route.v3.Route + match: + prefix: / + route: + cluster: cluster_foo + request_headers_to_add: + - header: + key: x-route-header + value: new-value + "/new_endpoint/bar": + action: + name: route + typed_config: + "@type": type.googleapis.com/envoy.config.route.v3.Route + match: + prefix: / + route: + cluster: cluster_bar + request_headers_to_add: + - header: + key: x-route-header + value: new-value + +This allows resolving the same Route proto message used for the `routes`-based routing using the additional +matching flexibility provided by the generic matching framework. + +Note that the resulting Route also specifies a match criteria. This must be satisfied in addition to resolving +the route in order to achieve a route match. 
When path rewrites are used, the matched path will only depend on +the match criteria of the resolved Route. Path matching done during the match tree traversal does not contribute +to path rewrites. + +The only inputs supported are request headers (via `envoy.type.matcher.v3.HttpRequestHeaderMatchInput`). See +the docs for the :ref:`matching API ` for more information about the API as a whole. diff --git a/docs/root/intro/arch_overview/observability/access_logging.rst b/docs/root/intro/arch_overview/observability/access_logging.rst index a40cddbe465f..7b77a4180be4 100644 --- a/docs/root/intro/arch_overview/observability/access_logging.rst +++ b/docs/root/intro/arch_overview/observability/access_logging.rst @@ -73,6 +73,6 @@ Further reading * File :ref:`access log sink `. * gRPC :ref:`Access Log Service (ALS) ` sink. -* OpenTelemetry (gRPC) :ref:`LogsService ` +* OpenTelemetry (gRPC) :ref:`LogsService ` * Stdout :ref:`access log sink ` * Stderr :ref:`access log sink ` diff --git a/docs/root/intro/arch_overview/upstream/dns_resolution.rst b/docs/root/intro/arch_overview/upstream/dns_resolution.rst index 878f44f1a9f6..b421b8439eae 100644 --- a/docs/root/intro/arch_overview/upstream/dns_resolution.rst +++ b/docs/root/intro/arch_overview/upstream/dns_resolution.rst @@ -13,6 +13,14 @@ Envoy uses `c-ares `_ as a third party DNS res On Apple OSes Envoy additionally offers resolution using Apple specific APIs via the ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime feature. +Envoy provides DNS resolution through extensions, and contains 2 built-in extensions: + +1) c-ares: :ref:`CaresDnsResolverConfig` + +2) Apple (iOS/macOS only): :ref:`AppleDnsResolverConfig` + +For an example of a built-in DNS typed configuration see the :ref:`HTTP filter configuration documentation `. + The Apple-based DNS Resolver emits the following stats rooted in the ``dns.apple`` stats tree: .. csv-table:: diff --git a/docs/root/intro/arch_overview/upstream/health_checking.rst b/docs/root/intro/arch_overview/upstream/health_checking.rst index 267542070ef4..0c3e7596bd01 100644 --- a/docs/root/intro/arch_overview/upstream/health_checking.rst +++ b/docs/root/intro/arch_overview/upstream/health_checking.rst @@ -12,10 +12,10 @@ checking along with various settings (check interval, failures required before m unhealthy, successes required before marking a host healthy, etc.): * **HTTP**: During HTTP health checking Envoy will send an HTTP request to the upstream host. By - default, it expects a 200 response if the host is healthy. Expected response codes are + default, it expects a 200 response if the host is healthy. Expected and retriable response codes are :ref:`configurable `. The - upstream host can return 503 if it wants to immediately notify downstream hosts to no longer - forward traffic to it. + upstream host can return a non-expected or non-retriable status code (any non-200 code by default) if + it wants to immediately notify downstream hosts to no longer forward traffic to it. * **L3/L4**: During L3/L4 health checking, Envoy will send a configurable byte buffer to the upstream host. It expects the byte buffer to be echoed in the response if the host is to be considered healthy. Envoy also supports connect only L3/L4 health checking. 
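To make the health checking behaviour described above concrete, the following is a minimal sketch of a cluster fragment configuring an HTTP health check with explicit expected and retriable status ranges. The ``expected_statuses``/``retriable_statuses`` field names, the ``/healthz`` path and the status ranges are illustrative assumptions and should be verified against the health check API reference:

.. code-block:: yaml

  health_checks:
  - timeout: 1s
    interval: 5s
    unhealthy_threshold: 3
    healthy_threshold: 2
    http_health_check:
      path: /healthz
      # Status ranges use an inclusive start and exclusive end, so 200-299 here.
      expected_statuses:
      - start: 200
        end: 300
      # Assumed field for the newly configurable retriable codes: these count
      # towards the unhealthy threshold without immediately excluding the host.
      retriable_statuses:
      - start: 429
        end: 430

Any other status code (for example 503) is treated as a non-retriable failure and immediately signals that the host should no longer receive traffic, which matches the updated wording above.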
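The DNS resolution section above lists the two built-in resolver extensions but defers the configuration example. As a rough sketch only, a cluster- or bootstrap-level ``typed_dns_resolver_config`` selecting the c-ares resolver could look like the following; the extension name, type URL and field names are assumptions to check against the extension reference:

.. code-block:: yaml

  typed_dns_resolver_config:
    name: envoy.network.dns_resolver.cares
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig
      # Optional explicit resolver addresses; when omitted the system
      # configuration (e.g. /etc/resolv.conf) is used.
      resolvers:
      - socket_address:
          address: 8.8.8.8
          port_value: 53
      dns_resolver_options:
        use_tcp_for_dns_lookups: false
        no_default_search_domain: true

The Apple resolver would be selected the same way, with the ``AppleDnsResolverConfig`` typed config in place of the c-ares one.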
diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/load_balancing.rst b/docs/root/intro/arch_overview/upstream/load_balancing/load_balancing.rst index 36e0fddd3ca8..de648a4b8c73 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/load_balancing.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/load_balancing.rst @@ -15,3 +15,4 @@ Load Balancing original_dst zone_aware subsets + slow_start diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/slow_start.rst b/docs/root/intro/arch_overview/upstream/load_balancing/slow_start.rst new file mode 100644 index 000000000000..e510f6698255 --- /dev/null +++ b/docs/root/intro/arch_overview/upstream/load_balancing/slow_start.rst @@ -0,0 +1,60 @@ +.. _arch_overview_load_balancing_slow_start: + +Slow start mode +=============== + +Slow start mode is a configuration setting in Envoy to progressively increase the amount of traffic sent to newly added upstream endpoints. +With slow start disabled, Envoy immediately sends a proportional share of traffic to new upstream endpoints. +This can be undesirable for services that require warm-up time to serve full production load and can result in request timeouts, loss of data and a deteriorated user experience. + +Slow start mode is a mechanism that affects the load balancing weight of upstream endpoints and can be configured per upstream cluster. +Currently, slow start is supported in the :ref:`Round Robin ` and :ref:`Least Request ` load balancer types. + +Users can specify a :ref:`slow start window parameter` (in seconds), so that if an endpoint's "cluster membership duration" (the amount of time since it joined the cluster) is within the configured window, it enters slow start mode. +During the slow start window, the load balancing weight of a particular endpoint is scaled with a time factor: + +.. math:: + + NewWeight = {Weight*TimeFactor}^\frac{1}{Aggression} + +where + +.. math:: + + TimeFactor = \frac{max(TimeSinceStartInSeconds,1)}{SlowStartWindowInSeconds} + +As time progresses, more and more traffic is sent to the endpoint within the slow start window. + +The :ref:`aggression parameter` non-linearly affects endpoint weight and represents the speed of ramp-up. +By tuning the aggression parameter, one can achieve polynomial or exponential speed for the traffic increase. +The simulation below demonstrates how various values of aggression affect traffic ramp-up: + +.. image:: /_static/slow_start_aggression.svg + :width: 60% + :align: center + +Once the slow start window elapses, the upstream endpoint exits slow start mode and receives a regular amount of traffic according to the load balancing algorithm. +Its load balancing weight will no longer be scaled with runtime bias and aggression. An endpoint can also exit slow start mode if it leaves the cluster. + +To reiterate, an endpoint enters slow start mode: + * If no active health check is configured for the cluster, immediately when its cluster membership duration is within the slow start window. + * If an active health check is configured for the cluster, when its cluster membership duration is within the slow start window and the endpoint has passed an active health check. + If an endpoint does not pass an active health check during the entire slow start window (since it was added to the upstream cluster), it never enters slow start mode. + +An endpoint exits slow start mode when: + * It leaves the cluster. + * Its cluster membership duration is greater than the slow start window. + * It does not pass an active health check configured for the cluster.
+ An endpoint can re-enter slow start if it subsequently passes an active health check and its creation time is still within the slow start window. + +Enabling slow start mode is not recommended in scenarios with low traffic or a high number of endpoints; potential drawbacks are: + * Endpoint starvation, where an endpoint has a low probability of receiving a request, either due to low traffic or a high total number of endpoints. + * Spurious (non-gradual) increases of traffic per endpoint: whenever a starved endpoint receives a request and sufficient time has passed within the slow start window, + its load balancing weight increases non-linearly due to the time factor. + +Below is an example of the resulting load balancing weight for endpoints in the same priority with the Round Robin load balancer type, a slow start window of 60 seconds, no active health check and an aggression of 1.0. +Once endpoints E1 and E2 exit slow start mode, their load balancing weight remains constant: + +.. image:: /_static/slow_start_example.svg + :width: 60% + :align: center diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst index 72d243f0868d..11eb0480fff0 100644 --- a/docs/root/operations/cli.rst +++ b/docs/root/operations/cli.rst @@ -320,6 +320,14 @@ following are the command line options that Envoy supports. or count occurrences of unknown fields, in the interest of configuration processing speed. If :option:`--reject-unknown-dynamic-fields` is set to true, this flag has no effect. + .. attention:: + + In addition to not logging warnings or counting occurrences of unknown fields, setting this + option also disables counting and warnings of deprecated fields as well as work-in-progress + messages and fields. It is *strongly* recommended that this option is left unset on at least a + small portion of the fleet (staging, canary, etc.) in order to monitor for unknown, + deprecated, or work-in-progress usage. + .. option:: --disable-extensions *(optional)* This flag disabled the provided list of comma-separated extension names. Disabled diff --git a/docs/root/start/quick-start/securing.rst b/docs/root/start/quick-start/securing.rst index cf9f0b558c3c..ccfd6bd0ce06 100644 --- a/docs/root/start/quick-start/securing.rst +++ b/docs/root/start/quick-start/securing.rst @@ -12,7 +12,7 @@ Envoy also has support for transmitting and receiving generic ``TCP`` traffic wi Envoy also offers a number of other ``HTTP``-based protocols for authentication and authorization such as :ref:`JWT `, :ref:`RBAC ` -and :ref:`OAuth `. +and :ref:`OAuth `. .. warning:: diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index e8fb8b361f93..13d1e4962107 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -1,75 +1,20 @@ -1.20.0 (Pending) +1.21.0 (Pending) ================ Incompatible Behavior Changes ----------------------------- *Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* -* config: the ``--bootstrap-version`` CLI flag has been removed, Envoy has only been able to accept v3 - bootstrap configurations since 1.18.0. -* contrib: the :ref:`squash filter ` has been moved to - :ref:`contrib images `. -* contrib: the :ref:`kafka broker filter ` has been moved to - :ref:`contrib images `. -* contrib: the :ref:`RocketMQ proxy filter ` has been moved to - :ref:`contrib images `. -* contrib: the :ref:`Postgres proxy filter ` has been moved to - :ref:`contrib images `.
-* contrib: the :ref:`MySQL proxy filter ` has been moved to - :ref:`contrib images `. -* dns_filter: :ref:`dns_filter ` - protobuf fields have been renumbered to restore compatibility with Envoy - 1.18, breaking compatibility with Envoy 1.19.0 and 1.19.1. The new field - numbering allows control planes supporting Envoy 1.18 to gracefully upgrade to - :ref:`dns_resolution_config `, - provided they skip over Envoy 1.19.0 and 1.19.1. - Control planes upgrading from Envoy 1.19.0 and 1.19.1 will need to - vendor the corresponding protobuf definitions to ensure that the - renumbered fields have the types expected by those releases. -* ext_authz: fixed skipping authentication when returning either a direct response or a redirect. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.http_ext_authz_do_not_skip_direct_response_and_redirect`` runtime guard to false. -* extensions: deprecated extension names now default to triggering a configuration error. - The previous warning-only behavior may be temporarily reverted by setting the runtime key - ``envoy.deprecated_features.allow_deprecated_extension_names`` to true. +* xds: ``*`` became a reserved name for a wildcard resource that can be subscribed to and unsubscribed from at any time. This is a requirement for implementing the on-demand xDSes (like on-demand CDS) that can subscribe to specific resources next to their wildcard subscription. If such xDS is subscribed to both wildcard resource and to other specific resource, then in stream reconnection scenario, the xDS will not send an empty initial request, but a request containing ``*`` for wildcard subscription and the rest of the resources the xDS is subscribed to. If the xDS is only subscribed to wildcard resource, it will try to send a legacy wildcard request. This behavior implements the recent changes in :ref:`xDS protocol ` and can be temporarily reverted by setting the ``envoy.restart_features.explicit_wildcard_resource`` runtime guard to false. Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* -* client_ssl_auth filter: now sets additional termination details and **UAEX** response flag when the client certificate is not in the allowed-list. -* config: configuration files ending in .yml now load as YAML. -* config: configuration file extensions now ignore case when deciding the file type. E.g., .JSON file load as JSON. -* config: reduced log level for "Unable to establish new stream" xDS logs to debug. The log level - for "gRPC config stream closed" is now reduced to debug when the status is ``Ok`` or has been - retriable (``DeadlineExceeded``, ``ResourceExhausted``, or ``Unavailable``) for less than 30 - seconds. -* grpc: gRPC async client can be cached and shared across filter instances in the same thread, this feature is turned off by default, can be turned on by setting runtime guard ``envoy.reloadable_features.enable_grpc_async_client_cache`` to true. -* http: correct the use of the ``x-forwarded-proto`` header and the ``:scheme`` header. Where they differ - (which is rare) ``:scheme`` will now be used for serving redirect URIs and cached content. This behavior - can be reverted by setting runtime guard ``correct_scheme_and_xfp`` to false. -* http: reject requests with #fragment in the URI path. The fragment is not allowed to be part of the request - URI according to RFC3986 (3.5), RFC7230 (5.1) and RFC 7540 (8.1.2.3). 
Rejection of requests can be changed - to stripping the #fragment instead by setting the runtime guard ``envoy.reloadable_features.http_reject_path_with_fragment`` - to false. This behavior can further be changed to the deprecated behavior of keeping the fragment by setting the runtime guard - ``envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled``. This runtime guard must only be set - to false when existing non-compliant traffic relies on #fragment in URI. When this option is enabled, Envoy request - authorization extensions may be bypassed. This override and its associated behavior will be decommissioned after the standard deprecation period. -* http: set the default :ref:`lazy headermap threshold ` to 3, - which defines the minimal number of headers in a request/response/trailers required for using a - dictionary in addition to the list. Setting the ``envoy.http.headermap.lazy_map_min_size`` runtime - feature to a non-negative number will override the default value. -* http: stop processing pending H/2 frames if connection transitioned to a closed state. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.skip_dispatching_frames_for_closed_connection`` to false. -* listener: added the :ref:`enable_reuse_port ` - field and changed the default for reuse_port from false to true, as the feature is now well - supported on the majority of production Linux kernels in use. The default change is aware of the hot - restart, as otherwise, the change would not be backward compatible between restarts. This means - that hot restarting onto a new binary will retain the default of false until the binary undergoes - a full restart. To retain the previous behavior, either explicitly set the new configuration - field to false, or set the runtime feature flag ``envoy.reloadable_features.listener_reuse_port_default_enabled`` - to false. As part of this change, the use of reuse_port for TCP listeners on both macOS and - Windows has been disabled due to suboptimal behavior. See the field documentation for more - information. -* listener: destroy per network filter chain stats when a network filter chain is removed during the listener in-place update. -* quic: enables IETF connection migration. This feature requires a stable UDP packet routine in the L4 load balancer with the same first-4-bytes in connection id. It can be turned off by setting runtime guard ``envoy.reloadable_features.FLAGS_quic_reloadable_flag_quic_connection_migration_use_new_cid_v2`` to false. +* config: the log message for "gRPC config stream closed" now uses the most recent error message, and reports seconds instead of milliseconds for how long the most recent status has been received. +* dns: now respecting the returned DNS TTL for resolved hosts, rather than always relying on the hard-coded :ref:`dns_refresh_rate. ` This behavior can be temporarily reverted by setting the runtime guard ``envoy.reloadable_features.use_dns_ttl`` to false. +* listener: destroy per network filter chain stats when a network filter chain is removed during the listener in place update. +* quic: add back the support for IETF draft 29 which is guarded via ``envoy.reloadable_features.FLAGS_quic_reloadable_flag_quic_disable_version_draft_29``. It is off by default so Envoy only supports RFCv1 without flipping this runtime guard explicitly. Draft 29 is not recommended for use. 
Bug Fixes --------- @@ -93,63 +38,56 @@ Bug Fixes * listener: fixed an issue on Windows where connections are not handled by all worker threads. * lua: fix ``BodyBuffer`` setting a Lua string and printing Lua string containing hex characters. Previously, ``BodyBuffer`` setting a Lua string or printing strings with hex characters will be truncated. * xray: fix the AWS X-Ray tracer bug where span's error, fault and throttle information was not reported properly as per the `AWS X-Ray documentation `_. Before this fix, server error was reported under the 'annotations' section of the segment data. +* ext_authz: fix the ext_authz network filter to correctly set response flag and code details to ``UAEX`` when a connection is denied. +* listener: fixed the crash when updating listeners that do not bind to port. +* thrift_proxy: fix the thrift_proxy connection manager to correctly report success/error response metrics when performing :ref:`payload passthrough `. Removed Config or Runtime ------------------------- *Normally occurs at the end of the* :ref:`deprecation period ` -* http: removed ``envoy.reloadable_features.http_upstream_wait_connect_response`` runtime guard and legacy code paths. -* http: removed ``envoy.reloadable_features.allow_preconnect`` runtime guard and legacy code paths. -* listener: removed ``envoy.reloadable_features.disable_tls_inspector_injection`` runtime guard and legacy code paths. -* ocsp: removed ``envoy.reloadable_features.check_ocsp_policy deprecation`` runtime guard and legacy code paths. -* ocsp: removed ``envoy.reloadable_features.require_ocsp_response_for_must_staple_certs deprecation`` and legacy code paths. -* quic: removed ``envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing`` runtime guard. +* compression: removed ``envoy.reloadable_features.enable_compression_without_content_length_header`` runtime guard and legacy code paths. +* health check: removed ``envoy.reloadable_features.health_check.immediate_failure_exclude_from_cluster`` runtime guard and legacy code paths. +* http: removed ``envoy.reloadable_features.add_and_validate_scheme_header`` and legacy code paths. +* http: removed ``envoy.reloadable_features.check_unsupported_typed_per_filter_config``, Envoy will always check unsupported typed per filter config if the filter isn't optional. +* http: removed ``envoy.reloadable_features.dont_add_content_length_for_bodiless_requests deprecation`` and legacy code paths. +* http: removed ``envoy.reloadable_features.grpc_json_transcoder_adhere_to_buffer_limits`` and legacy code paths. +* http: removed ``envoy.reloadable_features.http2_skip_encoding_empty_trailers`` and legacy code paths. Envoy will always encode empty trailers by sending empty data with ``end_stream`` true (instead of sending empty trailers) for HTTP/2. +* http: removed ``envoy.reloadable_features.improved_stream_limit_handling`` and legacy code paths. +* http: removed ``envoy.reloadable_features.remove_forked_chromium_url`` and legacy code paths. +* http: removed ``envoy.reloadable_features.return_502_for_upstream_protocol_errors``. Envoy will always return 502 code upon encountering upstream protocol error. +* http: removed ``envoy.reloadable_features.treat_host_like_authority`` and legacy code paths. +* http: removed ``envoy.reloadable_features.treat_upstream_connect_timeout_as_connect_failure`` and legacy code paths. +* upstream: removed ``envoy.reloadable_features.upstream_host_weight_change_causes_rebuild`` and legacy code paths. 
New Features ------------ -* access_log: added :ref:`METADATA` token to handle all types of metadata (DYNAMIC, CLUSTER, ROUTE). -* bootstrap: added :ref:`inline_headers ` in the bootstrap to make custom inline headers bootstrap configurable. -* contrib: added new :ref:`contrib images ` which contain contrib extensions. -* dns: added :ref:`V4_PREFERRED ` option to return V6 addresses only if V4 addresses are not available. -* ext_authz: added :ref:`dynamic_metadata_from_headers ` to support emitting dynamic metadata from headers returned by an external authorization service via HTTP. -* grpc reverse bridge: added a new :ref:`option ` to support streaming response bodies when withholding gRPC frames from the upstream. -* http: added cluster_header in :ref:`weighted_clusters ` to allow routing to the weighted cluster specified in the request_header. -* http: added :ref:`alternate_protocols_cache_options ` for enabling HTTP/3 connections to servers which advertise HTTP/3 support via `HTTP Alternative Services `_. -* http: added :ref:`string_match ` in the header matcher. -* http: added :ref:`x-envoy-upstream-stream-duration-ms ` that allows configuring the max stream duration via a request header. -* http: added support for :ref:`max_requests_per_connection ` for both upstream and downstream connections. -* http: sanitizing the referer header as documented :ref:`here `. This feature can be temporarily turned off by setting runtime guard ``envoy.reloadable_features.sanitize_http_header_referer`` to false. -* http: validating outgoing HTTP/2 CONNECT requests to ensure that if ``:path`` is set that ``:protocol`` is present. This behavior can be temporarily turned off by setting runtime guard ``envoy.reloadable_features.validate_connect`` to false. -* jwt_authn: added support for :ref:`Jwt Cache ` and its size can be specified by :ref:`jwt_cache_size `. -* jwt_authn: added support for extracting JWTs from request cookies using :ref:`from_cookies `. -* jwt_authn: added support for setting the extracted headers from a successfully verified JWT using :ref:`header_in_metadata ` to dynamic metadata. -* listener: new listener metric ``downstream_cx_transport_socket_connect_timeout`` to track transport socket timeouts. -* lua: added ``header:getAtIndex()`` and ``header:getNumValues()`` methods to :ref:`header object ` for retrieving the value of a header at certain index and get the total number of values for a given header. -* matcher: added :ref:`invert ` for inverting the match result in the metadata matcher. -* overload: add a new overload action that resets streams using a lot of memory. To enable the tracking of allocated bytes in buffers that a stream is using we need to configure the minimum threshold for tracking via:ref:`buffer_factory_config `. We have an overload action ``Envoy::Server::OverloadActionNameValues::ResetStreams`` that takes advantage of the tracking to reset the most expensive stream first. -* rbac: added :ref:`destination_port_range ` for matching range of destination ports. -* rbac: added :ref:`matcher` along with extension category ``extension_category_envoy.rbac.matchers`` for custom RBAC permission matchers. Added reference implementation for matchers :ref:`envoy.rbac.matchers.upstream_ip_port `. -* route config: added :ref:`dynamic_metadata ` for routing based on dynamic metadata. -* router: added retry options predicate extensions configured via - :ref:` `. These - extensions allow modification of requests between retries at the router level. 
There are not - currently any built-in extensions that implement this extension point. -* router: added :ref:`per_try_idle_timeout ` timeout configuration. -* router: added an optional :ref:`override_auto_sni_header ` to support setting SNI value from an arbitrary header other than host/authority. -* sxg_filter: added filter to transform response to SXG package to :ref:`contrib images `. This can be enabled by setting :ref:`SXG ` configuration. -* thrift_proxy: added support for :ref:`mirroring requests `. -* udp: allows updating filter chain in-place through LDS, which is supported by Quic listener. Such listener config will be rejected in other connection-less UDP listener implementations. It can be reverted by ``envoy.reloadable_features.udp_listener_updates_filter_chain_in_place``. -* udp: disallow L4 filter chain in config which configures connection-less UDP listener. It can be reverted by ``envoy.reloadable_features.udp_listener_updates_filter_chain_in_place``. +* api: added support for *xds.type.v3.TypedStruct* in addition to the now-deprecated *udpa.type.v1.TypedStruct* proto message, which is a wrapper proto used to encode typed JSON data in a *google.protobuf.Any* field. +* bootstrap: added :ref:`typed_dns_resolver_config ` in the bootstrap to support DNS resolver as an extension. +* cluster: added :ref:`typed_dns_resolver_config ` in the cluster to support DNS resolver as an extension. +* config: added :ref:`environment_variable ` to the :ref:`DataSource `. +* dns: added :ref:`ALL ` option to return both IPv4 and IPv6 addresses. +* dns_cache: added :ref:`typed_dns_resolver_config ` in the dns_cache to support DNS resolver as an extension. +* dns_filter: added :ref:`typed_dns_resolver_config ` in the dns_filter to support DNS resolver as an extension. +* dns_resolver: added :ref:`CaresDnsResolverConfig` to support c-ares DNS resolver as an extension. +* dns_resolver: added :ref:`AppleDnsResolverConfig` to support apple DNS resolver as an extension. +* ext_authz: added :ref:`query_parameters_to_set ` and :ref:`query_parameters_to_remove ` for adding and removing query string parameters when using a gRPC authorization server. +* http: added support for :ref:`retriable health check status codes `. +* listener: added API for extensions to access :ref:`typed_filter_metadata ` configured in the listener's :ref:`metadata ` field. +* oauth filter: added :ref:`cookie_names ` to allow overriding (default) cookie names (``BearerToken``, ``OauthHMAC``, and ``OauthExpires``) set by the filter. +* thrift_proxy: add upstream response zone metrics in the form ``cluster.cluster_name.zone.local_zone.upstream_zone.thrift.upstream_resp_success``. +* thrift_proxy: add upstream metrics to show decoding errors and whether exception is from local or remote, e.g. ``cluster.cluster_name.thrift.upstream_resp_exception_remote``. +* thrift_proxy: add host level success/error metrics where success is a reply of type success and error is any other response to a call. +* thrift_proxy: support subset lb when using request or route metadata. +* transport_socket: added :ref:`envoy.transport_sockets.tcp_stats ` which generates additional statistics gathered from the OS TCP stack. +* udp: add support for multiple listener filters. +* upstream: added the ability to :ref:`configure max connection duration ` for upstream clusters. +* vcl_socket_interface: added VCL socket interface extension for fd.io VPP integration to :ref:`contrib images `. This can be enabled via :ref:`VCL ` configuration. 
+* xds: re-introduced unified delta and sotw xDS multiplexers that share most of the implementation. Added a new runtime config ``envoy.reloadable_features.unified_mux`` (disabled by default) that when enabled, switches xDS to use unified multiplexers. Deprecated ---------- - -* api: the :ref:`matcher ` field has been deprecated in favor of - :ref:`matcher ` in order to break a build dependency. -* cluster: :ref:`max_requests_per_connection ` is deprecated in favor of :ref:`max_requests_per_connection `. -* http: the HeaderMatcher fields :ref:`exact_match `, :ref:`safe_regex_match `, - :ref:`prefix_match `, :ref:`suffix_match ` and - :ref:`contains_match ` are deprecated by :ref:`string_match `. -* listener: :ref:`reuse_port ` has been - deprecated in favor of :ref:`enable_reuse_port `. - At the same time, the default has been changed from false to true. See above for more information. +* bootstrap: :ref:`dns_resolution_config ` is deprecated in favor of :ref:`typed_dns_resolver_config `. +* cluster: :ref:`dns_resolution_config ` is deprecated in favor of :ref:`typed_dns_resolver_config `. +* dns_cache: :ref:`dns_resolution_config ` is deprecated in favor of :ref:`typed_dns_resolver_config `. +* dns_filter: :ref:`dns_resolution_config ` is deprecated in favor of :ref:`typed_dns_resolver_config `. diff --git a/docs/root/version_history/v1.1.0.rst b/docs/root/version_history/v1.1.0.rst index 4ad2763e52a4..a01703e61c65 100644 --- a/docs/root/version_history/v1.1.0.rst +++ b/docs/root/version_history/v1.1.0.rst @@ -6,26 +6,26 @@ Changes * Switch from Jannson to RapidJSON for our JSON library (allowing for a configuration schema in 1.2.0). -* Upgrade :ref:`recommended version ` of various other libraries. +* Upgrade :ref:`recommended version ` of various other libraries. * Configurable DNS refresh rate for DNS service discovery types. * Upstream circuit breaker configuration can be :ref:`overridden via runtime - `. -* :ref:`Zone aware routing support `. + `. +* :ref:`Zone aware routing support `. * Generic header matching routing rule. * HTTP/2 graceful connection draining (double GOAWAY). -* DynamoDB filter :ref:`per shard statistics ` (pre-release AWS +* DynamoDB filter :ref:`per shard statistics ` (pre-release AWS feature). -* Initial release of the :ref:`fault injection HTTP filter `. -* HTTP :ref:`rate limit filter ` enhancements (note that the +* Initial release of the :ref:`fault injection HTTP filter `. +* HTTP :ref:`rate limit filter ` enhancements (note that the configuration for HTTP rate limiting is going to be overhauled in 1.2.0). -* Added :ref:`refused-stream retry policy `. -* Multiple :ref:`priority queues ` for upstream clusters +* Added :ref:`refused-stream retry policy `. +* Multiple :ref:`priority queues ` for upstream clusters (configurable on a per route basis, with separate connection pools, circuit breakers, etc.). -* Added max connection circuit breaking to the :ref:`TCP proxy filter `. -* Added :ref:`CLI ` options for setting the logging file flush interval as well +* Added max connection circuit breaking to the :ref:`TCP proxy filter `. +* Added :ref:`CLI ` options for setting the logging file flush interval as well as the drain/shutdown time during hot restart. * A very large number of performance enhancements for core HTTP/TCP proxy flows as well as a few new configuration flags to allow disabling expensive features if they are not needed (specifically request ID generation and dynamic response code stats). -* Support Mongo 3.2 in the :ref:`Mongo sniffing filter `. 
+* Support Mongo 3.2 in the :ref:`Mongo sniffing filter `. * Lots of other small fixes and enhancements not listed. diff --git a/docs/root/version_history/v1.10.0.rst b/docs/root/version_history/v1.10.0.rst index b8616a86a7c5..da7f3ee9baa0 100644 --- a/docs/root/version_history/v1.10.0.rst +++ b/docs/root/version_history/v1.10.0.rst @@ -5,97 +5,97 @@ Changes ------- * access log: added a new flag for upstream retry count exceeded. -* access log: added a :ref:`gRPC filter ` to allow filtering on gRPC status. +* access log: added a :ref:`gRPC filter ` to allow filtering on gRPC status. * access log: added a new flag for stream idle timeout. -* access log: added a new field for upstream transport failure reason in :ref:`file access logger ` and - :ref:`gRPC access logger ` for HTTP access logs. +* access log: added a new field for upstream transport failure reason in :ref:`file access logger ` and + :ref:`gRPC access logger ` for HTTP access logs. * access log: added new fields for downstream x509 information (URI sans and subject) to file and gRPC access logger. * admin: the admin server can now be accessed via HTTP/2 (prior knowledge). * admin: changed HTTP response status code from 400 to 405 when attempting to GET a POST-only route (such as /quitquitquit). * buffer: fix vulnerabilities when allocation fails. * build: releases are built with GCC-7 and linked with LLD. -* build: dev docker images :ref:`have been split ` from tagged images for easier +* build: dev docker images :ref:`have been split ` from tagged images for easier discoverability in Docker Hub. Additionally, we now build images for point releases. * config: added support of using google.protobuf.Any in opaque configs for extensions. * config: logging warnings when deprecated fields are in use. * config: removed deprecated --v2-config-only from command line config. -* config: removed deprecated_v1 sds_config from :ref:`Bootstrap config `. -* config: removed the deprecated_v1 config option from :ref:`ring hash `. -* config: removed REST_LEGACY as a valid :ref:`ApiType `. +* config: removed deprecated_v1 sds_config from :ref:`Bootstrap config `. +* config: removed the deprecated_v1 config option from :ref:`ring hash `. +* config: removed REST_LEGACY as a valid :ref:`ApiType `. * config: finish cluster warming only when a named response i.e. ClusterLoadAssignment associated to the cluster being warmed comes in the EDS response. This is a behavioural change from the current implementation where warming of cluster completes on missing load assignments also. * config: use Envoy cpuset size to set the default number or worker threads if :option:`--cpuset-threads` is enabled. -* config: added support for :ref:`initial_fetch_timeout `. The timeout is disabled by default. -* cors: added :ref:`filter_enabled & shadow_enabled RuntimeFractionalPercent flags ` to filter. +* config: added support for :ref:`initial_fetch_timeout `. The timeout is disabled by default. +* cors: added :ref:`filter_enabled & shadow_enabled RuntimeFractionalPercent flags ` to filter. * csrf: added * ext_authz: added support for buffering request body. * ext_authz: migrated from v2alpha to v2 and improved docs. * ext_authz: added a configurable option to make the gRPC service cross-compatible with V2Alpha. Note that this feature is already deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version. * ext_authz: migrated from v2alpha to v2 and improved the documentation. 
* ext_authz: authorization request and response configuration has been separated into two distinct objects: :ref:`authorization request - ` and :ref:`authorization response - `. In addition, :ref:`client headers - ` and :ref:`upstream headers - ` replaces the previous *allowed_authorization_headers* object. - All the control header lists now support :ref:`string matcher ` instead of standard string. + ` and :ref:`authorization response + `. In addition, :ref:`client headers + ` and :ref:`upstream headers + ` replaces the previous *allowed_authorization_headers* object. + All the control header lists now support :ref:`string matcher ` instead of standard string. * fault: added the :ref:`max_active_faults - ` setting, as well as - :ref:`statistics ` for the number of active faults + ` setting, as well as + :ref:`statistics ` for the number of active faults and the number of faults the overflowed. * fault: added :ref:`response rate limit - ` fault injection. + ` fault injection. * fault: added :ref:`HTTP header fault configuration - ` to the HTTP fault filter. + ` to the HTTP fault filter. * governance: extending Envoy deprecation policy from 1 release (0-3 months) to 2 releases (3-6 months). -* health check: expected response codes in http health checks are now :ref:`configurable `. +* health check: expected response codes in http health checks are now :ref:`configurable `. * http: added new grpc_http1_reverse_bridge filter for converting gRPC requests into HTTP/1.1 requests. * http: fixed a bug where Content-Length:0 was added to HTTP/1 204 responses. -* http: added :ref:`max request headers size `. The default behaviour is unchanged. +* http: added :ref:`max request headers size `. The default behaviour is unchanged. * http: added modifyDecodingBuffer/modifyEncodingBuffer to allow modifying the buffered request/response data. * http: added encodeComplete/decodeComplete. These are invoked at the end of the stream, after all data has been encoded/decoded respectively. Default implementation is a no-op. -* outlier_detection: added support for :ref:`outlier detection event protobuf-based logging `. -* mysql: added a MySQL proxy filter that is capable of parsing SQL queries over MySQL wire protocol. Refer to :ref:`MySQL proxy ` for more details. +* outlier_detection: added support for :ref:`outlier detection event protobuf-based logging `. +* mysql: added a MySQL proxy filter that is capable of parsing SQL queries over MySQL wire protocol. Refer to :ref:`MySQL proxy ` for more details. * performance: new buffer implementation (disabled by default; to test it, add "--use-libevent-buffers 0" to the command-line arguments when starting Envoy). -* jwt_authn: added :ref:`filter_state_rules ` to allow specifying requirements from filterState by other filters. +* jwt_authn: added :ref:`filter_state_rules ` to allow specifying requirements from filterState by other filters. * ratelimit: removed deprecated rate limit configuration from bootstrap. -* redis: added :ref:`hashtagging ` to guarantee a given key's upstream. -* redis: added :ref:`latency stats ` for commands. -* redis: added :ref:`success and error stats ` for commands. +* redis: added :ref:`hashtagging ` to guarantee a given key's upstream. +* redis: added :ref:`latency stats ` for commands. +* redis: added :ref:`success and error stats ` for commands. * redis: migrate hash function for host selection to `MurmurHash2 `_ from std::hash. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. 
This is typically the case when compiled on Linux and not macOS. -* redis: added :ref:`latency_in_micros ` to specify the redis commands stats time unit in microseconds. -* router: added ability to configure a :ref:`retry policy ` at the +* redis: added :ref:`latency_in_micros ` to specify the redis commands stats time unit in microseconds. +* router: added ability to configure a :ref:`retry policy ` at the virtual host level. * router: added reset reason to response body when upstream reset happens. After this change, the response body will be of the form `upstream connect error or disconnect/reset before headers. reset reason:` -* router: added :ref:`rq_reset_after_downstream_response_started ` counter stat to router stats. -* router: added per-route configuration of :ref:`internal redirects `. +* router: added :ref:`rq_reset_after_downstream_response_started ` counter stat to router stats. +* router: added per-route configuration of :ref:`internal redirects `. * router: removed deprecated route-action level headers_to_add/remove. -* router: made :ref:`max retries header ` take precedence over the number of retries in route and virtual host retry policies. -* router: added support for prefix wildcards in :ref:`virtual host domains ` +* router: made :ref:`max retries header ` take precedence over the number of retries in route and virtual host retry policies. +* router: added support for prefix wildcards in :ref:`virtual host domains ` * stats: added support for histograms in prometheus * stats: added usedonly flag to prometheus stats to only output metrics which have been updated at least once. * stats: added gauges tracking remaining resources before circuit breakers open. -* tap: added new alpha :ref:`HTTP tap filter `. +* tap: added new alpha :ref:`HTTP tap filter `. * tls: enabled TLS 1.3 on the server-side (non-FIPS builds). -* upstream: add hash_function to specify the hash function for :ref:`ring hash ` as either xxHash or `murmurHash2 `_. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled on Linux and not macOS. -* upstream: added :ref:`degraded health value ` which allows +* upstream: add hash_function to specify the hash function for :ref:`ring hash ` as either xxHash or `murmurHash2 `_. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled on Linux and not macOS. +* upstream: added :ref:`degraded health value ` which allows routing to certain hosts only when there are insufficient healthy hosts available. -* upstream: add cluster factory to allow creating and registering :ref:`custom cluster type `. -* upstream: added a :ref:`circuit breaker ` to limit the number of concurrent connection pools in use. -* tracing: added :ref:`verbose ` to support logging annotations on spans. -* upstream: added support for host weighting and :ref:`locality weighting ` in the :ref:`ring hash load balancer `, and added a :ref:`maximum_ring_size ` config parameter to strictly bound the ring size. +* upstream: add cluster factory to allow creating and registering :ref:`custom cluster type `. +* upstream: added a :ref:`circuit breaker ` to limit the number of concurrent connection pools in use. +* tracing: added :ref:`verbose ` to support logging annotations on spans. +* upstream: added support for host weighting and :ref:`locality weighting ` in the :ref:`ring hash load balancer `, and added a :ref:`maximum_ring_size ` config parameter to strictly bound the ring size. 
* zookeeper: added a ZooKeeper proxy filter that parses ZooKeeper messages (requests/responses/events). - Refer to :ref:`ZooKeeper proxy ` for more details. + Refer to :ref:`ZooKeeper proxy ` for more details. * upstream: added configuration option to select any host when the fallback policy fails. * upstream: stopped incrementing upstream_rq_total for HTTP/1 conn pool when request is circuit broken. Deprecated ---------- -* Use of `use_alpha` in :ref:`Ext-Authz Authorization Service ` is deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version. +* Use of `use_alpha` in :ref:`Ext-Authz Authorization Service ` is deprecated. It should be used for a short time, and only when transitioning from alpha to V2 release version. * Use of ``enabled`` in ``CorsPolicy``, found in - :ref:`route.proto `. + :ref:`route.proto `. Set the ``filter_enabled`` field instead. * Use of the ``type`` field in the ``FaultDelay`` message (found in - :ref:`fault.proto `) + :ref:`fault.proto `) has been deprecated. It was never used and setting it has no effect. It will be removed in the following release. diff --git a/docs/root/version_history/v1.11.0.rst b/docs/root/version_history/v1.11.0.rst index 78c9dce6d7c6..b59b25d88763 100644 --- a/docs/root/version_history/v1.11.0.rst +++ b/docs/root/version_history/v1.11.0.rst @@ -6,115 +6,115 @@ Changes * access log: added a new field for downstream TLS session ID to file and gRPC access logger. * access log: added a new field for route name to file and gRPC access logger. -* access log: added a new field for response code details in :ref:`file access logger ` and :ref:`gRPC access logger `. -* access log: added several new variables for exposing information about the downstream TLS connection to :ref:`file access logger ` and :ref:`gRPC access logger `. +* access log: added a new field for response code details in :ref:`file access logger ` and :ref:`gRPC access logger `. +* access log: added several new variables for exposing information about the downstream TLS connection to :ref:`file access logger ` and :ref:`gRPC access logger `. * access log: added a new flag for request rejected due to failed strict header check. -* admin: the administration interface now includes a :ref:`/ready endpoint ` for easier readiness checks. -* admin: extend :ref:`/runtime_modify endpoint ` to support parameters within the request body. -* admin: the :ref:`/listener endpoint ` now returns :ref:`listeners.proto ` which includes listener names and ports. +* admin: the administration interface now includes a :ref:`/ready endpoint ` for easier readiness checks. +* admin: extend :ref:`/runtime_modify endpoint ` to support parameters within the request body. +* admin: the :ref:`/listener endpoint ` now returns :ref:`listeners.proto ` which includes listener names and ports. * admin: added host priority to :http:get:`/clusters` and :http:get:`/clusters?format=json` endpoint response -* admin: the :ref:`/clusters endpoint ` now shows hostname +* admin: the :ref:`/clusters endpoint ` now shows hostname for each host, useful for DNS based clusters. * api: track and report requests issued since last load report. * build: releases are built with Clang and linked with LLD. -* config: added :ref:`stats_server_version_override ` in bootstrap, that can be used to override :ref:`server.version statistic `. 
-* control-plane: management servers can respond with HTTP 304 to indicate that config is up to date for Envoy proxies polling a :ref:`REST API Config Type ` +* config: added :ref:`stats_server_version_override ` in bootstrap, that can be used to override :ref:`server.version statistic `. +* control-plane: management servers can respond with HTTP 304 to indicate that config is up to date for Envoy proxies polling a :ref:`REST API Config Type ` * csrf: added support for allowlisting additional source origins. * dns: added support for getting DNS record TTL which is used by STRICT_DNS/LOGICAL_DNS cluster as DNS refresh rate. -* dubbo_proxy: support the :ref:`dubbo proxy filter `. +* dubbo_proxy: support the :ref:`dubbo proxy filter `. * dynamo_request_parser: adding support for transactions. Adds check for new types of dynamodb operations (TransactWriteItems, TransactGetItems) and awareness for new types of dynamodb errors (IdempotentParameterMismatchException, TransactionCanceledException, TransactionInProgressException). -* eds: added support to specify max time for which endpoints can be used :ref:`gRPC filter `. +* eds: added support to specify max time for which endpoints can be used :ref:`gRPC filter `. * eds: removed max limit for ``load_balancing_weight``. -* event: added :ref:`loop duration and poll delay statistics `. +* event: added :ref:`loop duration and poll delay statistics `. * ext_authz: added a ``x-envoy-auth-partial-body`` metadata header set to ``false|true`` indicating if there is a partial body sent in the authorization request message. * ext_authz: added configurable status code that allows customizing HTTP responses on filter check status errors. * ext_authz: added option to ``ext_authz`` that allows the filter clearing route cache. * grpc-json: added support for :ref:`auto mapping - `. -* health check: added :ref:`initial jitter ` to add jitter to the first health check in order to prevent thundering herd on Envoy startup. + `. +* health check: added :ref:`initial jitter ` to add jitter to the first health check in order to prevent thundering herd on Envoy startup. * hot restart: stats are no longer shared between hot restart parent/child via shared memory, but rather by RPC. Hot restart version incremented to 11. * http: added the ability to pass a URL encoded PEM encoded peer certificate chain in the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header. * http: fixed a bug where large unbufferable responses were not tracked in stats and logs correctly. * http: fixed a crashing bug where gRPC local replies would cause segfaults when upstream access logging was on. -* http: mitigated a race condition with the :ref:`delayed_close_timeout ` where it could trigger while actively flushing a pending write buffer for a downstream connection. -* http: added support for :ref:`preserve_external_request_id ` that represents whether the x-request-id should not be reset on edge entry inside mesh +* http: mitigated a race condition with the :ref:`delayed_close_timeout ` where it could trigger while actively flushing a pending write buffer for a downstream connection. +* http: added support for :ref:`preserve_external_request_id ` that represents whether the x-request-id should not be reset on edge entry inside mesh * http: changed ``sendLocalReply`` to send percent-encoded ``GrpcMessage``. -* http: added a :ref:`header_prefix ` configuration option to allow Envoy to send and process x-custom- prefixed headers rather than x-envoy. 
-* http: added :ref:`dynamic forward proxy ` support. +* http: added a :ref:`header_prefix ` configuration option to allow Envoy to send and process x-custom- prefixed headers rather than x-envoy. +* http: added :ref:`dynamic forward proxy ` support. * http: tracking the active stream and dumping state in Envoy crash handlers. This can be disabled by building with ``--define disable_object_dump_on_signal_trace=disabled`` * jwt_authn: make filter's parsing of JWT more flexible, allowing syntax like ``jwt=eyJhbGciOiJS...ZFnFIw,extra=7,realm=123`` -* listener: added :ref:`source IP ` - and :ref:`source port ` filter +* listener: added :ref:`source IP ` + and :ref:`source port ` filter chain matching. * lua: exposed functions to Lua to verify digital signature. -* original_src filter: added the :ref:`filter `. -* outlier_detector: added configuration :ref:`outlier_detection.split_external_local_origin_errors ` to distinguish locally and externally generated errors. See :ref:`arch_overview_outlier_detection` for full details. +* original_src filter: added the :ref:`filter `. +* outlier_detector: added configuration :ref:`outlier_detection.split_external_local_origin_errors ` to distinguish locally and externally generated errors. See :ref:`arch_overview_outlier_detection` for full details. * rbac: migrated from v2alpha to v2. * redis: add support for Redis cluster custom cluster type. * redis: automatically route commands using cluster slots for Redis cluster. -* redis: added :ref:`prefix routing ` to enable routing commands based on their key's prefix to different upstream. -* redis: added :ref:`request mirror policy ` to enable shadow traffic and/or dual writes. +* redis: added :ref:`prefix routing ` to enable routing commands based on their key's prefix to different upstream. +* redis: added :ref:`request mirror policy ` to enable shadow traffic and/or dual writes. * redis: add support for zpopmax and zpopmin commands. * redis: added - :ref:`max_buffer_size_before_flush ` to batch commands together until the encoder buffer hits a certain size, and - :ref:`buffer_flush_timeout ` to control how quickly the buffer is flushed if it is not full. -* redis: added auth support :ref:`downstream_auth_password ` for downstream client authentication, and :ref:`auth_password ` to configure authentication passwords for upstream server clusters. -* retry: added a retry predicate that :ref:`rejects canary hosts. ` -* router: add support for configuring a :ref:`gRPC timeout offset ` on incoming requests. -* router: added ability to control retry back-off intervals via :ref:`retry policy `. -* router: added ability to issue a hedged retry in response to a per try timeout via a :ref:`hedge policy `. + :ref:`max_buffer_size_before_flush ` to batch commands together until the encoder buffer hits a certain size, and + :ref:`buffer_flush_timeout ` to control how quickly the buffer is flushed if it is not full. +* redis: added auth support :ref:`downstream_auth_password ` for downstream client authentication, and :ref:`auth_password ` to configure authentication passwords for upstream server clusters. +* retry: added a retry predicate that :ref:`rejects canary hosts. ` +* router: add support for configuring a :ref:`gRPC timeout offset ` on incoming requests. +* router: added ability to control retry back-off intervals via :ref:`retry policy `. +* router: added ability to issue a hedged retry in response to a per try timeout via a :ref:`hedge policy `. 
* router: added a route name field to each http route in route.Route list * router: added several new variables for exposing information about the downstream TLS connection via :ref:`header - formatters `. + formatters `. * router: per try timeouts will no longer start before the downstream request has been received in full by the router.This ensures that the per try timeout does not account for slow downstreams and that will not start before the global timeout. -* router: added :ref:`RouteAction's auto_host_rewrite_header ` to allow upstream host header substitution with some other header's value +* router: added :ref:`RouteAction's auto_host_rewrite_header ` to allow upstream host header substitution with some other header's value * router: added support for UPSTREAM_REMOTE_ADDRESS :ref:`header formatter - `. + `. * router: add ability to reject a request that includes invalid values for - headers configured in :ref:`strict_check_headers ` + headers configured in :ref:`strict_check_headers ` * runtime: added support for :ref:`flexible layering configuration - `. + `. * runtime: added support for statically :ref:`specifying the runtime in the bootstrap configuration - `. -* runtime: :ref:`Runtime Discovery Service (RTDS) ` support added to layered runtime configuration. -* sandbox: added :ref:`CSRF sandbox `. + `. +* runtime: :ref:`Runtime Discovery Service (RTDS) ` support added to layered runtime configuration. +* sandbox: added :ref:`CSRF sandbox `. * server: ``--define manual_stamp=manual_stamp`` was added to allow server stamping outside of binary rules. more info in the `bazel docs `_. -* server: added :ref:`server state ` statistic. -* server: added :ref:`initialization_time_ms ` statistic. -* subset: added :ref:`list_as_any ` option to +* server: added :ref:`server state ` statistic. +* server: added :ref:`initialization_time_ms ` statistic. +* subset: added :ref:`list_as_any ` option to the subset lb which allows matching metadata against any of the values in a list value on the endpoints. -* tools: added :repo:`proto ` support for :ref:`router check tool ` tests. +* tools: added :repo:`proto ` support for :ref:`router check tool ` tests. * tracing: add trace sampling configuration to the route, to override the route level. -* upstream: added :ref:`upstream_cx_pool_overflow ` for the connection pool circuit breaker. +* upstream: added :ref:`upstream_cx_pool_overflow ` for the connection pool circuit breaker. * upstream: an EDS management server can now force removal of a host that is still passing active health checking by first marking the host as failed via EDS health check and subsequently removing it in a future update. This is a mechanism to work around a race condition in which an EDS implementation may remove a host before it has stopped passing active HC, thus causing the host to become stranded until a future update. -* upstream: added :ref:`an option ` +* upstream: added :ref:`an option ` that allows ignoring new hosts for the purpose of load balancing calculations until they have been health checked for the first time. * upstream: added runtime error checking to prevent setting dns type to STRICT_DNS or LOGICAL_DNS when custom resolver name is specified. -* upstream: added possibility to override fallback_policy per specific selector in :ref:`subset load balancer `. -* upstream: the :ref:`logical DNS cluster ` now +* upstream: added possibility to override fallback_policy per specific selector in :ref:`subset load balancer `. 
+* upstream: the :ref:`logical DNS cluster ` now displays the current resolved IP address in admin output instead of 0.0.0.0. Deprecated ---------- * The --max-stats and --max-obj-name-len flags no longer has any effect. -* Use of :ref:`cluster ` in :ref:`redis_proxy.proto ` is deprecated. Set a :ref:`catch_all_route ` instead. -* Use of :ref:`catch_all_cluster ` in :ref:`redis_proxy.proto ` is deprecated. Set a :ref:`catch_all_route ` instead. -* Use of json based schema in router check tool tests. The tests should follow validation :repo:`schema `. -* Use of the v1 style route configuration for the :ref:`TCP proxy filter ` - is now fully replaced with listener :ref:`filter chain matching `. +* Use of :ref:`cluster ` in :ref:`redis_proxy.proto ` is deprecated. Set a :ref:`catch_all_route ` instead. +* Use of :ref:`catch_all_cluster ` in :ref:`redis_proxy.proto ` is deprecated. Set a :ref:`catch_all_route ` instead. +* Use of json based schema in router check tool tests. The tests should follow validation :repo:`schema `. +* Use of the v1 style route configuration for the :ref:`TCP proxy filter ` + is now fully replaced with listener :ref:`filter chain matching `. Use this instead. -* Use of :ref:`runtime ` in :ref:`Bootstrap - `. Use :ref:`layered_runtime - ` instead. +* Use of :ref:`runtime ` in :ref:`Bootstrap + `. Use :ref:`layered_runtime + ` instead. * Specifying "deprecated_v1: true" in HTTP and network filter configuration to allow loading JSON configuration is now deprecated and will be removed in a following release. Update any custom filters to use protobuf configuration. A struct can be used for a mostly 1:1 conversion if needed. diff --git a/docs/root/version_history/v1.11.1.rst b/docs/root/version_history/v1.11.1.rst index 53176eac2b29..7d87049d2049 100644 --- a/docs/root/version_history/v1.11.1.rst +++ b/docs/root/version_history/v1.11.1.rst @@ -5,15 +5,15 @@ Changes ------- * http: added mitigation of client initiated attacks that result in flooding of the downstream HTTP/2 connections. Those attacks can be logged at the "warning" level when the runtime feature ``http.connection_manager.log_flood_exception`` is enabled. The runtime setting defaults to disabled to avoid log spam when under attack. -* http: added :ref:`inbound_empty_frames_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag. The limit is configured by setting the :ref:`max_consecutive_inbound_frames_with_empty_payload config setting `. - Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_consecutive_inbound_frames_with_empty_payload`` overrides :ref:`max_consecutive_inbound_frames_with_empty_payload setting `. Large override value (i.e. 2147483647) effectively disables mitigation of inbound frames with empty payload. -* http: added :ref:`inbound_priority_frames_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on inbound PRIORITY frames. The limit is configured by setting the :ref:`max_inbound_priority_frames_per_stream config setting `. - Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_inbound_priority_frames_per_stream`` overrides :ref:`max_inbound_priority_frames_per_stream setting `. Large override value effectively disables flood mitigation of inbound PRIORITY frames. 
-* http: added :ref:`inbound_window_update_frames_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on inbound WINDOW_UPDATE frames. The limit is configured by setting the :ref:`max_inbound_window_update_frames_per_data_frame_sent config setting `. - Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_inbound_window_update_frames_per_data_frame_sent`` overrides :ref:`max_inbound_window_update_frames_per_data_frame_sent setting `. Large override value effectively disables flood mitigation of inbound WINDOW_UPDATE frames. -* http: added :ref:`outbound_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the outbound queue limit. The limit is configured by setting the :ref:`max_outbound_frames config setting ` - Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_outbound_frames`` overrides :ref:`max_outbound_frames config setting `. Large override value effectively disables flood mitigation of outbound frames of all types. -* http: added :ref:`outbound_control_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the outbound queue limit for PING, SETTINGS and RST_STREAM frames. The limit is configured by setting the :ref:`max_outbound_control_frames config setting `. - Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_outbound_control_frames`` overrides :ref:`max_outbound_control_frames config setting `. Large override value effectively disables flood mitigation of outbound frames of types PING, SETTINGS and RST_STREAM. -* http: enabled strict validation of HTTP/2 messaging. Previous behavior can be restored using :ref:`stream_error_on_invalid_http_messaging config setting `. - Runtime feature ``envoy.reloadable_features.http2_protocol_options.stream_error_on_invalid_http_messaging`` overrides :ref:`stream_error_on_invalid_http_messaging config setting `. +* http: added :ref:`inbound_empty_frames_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag. The limit is configured by setting the :ref:`max_consecutive_inbound_frames_with_empty_payload config setting `. + Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_consecutive_inbound_frames_with_empty_payload`` overrides :ref:`max_consecutive_inbound_frames_with_empty_payload setting `. Large override value (i.e. 2147483647) effectively disables mitigation of inbound frames with empty payload. +* http: added :ref:`inbound_priority_frames_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on inbound PRIORITY frames. The limit is configured by setting the :ref:`max_inbound_priority_frames_per_stream config setting `. + Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_inbound_priority_frames_per_stream`` overrides :ref:`max_inbound_priority_frames_per_stream setting `. Large override value effectively disables flood mitigation of inbound PRIORITY frames. +* http: added :ref:`inbound_window_update_frames_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the limit on inbound WINDOW_UPDATE frames. The limit is configured by setting the :ref:`max_inbound_window_update_frames_per_data_frame_sent config setting `. 
+ Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_inbound_window_update_frames_per_data_frame_sent`` overrides :ref:`max_inbound_window_update_frames_per_data_frame_sent setting `. Large override value effectively disables flood mitigation of inbound WINDOW_UPDATE frames. +* http: added :ref:`outbound_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the outbound queue limit. The limit is configured by setting the :ref:`max_outbound_frames config setting ` + Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_outbound_frames`` overrides :ref:`max_outbound_frames config setting `. Large override value effectively disables flood mitigation of outbound frames of all types. +* http: added :ref:`outbound_control_flood ` counter stat to the HTTP/2 codec stats, for tracking number of connections terminated for exceeding the outbound queue limit for PING, SETTINGS and RST_STREAM frames. The limit is configured by setting the :ref:`max_outbound_control_frames config setting `. + Runtime feature ``envoy.reloadable_features.http2_protocol_options.max_outbound_control_frames`` overrides :ref:`max_outbound_control_frames config setting `. Large override value effectively disables flood mitigation of outbound frames of types PING, SETTINGS and RST_STREAM. +* http: enabled strict validation of HTTP/2 messaging. Previous behavior can be restored using :ref:`stream_error_on_invalid_http_messaging config setting `. + Runtime feature ``envoy.reloadable_features.http2_protocol_options.stream_error_on_invalid_http_messaging`` overrides :ref:`stream_error_on_invalid_http_messaging config setting `. diff --git a/docs/root/version_history/v1.11.2.rst b/docs/root/version_history/v1.11.2.rst index 77f6b40f118d..981c32133359 100644 --- a/docs/root/version_history/v1.11.2.rst +++ b/docs/root/version_history/v1.11.2.rst @@ -5,17 +5,17 @@ Changes ------- * http: fixed CVE-2019-15226 by adding a cached byte size in HeaderMap. -* http: added :ref:`max headers count ` for http connections. The default limit is 100. -* upstream: runtime feature `envoy.reloadable_features.max_response_headers_count` overrides the default limit for upstream :ref:`max headers count ` -* http: added :ref:`common_http_protocol_options ` - Runtime feature `envoy.reloadable_features.max_request_headers_count` overrides the default limit for downstream :ref:`max headers count ` +* http: added :ref:`max headers count ` for http connections. The default limit is 100. +* upstream: runtime feature `envoy.reloadable_features.max_response_headers_count` overrides the default limit for upstream :ref:`max headers count ` +* http: added :ref:`common_http_protocol_options ` + Runtime feature `envoy.reloadable_features.max_request_headers_count` overrides the default limit for downstream :ref:`max headers count ` * regex: backported safe regex matcher fix for CVE-2019-15225. Deprecated ---------- * Use of :ref:`idle_timeout - ` + ` is deprecated. Use :ref:`common_http_protocol_options - ` + ` instead. diff --git a/docs/root/version_history/v1.12.0.rst b/docs/root/version_history/v1.12.0.rst index ef1d0050194a..159d4c738bf7 100644 --- a/docs/root/version_history/v1.12.0.rst +++ b/docs/root/version_history/v1.12.0.rst @@ -4,86 +4,86 @@ Changes ------- -* access log: added a new flag for :ref:`downstream protocol error `. -* access log: added :ref:`buffering ` and :ref:`periodical flushing ` support to gRPC access logger. 
Defaults to 16KB buffer and flushing every 1 second. -* access log: added DOWNSTREAM_DIRECT_REMOTE_ADDRESS and DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT :ref:`access log formatters ` and gRPC access logger. -* access log: gRPC Access Log Service (ALS) support added for :ref:`TCP access logs `. -* access log: reintroduced :ref:`filesystem ` stats and added the `write_failed` counter to track failed log writes. -* admin: added ability to configure listener :ref:`socket options `. -* admin: added config dump support for Secret Discovery Service :ref:`SecretConfigDump `. -* admin: added support for :ref:`draining ` listeners via admin interface. +* access log: added a new flag for :ref:`downstream protocol error `. +* access log: added :ref:`buffering ` and :ref:`periodical flushing ` support to gRPC access logger. Defaults to 16KB buffer and flushing every 1 second. +* access log: added DOWNSTREAM_DIRECT_REMOTE_ADDRESS and DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT :ref:`access log formatters ` and gRPC access logger. +* access log: gRPC Access Log Service (ALS) support added for :ref:`TCP access logs `. +* access log: reintroduced :ref:`filesystem ` stats and added the `write_failed` counter to track failed log writes. +* admin: added ability to configure listener :ref:`socket options `. +* admin: added config dump support for Secret Discovery Service :ref:`SecretConfigDump `. +* admin: added support for :ref:`draining ` listeners via admin interface. * admin: added :http:get:`/stats/recentlookups`, :http:post:`/stats/recentlookups/clear`, :http:post:`/stats/recentlookups/disable`, and :http:post:`/stats/recentlookups/enable` endpoints. -* api: added :ref:`set_node_on_first_message_only ` option to omit the node identifier from the subsequent discovery requests on the same stream. +* api: added :ref:`set_node_on_first_message_only ` option to omit the node identifier from the subsequent discovery requests on the same stream. * buffer filter: now populates content-length header if not present. This behavior can be temporarily disabled using the runtime feature ``envoy.reloadable_features.buffer_filter_populate_content_length``. * build: official released binary is now PIE so it can be run with ASLR. -* config: added support for :ref:`delta xDS ` (including ADS) delivery. +* config: added support for :ref:`delta xDS ` (including ADS) delivery. * config: enforcing that terminal filters (e.g. HttpConnectionManager for L4, router for L7) be the last in their respective filter chains. -* config: added access log :ref:`extension filter `. +* config: added access log :ref:`extension filter `. * config: added support for :option:`--reject-unknown-dynamic-fields`, providing independent control over whether unknown fields are rejected in static and dynamic configuration. By default, unknown fields in static configuration are rejected and are allowed in dynamic configuration. Warnings are logged for the first use of any unknown field and these occurrences are counted in the - :ref:`server.static_unknown_fields ` and :ref:`server.dynamic_unknown_fields - ` statistics. + :ref:`server.static_unknown_fields ` and :ref:`server.dynamic_unknown_fields + ` statistics. * config: added async data access for local and remote data sources. -* config: changed the default value of :ref:`initial_fetch_timeout ` from 0s to 15s. This is a change in behaviour in the sense that Envoy will move to the next initialization phase, even if the first config is not delivered in 15s. 
Refer to :ref:`initialization process ` for more details. -* config: added stat :ref:`init_fetch_timeout `. -* config: tls_context in Cluster and FilterChain are deprecated in favor of transport socket. See :ref:`deprecated documentation ` for more information. +* config: changed the default value of :ref:`initial_fetch_timeout ` from 0s to 15s. This is a change in behaviour in the sense that Envoy will move to the next initialization phase, even if the first config is not delivered in 15s. Refer to :ref:`initialization process ` for more details. +* config: added stat :ref:`init_fetch_timeout `. +* config: tls_context in Cluster and FilterChain are deprecated in favor of transport socket. See :ref:`deprecated documentation ` for more information. * csrf: added PATCH to supported methods. -* dns: added support for configuring :ref:`dns_failure_refresh_rate ` to set the DNS refresh rate during failures. -* ext_authz: added :ref:`configurable ability ` to send dynamic metadata to the `ext_authz` service. -* ext_authz: added :ref:`filter_enabled RuntimeFractionalPercent flag ` to filter. +* dns: added support for configuring :ref:`dns_failure_refresh_rate ` to set the DNS refresh rate during failures. +* ext_authz: added :ref:`configurable ability ` to send dynamic metadata to the `ext_authz` service. +* ext_authz: added :ref:`filter_enabled RuntimeFractionalPercent flag ` to filter. * ext_authz: added tracing to the HTTP client. -* ext_authz: deprecated :ref:`cluster scope stats ` in favour of filter scope stats. -* fault: added overrides for default runtime keys in :ref:`HTTPFault ` filter. -* grpc: added :ref:`AWS IAM grpc credentials extension ` for AWS-managed xDS. -* grpc: added :ref:`gRPC stats filter ` for collecting stats about gRPC calls and streaming message counts. -* grpc-json: added support for :ref:`ignoring unknown query parameters `. -* grpc-json: added support for :ref:`the grpc-status-details-bin header `. -* header to metadata: added :ref:`PROTOBUF_VALUE ` and :ref:`ValueEncode ` to support protobuf Value and Base64 encoding. -* http: added a default one hour idle timeout to upstream and downstream connections. HTTP connections with no streams and no activity will be closed after one hour unless the default idle_timeout is overridden. To disable upstream idle timeouts, set the :ref:`idle_timeout ` to zero in Cluster :ref:`http_protocol_options `. To disable downstream idle timeouts, either set :ref:`idle_timeout ` to zero in the HttpConnectionManager :ref:`common_http_protocol_options ` or set the deprecated :ref:`connection manager ` field to zero. -* http: added the ability to format HTTP/1.1 header keys using :ref:`header_key_format `. +* ext_authz: deprecated :ref:`cluster scope stats ` in favour of filter scope stats. +* fault: added overrides for default runtime keys in :ref:`HTTPFault ` filter. +* grpc: added :ref:`AWS IAM grpc credentials extension ` for AWS-managed xDS. +* grpc: added :ref:`gRPC stats filter ` for collecting stats about gRPC calls and streaming message counts. +* grpc-json: added support for :ref:`ignoring unknown query parameters `. +* grpc-json: added support for :ref:`the grpc-status-details-bin header `. +* header to metadata: added :ref:`PROTOBUF_VALUE ` and :ref:`ValueEncode ` to support protobuf Value and Base64 encoding. +* http: added a default one hour idle timeout to upstream and downstream connections. HTTP connections with no streams and no activity will be closed after one hour unless the default idle_timeout is overridden. 
To disable upstream idle timeouts, set the :ref:`idle_timeout ` to zero in Cluster :ref:`http_protocol_options `. To disable downstream idle timeouts, either set :ref:`idle_timeout ` to zero in the HttpConnectionManager :ref:`common_http_protocol_options ` or set the deprecated :ref:`connection manager ` field to zero. +* http: added the ability to format HTTP/1.1 header keys using :ref:`header_key_format `. * http: added the ability to reject HTTP/1.1 requests with invalid HTTP header values, using the runtime feature ``envoy.reloadable_features.strict_header_validation``. * http: changed Envoy to forward existing x-forwarded-proto from upstream trusted proxies. Guarded by ``envoy.reloadable_features.trusted_forwarded_proto`` which defaults true. -* http: added the ability to configure the behavior of the server response header, via the :ref:`server_header_transformation ` field. -* http: added the ability to :ref:`merge adjacent slashes ` in the path. -* http: :ref:`AUTO ` codec protocol inference now requires the H2 magic bytes to be the first bytes transmitted by a downstream client. +* http: added the ability to configure the behavior of the server response header, via the :ref:`server_header_transformation ` field. +* http: added the ability to :ref:`merge adjacent slashes ` in the path. +* http: :ref:`AUTO ` codec protocol inference now requires the H2 magic bytes to be the first bytes transmitted by a downstream client. * http: remove h2c upgrade headers for HTTP/1 as h2c upgrades are currently not supported. -* http: absolute URL support is now on by default. The prior behavior can be reinstated by setting :ref:`allow_absolute_url ` to false. -* http: support :ref:`host rewrite ` in the dynamic forward proxy. -* http: support :ref:`disabling the filter per route ` in the grpc http1 reverse bridge filter. -* http: added the ability to :ref:`configure max connection duration ` for downstream connections. -* listeners: added :ref:`continue_on_listener_filters_timeout ` to configure whether a listener will still create a connection when listener filters time out. -* listeners: added :ref:`HTTP inspector listener filter `. -* listeners: added :ref:`connection balancer ` +* http: absolute URL support is now on by default. The prior behavior can be reinstated by setting :ref:`allow_absolute_url ` to false. +* http: support :ref:`host rewrite ` in the dynamic forward proxy. +* http: support :ref:`disabling the filter per route ` in the grpc http1 reverse bridge filter. +* http: added the ability to :ref:`configure max connection duration ` for downstream connections. +* listeners: added :ref:`continue_on_listener_filters_timeout ` to configure whether a listener will still create a connection when listener filters time out. +* listeners: added :ref:`HTTP inspector listener filter `. +* listeners: added :ref:`connection balancer ` configuration for TCP listeners. * listeners: listeners now close the listening socket as part of the draining stage as soon as workers stop accepting their connections. * lua: extended ``httpCall()`` and ``respond()`` APIs to accept headers with entry values that can be a string or table of strings. * lua: extended ``dynamicMetadata:set()`` to allow setting complex values. * metrics_service: added support for flushing histogram buckets. -* outlier_detector: added :ref:`support for the grpc-status response header ` by mapping it to HTTP status. Guarded by envoy.reloadable_features.outlier_detection_support_for_grpc_status which defaults to true. 
+* outlier_detector: added :ref:`support for the grpc-status response header ` by mapping it to HTTP status. Guarded by envoy.reloadable_features.outlier_detection_support_for_grpc_status which defaults to true. * performance: new buffer implementation enabled by default (to disable add "--use-libevent-buffers 1" to the command-line arguments when starting Envoy). * performance: stats symbol table implementation (disabled by default; to test it, add "--use-fake-symbol-table 0" to the command-line arguments when starting Envoy). -* rbac: added support for DNS SAN as :ref:`principal_name `. -* redis: added :ref:`enable_command_stats ` to enable :ref:`per command statistics ` for upstream clusters. -* redis: added :ref:`read_policy ` to allow reading from redis replicas for Redis Cluster deployments. +* rbac: added support for DNS SAN as :ref:`principal_name `. +* redis: added :ref:`enable_command_stats ` to enable :ref:`per command statistics ` for upstream clusters. +* redis: added :ref:`read_policy ` to allow reading from redis replicas for Redis Cluster deployments. * redis: fixed a bug where the redis health checker ignored the upstream auth password. * redis: enable_hashtaging is always enabled when the upstream uses open source Redis cluster protocol. -* regex: introduced new :ref:`RegexMatcher ` type that +* regex: introduced new :ref:`RegexMatcher ` type that provides a safe regex implementation for untrusted user input. This type is now used in all configuration that processes user provided input. See :ref:`deprecated configuration details - ` for more information. -* rbac: added conditions to the policy, see :ref:`condition `. -* router: added :ref:`rq_retry_skipped_request_not_complete ` counter stat to router stats. -* router: :ref:`scoped routing ` is supported. -* router: added new :ref:`retriable-headers ` retry policy. Retries can now be configured to trigger by arbitrary response header matching. + ` for more information. +* rbac: added conditions to the policy, see :ref:`condition `. +* router: added :ref:`rq_retry_skipped_request_not_complete ` counter stat to router stats. +* router: :ref:`scoped routing ` is supported. +* router: added new :ref:`retriable-headers ` retry policy. Retries can now be configured to trigger by arbitrary response header matching. * router: added ability for most specific header mutations to take precedence, see :ref:`route configuration's most specific - header mutations wins flag `. -* router: added :ref:`respect_expected_rq_timeout ` that instructs ingress Envoy to respect :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress Envoy, when deriving timeout for upstream cluster. -* router: added new :ref:`retriable request headers ` to route configuration, to allow limiting buffering for retries and shadowing. -* router: added new :ref:`retriable request headers ` to retry policies. Retries can now be configured to only trigger on request header match. + header mutations wins flag `. +* router: added :ref:`respect_expected_rq_timeout ` that instructs ingress Envoy to respect :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress Envoy, when deriving timeout for upstream cluster. +* router: added new :ref:`retriable request headers ` to route configuration, to allow limiting buffering for retries and shadowing. +* router: added new :ref:`retriable request headers ` to retry policies. Retries can now be configured to only trigger on request header match. 
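A sketch of how the retriable-headers retry policy and retriable request headers mentioned above could be wired on a route; the header names and values are illustrative:

.. code-block:: yaml

   retry_policy:
     retry_on: retriable-headers
     # Retry only when the upstream response carries this header.
     retriable_headers:
     - name: x-upstream-pushback
       exact_match: "true"
     # Buffer for retries only when the request itself matches.
     retriable_request_headers:
     - name: x-retry-eligible
       exact_match: "true"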
* router: added the ability to match a route based on whether a TLS certificate has been - :ref:`presented ` by the + :ref:`presented ` by the downstream connection. * router check tool: added coverage reporting & enforcement. * router check tool: added comprehensive coverage reporting. @@ -93,68 +93,68 @@ Changes * router check tool: added coverage reporting for direct response routes. * runtime: allows for the ability to parse boolean values. * runtime: allows for the ability to parse integers as double values and vice-versa. -* sds: added :ref:`session_ticket_keys_sds_secret_config ` for loading TLS Session Ticket Encryption Keys using SDS API. +* sds: added :ref:`session_ticket_keys_sds_secret_config ` for loading TLS Session Ticket Encryption Keys using SDS API. * server: added a post initialization lifecycle event, in addition to the existing startup and shutdown events. -* server: added :ref:`per-handler listener stats ` and - :ref:`per-worker watchdog stats ` to help diagnosing event +* server: added :ref:`per-handler listener stats ` and + :ref:`per-worker watchdog stats ` to help diagnosing event loop imbalance and general performance issues. * stats: added unit support to histogram. * tcp_proxy: the default :ref:`idle_timeout - ` is now 1 hour. + ` is now 1 hour. * thrift_proxy: fixed crashing bug on invalid transport/protocol framing. * thrift_proxy: added support for stripping service name from method when using the multiplexed protocol. * tls: added verification of IP address SAN fields in certificates against configured SANs in the certificate validation context. * tracing: added support to the Zipkin reporter for sending list of spans as Zipkin JSON v2 and protobuf message over HTTP. certificate validation context. * tracing: added tags for gRPC response status and message. -* tracing: added :ref:`max_path_tag_length ` to support customizing the length of the request path included in the extracted `http.url `_ tag. -* upstream: added :ref:`an option ` that allows draining HTTP, TCP connection pools on cluster membership change. -* upstream: added :ref:`transport_socket_matches `, support using different transport socket config when connecting to different upstream endpoints within a cluster. -* upstream: added network filter chains to upstream connections, see :ref:`filters `. -* upstream: added new :ref:`failure-percentage based outlier detection ` mode. +* tracing: added :ref:`max_path_tag_length ` to support customizing the length of the request path included in the extracted `http.url `_ tag. +* upstream: added :ref:`an option ` that allows draining HTTP, TCP connection pools on cluster membership change. +* upstream: added :ref:`transport_socket_matches `, support using different transport socket config when connecting to different upstream endpoints within a cluster. +* upstream: added network filter chains to upstream connections, see :ref:`filters `. +* upstream: added new :ref:`failure-percentage based outlier detection ` mode. * upstream: uses p2c to select hosts for least-requests load balancers if all host weights are the same, even in cases where weights are not equal to 1. -* upstream: added :ref:`fail_traffic_on_panic ` to allow failing all requests to a cluster during panic state. +* upstream: added :ref:`fail_traffic_on_panic ` to allow failing all requests to a cluster during panic state. * zookeeper: parses responses and emits latency stats. 
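The safe regex matcher introduced above, and referenced by the ``safe_regex`` fields in the deprecations that follow, takes an engine plus a pattern. A sketch on a route match, with an illustrative expression:

.. code-block:: yaml

   match:
     safe_regex:
       # RE2 is the engine; an empty google_re2 config uses its defaults.
       google_re2: {}
       regex: "^/api/v[0-9]+/.*"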
Deprecated ---------- -* The ORIGINAL_DST_LB :ref:`load balancing policy ` is +* The ORIGINAL_DST_LB :ref:`load balancing policy ` is deprecated, use CLUSTER_PROVIDED policy instead when configuring an :ref:`original destination - cluster `. -* The `regex` field in :ref:`StringMatcher ` has been + cluster `. +* The `regex` field in :ref:`StringMatcher ` has been deprecated in favor of the ``safe_regex`` field. -* The `regex` field in :ref:`RouteMatch ` has been +* The `regex` field in :ref:`RouteMatch ` has been deprecated in favor of the ``safe_regex`` field. * The ``allow_origin`` and ``allow_origin_regex`` fields in :ref:`CorsPolicy - ` have been deprecated in favor of the + ` have been deprecated in favor of the ``allow_origin_string_match`` field. -* The ``pattern`` and ``method`` fields in :ref:`VirtualCluster ` +* The ``pattern`` and ``method`` fields in :ref:`VirtualCluster ` have been deprecated in favor of the ``headers`` field. -* The `regex_match` field in :ref:`HeaderMatcher ` has been +* The `regex_match` field in :ref:`HeaderMatcher ` has been deprecated in favor of the ``safe_regex_match`` field. * The ``value`` and ``regex`` fields in :ref:`QueryParameterMatcher - ` has been deprecated in favor of the ``string_match`` + ` has been deprecated in favor of the ``string_match`` and ``present_match`` fields. * The :option:`--allow-unknown-fields` command-line option, use :option:`--allow-unknown-static-fields` instead. * The use of HTTP_JSON_V1 :ref:`Zipkin collector endpoint version - ` or not explicitly + ` or not explicitly specifying it is deprecated, use HTTP_JSON or HTTP_PROTO instead. * The `operation_name` field in :ref:`HTTP connection manager - ` + ` has been deprecated in favor of the ``traffic_direction`` field in - :ref:`Listener `. The latter takes priority if + :ref:`Listener `. The latter takes priority if specified. -* The `tls_context` field in :ref:`Filter chain ` message - and :ref:`Cluster ` message have been deprecated in favor of +* The `tls_context` field in :ref:`Filter chain ` message + and :ref:`Cluster ` message have been deprecated in favor of ``transport_socket`` with name ``envoy.transport_sockets.tls``. The latter takes priority if specified. * The ``use_http2`` field in - :ref:`HTTP health checker ` has been deprecated in + :ref:`HTTP health checker ` has been deprecated in favor of the ``codec_client_type`` field. -* The use of :ref:`gRPC bridge filter ` for +* The use of :ref:`gRPC bridge filter ` for gRPC stats has been deprecated in favor of the dedicated :ref:`gRPC stats - filter ` + filter ` * Ext_authz filter stats ``ok``, ``error``, ``denied``, ``failure_mode_allowed`` in *cluster..ext_authz.* namespace is deprecated. Use *http..ext_authz.* namespace to access same counters instead. diff --git a/docs/root/version_history/v1.12.3.rst b/docs/root/version_history/v1.12.3.rst index 53b87280ad7b..a5cc8b2241ac 100644 --- a/docs/root/version_history/v1.12.3.rst +++ b/docs/root/version_history/v1.12.3.rst @@ -6,6 +6,6 @@ Changes * buffer: force copy when appending small slices to OwnedImpl buffer to avoid fragmentation. * http: added HTTP/1.1 flood protection. Can be temporarily disabled using the runtime feature ``envoy.reloadable_features.http1_flood_protection``. -* listeners: fixed issue where :ref:`TLS inspector listener filter ` could have been bypassed by a client using only TLS 1.3. -* rbac: added :ref:`url_path ` for matching URL path without the query and fragment string. 
+* listeners: fixed issue where :ref:`TLS inspector listener filter ` could have been bypassed by a client using only TLS 1.3. +* rbac: added :ref:`url_path ` for matching URL path without the query and fragment string. * sds: fixed the SDS vulnerability that TLS validation context (e.g., subject alt name or hash) cannot be effectively validated in some cases. diff --git a/docs/root/version_history/v1.12.4.rst b/docs/root/version_history/v1.12.4.rst index 7b606d34dbce..c40c72182b59 100644 --- a/docs/root/version_history/v1.12.4.rst +++ b/docs/root/version_history/v1.12.4.rst @@ -4,5 +4,5 @@ Changes ------- -* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. +* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. * http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. diff --git a/docs/root/version_history/v1.12.5.rst b/docs/root/version_history/v1.12.5.rst index dcca35f09aef..4ceffcde602f 100644 --- a/docs/root/version_history/v1.12.5.rst +++ b/docs/root/version_history/v1.12.5.rst @@ -4,8 +4,8 @@ Changes ------- * buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. -* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. * http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. -* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. -* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. diff --git a/docs/root/version_history/v1.13.0.rst b/docs/root/version_history/v1.13.0.rst index b29783554703..fe9364b5573f 100644 --- a/docs/root/version_history/v1.13.0.rst +++ b/docs/root/version_history/v1.13.0.rst @@ -4,82 +4,82 @@ Changes ------- -* access log: added FILTER_STATE :ref:`access log formatters ` and gRPC access logger. -* admin: added the ability to filter :ref:`/config_dump `. -* access log: added a :ref:`typed JSON logging mode ` to output access logs in JSON format with non-string values -* access log: fixed UPSTREAM_LOCAL_ADDRESS :ref:`access log formatters ` to work for http requests +* access log: added FILTER_STATE :ref:`access log formatters ` and gRPC access logger. +* admin: added the ability to filter :ref:`/config_dump `. 
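The per-listener and global connection limits referenced in the v1.12.5 notes above are plain runtime keys, so they can be set in any runtime layer. A sketch using a static layer; the listener name ``ingress_http`` and both values are illustrative:

.. code-block:: yaml

   static_layer:
     # Cap active downstream connections for one named listener.
     envoy.resource_limits.listener.ingress_http.connection_limit: 10000
     # Process-wide cap across all listeners.
     overload.global_downstream_max_connections: 50000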
+* access log: added a :ref:`typed JSON logging mode ` to output access logs in JSON format with non-string values +* access log: fixed UPSTREAM_LOCAL_ADDRESS :ref:`access log formatters ` to work for http requests * access log: added HOSTNAME. * api: remove all support for v1 -* api: added ability to specify `mode` for :ref:`Pipe `. +* api: added ability to specify `mode` for :ref:`Pipe `. * api: support for the v3 xDS API added. See :ref:`api_supported_versions`. * aws_request_signing: added new alpha HTTP AWS request signing filter * buffer: remove old implementation * build: official released binary is now built against libc++. -* cluster: added :ref:`aggregate cluster ` that allows load balancing between clusters. +* cluster: added :ref:`aggregate cluster ` that allows load balancing between clusters. * config: all category names of internal envoy extensions are prefixed with the 'envoy.' prefix to follow the reverse DNS naming notation. * decompressor: remove decompressor hard assert failure and replace with an error flag. -* ext_authz: added :ref:`configurable ability ` to send the :ref:`certificate ` to the `ext_authz` service. +* ext_authz: added :ref:`configurable ability ` to send the :ref:`certificate ` to the `ext_authz` service. * fault: fixed an issue where the http fault filter would repeatedly check the percentage of abort/delay when the ``x-envoy-downstream-service-cluster`` header was included in the request to ensure that the actual percentage of abort/delay matches the configuration of the filter. * health check: gRPC health checker sets the gRPC deadline to the configured timeout duration. -* health check: added :ref:`TlsOptions ` to allow TLS configuration overrides. -* health check: added :ref:`service_name_matcher ` to better compare the service name patterns for health check identity. +* health check: added :ref:`TlsOptions ` to allow TLS configuration overrides. +* health check: added :ref:`service_name_matcher ` to better compare the service name patterns for health check identity. * http: added strict validation that CONNECT is refused as it is not yet implemented. This can be reversed temporarily by setting the runtime feature ``envoy.reloadable_features.strict_method_validation`` to false. -* http: added support for http1 trailers. To enable use :ref:`enable_trailers `. +* http: added support for http1 trailers. To enable use :ref:`enable_trailers `. * http: added the ability to sanitize headers nominated by the Connection header. This new behavior is guarded by ``envoy.reloadable_features.connection_header_sanitization`` which defaults to true. * http: blocks unsupported transfer-encodings. Can be reverted temporarily by setting runtime feature ``envoy.reloadable_features.reject_unsupported_transfer_encodings`` to false. -* http: support :ref:`auto_host_rewrite_header ` in the dynamic forward proxy. -* jwt_authn: added :ref:`allow_missing ` option that accepts request without token but rejects bad request with bad tokens. -* jwt_authn: added :ref:`bypass_cors_preflight ` to allow bypassing the CORS preflight request. -* lb_subset_config: new fallback policy for selectors: :ref:`KEYS_SUBSET ` -* listeners: added :ref:`reuse_port ` option. -* logger: added :ref:`--log-format-escaped ` command line option to escape newline characters in application logs. -* ratelimit: added :ref:`local rate limit ` network filter. -* rbac: added support for matching all subject alt names instead of first in :ref:`principal_name `. 
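A sketch combining the ``reuse_port`` listener option noted above with the exact connection balancer from the v1.12.0 notes; the listener name, address, and port are illustrative:

.. code-block:: yaml

   - name: ingress_http
     address:
       socket_address: { address: 0.0.0.0, port_value: 8080 }
     # One listening socket per worker via SO_REUSEPORT.
     reuse_port: true
     # Exactly balance accepted connections across workers.
     connection_balance_config:
       exact_balance: {}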
+* http: support :ref:`auto_host_rewrite_header ` in the dynamic forward proxy. +* jwt_authn: added :ref:`allow_missing ` option that accepts request without token but rejects bad request with bad tokens. +* jwt_authn: added :ref:`bypass_cors_preflight ` to allow bypassing the CORS preflight request. +* lb_subset_config: new fallback policy for selectors: :ref:`KEYS_SUBSET ` +* listeners: added :ref:`reuse_port ` option. +* logger: added :ref:`--log-format-escaped ` command line option to escape newline characters in application logs. +* ratelimit: added :ref:`local rate limit ` network filter. +* rbac: added support for matching all subject alt names instead of first in :ref:`principal_name `. * redis: performance improvement for larger split commands by avoiding string copies. * redis: correctly follow MOVE/ASK redirection for mirrored clusters. -* redis: add :ref:`host_degraded_refresh_threshold ` and :ref:`failure_refresh_threshold ` to refresh topology when nodes are degraded or when requests fails. -* router: added histograms to show timeout budget usage to the :ref:`cluster stats `. +* redis: add :ref:`host_degraded_refresh_threshold ` and :ref:`failure_refresh_threshold ` to refresh topology when nodes are degraded or when requests fails. +* router: added histograms to show timeout budget usage to the :ref:`cluster stats `. * router check tool: added support for testing and marking coverage for routes of runtime fraction 0. -* router: added :ref:`request_mirror_policies ` to support sending multiple mirrored requests in one route. -* router: added support for REQ(header-name) :ref:`header formatter `. -* router: added support for percentage-based :ref:`retry budgets ` -* router: allow using a :ref:`query parameter ` for HTTP consistent hashing. +* router: added :ref:`request_mirror_policies ` to support sending multiple mirrored requests in one route. +* router: added support for REQ(header-name) :ref:`header formatter `. +* router: added support for percentage-based :ref:`retry budgets ` +* router: allow using a :ref:`query parameter ` for HTTP consistent hashing. * router: exposed DOWNSTREAM_REMOTE_ADDRESS as custom HTTP request/response headers. -* router: added support for :ref:`max_internal_redirects ` for configurable maximum internal redirect hops. +* router: added support for :ref:`max_internal_redirects ` for configurable maximum internal redirect hops. * router: skip the Location header when the response code is not a 201 or a 3xx. -* router: added :ref:`auto_sni ` to support setting SNI to transport socket for new upstream connections based on the downstream HTTP host/authority header. +* router: added :ref:`auto_sni ` to support setting SNI to transport socket for new upstream connections based on the downstream HTTP host/authority header. * router: added support for HOSTNAME :ref:`header formatter - `. + `. * server: added the :option:`--disable-extensions` CLI option, to disable extensions at startup. * server: fixed a bug in config validation for configs with runtime layers. -* server: added :ref:`workers_started ` that indicates whether listeners have been fully initialized on workers. -* tcp_proxy: added :ref:`ClusterWeight.metadata_match `. -* tcp_proxy: added :ref:`hash_policy `. +* server: added :ref:`workers_started ` that indicates whether listeners have been fully initialized on workers. +* tcp_proxy: added :ref:`ClusterWeight.metadata_match `. +* tcp_proxy: added :ref:`hash_policy `. * thrift_proxy: added support for cluster header based routing. 
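For the HTTP/1.1 trailer support noted in the v1.13.0 entries and the header key formatting from the v1.12.0 entries, a sketch of the connection manager's ``http_protocol_options`` (other connection manager fields omitted):

.. code-block:: yaml

   http_protocol_options:
     # Proxy HTTP/1.1 trailers instead of dropping them.
     enable_trailers: true
     # Serialize HTTP/1.1 header keys in proper case (e.g. Content-Length).
     header_key_format:
       proper_case_words: {}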
* thrift_proxy: added stats to the router filter. * tls: remove TLS 1.0 and 1.1 from client defaults -* tls: added support for :ref:`generic string matcher ` for subject alternative names. -* tracing: added the ability to set custom tags on both the :ref:`HTTP connection manager ` and the :ref:`HTTP route `. +* tls: added support for :ref:`generic string matcher ` for subject alternative names. +* tracing: added the ability to set custom tags on both the :ref:`HTTP connection manager ` and the :ref:`HTTP route `. * tracing: added upstream_address tag. -* tracing: added initial support for AWS X-Ray (local sampling rules only) :ref:`X-Ray Tracing `. +* tracing: added initial support for AWS X-Ray (local sampling rules only) :ref:`X-Ray Tracing `. * tracing: added tags for gRPC request path, authority, content-type and timeout. -* udp: added initial support for :ref:`UDP proxy ` +* udp: added initial support for :ref:`UDP proxy ` Deprecated ---------- * The `request_headers_for_tags` field in :ref:`HTTP connection manager - ` + ` has been deprecated in favor of the :ref:`custom_tags - ` field. + ` field. * The `verify_subject_alt_name` field in :ref:`Certificate Validation Context - ` + ` has been deprecated in favor of the :ref:`match_subject_alt_names - ` field. -* The ``request_mirror_policy`` field in :ref:`RouteMatch ` has been deprecated in + ` field. +* The ``request_mirror_policy`` field in :ref:`RouteMatch ` has been deprecated in favor of the ``request_mirror_policies`` field. * The ``service_name`` field in - :ref:`HTTP health checker ` has been deprecated in + :ref:`HTTP health checker ` has been deprecated in favor of the ``service_name_matcher`` field. * The v2 xDS API is deprecated. It will be supported by Envoy until EOY 2020. See :ref:`api_supported_versions`. diff --git a/docs/root/version_history/v1.13.1.rst b/docs/root/version_history/v1.13.1.rst index 46d05ebc9d5c..1b7b97a20018 100644 --- a/docs/root/version_history/v1.13.1.rst +++ b/docs/root/version_history/v1.13.1.rst @@ -6,6 +6,6 @@ Changes * buffer: force copy when appending small slices to OwnedImpl buffer to avoid fragmentation. * http: added HTTP/1.1 flood protection. Can be temporarily disabled using the runtime feature ``envoy.reloadable_features.http1_flood_protection``. -* listeners: fixed issue where :ref:`TLS inspector listener filter ` could have been bypassed by a client using only TLS 1.3. -* rbac: added :ref:`url_path ` for matching URL path without the query and fragment string. +* listeners: fixed issue where :ref:`TLS inspector listener filter ` could have been bypassed by a client using only TLS 1.3. +* rbac: added :ref:`url_path ` for matching URL path without the query and fragment string. * sds: fixed the SDS vulnerability that TLS validation context (e.g., subject alt name or hash) cannot be effectively validated in some cases. diff --git a/docs/root/version_history/v1.13.2.rst b/docs/root/version_history/v1.13.2.rst index 5ef942997b7c..fb8703191b37 100644 --- a/docs/root/version_history/v1.13.2.rst +++ b/docs/root/version_history/v1.13.2.rst @@ -4,5 +4,5 @@ Changes ------- -* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. 
+* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. * http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. diff --git a/docs/root/version_history/v1.13.3.rst b/docs/root/version_history/v1.13.3.rst index 8cdbfe128c93..a83da6a749a6 100644 --- a/docs/root/version_history/v1.13.3.rst +++ b/docs/root/version_history/v1.13.3.rst @@ -5,8 +5,8 @@ Changes ------- * buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. -* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. * http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. -* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. -* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. diff --git a/docs/root/version_history/v1.14.0.rst b/docs/root/version_history/v1.14.0.rst index b677dac0f96d..302b9dc44e8f 100644 --- a/docs/root/version_history/v1.14.0.rst +++ b/docs/root/version_history/v1.14.0.rst @@ -5,85 +5,85 @@ Changes ------- * access log: access logger extensions use the "envoy.access_loggers" name space. A mapping - of extension names is available in the :ref:`deprecated ` documentation. -* access log: added support for ``%DOWNSTREAM_LOCAL_PORT%`` :ref:`access log formatters `. + of extension names is available in the :ref:`deprecated ` documentation. +* access log: added support for ``%DOWNSTREAM_LOCAL_PORT%`` :ref:`access log formatters `. * access log: fixed ``%DOWSTREAM_DIRECT_REMOTE_ADDRESS%`` when used with PROXY protocol listener filter. -* access log: introduced :ref:`connection-level access loggers `. +* access log: introduced :ref:`connection-level access loggers `. * adaptive concurrency: fixed bug that allowed concurrency limits to drop below the configured minimum. * adaptive concurrency: minRTT is now triggered when the minimum concurrency is maintained for 5 consecutive sampling intervals. -* admin: added support for displaying ip address subject alternate names in :ref:`certs ` end point. +* admin: added support for displaying ip address subject alternate names in :ref:`certs ` end point. * admin: added :http:post:`/reopen_logs` endpoint to control log rotation. * api: froze v2 xDS API. New feature development in the API should occur in v3 xDS. While the v2 xDS API has been deprecated since 1.13.0, it will continue to be supported by Envoy until EOY 2020. See :ref:`api_supported_versions`. -* aws_lambda: added :ref:`AWS Lambda filter ` that converts HTTP requests to Lambda +* aws_lambda: added :ref:`AWS Lambda filter ` that converts HTTP requests to Lambda invokes. 
This effectively makes Envoy act as an egress gateway to AWS Lambda. * aws_request_signing: a few fixes so that it works with S3. -* config: added stat :ref:`update_time `. -* config: use type URL to select an extension whenever the config type URL (or its previous versions) uniquely identify a typed extension, see :ref:`extension configuration `. +* config: added stat :ref:`update_time `. +* config: use type URL to select an extension whenever the config type URL (or its previous versions) uniquely identify a typed extension, see :ref:`extension configuration `. * datasource: added retry policy for remote async data source. -* dns: added support for :ref:`dns_failure_refresh_rate ` for the :ref:`dns cache ` to set the DNS refresh rate during failures. +* dns: added support for :ref:`dns_failure_refresh_rate ` for the :ref:`dns cache ` to set the DNS refresh rate during failures. * dns: the STRICT_DNS cluster now only resolves to 0 hosts if DNS resolution successfully returns 0 hosts. -* eds: added :ref:`hostname ` field for endpoints and :ref:`hostname ` field for endpoint's health check config. This enables auto host rewrite and customizing the host header during health checks for eds endpoints. +* eds: added :ref:`hostname ` field for endpoints and :ref:`hostname ` field for endpoint's health check config. This enables auto host rewrite and customizing the host header during health checks for eds endpoints. * ext_authz: disabled the use of lowercase string matcher for headers matching in HTTP-based ``ext_authz``. Can be reverted temporarily by setting runtime feature ``envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher`` to false. -* fault: added support for controlling abort faults with :ref:`HTTP header fault configuration ` to the HTTP fault filter. +* fault: added support for controlling abort faults with :ref:`HTTP header fault configuration ` to the HTTP fault filter. * grpc-json: added support for building HTTP request into `google.api.HttpBody `_. * grpc-stats: added option to limit which messages stats are created for. * http: added HTTP/1.1 flood protection. Can be temporarily disabled using the runtime feature ``envoy.reloadable_features.http1_flood_protection``. -* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. -* http: added :ref:`max_stream_duration ` to specify the duration of existing streams. See :ref:`connection and stream timeouts `. +* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. +* http: added :ref:`max_stream_duration ` to specify the duration of existing streams. See :ref:`connection and stream timeouts `. * http: connection header sanitizing has been modified to always sanitize if there is no upgrade, including when an h2c upgrade attempt has been removed. * http: fixed a bug that could send extra METADATA frames and underflow memory when encoding METADATA frames on a connection that was dispatching data. * http: fixing a bug in HTTP/1.0 responses where Connection: keep-alive was not appended for connections which were kept alive. 
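A sketch of the ``common_http_protocol_options`` knobs from the v1.14.0 entries above on an HTTP connection manager; the values are illustrative, not recommendations:

.. code-block:: yaml

   common_http_protocol_options:
     # Reject requests whose header names contain underscores.
     headers_with_underscores_action: REJECT_REQUEST
     # Bound the lifetime of any single stream.
     max_stream_duration: 300s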
* http: http filter extensions use the "envoy.filters.http" name space. A mapping - of extension names is available in the :ref:`deprecated ` documentation. + of extension names is available in the :ref:`deprecated ` documentation. * http: the runtime feature ``http.connection_manager.log_flood_exception`` is removed and replaced with a connection access log response code. * http: upgrade parser library, which removes support for "identity" transfer-encoding value. * listener filters: listener filter extensions use the "envoy.filters.listener" name space. A - mapping of extension names is available in the :ref:`deprecated ` documentation. -* listeners: added :ref:`listener filter matcher api ` to disable individual listener filter on matching downstream connections. -* loadbalancing: added support for using hostname for consistent hash loadbalancing via :ref:`consistent_hash_lb_config `. -* loadbalancing: added support for :ref:`retry host predicates ` in conjunction with consistent hashing load balancers (ring hash and maglev). + mapping of extension names is available in the :ref:`deprecated ` documentation. +* listeners: added :ref:`listener filter matcher api ` to disable individual listener filter on matching downstream connections. +* loadbalancing: added support for using hostname for consistent hash loadbalancing via :ref:`consistent_hash_lb_config `. +* loadbalancing: added support for :ref:`retry host predicates ` in conjunction with consistent hashing load balancers (ring hash and maglev). * lua: added a parameter to ``httpCall`` that makes it possible to have the call be asynchronous. * lua: added moonjit support. -* mongo: the stat emitted for queries without a max time set in the :ref:`MongoDB filter ` was modified to emit correctly for Mongo v3.2+. -* network filters: added a :ref:`direct response filter `. +* mongo: the stat emitted for queries without a max time set in the :ref:`MongoDB filter ` was modified to emit correctly for Mongo v3.2+. +* network filters: added a :ref:`direct response filter `. * network filters: network filter extensions use the "envoy.filters.network" name space. A mapping - of extension names is available in the :ref:`deprecated ` documentation. -* rbac: added :ref:`remote_ip ` and :ref:`direct_remote_ip ` for matching downstream remote IP address. -* rbac: deprecated :ref:`source_ip ` with :ref:`direct_remote_ip ` and :ref:`remote_ip `. -* request_id_extension: added an ability to extend request ID handling at :ref:`HTTP connection manager `. -* retry: added a retry predicate that :ref:`rejects hosts based on metadata. `. + of extension names is available in the :ref:`deprecated ` documentation. +* rbac: added :ref:`remote_ip ` and :ref:`direct_remote_ip ` for matching downstream remote IP address. +* rbac: deprecated :ref:`source_ip ` with :ref:`direct_remote_ip ` and :ref:`remote_ip `. +* request_id_extension: added an ability to extend request ID handling at :ref:`HTTP connection manager `. +* retry: added a retry predicate that :ref:`rejects hosts based on metadata. `. * router: added ability to set attempt count in downstream response, see :ref:`virtual host's include response - attempt count config `. -* router: added additional stats for :ref:`virtual clusters `. -* router: added :ref:`auto_san_validation ` to support overrriding SAN validation to transport socket for new upstream connections based on the downstream HTTP host/authority header. + attempt count config `. +* router: added additional stats for :ref:`virtual clusters `. 
+* router: added :ref:`auto_san_validation ` to support overrriding SAN validation to transport socket for new upstream connections based on the downstream HTTP host/authority header. * router: added the ability to match a route based on whether a downstream TLS connection certificate has been - :ref:`validated `. + :ref:`validated `. * router: added support for :ref:`regex_rewrite - ` for path rewriting using regular expressions and capture groups. -* router: added support for `%DOWNSTREAM_LOCAL_PORT%` :ref:`header formatter `. -* router: don't ignore :ref:`per_try_timeout ` when the :ref:`global route timeout ` is disabled. -* router: strip whitespace for :ref:`retry_on `, :ref:`grpc-retry-on header ` and :ref:`retry-on header `. + ` for path rewriting using regular expressions and capture groups. +* router: added support for `%DOWNSTREAM_LOCAL_PORT%` :ref:`header formatter `. +* router: don't ignore :ref:`per_try_timeout ` when the :ref:`global route timeout ` is disabled. +* router: strip whitespace for :ref:`retry_on `, :ref:`grpc-retry-on header ` and :ref:`retry-on header `. * runtime: enabling the runtime feature ``envoy.deprecated_features.allow_deprecated_extension_names`` disables the use of deprecated extension names. * runtime: integer values may now be parsed as booleans. -* sds: added :ref:`GenericSecret ` to support secret of generic type. -* sds: added :ref:`certificate rotation ` support for certificates in static resources. +* sds: added :ref:`GenericSecret ` to support secret of generic type. +* sds: added :ref:`certificate rotation ` support for certificates in static resources. * server: the SIGUSR1 access log reopen warning now is logged at info level. * stat sinks: stat sink extensions use the "envoy.stat_sinks" name space. A mapping of extension - names is available in the :ref:`deprecated ` documentation. + names is available in the :ref:`deprecated ` documentation. * thrift_proxy: added router filter stats to docs. -* tls: added configuration to disable stateless TLS session resumption :ref:`disable_stateless_session_resumption `. +* tls: added configuration to disable stateless TLS session resumption :ref:`disable_stateless_session_resumption `. * tracing: added gRPC service configuration to the OpenCensus Stackdriver and OpenCensus Agent tracers. * tracing: tracer extensions use the "envoy.tracers" name space. A mapping of extension names is - available in the :ref:`deprecated ` documentation. -* upstream: added ``upstream_rq_retry_limit_exceeded`` to :ref:`cluster `, and :ref:`virtual cluster ` stats. -* upstream: changed load distribution algorithm when all priorities enter :ref:`panic mode `. + available in the :ref:`deprecated ` documentation. +* upstream: added ``upstream_rq_retry_limit_exceeded`` to :ref:`cluster `, and :ref:`virtual cluster ` stats. +* upstream: changed load distribution algorithm when all priorities enter :ref:`panic mode `. * upstream: combined HTTP/1 and HTTP/2 connection pool code. This means that circuit breaker limits for both requests and connections apply to both pool types. Also, HTTP/2 now has the option to limit concurrent requests on a connection, and allow multiple draining @@ -91,7 +91,7 @@ Changes period by disabling runtime feature ``envoy.reloadable_features.new_http1_connection_pool_behavior`` or ``envoy.reloadable_features.new_http2_connection_pool_behavior`` and then re-configure your clusters or restart Envoy. The behavior will not switch until the connection pools are recreated. 
The new - circuit breaker behavior is described :ref:`here `. + circuit breaker behavior is described :ref:`here `. * zlib: by default zlib is initialized to use its default strategy (Z_DEFAULT_STRATEGY) instead of the fixed one (Z_FIXED). The difference is that the use of dynamic Huffman codes is enabled now resulting in better compression ratio for normal data. @@ -101,7 +101,7 @@ Deprecated * The previous behavior for upstream connection pool circuit breaking described `here `_ has - been deprecated in favor of the new behavior described :ref:`here `. + been deprecated in favor of the new behavior described :ref:`here `. * Access Logger, Listener Filter, HTTP Filter, Network Filter, Stats Sink, and Tracer names have been deprecated in favor of the extension name from the envoy build system. Disable the runtime feature "envoy.deprecated_features.allow_deprecated_extension_names" to disallow the deprecated @@ -166,27 +166,27 @@ Deprecated * Tracers * The previous behavior of auto ignoring case in headers matching: - :ref:`allowed_headers `, - :ref:`allowed_upstream_headers `, - and :ref:`allowed_client_headers ` + :ref:`allowed_headers `, + :ref:`allowed_upstream_headers `, + and :ref:`allowed_client_headers ` of HTTP-based ``ext_authz`` has been deprecated in favor of explicitly setting the - :ref:`ignore_case ` field. + :ref:`ignore_case ` field. * The ``header_fields``, ``custom_header_fields``, and ``additional_headers`` fields for the route checker tool have been deprecated in favor of ``request_header_fields``, ``response_header_fields``, ``additional_request_headers``, and ``additional_response_headers``. * The ``content_length``, ``content_type``, ``disable_on_etag_header`` and ``remove_accept_encoding_header`` - fields in :ref:`HTTP Gzip filter config ` have + fields in :ref:`HTTP Gzip filter config ` have been deprecated in favor of ``compressor``. -* The statistics counter ``header_gzip`` in :ref:`HTTP Gzip filter ` +* The statistics counter ``header_gzip`` in :ref:`HTTP Gzip filter ` has been deprecated in favor of ``header_compressor_used``. * Support for the undocumented HTTP/1.1 ``:no-chunks`` pseudo-header has been removed. If an extension was using this it can achieve the same behavior via the new ``http1StreamEncoderOptions()`` API. * The grpc_stats filter behavior of by default creating a new stat for every message type seen is deprecated. The default will switch to only creating a fixed set of stats. The previous behavior can be enabled by enabling - :ref:`stats_for_all_methods `, + :ref:`stats_for_all_methods `, and the previous default can be enabled until the end of the deprecation period by enabling runtime feature ``envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default``. -* The :ref:`source_ip ` field in +* The :ref:`source_ip ` field in `RBAC `_ has been deprecated - in favor of :ref:`direct_remote_ip ` and - :ref:`remote_ip `. + in favor of :ref:`direct_remote_ip ` and + :ref:`remote_ip `. diff --git a/docs/root/version_history/v1.14.2.rst b/docs/root/version_history/v1.14.2.rst index c7d4731d865b..a9867a9afe0c 100644 --- a/docs/root/version_history/v1.14.2.rst +++ b/docs/root/version_history/v1.14.2.rst @@ -5,10 +5,10 @@ Changes ------- * http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. 
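The regex path rewriting from the v1.14.0 notes pairs an RE2 pattern with a substitution that may reference capture groups. A sketch on a route action; the cluster name and the rewrite itself are illustrative:

.. code-block:: yaml

   route:
     cluster: backend
     regex_rewrite:
       pattern:
         google_re2: {}
         regex: "^/service/([^/]+)(/.*)$"
       substitution: "\\2/instance/\\1"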
-* http: the :ref:`stream_idle_timeout ` +* http: the :ref:`stream_idle_timeout ` now also defends against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. -* listener: Add runtime support for :ref:`per-listener limits ` on +* listener: Add runtime support for :ref:`per-listener limits ` on active/accepted connections. -* overload management: Add runtime support for :ref:`global limits ` +* overload management: Add runtime support for :ref:`global limits ` on active/accepted connections. diff --git a/docs/root/version_history/v1.14.3.rst b/docs/root/version_history/v1.14.3.rst index 523a4fc9a607..66526566ea0a 100644 --- a/docs/root/version_history/v1.14.3.rst +++ b/docs/root/version_history/v1.14.3.rst @@ -4,8 +4,8 @@ Changes ------- * buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. -* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. * http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. -* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. -* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. diff --git a/docs/root/version_history/v1.14.7.rst b/docs/root/version_history/v1.14.7.rst index 041b5da018f8..d476b23bd2e0 100644 --- a/docs/root/version_history/v1.14.7.rst +++ b/docs/root/version_history/v1.14.7.rst @@ -2,7 +2,7 @@ ======================= Changes ------- -* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. +* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. * http: fixed a remotely exploitable integer overflow via a very large grpc-timeout value causes undefined behavior. * http: fixed bugs in datadog and squash filter's handling of responses with no bodies. * http: fixed URL parsing for HTTP/1.1 fully qualified URLs and connect requests containing IPv6 addresses. diff --git a/docs/root/version_history/v1.15.0.rst b/docs/root/version_history/v1.15.0.rst index d565e35bb5cc..a730b9d28391 100644 --- a/docs/root/version_history/v1.15.0.rst +++ b/docs/root/version_history/v1.15.0.rst @@ -8,7 +8,7 @@ Incompatible Behavior Changes * build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. * client_ssl_auth: the ``auth_ip_white_list`` stat has been renamed to - :ref:`auth_ip_allowlist `. + :ref:`auth_ip_allowlist `. * header to metadata: on_header_missing rules with empty values are now rejected (they were skipped before). * router: path_redirect now keeps query string by default. 
This behavior may be reverted by setting runtime feature ``envoy.reloadable_features.preserve_query_string_in_path_redirects`` to false. * tls: fixed a bug where wildcard matching for "\*.foo.com" also matched domains of the form "a.b.foo.com". This behavior can be temporarily reverted by setting runtime feature ``envoy.reloadable_features.fix_wildcard_matching`` to false. @@ -17,9 +17,9 @@ Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* -* access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for logged / dropped logs. This can be reverted temporarily by setting runtime feature ``envoy.reloadable_features.disallow_unbounded_access_logs`` to false. +* access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for logged / dropped logs. This can be reverted temporarily by setting runtime feature ``envoy.reloadable_features.disallow_unbounded_access_logs`` to false. * build: runs as non-root inside Docker containers. Existing behaviour can be restored by setting the environment variable ``ENVOY_UID`` to ``0``. ``ENVOY_UID`` and ``ENVOY_GID`` can be used to set the envoy user's ``uid`` and ``gid`` respectively. -* health check: in the health check filter the :ref:`percentage of healthy servers in upstream clusters ` is now interpreted as an integer. +* health check: in the health check filter the :ref:`percentage of healthy servers in upstream clusters ` is now interpreted as an integer. * hot restart: added the option :option:`--use-dynamic-base-id` to select an unused base ID at startup and the option :option:`--base-id-path` to write the base id to a file (for reuse with later hot restarts). * http: changed early error path for HTTP/1.1 so that responses consistently flow through the http connection manager, and the http filter chains. This behavior may be temporarily reverted by setting runtime feature ``envoy.reloadable_features.early_errors_via_hcm`` to false. * http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature ``envoy.reloadable_features.fix_connection_close`` to false. @@ -31,7 +31,7 @@ Minor Behavior Changes * http: upstream connections will now automatically set ALPN when this value is not explicitly set elsewhere (e.g. on the upstream TLS config). This behavior may be temporarily reverted by setting runtime feature ``envoy.reloadable_features.http_default_alpn`` to false. * listener: fixed a bug where when a static listener fails to be added to a worker, the listener was not removed from the active listener list. * router: extended to allow retries of streaming or incomplete requests. This removes stat ``rq_retry_skipped_request_not_complete``. -* router: extended to allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. +* router: extended to allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. Bug Fixes --------- @@ -43,22 +43,22 @@ Bug Fixes * buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. * grpc-json: fixed a bug when in trailers only gRPC response (e.g. error) HTTP status code is not being re-written. * http: fixed a bug in the grpc_http1_reverse_bridge filter where header-only requests were forwarded with a non-zero content length.
-* http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes ` is enabled. -* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` +* http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes ` is enabled. +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. * http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. * http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature ``envoy.reloadable_features.fix_connection_close`` to false. -* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. -* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. * prometheus stats: fixed the sort order of output lines to comply with the standard. -* udp: the :ref:`reuse_port ` listener option must now be +* udp: the :ref:`reuse_port ` listener option must now be specified for UDP listeners if concurrency is > 1. This previously crashed so is considered a bug fix. * upstream: fixed a bug where Envoy would panic when receiving a GRPC SERVICE_UNKNOWN status on the health check. Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` * http: removed legacy connection pool code and their runtime features: ``envoy.reloadable_features.new_http1_connection_pool_behavior`` and ``envoy.reloadable_features.new_http2_connection_pool_behavior``. @@ -66,101 +66,101 @@ Removed Config or Runtime New Features ------------ -* access loggers: added file access logger config :ref:`log_format `. +* access loggers: added file access logger config :ref:`log_format `. * access loggers: added GRPC_STATUS operator on logging format. -* access loggers: added gRPC access logger config added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. -* access loggers: extended specifier for FilterStateFormatter to output :ref:`unstructured log string `. -* admin: added support for dumping EDS config at :ref:`/config_dump?include_eds `. -* aggregate cluster: made route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. +* access loggers: added gRPC access logger config added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* access loggers: extended specifier for FilterStateFormatter to output :ref:`unstructured log string `. +* admin: added support for dumping EDS config at :ref:`/config_dump?include_eds `. +* aggregate cluster: made route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. 
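To make the file access logger ``log_format`` entry above more concrete, here is a minimal, hedged sketch of how it might be attached to a connection manager; the log path and the format string are illustrative placeholders rather than values taken from this release, and the field names follow the v3 API as best understood.

.. code-block:: yaml

  access_log:
  - name: envoy.access_loggers.file
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
      path: /var/log/envoy/access.log   # illustrative path
      log_format:
        # text_format accepts the usual command operators; json_format is the structured alternative.
        text_format: "[%START_TIME%] %REQ(:METHOD)% %REQ(:PATH)% %RESPONSE_CODE% %DURATION%\n"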
* build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. * build: official released binary is now built with Clang 10.0.0. -* cluster: added an extension point for configurable :ref:`upstreams `. -* compressor: exposed generic :ref:`compressor ` filter to users. -* config: added :ref:`identifier ` stat that reflects control plane identifier. -* config: added :ref:`version_text ` stat that reflects xDS version. -* decompressor: exposed generic :ref:`decompressor ` filter to users. -* dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. -* dynamic forward proxy: added configurable :ref:`circuit breakers ` for resolver on DNS cache. +* cluster: added an extension point for configurable :ref:`upstreams `. +* compressor: exposed generic :ref:`compressor ` filter to users. +* config: added :ref:`identifier ` stat that reflects control plane identifier. +* config: added :ref:`version_text ` stat that reflects xDS version. +* decompressor: exposed generic :ref:`decompressor ` filter to users. +* dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. +* dynamic forward proxy: added configurable :ref:`circuit breakers ` for resolver on DNS cache. This behavior can be temporarily disabled by the runtime feature ``envoy.reloadable_features.enable_dns_cache_circuit_breakers``. - If this runtime feature is disabled, the upstream circuit breakers for the cluster will be used even if the :ref:`DNS Cache circuit breakers ` are configured. -* dynamic forward proxy: added :ref:`allow_insecure_cluster_options ` to allow disabling of auto_san_validation and auto_sni. -* ext_authz filter: added :ref:`v2 deny_at_disable `, :ref:`v3 deny_at_disable `. This allows force denying protected paths while filter gets disabled, by setting this key to true. -* ext_authz filter: added API version field for both :ref:`HTTP ` - and :ref:`Network ` filters to explicitly set the version of gRPC service endpoint and message to be used. -* ext_authz filter: added :ref:`v3 allowed_upstream_headers_to_append ` to allow appending multiple header entries (returned by the authorization server) with the same key to the original request headers. + If this runtime feature is disabled, the upstream circuit breakers for the cluster will be used even if the :ref:`DNS Cache circuit breakers ` are configured. +* dynamic forward proxy: added :ref:`allow_insecure_cluster_options ` to allow disabling of auto_san_validation and auto_sni. +* ext_authz filter: added :ref:`v2 deny_at_disable `, :ref:`v3 deny_at_disable `. This allows force denying protected paths while filter gets disabled, by setting this key to true. +* ext_authz filter: added API version field for both :ref:`HTTP ` + and :ref:`Network ` filters to explicitly set the version of gRPC service endpoint and message to be used. +* ext_authz filter: added :ref:`v3 allowed_upstream_headers_to_append ` to allow appending multiple header entries (returned by the authorization server) with the same key to the original request headers. * fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults - are applied to using :ref:`HTTP headers ` to the HTTP fault filter. + are applied to using :ref:`HTTP headers ` to the HTTP fault filter. * fault: added support for specifying grpc_status code in abort faults using - :ref:`HTTP header ` or abort fault configuration in HTTP fault filter. + :ref:`HTTP header ` or abort fault configuration in HTTP fault filter. 
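As a hedged illustration of the abort fault's new ``grpc_status`` knob mentioned in the fault entries above, a minimal HTTP fault filter fragment might look like the sketch below; the status code and percentage are arbitrary example values, not recommendations.

.. code-block:: yaml

  name: envoy.filters.http.fault
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault
    abort:
      grpc_status: 14        # UNAVAILABLE, returned to gRPC callers
      percentage:
        numerator: 5
        denominator: HUNDRED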
* filter: added ``upstream_rq_time`` stats to the GPRC stats filter. - Disabled by default and can be enabled via :ref:`enable_upstream_stats `. -* grpc: added support for Google gRPC :ref:`custom channel arguments `. + Disabled by default and can be enabled via :ref:`enable_upstream_stats `. +* grpc: added support for Google gRPC :ref:`custom channel arguments `. * grpc-json: added support for streaming response using `google.api.HttpBody `_. * grpc-json: send a ``x-envoy-original-method`` header to grpc services. * gzip filter: added option to set zlib's next output buffer size. * hds: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. * header to metadata: added support for regex substitutions on header values. -* health checks: allowed configuring health check transport sockets by specifying :ref:`transport socket match criteria `. -* http: added :ref:`local_reply config ` to http_connection_manager to customize :ref:`local reply `. -* http: added :ref:`stripping port from host header ` support. -* http: added support for proxying CONNECT requests, terminating CONNECT requests, and converting raw TCP streams into HTTP/2 CONNECT requests. See :ref:`upgrade documentation ` for details. +* health checks: allowed configuring health check transport sockets by specifying :ref:`transport socket match criteria `. +* http: added :ref:`local_reply config ` to http_connection_manager to customize :ref:`local reply `. +* http: added :ref:`stripping port from host header ` support. +* http: added support for proxying CONNECT requests, terminating CONNECT requests, and converting raw TCP streams into HTTP/2 CONNECT requests. See :ref:`upgrade documentation ` for details. * listener: added in place filter chain update flow for tcp listener update which doesn't close connections if the corresponding network filter chain is equivalent during the listener update. Can be disabled by setting runtime feature ``envoy.reloadable_features.listener_in_place_filterchain_update`` to false. - Also added additional draining filter chain stat for :ref:`listener manager ` to track the number of draining filter chains and the number of in place update attempts. + Also added additional draining filter chain stat for :ref:`listener manager ` to track the number of draining filter chains and the number of in place update attempts. * logger: added ``--log-format-prefix-with-location`` command line option to prefix '%v' with file path and line number. * lrs: added new ``envoy_api_field_service.load_stats.v2.LoadStatsResponse.send_all_clusters`` field in LRS response, which allows management servers to avoid explicitly listing all clusters it is interested in; behavior is allowed based on new ``envoy.lrs.supports_send_all_clusters`` capability - in :ref:`client_features ` field. + in :ref:`client_features ` field. * lrs: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. -* lua: added :ref:`per route config ` for Lua filter. +* lua: added :ref:`per route config ` for Lua filter. * lua: added tracing to the ``httpCall()`` API. -* metrics service: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. -* network filters: added a :ref:`postgres proxy filter `. -* network filters: added a :ref:`rocketmq proxy filter `. +* metrics service: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. 
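A rough sketch of the metrics service stats sink with the API version pinned explicitly, as the entry above describes; the cluster name is a placeholder and the exact spelling of the version field should be checked against the v3 API.

.. code-block:: yaml

  stats_sinks:
  - name: envoy.stat_sinks.metrics_service
    typed_config:
      "@type": type.googleapis.com/envoy.config.metrics.v3.MetricsServiceConfig
      transport_api_version: V3          # explicitly pin the gRPC service/message version
      grpc_service:
        envoy_grpc:
          cluster_name: metrics_service  # placeholder cluster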
+* network filters: added a :ref:`postgres proxy filter `. +* network filters: added a :ref:`rocketmq proxy filter `. * performance: enabled stats symbol table implementation by default. To disable it, add ``--use-fake-symbol-table 1`` to the command-line arguments when starting Envoy. -* ratelimit: added support for use of dynamic metadata :ref:`dynamic_metadata ` as a ratelimit action. -* ratelimit: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. -* ratelimit: support specifying dynamic overrides in rate limit descriptors using :ref:`limit override ` config. -* redis: added acl support :ref:`downstream_auth_username ` for downstream client ACL authentication, and :ref:`auth_username ` to configure authentication usernames for upstream Redis 6+ server clusters with ACL enabled. -* regex: added support for enforcing max program size via runtime and stats to monitor program size for :ref:`Google RE2 `. -* request_id: added to :ref:`always_set_request_id_in_response setting ` - to set :ref:`x-request-id ` header in response even if +* ratelimit: added support for use of dynamic metadata :ref:`dynamic_metadata ` as a ratelimit action. +* ratelimit: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* ratelimit: support specifying dynamic overrides in rate limit descriptors using :ref:`limit override ` config. +* redis: added acl support :ref:`downstream_auth_username ` for downstream client ACL authentication, and :ref:`auth_username ` to configure authentication usernames for upstream Redis 6+ server clusters with ACL enabled. +* regex: added support for enforcing max program size via runtime and stats to monitor program size for :ref:`Google RE2 `. +* request_id: added to :ref:`always_set_request_id_in_response setting ` + to set :ref:`x-request-id ` header in response even if tracing is not forced. * router: added more fine grained internal redirect configs to the :ref:`internal_redirect_policy - ` field. + ` field. * router: added regex substitution support for header based hashing. * router: added support for RESPONSE_FLAGS and RESPONSE_CODE_DETAILS :ref:`header formatters - `. -* router: allow Rate Limiting Service to be called in case of missing request header for a descriptor if the :ref:`skip_if_absent ` field is set to true. -* runtime: added new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. + `. +* router: allow Rate Limiting Service to be called in case of missing request header for a descriptor if the :ref:`skip_if_absent ` field is set to true. +* runtime: added new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. * server: added the option :option:`--drain-strategy` to enable different drain strategies for DrainManager::drainClose(). -* server: added :ref:`server.envoy_bug_failures ` statistic to count ENVOY_BUG failures. -* stats: added the option to :ref:`report counters as deltas ` to the metrics service stats sink. +* server: added :ref:`server.envoy_bug_failures ` statistic to count ENVOY_BUG failures. +* stats: added the option to :ref:`report counters as deltas ` to the metrics service stats sink. * tracing: made tracing configuration fully dynamic and every HTTP connection manager - can now have a separate :ref:`tracing provider `. -* udp: upgraded :ref:`udp_proxy ` filter to v3 and promoted it out of alpha. + can now have a separate :ref:`tracing provider `. 
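Since tracing configuration is now carried per connection manager, a hedged sketch of an HTTP connection manager with its own provider could look like the following; the Zipkin cluster and endpoint are placeholders.

.. code-block:: yaml

  # inside the HttpConnectionManager typed_config
  tracing:
    provider:
      name: envoy.tracers.zipkin
      typed_config:
        "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig
        collector_cluster: zipkin            # placeholder cluster name
        collector_endpoint: /api/v2/spans
        collector_endpoint_version: HTTP_JSON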
+* udp: upgraded :ref:`udp_proxy ` filter to v3 and promoted it out of alpha. Deprecated ---------- -* Tracing provider configuration as part of :ref:`bootstrap config ` +* Tracing provider configuration as part of :ref:`bootstrap config ` has been deprecated in favor of configuration as part of :ref:`HTTP connection manager - `. -* The :ref:`HTTP Gzip filter ` has been deprecated in favor of - :ref:`Compressor `. -* The * :ref:`GoogleRE2.max_program_size ` + `. +* The :ref:`HTTP Gzip filter ` has been deprecated in favor of + :ref:`Compressor `. +* The * :ref:`GoogleRE2.max_program_size ` field is now deprecated. Management servers are expected to validate regexp program sizes instead of expecting the client to do it. Alternatively, the max program size can be enforced by Envoy via runtime. -* The :ref:`internal_redirect_action ` - field and :ref:`max_internal_redirects ` field +* The :ref:`internal_redirect_action ` + field and :ref:`max_internal_redirects ` field are now deprecated. This changes the implemented default cross scheme redirect behavior. All cross scheme redirects are disallowed by default. To restore the previous behavior, set allow_cross_scheme_redirect=true and use - :ref:`safe_cross_scheme `, - in :ref:`predicates `. -* File access logger fields :ref:`format `, :ref:`json_format ` and :ref:`typed_json_format ` are deprecated in favor of :ref:`log_format `. + :ref:`safe_cross_scheme `, + in :ref:`predicates `. +* File access logger fields :ref:`format `, :ref:`json_format ` and :ref:`typed_json_format ` are deprecated in favor of :ref:`log_format `. * A warning is now logged when v2 xDS api is used. This behavior can be temporarily disabled by setting ``envoy.reloadable_features.enable_deprecated_v2_api_warning`` to ``false``. -* Using cluster circuit breakers for DNS Cache is now deprecated in favor of :ref:`DNS cache circuit breakers `. This behavior can be temporarily disabled by setting ``envoy.reloadable_features.enable_dns_cache_circuit_breakers`` to ``false``. +* Using cluster circuit breakers for DNS Cache is now deprecated in favor of :ref:`DNS cache circuit breakers `. This behavior can be temporarily disabled by setting ``envoy.reloadable_features.enable_dns_cache_circuit_breakers`` to ``false``. diff --git a/docs/root/version_history/v1.15.4.rst b/docs/root/version_history/v1.15.4.rst index f40b70a69cfd..10d03f007f27 100644 --- a/docs/root/version_history/v1.15.4.rst +++ b/docs/root/version_history/v1.15.4.rst @@ -4,7 +4,7 @@ Changes ------- -* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. +* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. * http: fixed a remotely exploitable integer overflow via a very large grpc-timeout value causes undefined behavior. * http: fixed URL parsing for HTTP/1.1 fully qualified URLs and connect requests containing IPv6 addresses. * http: fixed bugs in datadog and squash filter's handling of responses with no bodies. 
@@ -13,7 +13,7 @@ Changes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ diff --git a/docs/root/version_history/v1.15.5.rst b/docs/root/version_history/v1.15.5.rst index ca0fcbab5d56..7d4fbe370c32 100644 --- a/docs/root/version_history/v1.15.5.rst +++ b/docs/root/version_history/v1.15.5.rst @@ -6,11 +6,11 @@ Changes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ -* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action ` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling ` runtime variable. +* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action ` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling ` runtime variable. Deprecated ---------- diff --git a/docs/root/version_history/v1.16.0.rst b/docs/root/version_history/v1.16.0.rst index 32cb2a70dd23..51c332b3bc99 100644 --- a/docs/root/version_history/v1.16.0.rst +++ b/docs/root/version_history/v1.16.0.rst @@ -5,10 +5,10 @@ Incompatible Behavior Changes ----------------------------- *Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* -* build: added visibility rules for upstream. If these cause visibility related breakage, see notes in :repo:`BUILD `. +* build: added visibility rules for upstream. If these cause visibility related breakage, see notes in :repo:`BUILD `. * build: tcmalloc changes require Clang 9. This requirement change can be avoided by building with ``--define tcmalloc=gperftools`` to use the older tcmalloc code. * config: additional warnings have been added for the use of v2 APIs. These appear as log messages - and are also captured in the :ref:`deprecated_feature_use ` counter after server + and are also captured in the :ref:`deprecated_feature_use ` counter after server initialization. * dns: ``envoy.restart_features.use_apple_api_for_dns_lookups`` is on by default. This flag only affects Apple platforms (macOS, iOS). It is incompatible to have the runtime flag set to true at the same time as specifying the ````use_tcp_for_dns_lookups```` option or custom dns resolvers. Doing so will cause failure. * watchdog: added two guarddogs, breaking the aggregated stats for the single guarddog system. The aggregated stats for the guarddogs will have the following prefixes: ``main_thread`` and ``workers``. Concretely, anything monitoring ``server.watchdog_miss`` and ``server.watchdog_mega_miss`` will need to be updated. 
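For the two-guarddog split noted in the last entry above, a hedged bootstrap sketch of separate main-thread and worker watchdog settings is shown below; the timeout values are illustrative only.

.. code-block:: yaml

  watchdogs:
    main_thread_watchdog:
      miss_timeout: 0.2s
      megamiss_timeout: 1s
    worker_watchdog:
      miss_timeout: 0.5s
      megamiss_timeout: 2s

With this split in place, the aggregated ``server.watchdog_miss`` and ``server.watchdog_mega_miss`` counters are replaced by ``main_thread``- and ``workers``-prefixed stats, as the entry describes.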
@@ -19,20 +19,20 @@ Minor Behavior Changes * adaptive concurrency: added a response body / grpc-message header for rejected requests. * async_client: minor change to handling header only responses more similar to header-with-empty-body responses. -* build: an :ref:`Ubuntu based debug image ` is built and published in DockerHub. +* build: an :ref:`Ubuntu based debug image ` is built and published in DockerHub. * build: the debug information will be generated separately to reduce target size and reduce compilation time when build in compilation mode ``dbg`` and ``opt``. Users will need to build dwp file to debug with gdb. * compressor: always insert ``Vary`` headers for compressible resources even if it's decided not to compress a response due to incompatible ``Accept-Encoding`` value. The ``Vary`` header needs to be inserted to let a caching proxy in front of Envoy know that the requested resource still can be served with compression applied. * decompressor: headers-only requests were incorrectly not advertising accept-encoding when configured to do so. This is now fixed. * ext_authz filter: request timeout will now count from the time the check request is created, instead of when it becomes active. This makes sure that the timeout is enforced even if the ext_authz cluster's circuit breaker is engaged. - This behavior can be reverted by setting runtime feature ``envoy.reloadable_features.ext_authz_measure_timeout_on_check_created`` to false. When enabled, a new ``ext_authz.timeout`` stat is counted when timeout occurs. See :ref:`stats `. + This behavior can be reverted by setting runtime feature ``envoy.reloadable_features.ext_authz_measure_timeout_on_check_created`` to false. When enabled, a new ``ext_authz.timeout`` stat is counted when timeout occurs. See :ref:`stats `. * grpc reverse bridge: upstream headers will no longer be propagated when the response is missing or contains an unexpected content-type. -* http: added :ref:`contains `, a new string matcher type which matches if the value of the string has the substring mentioned in contains matcher. -* http: added :ref:`contains `, a new header matcher type which matches if the value of the header has the substring mentioned in contains matcher. -* http: added :ref:`headers_to_add ` to :ref:`local reply mapper ` to allow its users to add/append/override response HTTP headers to local replies. -* http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting ``envoy.reloadable_features.hcm_stream_error_on_invalid_message`` to false, or permanently reverted by setting the HTTP/1 configuration :ref:`override_stream_error_on_invalid_http_message ` to true to restore prior HTTP/1.1 behavior (i.e. connection isn't terminated) and to retain prior HTTP/2 behavior (i.e. connection is terminated). -* http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. 
This can temporarily be reverted by setting ``envoy.reloadable_features.hcm_stream_error_on_invalid_message`` to false, or permanently reverted by setting the :ref:`HCM option ` to true to restore prior HTTP/1.1 behavior and setting the *new* HTTP/2 configuration :ref:`override_stream_error_on_invalid_http_message ` to false to retain prior HTTP/2 behavior. +* http: added :ref:`contains `, a new string matcher type which matches if the value of the string has the substring mentioned in contains matcher. +* http: added :ref:`contains `, a new header matcher type which matches if the value of the header has the substring mentioned in contains matcher. +* http: added :ref:`headers_to_add ` to :ref:`local reply mapper ` to allow its users to add/append/override response HTTP headers to local replies. +* http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting ``envoy.reloadable_features.hcm_stream_error_on_invalid_message`` to false, or permanently reverted by setting the HTTP/1 configuration :ref:`override_stream_error_on_invalid_http_message ` to true to restore prior HTTP/1.1 behavior (i.e. connection isn't terminated) and to retain prior HTTP/2 behavior (i.e. connection is terminated). +* http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting ``envoy.reloadable_features.hcm_stream_error_on_invalid_message`` to false, or permanently reverted by setting the :ref:`HCM option ` to true to restore prior HTTP/1.1 behavior and setting the *new* HTTP/2 configuration :ref:`override_stream_error_on_invalid_http_message ` to false to retain prior HTTP/2 behavior. * http: applying route level header modifications to local replies sent on that route. This behavior may be temporarily reverted by setting ``envoy.reloadable_features.always_apply_route_header_rules`` to false. -* http: changed Envoy to send GOAWAY to HTTP2 downstreams when the :ref:`disable_keepalive ` overload action is active. This behavior may be temporarily reverted by setting ``envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2`` to false. +* http: changed Envoy to send GOAWAY to HTTP2 downstreams when the :ref:`disable_keepalive ` overload action is active. This behavior may be temporarily reverted by setting ``envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2`` to false. * http: changed Envoy to send error headers and body when possible. This behavior may be temporarily reverted by setting ``envoy.reloadable_features.allow_response_for_timeout`` to false. * http: changed empty trailers encoding behavior by sending empty data with ``end_stream`` true (instead of sending empty trailers) for HTTP/2. This behavior can be reverted temporarily by setting runtime feature ``envoy.reloadable_features.http2_skip_encoding_empty_trailers`` to false. * http: changed how local replies are processed for requests which transform from grpc to not-grpc, or not-grpc to grpc. Previously the initial generated reply depended on which filter sent the reply, but now the reply is consistently generated the way the downstream expects.
This behavior can be temporarily reverted by setting ``envoy.reloadable_features.unify_grpc_handling`` to false. @@ -52,7 +52,7 @@ Minor Behavior Changes * router: now consumes all retry related headers to prevent them from being propagated to the upstream. This behavior may be reverted by setting runtime feature ``envoy.reloadable_features.consume_all_retry_headers`` to false. * stats: the fake symbol table implementation has been removed from the binary, and the option ``--use-fake-symbol-table`` is now a no-op with a warning. * thrift_proxy: special characters {'\0', '\r', '\n'} will be stripped from thrift headers. -* watchdog: replaced single watchdog with separate watchdog configuration for worker threads and for the main thread configured via :ref:`Watchdogs `. It works with :ref:`watchdog ` by having the worker thread and main thread watchdogs have same config. +* watchdog: replaced single watchdog with separate watchdog configuration for worker threads and for the main thread configured via :ref:`Watchdogs `. It works with :ref:`watchdog ` by having the worker thread and main thread watchdogs have same config. Bug Fixes --------- @@ -74,7 +74,7 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` * http: removed legacy header sanitization and the runtime guard ``envoy.reloadable_features.strict_header_validation``. * http: removed legacy transfer-encoding enforcement and runtime guard ``envoy.reloadable_features.reject_unsupported_transfer_encodings``. @@ -83,101 +83,101 @@ Removed Config or Runtime New Features ------------ -* access log: added a :ref:`dynamic metadata filter ` for access logs, which filters whether to log based on matching dynamic metadata. -* access log: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as a response flag. -* access log: added support for :ref:`%CONNECTION_TERMINATION_DETAILS% ` as a log command operator about why the connection is terminated by Envoy. -* access log: added support for nested objects in :ref:`JSON logging mode `. -* access log: added :ref:`omit_empty_values ` option to omit unset value from formatted log. -* access log: added support for :ref:`%CONNECTION_ID% ` for the downstream connection identifier. -* admin: added :ref:`circuit breakers settings ` information to GET /clusters?format=json :ref:`cluster status `. -* admin: added :ref:`node ` information to GET /server_info :ref:`response object `. -* admin: added the ability to dump init manager unready targets information :ref:`/init_dump ` and :ref:`/init_dump?mask={} `. -* admission control: added the :ref:`admission control ` filter for client-side request throttling. -* build: enable building envoy :ref:`arm64 images ` by buildx tool in x86 CI platform. -* cluster: added new :ref:`connection_pool_per_downstream_connection ` flag, which enable creation of a new connection pool for each downstream connection. +* access log: added a :ref:`dynamic metadata filter ` for access logs, which filters whether to log based on matching dynamic metadata. +* access log: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as a response flag. +* access log: added support for :ref:`%CONNECTION_TERMINATION_DETAILS% ` as a log command operator about why the connection is terminated by Envoy. +* access log: added support for nested objects in :ref:`JSON logging mode `.
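A hedged sketch of the nested JSON logging mode mentioned in the last entry above, using an assumed two-level layout; the grouping of fields is purely illustrative.

.. code-block:: yaml

  log_format:
    json_format:
      request:                 # nested objects are now accepted
        method: "%REQ(:METHOD)%"
        path: "%REQ(:PATH)%"
      response:
        code: "%RESPONSE_CODE%"
        flags: "%RESPONSE_FLAGS%"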
+* access log: added :ref:`omit_empty_values ` option to omit unset value from formatted log. +* access log: added support for :ref:`%CONNECTION_ID% ` for the downstream connection identifier. +* admin: added :ref:`circuit breakers settings ` information to GET /clusters?format=json :ref:`cluster status `. +* admin: added :ref:`node ` information to GET /server_info :ref:`response object `. +* admin: added the ability to dump init manager unready targets information :ref:`/init_dump ` and :ref:`/init_dump?mask={} `. +* admission control: added the :ref:`admission control ` filter for client-side request throttling. +* build: enable building envoy :ref:`arm64 images ` by buildx tool in x86 CI platform. +* cluster: added new :ref:`connection_pool_per_downstream_connection ` flag, which enable creation of a new connection pool for each downstream connection. * decompressor filter: reports compressed and uncompressed bytes in trailers. * dns: added support for doing DNS resolution using Apple's DnsService APIs in Apple platforms (macOS, iOS). This feature is ON by default, and is only configurable via the ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime key. Note that this value is latched during server startup and changing the runtime key is a no-op during the lifetime of the process. -* dns_filter: added support for answering :ref:`service record ` queries. -* dynamic_forward_proxy: added :ref:`use_tcp_for_dns_lookups ` option to use TCP for DNS lookups in order to match the DNS options for :ref:`Clusters `. -* ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. - The emitted dynamic metadata is set by :ref:`dynamic metadata ` field in a returned :ref:`CheckResponse `. -* ext_authz filter: added :ref:`stat_prefix ` as an optional additional prefix for the statistics emitted from `ext_authz` HTTP filter. -* ext_authz filter: added support for enabling the filter based on :ref:`dynamic metadata `. -* ext_authz filter: added support for letting the authorization server instruct Envoy to remove headers from the original request by setting the new field :ref:`headers_to_remove ` before forwarding it to the upstream. -* ext_authz filter: added support for sending :ref:`raw bytes as request body ` of a gRPC check request by setting :ref:`pack_as_bytes ` to true. -* ext_authz_filter: added :ref:`disable_request_body_buffering ` to disable request data buffering per-route. +* dns_filter: added support for answering :ref:`service record ` queries. +* dynamic_forward_proxy: added :ref:`use_tcp_for_dns_lookups ` option to use TCP for DNS lookups in order to match the DNS options for :ref:`Clusters `. +* ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. + The emitted dynamic metadata is set by :ref:`dynamic metadata ` field in a returned :ref:`CheckResponse `. +* ext_authz filter: added :ref:`stat_prefix ` as an optional additional prefix for the statistics emitted from `ext_authz` HTTP filter. +* ext_authz filter: added support for enabling the filter based on :ref:`dynamic metadata `. +* ext_authz filter: added support for letting the authorization server instruct Envoy to remove headers from the original request by setting the new field :ref:`headers_to_remove ` before forwarding it to the upstream. +* ext_authz filter: added support for sending :ref:`raw bytes as request body ` of a gRPC check request by setting :ref:`pack_as_bytes ` to true. 
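To illustrate the ``pack_as_bytes`` entry above, a hedged fragment of an ext_authz filter that buffers the request body and forwards it to the gRPC authorization server as raw bytes; the cluster name and buffer size are placeholders.

.. code-block:: yaml

  name: envoy.filters.http.ext_authz
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
    grpc_service:
      envoy_grpc:
        cluster_name: ext_authz          # placeholder cluster
    with_request_body:
      max_request_bytes: 1024
      pack_as_bytes: true                # send raw bytes instead of a UTF-8 string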
+* ext_authz_filter: added :ref:`disable_request_body_buffering ` to disable request data buffering per-route. * grpc-json: support specifying ``response_body`` field in for ``google.api.HttpBody`` message. -* hds: added :ref:`cluster_endpoints_health ` to HDS responses, keeping endpoints in the same groupings as they were configured in the HDS specifier by cluster and locality instead of as a flat list. -* hds: added :ref:`transport_socket_matches ` to HDS cluster health check specifier, so the existing match filter :ref:`transport_socket_match_criteria ` in the repeated field :ref:`health_checks ` has context to match against. This unblocks support for health checks over HTTPS and HTTP/2. +* hds: added :ref:`cluster_endpoints_health ` to HDS responses, keeping endpoints in the same groupings as they were configured in the HDS specifier by cluster and locality instead of as a flat list. +* hds: added :ref:`transport_socket_matches ` to HDS cluster health check specifier, so the existing match filter :ref:`transport_socket_match_criteria ` in the repeated field :ref:`health_checks ` has context to match against. This unblocks support for health checks over HTTPS and HTTP/2. * hot restart: added :option:`--socket-path` and :option:`--socket-mode` to configure UDS path in the filesystem and set permission to it. -* http: added HTTP/2 support for :ref:`connection keepalive ` via PING. -* http: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as custom header. -* http: added :ref:`allow_chunked_length ` configuration option for HTTP/1 codec to allow processing requests/responses with both Content-Length and Transfer-Encoding: chunked headers. If such message is served and option is enabled - per RFC Content-Length is ignored and removed. -* http: added :ref:`CDN Loop filter ` and :ref:`documentation `. -* http: added :ref:`MaxStreamDuration proto ` for configuring per-route downstream duration timeouts. +* http: added HTTP/2 support for :ref:`connection keepalive ` via PING. +* http: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as custom header. +* http: added :ref:`allow_chunked_length ` configuration option for HTTP/1 codec to allow processing requests/responses with both Content-Length and Transfer-Encoding: chunked headers. If such message is served and option is enabled - per RFC Content-Length is ignored and removed. +* http: added :ref:`CDN Loop filter ` and :ref:`documentation `. +* http: added :ref:`MaxStreamDuration proto ` for configuring per-route downstream duration timeouts. * http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is used by default for HTTP/1.1 and HTTP/2 server connections. The new codecs can be enabled for testing by setting the runtime feature ``envoy.reloadable_features.new_codec_behavior`` to true. The new codecs will be in development for one month, and then enabled by default while the old codecs are deprecated. * http: modified the HTTP header-map data-structure to use an underlying dictionary and a list (no change to the header-map API). To conform with previous versions, the use of a dictionary is currently disabled. It can be enabled by setting the ``envoy.http.headermap.lazy_map_min_size`` runtime feature to a non-negative number which defines the minimal number of headers in a request/response/trailers required for using a dictionary in addition to the list. 
Our current benchmarks suggest that the value 3 is a good threshold for most workloads. -* load balancer: added :ref:`RingHashLbConfig ` to configure the table size of Maglev consistent hash. -* load balancer: added a :ref:`configuration ` option to specify the active request bias used by the least request load balancer. -* load balancer: added an :ref:`option ` to optimize subset load balancing when there is only one host per subset. -* load balancer: added support for bounded load per host for consistent hash load balancers via :ref:`hash_balance_factor `. -* local_reply config: added :ref:`content_type ` field to set content-type. -* lua: added Lua APIs to access :ref:`SSL connection info ` object. -* lua: added Lua API for :ref:`base64 escaping a string `. -* lua: added Lua API for :ref:`setting the current buffer content `. -* lua: added new :ref:`source_code ` field to support the dispatching of inline Lua code in per route configuration of Lua filter. -* overload management: add :ref:`scaling ` trigger for OverloadManager actions. -* postgres network filter: :ref:`metadata ` is produced based on SQL query. -* proxy protocol: added support for generating the header upstream using :ref:`Proxy Protocol Transport Socket `. -* ratelimit: added :ref:`enable_x_ratelimit_headers ` option to enable `X-RateLimit-*` headers as defined in `draft RFC `_. -* ratelimit: added :ref:`per route config ` for rate limit filter. -* ratelimit: added support for optional :ref:`descriptor_key ` to Generic Key action. +* load balancer: added :ref:`RingHashLbConfig ` to configure the table size of Maglev consistent hash. +* load balancer: added a :ref:`configuration ` option to specify the active request bias used by the least request load balancer. +* load balancer: added an :ref:`option ` to optimize subset load balancing when there is only one host per subset. +* load balancer: added support for bounded load per host for consistent hash load balancers via :ref:`hash_balance_factor `. +* local_reply config: added :ref:`content_type ` field to set content-type. +* lua: added Lua APIs to access :ref:`SSL connection info ` object. +* lua: added Lua API for :ref:`base64 escaping a string `. +* lua: added Lua API for :ref:`setting the current buffer content `. +* lua: added new :ref:`source_code ` field to support the dispatching of inline Lua code in per route configuration of Lua filter. +* overload management: add :ref:`scaling ` trigger for OverloadManager actions. +* postgres network filter: :ref:`metadata ` is produced based on SQL query. +* proxy protocol: added support for generating the header upstream using :ref:`Proxy Protocol Transport Socket `. +* ratelimit: added :ref:`enable_x_ratelimit_headers ` option to enable `X-RateLimit-*` headers as defined in `draft RFC `_. +* ratelimit: added :ref:`per route config ` for rate limit filter. +* ratelimit: added support for optional :ref:`descriptor_key ` to Generic Key action. * rbac filter: added the name of the matched policy to the response code detail when a request is rejected by the RBAC filter. -* rbac filter: added a log action to the :ref:`RBAC filter ` which sets dynamic metadata to inform access loggers whether to log. -* redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. -* router: added a new :ref:`rate limited retry back off ` strategy that uses headers like `Retry-After` or `X-RateLimit-Reset` to decide the back off interval. 
+* rbac filter: added a log action to the :ref:`RBAC filter ` which sets dynamic metadata to inform access loggers whether to log. +* redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. +* router: added a new :ref:`rate limited retry back off ` strategy that uses headers like `Retry-After` or `X-RateLimit-Reset` to decide the back off interval. * router: added new - :ref:`envoy-ratelimited ` + :ref:`envoy-ratelimited ` retry policy, which allows retrying envoy's own rate limited responses. -* router: added new :ref:`host_rewrite_path_regex ` +* router: added new :ref:`host_rewrite_path_regex ` option, which allows rewriting Host header based on path. -* router: added support for DYNAMIC_METADATA :ref:`header formatter `. -* router_check_tool: added support for ``request_header_matches``, ``response_header_matches`` to :ref:`router check tool `. +* router: added support for DYNAMIC_METADATA :ref:`header formatter `. +* router_check_tool: added support for ``request_header_matches``, ``response_header_matches`` to :ref:`router check tool `. * signal: added support for calling fatal error handlers without envoy's signal handler, via FatalErrorHandler::callFatalErrorHandlers(). -* stats: added optional histograms to :ref:`cluster stats ` +* stats: added optional histograms to :ref:`cluster stats ` that track headers and body sizes of requests and responses. * stats: allow configuring histogram buckets for stats sinks and admin endpoints that support it. -* tap: added :ref:`generic body matcher ` to scan http requests and responses for text or hex patterns. -* tcp_proxy: added :ref:`max_downstream_connection_duration ` for downstream connection. When max duration is reached the connection will be closed. +* tap: added :ref:`generic body matcher ` to scan http requests and responses for text or hex patterns. +* tcp_proxy: added :ref:`max_downstream_connection_duration ` for downstream connection. When max duration is reached the connection will be closed. * tcp_proxy: allow earlier network filters to set metadataMatchCriteria on the connection StreamInfo to influence load balancing. -* tls: added OCSP stapling support through the :ref:`ocsp_staple ` and :ref:`ocsp_staple_policy ` configuration options. See :ref:`OCSP Stapling ` for usage and runtime flags. -* tls: introduce new :ref:`extension point ` for overriding :ref:`TLS handshaker ` behavior. +* tls: added OCSP stapling support through the :ref:`ocsp_staple ` and :ref:`ocsp_staple_policy ` configuration options. See :ref:`OCSP Stapling ` for usage and runtime flags. +* tls: introduce new :ref:`extension point ` for overriding :ref:`TLS handshaker ` behavior. * tls: switched from using socket BIOs to using custom BIOs that know how to interact with IoHandles. The feature can be disabled by setting runtime feature ``envoy.reloadable_features.tls_use_io_handle_bio`` to false. -* tracing: added ability to set some :ref:`optional segment fields ` in the AWS X-Ray tracer. -* udp_proxy: added :ref:`hash_policies ` to support hash based routing. -* udp_proxy: added :ref:`use_original_src_ip ` option to replicate the downstream remote address of the packets on the upstream side of Envoy. It is similar to :ref:`original source filter `. -* watchdog: support randomizing the watchdog's kill timeout to prevent synchronized kills via a maximium jitter parameter :ref:`max_kill_timeout_jitter `. 
-* watchdog: supports an extension point where actions can be registered to fire on watchdog events such as miss, megamiss, kill and multikill. See :ref:`watchdog actions `. -* watchdog: watchdog action extension that does cpu profiling. See :ref:`Profile Action `. -* watchdog: watchdog action extension that sends SIGABRT to the stuck thread to terminate the process. See :ref:`Abort Action `. -* xds: added :ref:`extension config discovery ` support for HTTP filters. +* tracing: added ability to set some :ref:`optional segment fields ` in the AWS X-Ray tracer. +* udp_proxy: added :ref:`hash_policies ` to support hash based routing. +* udp_proxy: added :ref:`use_original_src_ip ` option to replicate the downstream remote address of the packets on the upstream side of Envoy. It is similar to :ref:`original source filter `. +* watchdog: support randomizing the watchdog's kill timeout to prevent synchronized kills via a maximium jitter parameter :ref:`max_kill_timeout_jitter `. +* watchdog: supports an extension point where actions can be registered to fire on watchdog events such as miss, megamiss, kill and multikill. See :ref:`watchdog actions `. +* watchdog: watchdog action extension that does cpu profiling. See :ref:`Profile Action `. +* watchdog: watchdog action extension that sends SIGABRT to the stuck thread to terminate the process. See :ref:`Abort Action `. +* xds: added :ref:`extension config discovery ` support for HTTP filters. * xds: added support for mixed v2/v3 discovery response, which enable type url downgrade and upgrade. This feature is disabled by default and is controlled by runtime guard ``envoy.reloadable_features.enable_type_url_downgrade_and_upgrade``. * zlib: added option to use `zlib-ng `_ as zlib library. Deprecated ---------- -* build: alpine based debug image is deprecated in favor of :ref:`Ubuntu based debug image `. -* cluster: the :ref:`track_timeout_budgets ` - field has been deprecated in favor of `timeout_budgets` part of an :ref:`Optional Configuration `. -* ext_authz: the :ref:`dynamic metadata ` field in :ref:`OkHttpResponse ` has been deprecated in favor of :ref:`dynamic metadata ` field in :ref:`CheckResponse `. -* hds: the :ref:`endpoints_health ` - field has been deprecated in favor of :ref:`cluster_endpoints_health ` to maintain +* build: alpine based debug image is deprecated in favor of :ref:`Ubuntu based debug image `. +* cluster: the :ref:`track_timeout_budgets ` + field has been deprecated in favor of `timeout_budgets` part of an :ref:`Optional Configuration `. +* ext_authz: the :ref:`dynamic metadata ` field in :ref:`OkHttpResponse ` has been deprecated in favor of :ref:`dynamic metadata ` field in :ref:`CheckResponse `. +* hds: the :ref:`endpoints_health ` + field has been deprecated in favor of :ref:`cluster_endpoints_health ` to maintain grouping by cluster and locality. -* router: the :ref:`include_vh_rate_limits ` field has been deprecated in favor of :ref:`vh_rate_limits `. -* router: the :ref:`max_grpc_timeout ` field has been deprecated in favor of :ref:`grpc_timeout_header_max `. -* router: the :ref:`grpc_timeout_offset ` field has been deprecated in favor of :ref:`grpc_timeout_header_offset `. -* tap: the :ref:`match_config ` field has been deprecated in favor of - :ref:`match ` field. +* router: the :ref:`include_vh_rate_limits ` field has been deprecated in favor of :ref:`vh_rate_limits `. +* router: the :ref:`max_grpc_timeout ` field has been deprecated in favor of :ref:`grpc_timeout_header_max `. 
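For the ``max_grpc_timeout`` deprecation above, a hedged sketch of the replacement per-route configuration; the cluster name and timeout values are arbitrary examples.

.. code-block:: yaml

  route:
    cluster: grpc_backend              # placeholder cluster
    max_stream_duration:
      max_stream_duration: 30s
      grpc_timeout_header_max: 15s     # caps any grpc-timeout request header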
+* router: the :ref:`grpc_timeout_offset ` field has been deprecated in favor of :ref:`grpc_timeout_header_offset `. +* tap: the :ref:`match_config ` field has been deprecated in favor of + :ref:`match ` field. * router_check_tool: ``request_header_fields``, ``response_header_fields`` config deprecated in favor of ``request_header_matches``, ``response_header_matches``. -* watchdog: :ref:`watchdog ` deprecated in favor of :ref:`watchdogs `. +* watchdog: :ref:`watchdog ` deprecated in favor of :ref:`watchdogs `. diff --git a/docs/root/version_history/v1.16.3.rst b/docs/root/version_history/v1.16.3.rst index 125902e2f67d..cff413ee2be6 100644 --- a/docs/root/version_history/v1.16.3.rst +++ b/docs/root/version_history/v1.16.3.rst @@ -14,7 +14,7 @@ Bug Fixes *Changes expected to improve the state of the world and are unlikely to have negative effects* * aggregate cluster: fixed a crash due to a TLS initialization issue. -* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. +* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. * http: fixed a remotely exploitable integer overflow via a very large grpc-timeout value causes undefined behavior. * http: reverting a behavioral change where upstream connect timeouts were temporarily treated differently from other connection failures. The change back to the original behavior can be temporarily reverted by setting ``envoy.reloadable_features.treat_upstream_connect_timeout_as_connect_failure`` to false. * lua: fixed crash when Lua script contains streamInfo():downstreamSslConnection(). @@ -24,7 +24,7 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ diff --git a/docs/root/version_history/v1.16.4.rst b/docs/root/version_history/v1.16.4.rst index 4864d77cc3a7..ffd25009cf85 100644 --- a/docs/root/version_history/v1.16.4.rst +++ b/docs/root/version_history/v1.16.4.rst @@ -15,12 +15,12 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ -* http: added the ability to :ref:`unescape slash sequences` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime variable. +* http: added the ability to :ref:`unescape slash sequences` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime variable. 
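The v1.16.4 entry above is driven entirely by runtime keys; a hedged bootstrap fragment that pins them in a static layer might look like the sketch below. The key names come from the entry itself, but the value encodings shown are assumptions and should be checked against the HCM documentation.

.. code-block:: yaml

  layered_runtime:
    layers:
    - name: static_layer_0
      static_layer:
        # assumed to take the numeric enum value for the chosen action (e.g. reject)
        http_connection_manager.path_with_escaped_slashes_action: 2
        # assumed to be the percentage of requests the action applies to
        http_connection_manager.path_with_escaped_slashes_action_sampling: 100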
Deprecated ---------- diff --git a/docs/root/version_history/v1.16.5.rst b/docs/root/version_history/v1.16.5.rst index fcf1017a2cd4..f1b37a301d81 100644 --- a/docs/root/version_history/v1.16.5.rst +++ b/docs/root/version_history/v1.16.5.rst @@ -25,7 +25,7 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ diff --git a/docs/root/version_history/v1.17.0.rst b/docs/root/version_history/v1.17.0.rst index 1d612e757312..b3495a674164 100644 --- a/docs/root/version_history/v1.17.0.rst +++ b/docs/root/version_history/v1.17.0.rst @@ -14,19 +14,19 @@ Minor Behavior Changes * build: the Alpine based debug images are no longer built in CI, use Ubuntu based images instead. * decompressor: set the default value of window_bits of the decompressor to 15 to be able to decompress responses compressed by a compressor with any window size. * expr filter: added ``connection.termination_details`` property support. -* formatter: the :ref:`text_format ` field no longer requires at least one byte, and may now be the empty string. It has also become :ref:`deprecated `. +* formatter: the :ref:`text_format ` field no longer requires at least one byte, and may now be the empty string. It has also become :ref:`deprecated `. * grpc_web filter: if a ``grpc-accept-encoding`` header is present it's passed as-is to the upstream and if it isn't ``grpc-accept-encoding:identity`` is sent instead. The header was always overwriten with ``grpc-accept-encoding:identity,deflate,gzip`` before. * http: upstream protocol will now only be logged if an upstream stream was established. -* jwt_authn filter: added support of JWT time constraint verification with a clock skew (default to 60 seconds) and added a filter config field :ref:`clock_skew_seconds ` to configure it. -* listener: injection of the :ref:`TLS inspector ` has been disabled by default. This feature is controlled by the runtime guard ``envoy.reloadable_features.disable_tls_inspector_injection``. -* lua: added `always_wrap_body` argument to `body()` API to always return a :ref:`buffer object ` even if the body is empty. +* jwt_authn filter: added support of JWT time constraint verification with a clock skew (default to 60 seconds) and added a filter config field :ref:`clock_skew_seconds ` to configure it. +* listener: injection of the :ref:`TLS inspector ` has been disabled by default. This feature is controlled by the runtime guard ``envoy.reloadable_features.disable_tls_inspector_injection``. +* lua: added `always_wrap_body` argument to `body()` API to always return a :ref:`buffer object ` even if the body is empty. * memory: enabled new tcmalloc with restartable sequences for aarch64 builds. * mongo proxy metrics: swapped network connection remote and local closed counters previously set reversed (``cx_destroy_local_with_active_rq`` and ``cx_destroy_remote_with_active_rq``). -* outlier detection: added :ref:`max_ejection_time ` to limit ejection time growth when a node stays unhealthy for extended period of time. By default :ref:`max_ejection_time ` limits ejection time to 5 minutes. Additionally, when the node stays healthy, ejection time decreases. See :ref:`ejection algorithm ` for more info. Previously, ejection time could grow without limit and never decreased. +* outlier detection: added :ref:`max_ejection_time ` to limit ejection time growth when a node stays unhealthy for extended period of time. 
By default :ref:`max_ejection_time ` limits ejection time to 5 minutes. Additionally, when the node stays healthy, ejection time decreases. See :ref:`ejection algorithm ` for more info. Previously, ejection time could grow without limit and never decreased. * performance: improved performance when handling large HTTP/1 bodies. * tcp_proxy: now waits for HTTP tunnel to be established before start streaming the downstream data, the runtime guard ``envoy.reloadable_features.http_upstream_wait_connect_response`` can be set to "false" to disable this behavior. * tls: removed RSA key transport and SHA-1 cipher suites from the client-side defaults. -* watchdog: the watchdog action :ref:`abort_action ` is now the default action to terminate the process if watchdog kill / multikill is enabled. +* watchdog: the watchdog action :ref:`abort_action ` is now the default action to terminate the process if watchdog kill / multikill is enabled. * xds: to support TTLs, heartbeating has been added to xDS. As a result, responses that contain empty resources without updating the version will no longer be propagated to the subscribers. To undo this for VHDS (which is the only subscriber that wants empty resources), the ``envoy.reloadable_features.vhds_heartbeats`` can be set to "false". @@ -34,10 +34,10 @@ Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* -* config: validate that upgrade configs have a non-empty :ref:`upgrade_type `, fixing a bug where an errant "-" could result in unexpected behavior. +* config: validate that upgrade configs have a non-empty :ref:`upgrade_type `, fixing a bug where an errant "-" could result in unexpected behavior. * dns: fixed a bug where custom resolvers provided in configuration were not preserved after network issues. * dns_filter: correctly associate DNS response IDs when multiple queries are received. -* grpc mux: fixed sending node again after stream is reset when :ref:`set_node_on_first_message_only ` is set. +* grpc mux: fixed sending node again after stream is reset when :ref:`set_node_on_first_message_only ` is set. * http: fixed URL parsing for HTTP/1.1 fully qualified URLs and connect requests containing IPv6 addresses. * http: reject requests with missing required headers after filter chain processing. * http: sending CONNECT_ERROR for HTTP/2 where appropriate during CONNECT requests. @@ -51,70 +51,70 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` * dispatcher: removed legacy socket read/write resumption code path and runtime guard ``envoy.reloadable_features.activate_fds_next_event_loop``. -* ext_authz: removed auto ignore case in HTTP-based ``ext_authz`` header matching and the runtime guard ``envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher``. To ignore case, set the :ref:`ignore_case ` field to true. +* ext_authz: removed auto ignore case in HTTP-based ``ext_authz`` header matching and the runtime guard ``envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher``. To ignore case, set the :ref:`ignore_case ` field to true. * ext_authz: the deprecated field ``use_alpha`` is no longer supported and cannot be set anymore. * http: removed ``envoy.reloadable_features.http1_flood_protection`` and legacy code path for turning flood protection off. 
* http: removed ``envoy.reloadable_features.new_codec_behavior`` and legacy codecs. New Features ------------ -* compression: the :ref:`compressor ` filter added support for compressing request payloads. Its configuration is unified with the :ref:`decompressor ` filter with two new fields for different directions - :ref:`requests ` and :ref:`responses `. The latter deprecates the old response-specific fields and, if used, roots the response-specific stats in `.compressor...response.*` instead of `.compressor...*`. -* config: added ability to flush stats when the admin's :ref:`/stats endpoint ` is hit instead of on a timer via :ref:`stats_flush_on_admin `. +* compression: the :ref:`compressor ` filter added support for compressing request payloads. Its configuration is unified with the :ref:`decompressor ` filter with two new fields for different directions - :ref:`requests ` and :ref:`responses `. The latter deprecates the old response-specific fields and, if used, roots the response-specific stats in `.compressor...response.*` instead of `.compressor...*`. +* config: added ability to flush stats when the admin's :ref:`/stats endpoint ` is hit instead of on a timer via :ref:`stats_flush_on_admin `. * config: added new runtime feature ``envoy.features.enable_all_deprecated_features`` that allows the use of all deprecated features. * crash support: added the ability to dump L4 connection data on crash. -* formatter: added new :ref:`text_format_source ` field to support format strings both inline and from a file. -* formatter: added support for custom date formatting to :ref:`%DOWNSTREAM_PEER_CERT_V_START% ` and :ref:`%DOWNSTREAM_PEER_CERT_V_END% `, similar to :ref:`%START_TIME% `. -* grpc: implemented header value syntax support when defining :ref:`initial metadata ` for gRPC-based `ext_authz` :ref:`HTTP ` and :ref:`network ` filters, and :ref:`ratelimit ` filters. -* grpc-json: added support for configuring :ref:`unescaping behavior ` for path components. -* hds: added support for delta updates in the :ref:`HealthCheckSpecifier `, making only the Endpoints and Health Checkers that changed be reconstructed on receiving a new message, rather than the entire HDS. -* health_check: added option to use :ref:`no_traffic_healthy_interval ` which allows a different no traffic interval when the host is healthy. -* http: added HCM :ref:`request_headers_timeout config field ` to control how long a downstream has to finish sending headers before the stream is cancelled. +* formatter: added new :ref:`text_format_source ` field to support format strings both inline and from a file. +* formatter: added support for custom date formatting to :ref:`%DOWNSTREAM_PEER_CERT_V_START% ` and :ref:`%DOWNSTREAM_PEER_CERT_V_END% `, similar to :ref:`%START_TIME% `. +* grpc: implemented header value syntax support when defining :ref:`initial metadata ` for gRPC-based `ext_authz` :ref:`HTTP ` and :ref:`network ` filters, and :ref:`ratelimit ` filters. +* grpc-json: added support for configuring :ref:`unescaping behavior ` for path components. +* hds: added support for delta updates in the :ref:`HealthCheckSpecifier `, making only the Endpoints and Health Checkers that changed be reconstructed on receiving a new message, rather than the entire HDS. +* health_check: added option to use :ref:`no_traffic_healthy_interval ` which allows a different no traffic interval when the host is healthy. 
+* http: added HCM :ref:`request_headers_timeout config field ` to control how long a downstream has to finish sending headers before the stream is cancelled. * http: added frame flood and abuse checks to the upstream HTTP/2 codec. This check is off by default and can be enabled by setting the ``envoy.reloadable_features.upstream_http2_flood_checks`` runtime key to true. -* http: added :ref:`stripping any port from host header ` support. -* http: clusters added support for selecting HTTP/1 or HTTP/2 based on ALPN, configurable via :ref:`alpn_config ` in the :ref:`http_protocol_options ` message. -* jwt_authn: added support for :ref:`per-route config `. -* jwt_authn: changed config field :ref:`issuer ` to be optional to comply with JWT `RFC `_ requirements. -* kill_request: added new :ref:`HTTP kill request filter `. -* listener: added an optional :ref:`default filter chain `. If this field is supplied, and none of the :ref:`filter_chains ` matches, this default filter chain is used to serve the connection. -* listener: added back the :ref:`use_original_dst field `. -* listener: added the :ref:`Listener.bind_to_port field `. +* http: added :ref:`stripping any port from host header ` support. +* http: clusters added support for selecting HTTP/1 or HTTP/2 based on ALPN, configurable via :ref:`alpn_config ` in the :ref:`http_protocol_options ` message. +* jwt_authn: added support for :ref:`per-route config `. +* jwt_authn: changed config field :ref:`issuer ` to be optional to comply with JWT `RFC `_ requirements. +* kill_request: added new :ref:`HTTP kill request filter `. +* listener: added an optional :ref:`default filter chain `. If this field is supplied, and none of the :ref:`filter_chains ` matches, this default filter chain is used to serve the connection. +* listener: added back the :ref:`use_original_dst field `. +* listener: added the :ref:`Listener.bind_to_port field `. * log: added a new custom flag ``%_`` to the log pattern to print the actual message to log, but with escaped newlines. -* lua: added `downstreamDirectRemoteAddress()` and `downstreamLocalAddress()` APIs to :ref:`streamInfo() `. -* mongo_proxy: the list of commands to produce metrics for is now :ref:`configurable `. -* network: added a :ref:`transport_socket_connect_timeout config field ` for incoming connections completing transport-level negotiation, including TLS and ALTS hanshakes. -* overload: added :ref:`envoy.overload_actions.reduce_timeouts ` overload action to enable scaling timeouts down with load. Scaling support :ref:`is limited ` to the HTTP connection and stream idle timeouts. -* ratelimit: added support for use of various :ref:`metadata ` as a ratelimit action. -* ratelimit: added :ref:`disable_x_envoy_ratelimited_header ` option to disable `X-Envoy-RateLimited` header. -* ratelimit: added :ref:`body ` field to support custom response bodies for non-OK responses from the external ratelimit service. -* ratelimit: added :ref:`descriptor extensions `. -* ratelimit: added :ref:`computed descriptors `. -* ratelimit: added :ref:`dynamic_metadata ` field to support setting dynamic metadata from the ratelimit service. -* router: added support for regex rewrites during HTTP redirects using :ref:`regex_rewrite `. -* sds: improved support for atomic :ref:`key rotations ` and added configurable rotation triggers for - :ref:`TlsCertificate ` and - :ref:`CertificateValidationContext `. -* signal: added an extension point for custom actions to run on the thread that has encountered a fatal error. 
Actions are configurable via :ref:`fatal_actions `. -* start_tls: added new :ref:`transport socket ` which starts in clear-text but may programmatically be converted to use TLS. -* tcp: added a new :ref:`envoy.overload_actions.reject_incoming_connections ` action to reject incoming TCP connections. -* thrift_proxy: added a new :ref:`payload_passthrough ` option to skip decoding body in the Thrift message. +* lua: added `downstreamDirectRemoteAddress()` and `downstreamLocalAddress()` APIs to :ref:`streamInfo() `. +* mongo_proxy: the list of commands to produce metrics for is now :ref:`configurable `. +* network: added a :ref:`transport_socket_connect_timeout config field ` for incoming connections completing transport-level negotiation, including TLS and ALTS handshakes. +* overload: added :ref:`envoy.overload_actions.reduce_timeouts ` overload action to enable scaling timeouts down with load. Scaling support :ref:`is limited ` to the HTTP connection and stream idle timeouts. +* ratelimit: added support for use of various :ref:`metadata ` as a ratelimit action. +* ratelimit: added :ref:`disable_x_envoy_ratelimited_header ` option to disable `X-Envoy-RateLimited` header. +* ratelimit: added :ref:`body ` field to support custom response bodies for non-OK responses from the external ratelimit service. +* ratelimit: added :ref:`descriptor extensions `. +* ratelimit: added :ref:`computed descriptors `. +* ratelimit: added :ref:`dynamic_metadata ` field to support setting dynamic metadata from the ratelimit service. +* router: added support for regex rewrites during HTTP redirects using :ref:`regex_rewrite `. +* sds: improved support for atomic :ref:`key rotations ` and added configurable rotation triggers for + :ref:`TlsCertificate ` and + :ref:`CertificateValidationContext `. +* signal: added an extension point for custom actions to run on the thread that has encountered a fatal error. Actions are configurable via :ref:`fatal_actions `. +* start_tls: added new :ref:`transport socket ` which starts in clear-text but may programmatically be converted to use TLS. +* tcp: added a new :ref:`envoy.overload_actions.reject_incoming_connections ` action to reject incoming TCP connections. +* thrift_proxy: added a new :ref:`payload_passthrough ` option to skip decoding body in the Thrift message. * tls: added support for RSA certificates with 4096-bit keys in FIPS mode. -* tracing: added :ref:`SkyWalking tracer `. -* tracing: added support for setting the hostname used when sending spans to a Zipkin collector using the :ref:`collector_hostname ` field. -* xds: added support for resource TTLs. A TTL is specified on the :ref:`Resource `. For SotW, a :ref:`Resource ` can be embedded in the list of resources to specify the TTL. +* tracing: added :ref:`SkyWalking tracer `. +* tracing: added support for setting the hostname used when sending spans to a Zipkin collector using the :ref:`collector_hostname ` field. +* xds: added support for resource TTLs. A TTL is specified on the :ref:`Resource `. For SotW, a :ref:`Resource ` can be embedded in the list of resources to specify the TTL. .. _1_17_deprecated: Deprecated ---------- -* cluster: HTTP configuration for upstream clusters has been reworked. HTTP-specific configuration is now done in the new :ref:`http_protocol_options ` message, configured via the cluster's :ref:`extension_protocol_options `.
This replaces explicit HTTP configuration in cluster config, including :ref:`upstream_http_protocol_options ` :ref:`common_http_protocol_options ` :ref:`http_protocol_options ` :ref:`http2_protocol_options ` and :ref:`protocol_selection `. Examples of before-and-after configuration can be found in the :ref:`http_protocol_options docs ` and all of Envoy's example configurations have been updated to the new style of config. -* compression: the fields :ref:`content_length `, :ref:`content_type `, :ref:`disable_on_etag_header `, :ref:`remove_accept_encoding_header ` and :ref:`runtime_enabled ` of the :ref:`Compressor ` message have been deprecated in favor of :ref:`response_direction_config `. -* formatter: :ref:`text_format ` is now deprecated in favor of :ref:`text_format_source `. To migrate existing text format strings, use the :ref:`inline_string ` field. -* gzip: :ref:`HTTP Gzip filter ` is rejected now unless explicitly allowed with :ref:`runtime override ` ``envoy.deprecated_features.allow_deprecated_gzip_http_filter`` set to `true`. Use the :ref:`compressor filter `. -* listener: :ref:`use_proxy_proto ` has been deprecated in favor of adding a :ref:`PROXY protocol listener filter ` explicitly. +* cluster: HTTP configuration for upstream clusters has been reworked. HTTP-specific configuration is now done in the new :ref:`http_protocol_options ` message, configured via the cluster's :ref:`extension_protocol_options `. This replaces explicit HTTP configuration in cluster config, including :ref:`upstream_http_protocol_options ` :ref:`common_http_protocol_options ` :ref:`http_protocol_options ` :ref:`http2_protocol_options ` and :ref:`protocol_selection `. Examples of before-and-after configuration can be found in the :ref:`http_protocol_options docs ` and all of Envoy's example configurations have been updated to the new style of config. +* compression: the fields :ref:`content_length `, :ref:`content_type `, :ref:`disable_on_etag_header `, :ref:`remove_accept_encoding_header ` and :ref:`runtime_enabled ` of the :ref:`Compressor ` message have been deprecated in favor of :ref:`response_direction_config `. +* formatter: :ref:`text_format ` is now deprecated in favor of :ref:`text_format_source `. To migrate existing text format strings, use the :ref:`inline_string ` field. +* gzip: :ref:`HTTP Gzip filter ` is rejected now unless explicitly allowed with :ref:`runtime override ` ``envoy.deprecated_features.allow_deprecated_gzip_http_filter`` set to `true`. Use the :ref:`compressor filter `. +* listener: :ref:`use_proxy_proto ` has been deprecated in favor of adding a :ref:`PROXY protocol listener filter ` explicitly. * logging: the ``--log-format-prefix-with-location`` option is removed. -* ratelimit: the :ref:`dynamic metadata ` action is deprecated in favor of the more generic :ref:`metadata ` action. +* ratelimit: the :ref:`dynamic metadata ` action is deprecated in favor of the more generic :ref:`metadata ` action. * stats: the ``--use-fake-symbol-table`` option is removed. -* tracing: OpenCensus :ref:`Zipkin configuration ` is now deprecated, the preferred Zipkin export is via Envoy's :ref:`native Zipkin tracer `. +* tracing: OpenCensus :ref:`Zipkin configuration ` is now deprecated, the preferred Zipkin export is via Envoy's :ref:`native Zipkin tracer `. 
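To make the cluster HTTP configuration rework described above concrete, here is a minimal sketch of the new-style config, assuming a hypothetical cluster and endpoint name (``some_service``, ``some-service.example.com``); the http_protocol_options docs referenced above remain the authoritative form:

.. code-block:: yaml

    # New style: HTTP settings live in the HttpProtocolOptions extension rather
    # than in cluster-level fields such as http2_protocol_options.
    clusters:
    - name: some_service
      connect_timeout: 5s
      type: STRICT_DNS
      load_assignment:
        cluster_name: some_service
        endpoints:
        - lb_endpoints:
          - endpoint:
              address:
                socket_address: { address: some-service.example.com, port_value: 8080 }
      typed_extension_protocol_options:
        envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
          "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions
          explicit_http_config:
            http2_protocol_options: {}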
diff --git a/docs/root/version_history/v1.17.1.rst b/docs/root/version_history/v1.17.1.rst index 35429d90da7b..c5caa5767371 100644 --- a/docs/root/version_history/v1.17.1.rst +++ b/docs/root/version_history/v1.17.1.rst @@ -18,7 +18,7 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ diff --git a/docs/root/version_history/v1.17.2.rst b/docs/root/version_history/v1.17.2.rst index 570447c67a04..07a9947e1e9f 100644 --- a/docs/root/version_history/v1.17.2.rst +++ b/docs/root/version_history/v1.17.2.rst @@ -13,14 +13,14 @@ Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* -* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. +* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. * http: fixed a remotely exploitable integer overflow via a very large grpc-timeout value causes undefined behavior. * http: reverting a behavioral change where upstream connect timeouts were temporarily treated differently from other connection failures. The change back to the original behavior can be temporarily reverted by setting ``envoy.reloadable_features.treat_upstream_connect_timeout_as_connect_failure`` to false. * tls: fix a crash when peer sends a TLS Alert with an unknown code. Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ diff --git a/docs/root/version_history/v1.17.3.rst b/docs/root/version_history/v1.17.3.rst index 95fff3704c8e..ee7fdd27fa72 100644 --- a/docs/root/version_history/v1.17.3.rst +++ b/docs/root/version_history/v1.17.3.rst @@ -15,11 +15,11 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ -* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action ` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling ` runtime variable. +* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action ` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling ` runtime variable. 
Deprecated ---------- diff --git a/docs/root/version_history/v1.17.4.rst b/docs/root/version_history/v1.17.4.rst index 2535eb913a57..1ef66c482868 100644 --- a/docs/root/version_history/v1.17.4.rst +++ b/docs/root/version_history/v1.17.4.rst @@ -27,7 +27,7 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ diff --git a/docs/root/version_history/v1.18.0.rst b/docs/root/version_history/v1.18.0.rst index 70b32883e77c..254c4cbab0ac 100644 --- a/docs/root/version_history/v1.18.0.rst +++ b/docs/root/version_history/v1.18.0.rst @@ -6,38 +6,38 @@ Incompatible Behavior Changes *Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* * config: the v2 xDS API is no longer supported by the Envoy binary. -* grpc_stats: the default value for :ref:`stats_for_all_methods ` is switched from true to false, in order to avoid possible memory exhaustion due to an untrusted downstream sending a large number of unique method names. The previous default value was deprecated in version 1.14.0. This only changes the behavior when the value is not set. The previous behavior can be used by setting the value to true. This behavior change by be overridden by setting runtime feature ``envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default``. +* grpc_stats: the default value for :ref:`stats_for_all_methods ` is switched from true to false, in order to avoid possible memory exhaustion due to an untrusted downstream sending a large number of unique method names. The previous default value was deprecated in version 1.14.0. This only changes the behavior when the value is not set. The previous behavior can be used by setting the value to true. This behavior change by be overridden by setting runtime feature ``envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default``. * http: fixing a standards compliance issue with :scheme. The :scheme header sent upstream is now based on the original URL scheme, rather than set based on the security of the upstream connection. This behavior can be temporarily reverted by setting ``envoy.reloadable_features.preserve_downstream_scheme`` to false. * http: http3 is now enabled/disabled via build option ``--define http3=disabled`` rather than the extension framework. The behavior is the same, but builds may be affected for platforms or build configurations where http3 is not supported. -* http: resolving inconsistencies between :scheme and X-Forwarded-Proto. :scheme will now be set for all HTTP/1.1 requests. This changes the behavior of the gRPC access logger, Wasm filters, CSRF filter and oath2 filter for HTTP/1 traffic, where :scheme was previously not set. This change also validates that for front-line Envoys (Envoys configured with :ref:`xff_num_trusted_hops ` set to 0 and :ref:`use_remote_address ` set to true) that HTTP/1.1 https schemed requests can not be sent over non-TLS connections. All behavioral changes listed here can be temporarily reverted by setting ``envoy.reloadable_features.add_and_validate_scheme_header`` to false. +* http: resolving inconsistencies between :scheme and X-Forwarded-Proto. :scheme will now be set for all HTTP/1.1 requests. This changes the behavior of the gRPC access logger, Wasm filters, CSRF filter and oath2 filter for HTTP/1 traffic, where :scheme was previously not set. 
This change also validates that for front-line Envoys (Envoys configured with :ref:`xff_num_trusted_hops ` set to 0 and :ref:`use_remote_address ` set to true) that HTTP/1.1 https schemed requests can not be sent over non-TLS connections. All behavioral changes listed here can be temporarily reverted by setting ``envoy.reloadable_features.add_and_validate_scheme_header`` to false. * http: when a protocol error is detected in response from upstream, Envoy sends 502 BadGateway downstream and access log entry contains UPE flag. This behavior change can be overwritten to use error code 503 by setting ``envoy.reloadable_features.return_502_for_upstream_protocol_errors`` to false. Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* -* access_logs: change command operator %UPSTREAM_CLUSTER% to resolve to :ref:`alt_stat_name ` if provided. This behavior can be reverted by disabling the runtime feature ``envoy.reloadable_features.use_observable_cluster_name``. +* access_logs: change command operator %UPSTREAM_CLUSTER% to resolve to :ref:`alt_stat_name ` if provided. This behavior can be reverted by disabling the runtime feature ``envoy.reloadable_features.use_observable_cluster_name``. * access_logs: fix substition formatter to recognize commands ending with an integer such as DOWNSTREAM_PEER_FINGERPRINT_256. * access_logs: set the error flag ``NC`` for ``no cluster found`` instead of ``NR`` if the route is found but the corresponding cluster is not available. -* admin: added :ref:`observability_name ` information to GET /clusters?format=json :ref:`cluster status `. -* dns: both the :ref:`strict DNS ` and - :ref:`logical DNS ` cluster types now honor the - :ref:`hostname ` field if not empty. +* admin: added :ref:`observability_name ` information to GET /clusters?format=json :ref:`cluster status `. +* dns: both the :ref:`strict DNS ` and + :ref:`logical DNS ` cluster types now honor the + :ref:`hostname ` field if not empty. Previously resolved hosts would have their hostname set to the configured DNS address for use with - logging, :ref:`auto_host_rewrite `, etc. + logging, :ref:`auto_host_rewrite `, etc. Setting the hostname manually allows overriding the internal hostname used for such features while still allowing the original DNS resolution name to be used. * grpc_json_transcoder: the filter now adheres to encoder and decoder buffer limits. Requests and responses that require buffering over the limits will be directly rejected. The behavior can be reverted by disabling runtime feature ``envoy.reloadable_features.grpc_json_transcoder_adhere_to_buffer_limits``. - To reduce or increase the buffer limits the filter adheres to, reference the :ref:`flow control documentation `. -* hds: support custom health check port via :ref:`health_check_config `. -* healthcheck: the :ref:`health check filter ` now sends the - :ref:`x-envoy-immediate-health-check-fail ` header + To reduce or increase the buffer limits the filter adheres to, reference the :ref:`flow control documentation `. +* hds: support custom health check port via :ref:`health_check_config `. +* healthcheck: the :ref:`health check filter ` now sends the + :ref:`x-envoy-immediate-health-check-fail ` header for all responses when Envoy is in the health check failed state. 
Additionally, receiving the - :ref:`x-envoy-immediate-health-check-fail ` - header (either in response to normal traffic or in response to an HTTP :ref:`active health check `) will - cause Envoy to immediately :ref:`exclude ` the host from + :ref:`x-envoy-immediate-health-check-fail ` + header (either in response to normal traffic or in response to an HTTP :ref:`active health check `) will + cause Envoy to immediately :ref:`exclude ` the host from load balancing calculations. This has the useful property that such hosts, which are being explicitly told to disable traffic, will not be counted for panic routing calculations. See the excluded documentation for more information. This behavior can be temporarily reverted by setting @@ -58,12 +58,12 @@ Minor Behavior Changes initial HEADERS frame for the new stream. Before the counter was incremented when Envoy received response HEADERS frame with the END_HEADERS flag set from upstream server. * lua: added function ``timestamp`` to provide millisecond resolution timestamps by passing in ``EnvoyTimestampResolution.MILLISECOND``. -* oauth filter: added the optional parameter :ref:`auth_scopes ` with default value of 'user' if not provided. This allows this value to be overridden in the Authorization request to the OAuth provider. +* oauth filter: added the optional parameter :ref:`auth_scopes ` with default value of 'user' if not provided. This allows this value to be overridden in the Authorization request to the OAuth provider. * perf: allow reading more bytes per operation from raw sockets to improve performance. -* router: extended custom date formatting to DOWNSTREAM_PEER_CERT_V_START and DOWNSTREAM_PEER_CERT_V_END when using :ref:`custom request/response header formats `. +* router: extended custom date formatting to DOWNSTREAM_PEER_CERT_V_START and DOWNSTREAM_PEER_CERT_V_END when using :ref:`custom request/response header formats `. * router: made the path rewrite available without finalizing headers, so the filter could calculate the current value of the final url. -* tracing: added ``upstream_cluster.name`` tag that resolves to :ref:`alt_stat_name ` if provided (and otherwise the cluster name). -* udp: configuration has been added for :ref:`GRO ` +* tracing: added ``upstream_cluster.name`` tag that resolves to :ref:`alt_stat_name ` if provided (and otherwise the cluster name). +* udp: configuration has been added for :ref:`GRO ` which used to be force enabled if the OS supports it. The default is now disabled for server sockets and enabled for client sockets (see the new features section for links). * upstream: host weight changes now cause a full load balancer rebuild as opposed to happening @@ -80,9 +80,9 @@ Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* * active http health checks: properly handles HTTP/2 GOAWAY frames from the upstream. Previously a GOAWAY frame due to a graceful listener drain could cause improper failed health checks due to streams being refused by the upstream on a connection that is going away. To revert to old GOAWAY handling behavior, set the runtime feature ``envoy.reloadable_features.health_check.graceful_goaway_handling`` to false. * adaptive concurrency: fixed a bug where concurrent requests on different worker threads could update minRTT back-to-back. * buffer: tighten network connection read and write buffer high watermarks in preparation for more careful enforcement of read limits. Buffer high-watermark is now set to the exact configured value; previously it was set to value + 1.
-* cdn_loop: check that the entirety of the :ref:`cdn_id ` field is a valid CDN identifier. +* cdn_loop: check that the entirety of the :ref:`cdn_id ` field is a valid CDN identifier. * cds: fix blocking the update for a warming cluster when the update is the same as the active version. -* ext_authz: emit :ref:`CheckResponse.dynamic_metadata ` when the external authorization response has "Denied" check status. +* ext_authz: emit :ref:`CheckResponse.dynamic_metadata ` when the external authorization response has "Denied" check status. * fault injection: stop counting as active fault after delay elapsed. Previously the fault injection filter continued to count the injected delay as an active fault even after it has elapsed. This produces incorrect output statistics and impacts the max number of consecutive faults allowed (e.g., for long-lived streams). This change decreases the active fault count when the delay fault is the only active fault and has finished. * filter_chain: fix filter chain matching to treat the server name as case-insensitive. * grpc-web: fix local reply and non-proto-encoded gRPC response handling for small response bodies. This fix can be temporarily reverted by setting ``envoy.reloadable_features.grpc_web_fix_non_proto_encoded_response_handling`` to false. @@ -91,8 +91,8 @@ Bug Fixes * http: avoid grpc-status overwrite when sending local replies if that field has already been set. * http: disallowing "host:" in request_headers_to_add for behavioral consistency with rejecting :authority header. This behavior can be temporarily reverted by setting ``envoy.reloadable_features.treat_host_like_authority`` to false. * http: fixed an issue where Envoy did not handle peer stream limits correctly, and queued streams in nghttp2 rather than establish new connections. This behavior can be temporarily reverted by setting ``envoy.reloadable_features.improved_stream_limit_handling`` to false. -* http: fixed a bug where setting :ref:`MaxStreamDuration proto ` did not disable legacy timeout defaults. -* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. +* http: fixed a bug where setting :ref:`MaxStreamDuration proto ` did not disable legacy timeout defaults. +* http: fixed a crash upon receiving empty HTTP/2 metadata frames. Received empty metadata frames are now counted in the HTTP/2 codec stat :ref:`metadata_empty_frames `. * http: fixed a remotely exploitable integer overflow via a very large grpc-timeout value causes undefined behavior. * http: reverting a behavioral change where upstream connect timeouts were temporarily treated differently from other connection failures. The change back to the original behavior can be temporarily reverted by setting ``envoy.reloadable_features.treat_upstream_connect_timeout_as_connect_failure`` to false. * jwt_authn: reject requests with a proper error if JWT has the wrong issuer when allow_missing is used. Before this change, the requests are accepted. @@ -109,7 +109,7 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` * access_logs: removed legacy unbounded access logs and runtime guard ``envoy.reloadable_features.disallow_unbounded_access_logs``. * dns: removed legacy buggy wildcard matching path and runtime guard ``envoy.reloadable_features.fix_wildcard_matching``.
@@ -127,68 +127,68 @@ Removed Config or Runtime New Features ------------ -* access log: added a new :ref:`OpenTelemetry access logger ` extension, allowing a flexible log structure with native Envoy access log formatting. +* access log: added a new :ref:`OpenTelemetry access logger ` extension, allowing a flexible log structure with native Envoy access log formatting. * access log: added the new response flag ``NC`` for upstream cluster not found. The error flag is set when the http or tcp route is found for the request but the cluster is not available. -* access log: added the :ref:`formatters ` extension point for custom formatters (command operators). -* access log: added support for cross platform writing to :ref:`standard output ` and :ref:`standard error `. +* access log: added the :ref:`formatters ` extension point for custom formatters (command operators). +* access log: added support for cross platform writing to :ref:`standard output ` and :ref:`standard error `. * access log: support command operator: %FILTER_CHAIN_NAME% for the downstream tcp and http request. * access log: support command operator: %REQUEST_HEADERS_BYTES%, %RESPONSE_HEADERS_BYTES%, and %RESPONSE_TRAILERS_BYTES%. -* admin: added support for :ref:`access loggers ` to the admin interface. -* composite filter: added new :ref:`composite filter ` that can be used to instantiate different filter configurations based on matching incoming data. -* compression: add brotli :ref:`compressor ` and :ref:`decompressor `. +* admin: added support for :ref:`access loggers ` to the admin interface. +* composite filter: added new :ref:`composite filter ` that can be used to instantiate different filter configurations based on matching incoming data. +* compression: add brotli :ref:`compressor ` and :ref:`decompressor `. * compression: extended the compression filter to allow compressing when the content length header is not present. This behavior may be temporarily reverted by setting ``envoy.reloadable_features.enable_compression_without_content_length_header`` to false. * config: add ``envoy.features.fail_on_any_deprecated_feature`` runtime key, which matches the behaviour of compile-time flag ``ENVOY_DISABLE_DEPRECATED_FEATURES``, i.e. use of deprecated fields will cause a crash. -* config: the ``Node`` :ref:`dynamic context parameters ` are populated in discovery requests when set on the server instance. +* config: the ``Node`` :ref:`dynamic context parameters ` are populated in discovery requests when set on the server instance. * dispatcher: supports a stack of ``Envoy::ScopeTrackedObject`` instead of a single tracked object. This will allow Envoy to dump more debug information on crash. -* ext_authz: added :ref:`response_headers_to_add ` to support sending response headers to downstream clients on OK authorization checks via gRPC. -* ext_authz: added :ref:`allowed_client_headers_on_success ` to support sending response headers to downstream clients on OK external authorization checks via HTTP. -* grpc_json_transcoder: added :ref:`request_validation_options ` to reject invalid requests early. +* ext_authz: added :ref:`response_headers_to_add ` to support sending response headers to downstream clients on OK authorization checks via gRPC. +* ext_authz: added :ref:`allowed_client_headers_on_success ` to support sending response headers to downstream clients on OK external authorization checks via HTTP. +* grpc_json_transcoder: added :ref:`request_validation_options ` to reject invalid requests early.
* grpc_json_transcoder: filter can now be configured on per-route/per-vhost level as well. Leaving an empty list of services in the filter configuration disables transcoding on the specific route. * http: added support for ``Envoy::ScopeTrackedObject`` for HTTP/1 and HTTP/2 dispatching. Crashes while inside the dispatching loop should dump debug information. Furthermore, HTTP/1 and HTTP/2 clients now dump the originating request whose response from the upstream caused Envoy to crash. -* http: added support for :ref:`preconnecting `. Preconnecting is off by default, but recommended for clusters serving latency-sensitive traffic, especially if using HTTP/1.1. -* http: added support for stream filters to mutate the cached route set by HCM route resolution. Useful for filters in a filter chain that want to override specific methods/properties of a route. See :ref:`http route mutation ` docs for more information. +* http: added support for :ref:`preconnecting `. Preconnecting is off by default, but recommended for clusters serving latency-sensitive traffic, especially if using HTTP/1.1. +* http: added support for stream filters to mutate the cached route set by HCM route resolution. Useful for filters in a filter chain that want to override specific methods/properties of a route. See :ref:`http route mutation ` docs for more information. * http: added new runtime config ``envoy.reloadable_features.check_unsupported_typed_per_filter_config``, the default value is true. When the value is true, envoy will reject virtual host-specific typed per filter config when the filter doesn't support it. -* http: added the ability to preserve HTTP/1 header case across the proxy. See the :ref:`header casing ` documentation for more information. +* http: added the ability to preserve HTTP/1 header case across the proxy. See the :ref:`header casing ` documentation for more information. * http: change frame flood and abuse checks to the upstream HTTP/2 codec to ON by default. It can be disabled by setting the ``envoy.reloadable_features.upstream_http2_flood_checks`` runtime key to false. -* http: hash multiple header values instead of only hashing the first header value. It can be disabled by setting the ``envoy.reloadable_features.hash_multiple_header_values`` runtime key to false. See the :ref:`HashPolicy's Header configuration ` for more information. +* http: hash multiple header values instead of only hashing the first header value. It can be disabled by setting the ``envoy.reloadable_features.hash_multiple_header_values`` runtime key to false. See the :ref:`HashPolicy's Header configuration ` for more information. * json: introduced new JSON parser (https://github.com/nlohmann/json) to replace RapidJSON. The new parser is disabled by default. To test the new parser, enable the runtime feature ``envoy.reloadable_features.remove_legacy_json``. -* kill_request: :ref:`Kill Request ` now supports bidirectional killing. -* listener: added an optional :ref:`stat_prefix `. -* loadbalancer: added the ability to specify the hash_key for a host when using a consistent hashing loadbalancer (ringhash, maglev) using the :ref:`LbEndpoint.Metadata ` e.g.: ``"envoy.lb": {"hash_key": "..."}``. +* kill_request: :ref:`Kill Request ` now supports bidirectional killing. +* listener: added an optional :ref:`stat_prefix `. +* loadbalancer: added the ability to specify the hash_key for a host when using a consistent hashing loadbalancer (ringhash, maglev) using the :ref:`LbEndpoint.Metadata ` e.g.: ``"envoy.lb": {"hash_key": "..."}``.
* log: added a new custom flag ``%j`` to the log pattern to print the actual message to log as JSON escaped string. -* oauth filter: added the optional parameter :ref:`resources `. Set this value to add multiple "resource" parameters in the Authorization request sent to the OAuth provider. This acts as an identifier representing the protected resources the client is requesting a token for. -* original_dst: added support for :ref:`Original Destination ` on Windows. This enables the use of Envoy as a sidecar proxy on Windows. -* overload: add support for scaling :ref:`transport connection timeouts `. This can be used to reduce the TLS handshake timeout in response to overload. -* postgres: added ability to :ref:`terminate SSL `. -* rbac: added :ref:`shadow_rules_stat_prefix ` to allow adding custom prefix to the stats emitted by shadow rules. -* route config: added :ref:`allow_post field ` for allowing POST payload as raw TCP. -* route config: added :ref:`max_direct_response_body_size_bytes ` to set maximum :ref:`direct response body ` size in bytes. If not specified the default remains 4096 bytes. -* server: added *fips_mode* to :ref:`server compilation settings ` related statistic. +* oauth filter: added the optional parameter :ref:`resources `. Set this value to add multiple "resource" parameters in the Authorization request sent to the OAuth provider. This acts as an identifier representing the protected resources the client is requesting a token for. +* original_dst: added support for :ref:`Original Destination ` on Windows. This enables the use of Envoy as a sidecar proxy on Windows. +* overload: add support for scaling :ref:`transport connection timeouts `. This can be used to reduce the TLS handshake timeout in response to overload. +* postgres: added ability to :ref:`terminate SSL `. +* rbac: added :ref:`shadow_rules_stat_prefix ` to allow adding custom prefix to the stats emitted by shadow rules. +* route config: added :ref:`allow_post field ` for allowing POST payload as raw TCP. +* route config: added :ref:`max_direct_response_body_size_bytes ` to set maximum :ref:`direct response body ` size in bytes. If not specified the default remains 4096 bytes. +* server: added *fips_mode* to :ref:`server compilation settings ` related statistic. * server: added :option:`--enable-core-dump` flag to enable core dumps via prctl (Linux-based systems only). -* tcp_proxy: add support for converting raw TCP streams into HTTP/1.1 CONNECT requests. See :ref:`upgrade documentation ` for details. -* tcp_proxy: added a :ref:`use_post field ` for using HTTP POST to proxy TCP streams. -* tcp_proxy: added a :ref:`headers_to_add field ` for setting additional headers to the HTTP requests for TCP proxing. -* thrift_proxy: added a :ref:`max_requests_per_connection field ` for setting maximum requests for per downstream connection. -* thrift_proxy: added per upstream metrics within the :ref:`thrift router ` for messagetype counters in request/response. -* thrift_proxy: added per upstream metrics within the :ref:`thrift router ` for request time histograms. -* tls peer certificate validation: added :ref:`SPIFFE validator ` for supporting isolated multiple trust bundles in a single listener or cluster. -* tracing: added the :ref:`pack_trace_reason ` - field as well as explicit configuration for the built-in :ref:`UuidRequestIdConfig ` +* tcp_proxy: add support for converting raw TCP streams into HTTP/1.1 CONNECT requests. See :ref:`upgrade documentation ` for details. 
+* tcp_proxy: added a :ref:`use_post field ` for using HTTP POST to proxy TCP streams. +* tcp_proxy: added a :ref:`headers_to_add field ` for setting additional headers to the HTTP requests for TCP proxying. +* thrift_proxy: added a :ref:`max_requests_per_connection field ` for setting maximum requests per downstream connection. +* thrift_proxy: added per upstream metrics within the :ref:`thrift router ` for messagetype counters in request/response. +* thrift_proxy: added per upstream metrics within the :ref:`thrift router ` for request time histograms. +* tls peer certificate validation: added :ref:`SPIFFE validator ` for supporting isolated multiple trust bundles in a single listener or cluster. +* tracing: added the :ref:`pack_trace_reason ` + field as well as explicit configuration for the built-in :ref:`UuidRequestIdConfig ` request ID implementation. See the trace context propagation :ref:`architecture overview - ` for more information. -* udp: added :ref:`downstream ` and - :ref:`upstream ` statistics for dropped datagrams. -* udp: added :ref:`downstream_socket_config ` + ` for more information. +* udp: added :ref:`downstream ` and + :ref:`upstream ` statistics for dropped datagrams. +* udp: added :ref:`downstream_socket_config ` listener configuration to allow configuration of downstream max UDP datagram size. Also added - :ref:`upstream_socket_config ` + :ref:`upstream_socket_config ` UDP proxy configuration to allow configuration of upstream max UDP datagram size. The defaults for both remain 1500 bytes. * udp: added configuration for :ref:`GRO - `. The default is disabled for - :ref:`downstream sockets ` - and enabled for :ref:`upstream sockets `. + `. The default is disabled for + :ref:`downstream sockets ` + and enabled for :ref:`upstream sockets `. Deprecated ---------- -* admin: :ref:`access_log_path ` is deprecated in favor of :ref:`access loggers `. +* admin: :ref:`access_log_path ` is deprecated in favor of :ref:`access loggers `. diff --git a/docs/root/version_history/v1.18.3.rst b/docs/root/version_history/v1.18.3.rst index 509d0ebc7cf0..99650a6db06f 100644 --- a/docs/root/version_history/v1.18.3.rst +++ b/docs/root/version_history/v1.18.3.rst @@ -17,14 +17,14 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` * tls: removed `envoy.reloadable_features.tls_use_io_handle_bio` runtime guard and legacy code path. New Features ------------ -* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action ` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling ` runtime variable. +* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action ` runtime variable.
This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling ` runtime variable. Deprecated ---------- diff --git a/docs/root/version_history/v1.18.4.rst b/docs/root/version_history/v1.18.4.rst index 9e66511ff807..5f107c7f96aa 100644 --- a/docs/root/version_history/v1.18.4.rst +++ b/docs/root/version_history/v1.18.4.rst @@ -9,7 +9,7 @@ Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* -* http: disable the integration between :ref:`ExtensionWithMatcher ` +* http: disable the integration between :ref:`ExtensionWithMatcher ` and HTTP filters by default to reflects its experimental status. This feature can be enabled by seting `envoy.reloadable_features.experimental_matching_api` to true. * http: reject requests with #fragment in the URI path. The fragment is not allowed to be part of request @@ -31,7 +31,7 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` New Features ------------ diff --git a/docs/root/version_history/v1.19.0.rst b/docs/root/version_history/v1.19.0.rst index 581a535a17df..16f8c8b86068 100644 --- a/docs/root/version_history/v1.19.0.rst +++ b/docs/root/version_history/v1.19.0.rst @@ -15,7 +15,7 @@ Minor Behavior Changes * access_log: added new access_log command operator ``%REQUEST_TX_DURATION%``. * access_log: removed extra quotes on metadata string values. This behavior can be temporarily reverted by setting ``envoy.reloadable_features.unquote_log_string_values`` to false. -* admission control: added :ref:`max_rejection_probability ` which defaults to 80%, which means that the upper limit of the default rejection probability of the filter is changed from 100% to 80%. +* admission control: added :ref:`max_rejection_probability ` which defaults to 80%, which means that the upper limit of the default rejection probability of the filter is changed from 100% to 80%. * aws_request_signing: requests are now buffered by default to compute signatures which include the payload hash, making the filter compatible with most AWS services. Previously, requests were never buffered, which only produced correct signatures for requests without a body, or for @@ -23,10 +23,10 @@ Minor Behavior Changes be now be disabled in favor of using unsigned payloads with compatible services via the new ``use_unsigned_payload`` filter option (default false). * cache filter: serve HEAD requests from cache. -* cluster: added default value of 5 seconds for :ref:`connect_timeout `. +* cluster: added default value of 5 seconds for :ref:`connect_timeout `. * dns: changed apple resolver implementation to not reuse the UDS to the local DNS daemon. -* dns cache: the new :ref:`dns_query_timeout ` option has a default of 5s. See below for more information. -* http: disable the integration between :ref:`ExtensionWithMatcher ` +* dns cache: the new :ref:`dns_query_timeout ` option has a default of 5s. See below for more information. +* http: disable the integration between :ref:`ExtensionWithMatcher ` and HTTP filters by default to reflect its experimental status. This feature can be enabled by setting ``envoy.reloadable_features.experimental_matching_api`` to true. 
* http: replaced setting ``envoy.reloadable_features.strict_1xx_and_204_response_headers`` with settings @@ -38,7 +38,7 @@ Minor Behavior Changes ``envoy.reloadable_features.no_chunked_encoding_header_for_304`` to false. * http: the behavior of the ``present_match`` in route header matcher changed. The value of ``present_match`` was ignored in the past. The new behavior is ``present_match`` is performed when the value is true. An absent match performed when the value is false. Please reference :ref:`present_match `. -* listener: respect the :ref:`connection balance config ` +* listener: respect the :ref:`connection balance config ` defined within the listener where the sockets are redirected to. Clear that field to restore the previous behavior. * listener: when balancing across active listeners and wildcard matching is used, the behavior has been changed to return the listener that matches the IP family type associated with the listener's socket address. Any unexpected behavioral changes can be reverted by setting runtime guard ``envoy.reloadable_features.listener_wildcard_match_ip_family`` to false. * tcp: switched to the new connection pool by default. Any unexpected behavioral changes can be reverted by setting runtime guard ``envoy.reloadable_features.new_tcp_connection_pool`` to false. @@ -49,7 +49,7 @@ Bug Fixes *Changes expected to improve the state of the world and are unlikely to have negative effects* * aws_lambda: if ``payload_passthrough`` is set to ``false``, the downstream response content-type header will now be set from the content-type entry in the JSON response's headers map, if present. -* cluster: fixed the :ref:`cluster stats ` histograms by moving the accounting into the router +* cluster: fixed the :ref:`cluster stats ` histograms by moving the accounting into the router filter. This means that we now properly compute the number of bytes sent as well as handling retries which were previously ignored. * hot_restart: fix double counting of ``server.seconds_until_first_ocsp_response_expiring`` and ``server.days_until_first_cert_expiring`` during hot-restart. This stat was only incorrect until the parent process terminated. * http: fix erroneous handling of invalid nghttp2 frames with the ``NGHTTP2_ERR_REFUSED_STREAM`` error. Prior to the fix, @@ -65,15 +65,15 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` * event: removed ``envoy.reloadable_features.activate_timers_next_event_loop`` runtime guard and legacy code path. * gzip: removed legacy HTTP Gzip filter and runtime guard ``envoy.deprecated_features.allow_deprecated_gzip_http_filter``. * http: removed ``envoy.reloadable_features.allow_500_after_100`` runtime guard and the legacy code path. * http: removed ``envoy.reloadable_features.always_apply_route_header_rules`` runtime guard and legacy code path. -* http: removed ``envoy.reloadable_features.hcm_stream_error_on_invalid_message`` for disabling closing HTTP/1.1 connections on error. Connection-closing can still be disabled by setting the HTTP/1 configuration :ref:`override_stream_error_on_invalid_http_message `. +* http: removed ``envoy.reloadable_features.hcm_stream_error_on_invalid_message`` for disabling closing HTTP/1.1 connections on error. Connection-closing can still be disabled by setting the HTTP/1 configuration :ref:`override_stream_error_on_invalid_http_message `. 
* http: removed ``envoy.reloadable_features.http_set_copy_replace_all_headers`` runtime guard and legacy code paths. -* http: removed ``envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2``; Envoy will now always send GOAWAY to HTTP2 downstreams when the :ref:`disable_keepalive ` overload action is active. +* http: removed ``envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2``; Envoy will now always send GOAWAY to HTTP2 downstreams when the :ref:`disable_keepalive ` overload action is active. * http: removed ``envoy.reloadable_features.http_match_on_all_headers`` runtime guard and legacy code paths. * http: removed ``envoy.reloadable_features.unify_grpc_handling`` runtime guard and legacy code paths. * tls: removed ``envoy.reloadable_features.tls_use_io_handle_bio`` runtime guard and legacy code path. @@ -81,62 +81,62 @@ Removed Config or Runtime New Features ------------ -* access_log: added the new response flag for :ref:`overload manager termination `. The response flag will be set when the http stream is terminated by overload manager. -* admission control: added :ref:`rps_threshold ` option that when average RPS of the sampling window is below this threshold, the filter will not throttle requests. Added :ref:`max_rejection_probability ` option to set an upper limit on the probability of rejection. -* bandwidth_limit: added new :ref:`HTTP bandwidth limit filter `. -* bootstrap: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting ``no_default_search_domain`` to true the DNS resolver will not use the default search domains. By setting the ``resolvers`` the external DNS servers to be used for external DNS queries can be specified. -* cluster: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting ``no_default_search_domain`` to true the DNS resolver will not use the default search domains. -* cluster: added :ref:`host_rewrite_literal ` to WeightedCluster. -* cluster: added :ref:`wait_for_warm_on_init `, which allows cluster readiness to not block on cluster warm-up. It is true by default, which preserves existing behavior. Currently, only applicable for DNS-based clusters. +* access_log: added the new response flag for :ref:`overload manager termination `. The response flag will be set when the http stream is terminated by overload manager. +* admission control: added :ref:`rps_threshold ` option that when average RPS of the sampling window is below this threshold, the filter will not throttle requests. Added :ref:`max_rejection_probability ` option to set an upper limit on the probability of rejection. +* bandwidth_limit: added new :ref:`HTTP bandwidth limit filter `. +* bootstrap: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting ``no_default_search_domain`` to true the DNS resolver will not use the default search domains. By setting the ``resolvers`` the external DNS servers to be used for external DNS queries can be specified. +* cluster: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting ``no_default_search_domain`` to true the DNS resolver will not use the default search domains. +* cluster: added :ref:`host_rewrite_literal ` to WeightedCluster. +* cluster: added :ref:`wait_for_warm_on_init `, which allows cluster readiness to not block on cluster warm-up. 
It is true by default, which preserves existing behavior. Currently, only applicable for DNS-based clusters. * composite filter: can now be used with filters that also add an access logger, such as the WASM filter. -* config: added stat :ref:`config_reload_time_ms `. -* connection_limit: added new :ref:`Network connection limit filter `. +* config: added stat :ref:`config_reload_time_ms `. +* connection_limit: added new :ref:`Network connection limit filter `. * crash support: restore crash context when continuing to processing requests or responses as a result of an asynchronous callback that invokes a filter directly. This is unlike the call stacks that go through the various network layers, to eventually reach the filter. For a concrete example see: ``Envoy::Extensions::HttpFilters::Cache::CacheFilter::getHeaders`` which posts a callback on the dispatcher that will invoke the filter directly. -* dns cache: added :ref:`preresolve_hostnames ` option to the DNS cache config. This option allows hostnames to be preresolved into the cache upon cache creation. This might provide performance improvement, in the form of cache hits, for hostnames that are going to be resolved during steady state and are known at config load time. -* dns cache: added :ref:`dns_query_timeout ` option to the DNS cache config. This option allows explicitly controlling the timeout of underlying queries independently of the underlying DNS platform implementation. Coupled with success and failure retry policies the use of this timeout will lead to more deterministic DNS resolution times. -* dns resolver: added ``DnsResolverOptions`` protobuf message to reconcile all of the DNS lookup option flags. By setting the configuration option :ref:`use_tcp_for_dns_lookups ` as true we can make the underlying dns resolver library to make only TCP queries to the DNS servers and by setting the configuration option :ref:`no_default_search_domain ` as true the DNS resolver library will not use the default search domains. -* dns resolver: added ``DnsResolutionConfig`` to combine :ref:`dns_resolver_options ` and :ref:`resolvers ` in a single protobuf message. The field ``resolvers`` can be specified with a list of DNS resolver addresses. If specified, DNS client library will perform resolution via the underlying DNS resolvers. Otherwise, the default system resolvers (e.g., /etc/resolv.conf) will be used. -* dns_filter: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting the configuration option ``use_tcp_for_dns_lookups`` to true we can make dns filter's external resolvers to answer queries using TCP only, by setting the configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. And by setting the configuration ``resolvers`` we can specify the external DNS servers to be used for external DNS query which replaces the pre-existing alpha api field ``upstream_resolvers``. -* dynamic_forward_proxy: added :ref:`dns_resolution_config ` option to the DNS cache config in order to aggregate all of the DNS resolver configuration in a single message. By setting one such configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. And by setting the configuration ``resolvers`` we can specify the external DNS servers to be used for external DNS query instead of the system default resolvers. 
-* ext_authz_filter: added :ref:`bootstrap_metadata_labels_key ` option to configure labels of destination service. +* dns cache: added :ref:`preresolve_hostnames ` option to the DNS cache config. This option allows hostnames to be preresolved into the cache upon cache creation. This might provide performance improvement, in the form of cache hits, for hostnames that are going to be resolved during steady state and are known at config load time. +* dns cache: added :ref:`dns_query_timeout ` option to the DNS cache config. This option allows explicitly controlling the timeout of underlying queries independently of the underlying DNS platform implementation. Coupled with success and failure retry policies the use of this timeout will lead to more deterministic DNS resolution times. +* dns resolver: added ``DnsResolverOptions`` protobuf message to reconcile all of the DNS lookup option flags. By setting the configuration option :ref:`use_tcp_for_dns_lookups ` as true we can make the underlying dns resolver library to make only TCP queries to the DNS servers and by setting the configuration option :ref:`no_default_search_domain ` as true the DNS resolver library will not use the default search domains. +* dns resolver: added ``DnsResolutionConfig`` to combine :ref:`dns_resolver_options ` and :ref:`resolvers ` in a single protobuf message. The field ``resolvers`` can be specified with a list of DNS resolver addresses. If specified, DNS client library will perform resolution via the underlying DNS resolvers. Otherwise, the default system resolvers (e.g., /etc/resolv.conf) will be used. +* dns_filter: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting the configuration option ``use_tcp_for_dns_lookups`` to true we can make dns filter's external resolvers to answer queries using TCP only, by setting the configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. And by setting the configuration ``resolvers`` we can specify the external DNS servers to be used for external DNS query which replaces the pre-existing alpha api field ``upstream_resolvers``. +* dynamic_forward_proxy: added :ref:`dns_resolution_config ` option to the DNS cache config in order to aggregate all of the DNS resolver configuration in a single message. By setting one such configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. And by setting the configuration ``resolvers`` we can specify the external DNS servers to be used for external DNS query instead of the system default resolvers. +* ext_authz_filter: added :ref:`bootstrap_metadata_labels_key ` option to configure labels of destination service. * http: added new field ``is_optional`` to ``extensions.filters.network.http_connection_manager.v3.HttpFilter``. When set to ``true``, unsupported http filters will be ignored by envoy. This is also same with unsupported http filter in the typed per filter config. For more information, please reference - :ref:`HttpFilter `. -* http: added :ref:`scheme options ` for adding or overwriting scheme. -* http: added :ref:`stripping trailing host dot from host header ` support. -* http: added support for :ref:`original IP detection extensions `. - Two initial extensions were added, the :ref:`custom header ` extension and the - :ref:`xff ` extension. 
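The aggregated DNS settings described in several of the entries above share one message; a hedged cluster-level sketch (the resolver address, hostname and cluster name are examples only) might look like:

.. code-block:: yaml

  clusters:
  - name: example_service                    # example cluster
    type: STRICT_DNS
    connect_timeout: 5s
    dns_resolution_config:
      resolvers:
      - socket_address:
          address: 8.8.8.8                   # example external resolver
          port_value: 53
      dns_resolver_options:
        use_tcp_for_dns_lookups: true
        no_default_search_domain: true
    load_assignment:
      cluster_name: example_service
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: backend.example.com # example hostname resolved via the resolvers above
                port_value: 443

The bootstrap, DNS cache, DNS filter and dynamic forward proxy variants accept the same shape in their respective ``dns_resolution_config`` fields.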
-* http: added a new option to upstream HTTP/2 :ref:`keepalive ` to send a PING ahead of a new stream if the connection has been idle for a sufficient duration. -* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime variable. -* http: added upstream and downstream alpha HTTP/3 support! See :ref:`quic_options ` for downstream and the new http3_protocol_options in :ref:`http_protocol_options ` for upstream HTTP/3. + :ref:`HttpFilter `. +* http: added :ref:`scheme options ` for adding or overwriting scheme. +* http: added :ref:`stripping trailing host dot from host header ` support. +* http: added support for :ref:`original IP detection extensions `. + Two initial extensions were added, the :ref:`custom header ` extension and the + :ref:`xff ` extension. +* http: added a new option to upstream HTTP/2 :ref:`keepalive ` to send a PING ahead of a new stream if the connection has been idle for a sufficient duration. +* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime variable. +* http: added upstream and downstream alpha HTTP/3 support! See :ref:`quic_options ` for downstream and the new http3_protocol_options in :ref:`http_protocol_options ` for upstream HTTP/3. * http: raise max configurable max_request_headers_kb limit to 8192 KiB (8MiB) from 96 KiB in http connection manager. -* input matcher: added a new input matcher that :ref:`matches an IP address against a list of CIDR ranges `. -* jwt_authn: added support to fetch remote jwks asynchronously specified by :ref:`async_fetch `. -* jwt_authn: added support to add padding in the forwarded JWT payload specified by :ref:`pad_forward_payload_header `. +* input matcher: added a new input matcher that :ref:`matches an IP address against a list of CIDR ranges `. +* jwt_authn: added support to fetch remote jwks asynchronously specified by :ref:`async_fetch `. +* jwt_authn: added support to add padding in the forwarded JWT payload specified by :ref:`pad_forward_payload_header `. * listener: added ability to change an existing listener's address. -* listener: added filter chain match support for :ref:`direct source address `. -* local_rate_limit_filter: added suppoort for locally rate limiting http requests on a per connection basis. This can be enabled by setting the :ref:`local_rate_limit_per_downstream_connection ` field to true. -* metric service: added support for sending metric tags as labels. This can be enabled by setting the :ref:`emit_tags_as_labels ` field to true. -* proxy protocol: added support for generating the header while using the :ref:`HTTP connection manager `. This is done using the :ref:`Proxy Protocol Transport Socket ` on upstream clusters. 
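For the upstream HTTP/2 keepalive addition above, a hedged sketch of where the new idle-PING knob sits (durations are arbitrary; this assumes the cluster-level ``HttpProtocolOptions`` extension wrapper):

.. code-block:: yaml

  typed_extension_protocol_options:
    envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
      "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions
      explicit_http_config:
        http2_protocol_options:
          connection_keepalive:
            interval: 30s                 # periodic PING to verify the connection is alive
            timeout: 5s                   # drop the connection if no PING response arrives in time
            connection_idle_interval: 1s  # new: PING first when idle this long before a new stream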
+* listener: added filter chain match support for :ref:`direct source address `. +* local_rate_limit_filter: added support for locally rate limiting http requests on a per connection basis. This can be enabled by setting the :ref:`local_rate_limit_per_downstream_connection ` field to true. +* metric service: added support for sending metric tags as labels. This can be enabled by setting the :ref:`emit_tags_as_labels ` field to true. +* proxy protocol: added support for generating the header while using the :ref:`HTTP connection manager `. This is done using the :ref:`Proxy Protocol Transport Socket ` on upstream clusters. This feature is currently affected by a memory leak `issue `_. * req_without_query: added access log formatter extension implementing command operator :ref:`REQ_WITHOUT_QUERY ` to log the request path, while excluding the query string. * router: added option ``suppress_grpc_request_failure_code_stats`` to :ref:`the router ` to allow users to exclude incrementing HTTP status code stats on gRPC requests. -* stats: added native :ref:`Graphite-formatted tag ` support. -* tcp: added support for :ref:`preconnecting `. Preconnecting is off by default, but recommended for clusters serving latency-sensitive traffic. -* thrift_proxy: added per upstream metrics within the :ref:`thrift router ` for request and response size histograms. -* thrift_proxy: added support for :ref:`outlier detection `. +* stats: added native :ref:`Graphite-formatted tag ` support. +* tcp: added support for :ref:`preconnecting `. Preconnecting is off by default, but recommended for clusters serving latency-sensitive traffic. +* thrift_proxy: added per upstream metrics within the :ref:`thrift router ` for request and response size histograms. +* thrift_proxy: added support for :ref:`outlier detection `. * tls: allow dual ECDSA/RSA certs via SDS. Previously, SDS only supported a single certificate per context, and dual cert was only supported via non-SDS. -* tracing: add option :ref:`use_request_id_for_trace_sampling ` which allows configuring whether to perform sampling based on :ref:`x-request-id` or not. -* udp_proxy: added :ref:`key ` as another hash policy to support hash based routing on any given key. +* tracing: add option :ref:`use_request_id_for_trace_sampling ` which allows configuring whether to perform sampling based on :ref:`x-request-id` or not. +* udp_proxy: added :ref:`key ` as another hash policy to support hash based routing on any given key. * windows container image: added user, EnvoyUser which is part of the Network Configuration Operators group to the container image. Deprecated ---------- -* bootstrap: the field :ref:`use_tcp_for_dns_lookups ` is deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. -* cluster: the fields :ref:`use_tcp_for_dns_lookups ` and :ref:`dns_resolvers ` are deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. -* dns_filter: the field :ref:`known_suffixes ` is deprecated. The internal data management of the filter has changed and the filter no longer uses the known_suffixes field. -* dynamic_forward_proxy: the field :ref:`use_tcp_for_dns_lookups ` is deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. -* http: :ref:`xff_num_trusted_hops ` is deprecated in favor of :ref:`original IP detection extensions`. 
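As a rough sketch of the preconnecting support mentioned above (ratios and the cluster name are illustrative; this is a cluster fragment only):

.. code-block:: yaml

  clusters:
  - name: latency_sensitive                  # illustrative cluster
    lb_policy: LEAST_REQUEST
    preconnect_policy:
      per_upstream_preconnect_ratio: 1.5     # roughly 1.5 connections/streams prepared per active stream
      predictive_preconnect_ratio: 2.0       # optional cluster-wide predictive preconnecting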
+* bootstrap: the field :ref:`use_tcp_for_dns_lookups ` is deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. +* cluster: the fields :ref:`use_tcp_for_dns_lookups ` and :ref:`dns_resolvers ` are deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. +* dns_filter: the field :ref:`known_suffixes ` is deprecated. The internal data management of the filter has changed and the filter no longer uses the known_suffixes field. +* dynamic_forward_proxy: the field :ref:`use_tcp_for_dns_lookups ` is deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. +* http: :ref:`xff_num_trusted_hops ` is deprecated in favor of :ref:`original IP detection extensions`. diff --git a/docs/root/version_history/v1.2.0.rst b/docs/root/version_history/v1.2.0.rst index eb54093f6906..ca42c5df855b 100644 --- a/docs/root/version_history/v1.2.0.rst +++ b/docs/root/version_history/v1.2.0.rst @@ -4,27 +4,27 @@ Changes ------- -* :ref:`Cluster discovery service (CDS) API `. -* :ref:`Outlier detection ` (passive health checking). +* :ref:`Cluster discovery service (CDS) API `. +* :ref:`Outlier detection ` (passive health checking). * Envoy configuration is now checked against a JSON schema. -* :ref:`Ring hash ` consistent load balancer, as well as HTTP +* :ref:`Ring hash ` consistent load balancer, as well as HTTP consistent hash routing based on a policy. -* Vastly :ref:`enhanced global rate limit configuration ` via the HTTP +* Vastly :ref:`enhanced global rate limit configuration ` via the HTTP rate limiting filter. * HTTP routing to a cluster retrieved from a header. * Weighted cluster HTTP routing. * Auto host rewrite during HTTP routing. * Regex header matching during HTTP routing. * HTTP access log runtime filter. -* LightStep tracer :ref:`parent/child span association `. -* :ref:`Route discovery service (RDS) API `. +* LightStep tracer :ref:`parent/child span association `. +* :ref:`Route discovery service (RDS) API `. * HTTP router :ref:`x-envoy-upstream-rq-timeout-alt-response header - ` support. -* *use_original_dst* and *bind_to_port* :ref:`listener options ` (useful for + ` support. +* *use_original_dst* and *bind_to_port* :ref:`listener options ` (useful for iptables based transparent proxy support). -* TCP proxy filter :ref:`route table support `. +* TCP proxy filter :ref:`route table support `. * Configurable stats flush interval. -* Various :ref:`third party library upgrades `, including using BoringSSL as +* Various :ref:`third party library upgrades `, including using BoringSSL as the default SSL provider. * No longer maintain closed HTTP/2 streams for priority calculations. Leads to substantial memory savings for large meshes. 
diff --git a/docs/root/version_history/v1.20.0.rst b/docs/root/version_history/v1.20.0.rst new file mode 100644 index 000000000000..1378e06c09ab --- /dev/null +++ b/docs/root/version_history/v1.20.0.rst @@ -0,0 +1,189 @@ +1.20.0 (October 5, 2021) +======================== + +Incompatible Behavior Changes +----------------------------- +*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* + +* config: due to the switch to using work-in-progress annotations and warnings to indicate APIs + subject to change, the following API packages have been force migrated from ``v3alpha`` to ``v3``: + ``envoy.extensions.access_loggers.open_telemetry.v3``, + ``envoy.extensions.cache.simple_http_cache.v3``, + ``envoy.extensions.filters.http.admission_control.v3``, + ``envoy.extensions.filters.http.bandwidth_limit.v3``, + ``envoy.extensions.filters.http.cache.v3``, + ``envoy.extensions.filters.http.cdn_loop.v3``, + ``envoy.extensions.filters.http.ext_proc.v3``, + ``envoy.extensions.filters.http.oauth2.v3``, + ``envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3``, + ``envoy.extensions.filters.udp.dns_filter.v3``, + ``envoy.extensions.transport_sockets.s2a.v3``, + ``envoy.extensions.watchdog.profile_action.v3``, + ``envoy.service.ext_proc.v3``, and + ``envoy.watchdog.v3``. If your production deployment was using one of these APIs, you will be + forced to potentially vendor the old proto file to continue serving old versions of Envoy. + The project realizes this is unfortunate because some of these are known to be used in production, + however the project does not have the resources to undergo a migration in which we support + ``v3alpha`` and ``v3`` at the same time. The switch to using work-in-progress annotations with + clear and explicit warnings will avoid any such issue in the future. We apologize again for any + difficulty this change causes, though it is for the best. Additionally, some of the above + namespaces have had their work-in-progress annotations removed due to known production usage. + Thus, they will not warn and are offered full API stability support by the project from this + point forward. +* config: the ``--bootstrap-version`` CLI flag has been removed, Envoy has only been able to accept v3 + bootstrap configurations since 1.18.0. +* contrib: the :ref:`squash filter ` has been moved to + :ref:`contrib images `. +* contrib: the :ref:`kafka broker filter ` has been moved to + :ref:`contrib images `. +* contrib: the :ref:`RocketMQ proxy filter ` has been moved to + :ref:`contrib images `. +* contrib: the :ref:`Postgres proxy filter ` has been moved to + :ref:`contrib images `. +* contrib: the :ref:`MySQL proxy filter ` has been moved to + :ref:`contrib images `. +* dns_filter: :ref:`dns_filter ` + protobuf fields have been renumbered to restore compatibility with Envoy + 1.18, breaking compatibility with Envoy 1.19.0 and 1.19.1. The new field + numbering allows control planes supporting Envoy 1.18 to gracefully upgrade to + :ref:`dns_resolution_config `, + provided they skip over Envoy 1.19.0 and 1.19.1. + Control planes upgrading from Envoy 1.19.0 and 1.19.1 will need to + vendor the corresponding protobuf definitions to ensure that the + renumbered fields have the types expected by those releases. +* extensions: deprecated extension names now default to triggering a configuration error. 
+ The previous warning-only behavior may be temporarily reverted by setting the runtime key + ``envoy.deprecated_features.allow_deprecated_extension_names`` to true. + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + +* client_ssl_auth filter: now sets additional termination details and ``UAEX`` response flag when the client certificate is not in the allowed-list. +* config: configuration files ending in .yml now load as YAML. +* config: configuration file extensions now ignore case when deciding the file type. E.g., .JSON files load as JSON. +* config: reduced log level for "Unable to establish new stream" xDS logs to debug. The log level + for "gRPC config stream closed" is now reduced to debug when the status is ``Ok`` or has been + retriable (``DeadlineExceeded``, ``ResourceExhausted``, or ``Unavailable``) for less than 30 + seconds. +* config: use of work-in-progress API files, messages, or fields will now generate an explicit + warning. Please read the text about ``(xds.annotations.v3.file_status).work_in_progress``, + ``(xds.annotations.v3.message_status).work_in_progress``, and + ``(xds.annotations.v3.field_status).work_in_progress`` + `here `_ for more information. Some + APIs that are known to be implicitly not work-in-progress have been force migrated and are + individually indicated elsewhere in the release notes. A server-wide ``wip_protos`` counter has + also been added in :ref:`server statistics ` to track this. +* ext_authz: fixed skipping authentication when returning either a direct response or a redirect. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.http_ext_authz_do_not_skip_direct_response_and_redirect`` runtime guard to false. +* grpc: gRPC async client can be cached and shared across filter instances in the same thread, this feature is turned off by default, can be turned on by setting runtime guard ``envoy.reloadable_features.enable_grpc_async_client_cache`` to true. +* http: correct the use of the ``x-forwarded-proto`` header and the ``:scheme`` header. Where they differ + (which is rare) ``:scheme`` will now be used for serving redirect URIs and cached content. This behavior + can be reverted by setting runtime guard ``correct_scheme_and_xfp`` to false. +* http: reject requests with #fragment in the URI path. The fragment is not allowed to be part of the request + URI according to RFC3986 (3.5), RFC7230 (5.1) and RFC 7540 (8.1.2.3). Rejection of requests can be changed + to stripping the #fragment instead by setting the runtime guard ``envoy.reloadable_features.http_reject_path_with_fragment`` + to false. This behavior can further be changed to the deprecated behavior of keeping the fragment by setting the runtime guard + ``envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled``. This runtime guard must only be set + to false when existing non-compliant traffic relies on #fragment in URI. When this option is enabled, Envoy request + authorization extensions may be bypassed. This override and its associated behavior will be decommissioned after the standard deprecation period. +* http: set the default :ref:`lazy headermap threshold ` to 3, + which defines the minimal number of headers in a request/response/trailers required for using a + dictionary in addition to the list. Setting the ``envoy.http.headermap.lazy_map_min_size`` runtime + feature to a non-negative number will override the default value. 
+* http: stop processing pending H/2 frames if connection transitioned to a closed state. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.skip_dispatching_frames_for_closed_connection`` to false. +* listener: added the :ref:`enable_reuse_port ` + field and changed the default for ``reuse_port`` from false to true, as the feature is now well + supported on the majority of production Linux kernels in use. The default change is aware of the hot + restart, as otherwise, the change would not be backward compatible between restarts. This means + that hot restarting onto a new binary will retain the default of false until the binary undergoes + a full restart. To retain the previous behavior, either explicitly set the new configuration + field to false, or set the runtime feature flag ``envoy.reloadable_features.listener_reuse_port_default_enabled`` + to false. As part of this change, the use of ``reuse_port`` for TCP listeners on both macOS and + Windows has been disabled due to suboptimal behavior. See the field documentation for more + information. +* listener: destroy per network filter chain stats when a network filter chain is removed during the listener in-place update. +* quic: enables IETF connection migration. This feature requires a stable UDP packet routine in the L4 load balancer with the same first-4-bytes in connection id. It can be turned off by setting runtime guard ``envoy.reloadable_features.FLAGS_quic_reloadable_flag_quic_connection_migration_use_new_cid_v2`` to false. +* thrift_proxy: allow Framed and Header transport combinations to perform :ref:`payload passthrough `. + +Bug Fixes +--------- +*Changes expected to improve the state of the world and are unlikely to have negative effects* + +* access log: fix ``%UPSTREAM_CLUSTER%`` when used in http upstream access logs. Previously, it was always logging as an unset value. +* aws request signer: fix the AWS Request Signer extension to correctly normalize the path and query string to be signed according to AWS' guidelines, so that the hash on the server side matches. See `AWS SigV4 documentation `_. +* cluster: delete pools when they're idle to fix unbounded memory use when using PROXY protocol upstream with tcp_proxy. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.conn_pool_delete_when_idle`` runtime guard to false. +* cluster: finish cluster warming even if hosts are removed before health check initialization. This only affected clusters with :ref:`ignore_health_on_host_removal `. +* compressor: fix a bug where if trailers were added and a subsequent filter paused the filter chain, the request could be stalled. This behavior can be reverted by setting ``envoy.reloadable_features.fix_added_trailers`` to false. +* dynamic forward proxy: fixing a validation bug where san and sni checks were not applied setting :ref:`http_protocol_options ` via :ref:`typed_extension_protocol_options `. +* ext_authz: fix the ext_authz filter to correctly merge multiple same headers using the ',' as separator in the check request to the external authorization service. +* ext_authz: fix the use of ``append`` field of :ref:`response_headers_to_add ` to set or append encoded response headers from a gRPC auth server. +* ext_authz: fix the HTTP ext_authz filter to respond with ``403 Forbidden`` when a gRPC auth server sends a denied check response with an empty HTTP status code. 
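For the ``reuse_port`` default change described above, a minimal listener fragment opting back into the previous behavior (name and port are placeholders; filter chains omitted):

.. code-block:: yaml

  listeners:
  - name: ingress_8080                       # placeholder name
    address:
      socket_address:
        address: 0.0.0.0
        port_value: 8080
    enable_reuse_port: false                 # explicit opt-out of the new default of true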
+* ext_authz: the network ext_authz filter now correctly sets dynamic metadata returned by the authorization service for non-OK responses. This behavior now matches the http ext_authz filter. +* hcm: remove deprecation for :ref:`xff_num_trusted_hops ` and forbid mixing ip detection extensions with old related knobs. +* http: limit use of deferred resets in the http2 codec to server-side connections. Use of deferred reset for client connections can result in incorrect behavior and performance problems. +* listener: fixed an issue on Windows where connections are not handled by all worker threads. +* lua: fix ``BodyBuffer`` setting a Lua string and printing Lua string containing hex characters. Previously, ``BodyBuffer`` setting a Lua string or printing strings with hex characters will be truncated. +* xray: fix the AWS X-Ray tracer bug where span's error, fault and throttle information was not reported properly as per the `AWS X-Ray documentation `_. Before this fix, server error was reported under the 'annotations' section of the segment data. + +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +* http: removed ``envoy.reloadable_features.http_upstream_wait_connect_response`` runtime guard and legacy code paths. +* http: removed ``envoy.reloadable_features.allow_preconnect`` runtime guard and legacy code paths. +* listener: removed ``envoy.reloadable_features.disable_tls_inspector_injection`` runtime guard and legacy code paths. +* ocsp: removed ``envoy.reloadable_features.check_ocsp_policy deprecation`` runtime guard and legacy code paths. +* ocsp: removed ``envoy.reloadable_features.require_ocsp_response_for_must_staple_certs deprecation`` and legacy code paths. +* quic: removed ``envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing`` runtime guard. + +New Features +------------ +* access_log: added :ref:`METADATA` token to handle all types of metadata (DYNAMIC, CLUSTER, ROUTE). +* bootstrap: added :ref:`inline_headers ` in the bootstrap to make custom inline headers bootstrap configurable. +* contrib: added new :ref:`contrib images ` which contain contrib extensions. +* dns: added :ref:`V4_PREFERRED ` option to return V6 addresses only if V4 addresses are not available. +* ext_authz: added :ref:`dynamic_metadata_from_headers ` to support emitting dynamic metadata from headers returned by an external authorization service via HTTP. +* grpc reverse bridge: added a new :ref:`option ` to support streaming response bodies when withholding gRPC frames from the upstream. +* grpc_json_transcoder: added support to unescape '+' in query parameters to space with a new config field :ref:`query_param_unescape_plus `. +* http: added cluster_header in :ref:`weighted_clusters ` to allow routing to the weighted cluster specified in the request_header. +* http: added :ref:`alternate_protocols_cache_options ` for enabling HTTP/3 connections to servers which advertise HTTP/3 support via `HTTP Alternative Services `_ and caching the advertisements to disk. +* http: added :ref:`string_match ` in the header matcher. +* http: added :ref:`x-envoy-upstream-stream-duration-ms ` that allows configuring the max stream duration via a request header. +* http: added support for :ref:`max_requests_per_connection ` for both upstream and downstream connections. +* http: sanitizing the referer header as documented :ref:`here `. 
This feature can be temporarily turned off by setting runtime guard ``envoy.reloadable_features.sanitize_http_header_referer`` to false. +* http: validating outgoing HTTP/2 CONNECT requests to ensure that if ``:path`` is set that ``:protocol`` is present. This behavior can be temporarily turned off by setting runtime guard ``envoy.reloadable_features.validate_connect`` to false. +* jwt_authn: added support for :ref:`Jwt Cache ` and its size can be specified by :ref:`jwt_cache_size `. +* jwt_authn: added support for extracting JWTs from request cookies using :ref:`from_cookies `. +* jwt_authn: added support for setting the extracted headers from a successfully verified JWT using :ref:`header_in_metadata ` to dynamic metadata. +* listener: new listener metric ``downstream_cx_transport_socket_connect_timeout`` to track transport socket timeouts. +* lua: added ``header:getAtIndex()`` and ``header:getNumValues()`` methods to :ref:`header object ` for retrieving the value of a header at certain index and get the total number of values for a given header. +* matcher: added :ref:`invert ` for inverting the match result in the metadata matcher. +* overload: add a new overload action that resets streams using a lot of memory. To enable the tracking of allocated bytes in buffers that a stream is using we need to configure the minimum threshold for tracking via :ref:`buffer_factory_config `. We have an overload action ``Envoy::Server::OverloadActionNameValues::ResetStreams`` that takes advantage of the tracking to reset the most expensive stream first. +* rbac: added :ref:`destination_port_range ` for matching range of destination ports. +* rbac: added :ref:`matcher` along with extension category ``extension_category_envoy.rbac.matchers`` for custom RBAC permission matchers. Added reference implementation for matchers :ref:`envoy.rbac.matchers.upstream_ip_port `. +* route config: added :ref:`dynamic_metadata ` for routing based on dynamic metadata. +* router: added retry options predicate extensions configured via :ref:`retry_options_predicates. ` These extensions allow modification of requests between retries at the router level. There are not currently any built-in extensions that implement this extension point. +* router: added :ref:`per_try_idle_timeout ` timeout configuration. +* router: added an optional :ref:`override_auto_sni_header ` to support setting SNI value from an arbitrary header other than host/authority. +* sxg_filter: added filter to transform response to SXG package to :ref:`contrib images `. This can be enabled by setting :ref:`SXG ` configuration. +* thrift_proxy: added support for :ref:`mirroring requests `. +* udp: allows updating filter chain in-place through LDS, which is supported by Quic listener. Such listener config will be rejected in other connection-less UDP listener implementations. It can be reverted by ``envoy.reloadable_features.udp_listener_updates_filter_chain_in_place``. +* udp: disallow L4 filter chain in config which configures connection-less UDP listener. It can be reverted by ``envoy.reloadable_features.udp_listener_updates_filter_chain_in_place``. +* upstream: added support for :ref:`slow start mode `, which allows to progresively increase traffic for new endpoints. +* upstream: extended :ref:`Round Robin load balancer configuration ` with :ref:`slow start ` support. +* upstream: extended :ref:`Least Request load balancer configuration ` with :ref:`slow start ` support. +* windows: added a new container image based on Windows Nanoserver 2022. 
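The slow start support called out above hangs off the per-policy load balancer config on the cluster; a hedged round robin sketch (window, aggression and runtime key are arbitrary):

.. code-block:: yaml

  clusters:
  - name: warming_backend                    # illustrative cluster
    lb_policy: ROUND_ROBIN
    round_robin_lb_config:
      slow_start_config:
        slow_start_window: 60s               # ramp traffic to new endpoints over 60 seconds
        aggression:
          default_value: 1.0
          runtime_key: upstream.slow_start.aggression   # illustrative runtime key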
+* xray: request direction (``ingress`` or ``egress``) is recorded as X-Ray trace segment's annotation by name ``direction``. + +Deprecated +---------- + +* api: the :ref:`matcher ` field has been deprecated in favor of + :ref:`matcher ` in order to break a build dependency. +* cluster: :ref:`max_requests_per_connection ` is deprecated in favor of :ref:`max_requests_per_connection `. +* http: the HeaderMatcher fields :ref:`exact_match `, :ref:`safe_regex_match `, + :ref:`prefix_match `, :ref:`suffix_match ` and + :ref:`contains_match ` are deprecated by :ref:`string_match `. +* listener: :ref:`reuse_port ` has been + deprecated in favor of :ref:`enable_reuse_port `. + At the same time, the default has been changed from false to true. See above for more information. diff --git a/docs/root/version_history/v1.3.0.rst b/docs/root/version_history/v1.3.0.rst index 234be4dbacad..969dfa127f4a 100644 --- a/docs/root/version_history/v1.3.0.rst +++ b/docs/root/version_history/v1.3.0.rst @@ -5,21 +5,21 @@ Changes ------- * As of this release, we now have an official :repo:`breaking change policy - `. Note that there are numerous breaking configuration + `. Note that there are numerous breaking configuration changes in this release. They are not listed here. Future releases will adhere to the policy and have clear documentation on deprecations and changes. * Bazel is now the canonical build system (replacing CMake). There have been a huge number of changes to the development/build/test flow. See :repo:`/bazel/README.md` and :repo:`/ci/README.md` for more information. -* :ref:`Outlier detection ` has been expanded to include success +* :ref:`Outlier detection ` has been expanded to include success rate variance, and all parameters are now configurable in both runtime and in the JSON configuration. * TCP level listener and cluster connections now have configurable receive buffer limits at which point connection level back pressure is applied. Full end to end flow control will be available in a future release. -* :ref:`Redis health checking ` has been added as an active +* :ref:`Redis health checking ` has been added as an active health check type. Full Redis support will be documented/supported in 1.4.0. -* :ref:`TCP health checking ` now supports a +* :ref:`TCP health checking ` now supports a "connect only" mode that only checks if the remote server can be connected to without writing/reading any data. * `BoringSSL `_ is now the only supported TLS provider. @@ -31,36 +31,36 @@ Changes configurations by default. Use ``include_vh_rate_limits`` to inherit the virtual host level options if desired. * HTTP routes can now add request headers on a per route and per virtual host basis via the - :ref:`request_headers_to_add ` option. -* The :ref:`example configurations ` have been refreshed to demonstrate the + :ref:`request_headers_to_add ` option. +* The :ref:`example configurations ` have been refreshed to demonstrate the latest features. * ``per_try_timeout_ms`` can now be configured in a route's retry policy in addition to via the :ref:`x-envoy-upstream-rq-per-try-timeout-ms - ` HTTP header. + ` HTTP header. * HTTP virtual host matching now includes support for prefix wildcard domains (e.g., ``*.lyft.com``). * The default for tracing random sampling has been changed to 100% and is still configurable in - :ref:`runtime `. + :ref:`runtime `. * HTTP tracing configuration has been extended to allow tags to be populated from arbitrary HTTP headers. 
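To illustrate the ``HeaderMatcher`` deprecation in the 1.20.0 notes above, the per-type match fields fold into a single ``string_match``; header names and values are placeholders:

.. code-block:: yaml

  headers:
  - name: x-environment                      # placeholder header
    string_match:                            # replaces exact_match / prefix_match / suffix_match / contains_match
      exact: staging
  - name: x-canary
    string_match:
      prefix: "canary-"
      ignore_case: true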
-* The :ref:`HTTP rate limit filter ` can now be applied to internal, +* The :ref:`HTTP rate limit filter ` can now be applied to internal, external, or all requests via the ``request_type`` option. -* :ref:`Listener binding ` now requires specifying an `address` field. This can be +* :ref:`Listener binding ` now requires specifying an `address` field. This can be used to bind a listener to both a specific address as well as a port. -* The :ref:`MongoDB filter ` now emits a stat for queries that +* The :ref:`MongoDB filter ` now emits a stat for queries that do not have ``$maxTimeMS`` set. -* The :ref:`MongoDB filter ` now emits logs that are fully valid +* The :ref:`MongoDB filter ` now emits logs that are fully valid JSON. * The CPU profiler output path is now configurable. * A watchdog system has been added that can kill the server if a deadlock is detected. -* A :ref:`route table checking tool ` has been added that can +* A :ref:`route table checking tool ` has been added that can be used to test route tables before use. -* We have added an :ref:`example repo ` that shows how to compile/link a custom filter. +* We have added an :ref:`example repo ` that shows how to compile/link a custom filter. * Added additional cluster wide information related to outlier detection to the :ref:`/clusters - admin endpoint `. + admin endpoint `. * Multiple SANs can now be verified via the ``verify_subject_alt_name`` setting. Additionally, URI type SANs can be verified. * HTTP filters can now be passed opaque configuration specified on a per route basis. * By default Envoy now has a built in crash handler that will print a back trace. This behavior can be disabled if desired via the ``--define=signal_trace=disabled`` Bazel option. -* Zipkin has been added as a supported :ref:`tracing provider `. +* Zipkin has been added as a supported :ref:`tracing provider `. * Numerous small changes and fixes not listed here. diff --git a/docs/root/version_history/v1.4.0.rst b/docs/root/version_history/v1.4.0.rst index 3342e4faee54..f736fbdf6a43 100644 --- a/docs/root/version_history/v1.4.0.rst +++ b/docs/root/version_history/v1.4.0.rst @@ -4,42 +4,42 @@ Changes ------- -* macOS is :repo:`now supported `. (A few features +* macOS is :repo:`now supported `. (A few features are missing such as hot restart and original destination routing). * YAML is now directly supported for config files. * Added /routes admin endpoint. * End-to-end flow control is now supported for TCP proxy, HTTP/1, and HTTP/2. HTTP flow control that includes filter buffering is incomplete and will be implemented in 1.5.0. -* Log verbosity :repo:`compile time flag ` added. -* Hot restart :repo:`compile time flag ` added. -* Original destination :ref:`cluster ` - and :ref:`load balancer ` added. -* :ref:`WebSocket ` is now supported. +* Log verbosity :repo:`compile time flag ` added. +* Hot restart :repo:`compile time flag ` added. +* Original destination :ref:`cluster ` + and :ref:`load balancer ` added. +* :ref:`WebSocket ` is now supported. * Virtual cluster priorities have been hard removed without deprecation as we are reasonably sure no one is using this feature. * Route ``validate_clusters`` option added. -* :ref:`x-envoy-downstream-service-node ` +* :ref:`x-envoy-downstream-service-node ` header added. -* :ref:`x-forwarded-client-cert ` header +* :ref:`x-forwarded-client-cert ` header added. * Initial HTTP/1 forward proxy support for absolute URLs has been added. * HTTP/2 codec settings are now configurable. 
-* gRPC/JSON transcoder :ref:`filter ` added. -* gRPC web :ref:`filter ` added. +* gRPC/JSON transcoder :ref:`filter ` added. +* gRPC web :ref:`filter ` added. * Configurable timeout for the rate limit service call in the :ref:`network - ` and :ref:`HTTP ` rate limit + ` and :ref:`HTTP ` rate limit filters. -* :ref:`x-envoy-retry-grpc-on ` header added. -* :ref:`LDS API ` added. +* :ref:`x-envoy-retry-grpc-on ` header added. +* :ref:`LDS API ` added. * TLS :``require_client_certificate`` option added. -* :ref:`Configuration check tool ` added. -* :ref:`JSON schema check tool ` added. +* :ref:`Configuration check tool ` added. +* :ref:`JSON schema check tool ` added. * Config validation mode added via the :option:`--mode` option. * :option:`--local-address-ip-version` option added. * IPv6 support is now complete. * UDP ``statsd_ip_address`` option added. * Per-cluster DNS resolvers added. -* :ref:`Fault filter ` enhancements and fixes. +* :ref:`Fault filter ` enhancements and fixes. * Several features are `deprecated as of the 1.4.0 release `_. They will be removed at the beginning of the 1.5.0 release cycle. We explicitly call out that the ``HttpFilterConfigFactory`` filter API has been deprecated in favor of diff --git a/docs/root/version_history/v1.5.0.rst b/docs/root/version_history/v1.5.0.rst index 58e1d3147f7b..6986e34b84e7 100644 --- a/docs/root/version_history/v1.5.0.rst +++ b/docs/root/version_history/v1.5.0.rst @@ -5,66 +5,66 @@ Changes ------- * access log: added fields for :ref:`UPSTREAM_LOCAL_ADDRESS and DOWNSTREAM_ADDRESS - `. -* admin: added :ref:`JSON output ` for stats admin endpoint. -* admin: added basic :ref:`Prometheus output ` for stats admin + `. +* admin: added :ref:`JSON output ` for stats admin endpoint. +* admin: added basic :ref:`Prometheus output ` for stats admin endpoint. Histograms are not currently output. -* admin: added ``version_info`` to the :ref:`/clusters admin endpoint `. -* config: the :ref:`v2 API ` is now considered production ready. +* admin: added ``version_info`` to the :ref:`/clusters admin endpoint `. +* config: the :ref:`v2 API ` is now considered production ready. * config: added --v2-config-only CLI flag. -* cors: added :ref:`CORS filter `. +* cors: added :ref:`CORS filter `. * health check: added :ref:`x-envoy-immediate-health-check-fail - ` header support. -* health check: added :ref:`reuse_connection ` option. -* http: added :ref:`per-listener stats `. + ` header support. +* health check: added :ref:`reuse_connection ` option. +* http: added :ref:`per-listener stats `. * http: end-to-end HTTP flow control is now complete across both connections, streams, and filters. -* load balancer: added :ref:`subset load balancer `. +* load balancer: added :ref:`subset load balancer `. * load balancer: added ring size and hash :ref:`configuration options - `. This used to be configurable via runtime. The runtime + `. This used to be configurable via runtime. The runtime configuration was deleted without deprecation as we are fairly certain no one is using it. * log: added the ability to optionally log to a file instead of stderr via the :option:`--log-path` option. -* listeners: added :ref:`drain_type ` option. -* lua: added experimental :ref:`Lua filter `. -* mongo filter: added :ref:`fault injection `. -* mongo filter: added :ref:`"drain close" ` support. -* outlier detection: added :ref:`HTTP gateway failure type `. +* listeners: added :ref:`drain_type ` option. +* lua: added experimental :ref:`Lua filter `. 
+* mongo filter: added :ref:`fault injection `. +* mongo filter: added :ref:`"drain close" ` support. +* outlier detection: added :ref:`HTTP gateway failure type `. See `deprecated log `_ for outlier detection stats deprecations in this release. -* redis: the :ref:`redis proxy filter ` is now considered +* redis: the :ref:`redis proxy filter ` is now considered production ready. -* redis: added :ref:`"drain close" ` functionality. -* router: added :ref:`x-envoy-overloaded ` support. -* router: added :ref:`regex ` route matching. -* router: added :ref:`custom request headers ` +* redis: added :ref:`"drain close" ` functionality. +* router: added :ref:`x-envoy-overloaded ` support. +* router: added :ref:`regex ` route matching. +* router: added :ref:`custom request headers ` for upstream requests. * router: added :ref:`downstream IP hashing - ` for HTTP ketama routing. -* router: added :ref:`cookie hashing `. -* router: added :ref:`start_child_span ` option + ` for HTTP ketama routing. +* router: added :ref:`cookie hashing `. +* router: added :ref:`start_child_span ` option to create child span for egress calls. -* router: added optional :ref:`upstream logs `. +* router: added optional :ref:`upstream logs `. * router: added complete :ref:`custom append/override/remove support - ` of request/response headers. + ` of request/response headers. * router: added support to :ref:`specify response code during redirect - `. -* router: added :ref:`configuration ` + `. +* router: added :ref:`configuration ` to return either a 404 or 503 if the upstream cluster does not exist. -* runtime: added :ref:`comment capability `. +* runtime: added :ref:`comment capability `. * server: change default log level (:option:`-l`) to ``info``. * stats: maximum stat/name sizes and maximum number of stats are now variable via the ``--max-obj-name-len`` and ``--max-stats`` options. -* tcp proxy: added :ref:`access logging `. +* tcp proxy: added :ref:`access logging `. * tcp proxy: added :ref:`configurable connect retries - `. -* tcp proxy: enable use of :ref:`outlier detector `. -* tls: added :ref:`SNI support `. + `. +* tcp proxy: enable use of :ref:`outlier detector `. +* tls: added :ref:`SNI support `. * tls: added support for specifying :ref:`TLS session ticket keys - `. + `. * tls: allow configuration of the :ref:`min - ` and :ref:`max - ` TLS protocol versions. -* tracing: added :ref:`custom trace span decorators `. + ` and :ref:`max + ` TLS protocol versions. +* tracing: added :ref:`custom trace span decorators `. * Many small bug fixes and performance improvements not listed. Deprecated diff --git a/docs/root/version_history/v1.6.0.rst b/docs/root/version_history/v1.6.0.rst index 406ca33b4da9..20d525f0ba6d 100644 --- a/docs/root/version_history/v1.6.0.rst +++ b/docs/root/version_history/v1.6.0.rst @@ -5,121 +5,121 @@ Changes ------- * access log: added DOWNSTREAM_REMOTE_ADDRESS, DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT, and - DOWNSTREAM_LOCAL_ADDRESS :ref:`access log formatters `. + DOWNSTREAM_LOCAL_ADDRESS :ref:`access log formatters `. DOWNSTREAM_ADDRESS access log formatter has been deprecated. * access log: added less than or equal (LE) :ref:`comparison filter - `. + `. * access log: added configuration to :ref:`runtime filter - ` to set default sampling rate, divisor, + ` to set default sampling rate, divisor, and whether to use independent randomness or not. -* admin: added :ref:`/runtime ` admin endpoint to read the +* admin: added :ref:`/runtime ` admin endpoint to read the current runtime values. 
* build: added support for :repo:`building Envoy with exported symbols - `. This change allows scripts loaded with the Lua filter to + `. This change allows scripts loaded with the Lua filter to load shared object libraries such as those installed via `LuaRocks `_. * config: added support for sending error details as `grpc.rpc.Status `_ - in :ref:`DiscoveryRequest `. -* config: added support for :ref:`inline delivery ` of TLS + in :ref:`DiscoveryRequest `. +* config: added support for :ref:`inline delivery ` of TLS certificates and private keys. -* config: added restrictions for the backing :ref:`config sources ` +* config: added restrictions for the backing :ref:`config sources ` of xDS resources. For filesystem based xDS the file must exist at configuration time. For cluster based xDS the backing cluster must be statically defined and be of non-EDS type. * grpc: the Google gRPC C++ library client is now supported as specified in the :ref:`gRPC services - overview ` and :ref:`GrpcService `. + overview ` and :ref:`GrpcService `. * grpc-json: added support for :ref:`inline descriptors - `. -* health check: added :ref:`gRPC health check ` + `. +* health check: added :ref:`gRPC health check ` based on `grpc.health.v1.Health `_ service. * health check: added ability to set :ref:`host header value - ` for http health check. + ` for http health check. * health check: extended the health check filter to support computation of the health check response based on the :ref:`percentage of healthy servers in upstream clusters - `. + `. * health check: added setting for :ref:`no-traffic - interval `. + interval `. * http: added idle timeout for :ref:`upstream http connections - `. + `. * http: added support for :ref:`proxying 100-Continue responses - `. + `. * http: added the ability to pass a URL encoded PEM encoded peer certificate in the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header. * http: added support for trusting additional hops in the :ref:`config_http_conn_man_headers_x-forwarded-for` request header. * http: added support for :ref:`incoming HTTP/1.0 - `. + `. * hot restart: added SIGTERM propagation to children to :ref:`hot-restarter.py - `, which enables using it as a parent of containers. -* ip tagging: added :ref:`HTTP IP Tagging filter `. + `, which enables using it as a parent of containers. +* ip tagging: added :ref:`HTTP IP Tagging filter `. * listeners: added support for :ref:`listening for both IPv4 and IPv6 - ` when binding to ::. + ` when binding to ::. * listeners: added support for listening on :ref:`UNIX domain sockets - `. -* listeners: added support for :ref:`abstract unix domain sockets ` on + `. +* listeners: added support for :ref:`abstract unix domain sockets ` on Linux. The abstract namespace can be used by prepending '@' to a socket path. * load balancer: added cluster configuration for :ref:`healthy panic threshold - ` percentage. -* load balancer: added :ref:`Maglev ` consistent hash + ` percentage. +* load balancer: added :ref:`Maglev ` consistent hash load balancer. * load balancer: added support for - :ref:`LocalityLbEndpoints ` priorities. -* lua: added headers :ref:`replace() ` API. -* lua: extended to support :ref:`metadata object ` API. -* redis: added local `PING` support to the :ref:`Redis filter `. + :ref:`LocalityLbEndpoints ` priorities. +* lua: added headers :ref:`replace() ` API. +* lua: extended to support :ref:`metadata object ` API. +* redis: added local `PING` support to the :ref:`Redis filter `. 
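The gRPC health checking noted in the 1.6.0 entries above corresponds, in today's v3 YAML, to roughly the following cluster fragment (thresholds and the service name are illustrative):

.. code-block:: yaml

  health_checks:
  - timeout: 1s
    interval: 5s
    unhealthy_threshold: 3
    healthy_threshold: 2
    grpc_health_check:
      service_name: example.Backend          # sent to the upstream's grpc.health.v1.Health service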
* redis: added ``GEORADIUS_RO`` and ``GEORADIUSBYMEMBER_RO`` to the :ref:`Redis command splitter - ` allowlist. + ` allowlist. * router: added DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT, DOWNSTREAM_LOCAL_ADDRESS, DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT, PROTOCOL, and UPSTREAM_METADATA :ref:`header - formatters `. The CLIENT_IP header formatter + formatters `. The CLIENT_IP header formatter has been deprecated. -* router: added gateway-error :ref:`retry-on ` policy. +* router: added gateway-error :ref:`retry-on ` policy. * router: added support for route matching based on :ref:`URL query string parameters - `. + `. * router: added support for more granular weighted cluster routing by allowing the :ref:`total_weight - ` to be specified in configuration. + ` to be specified in configuration. * router: added support for :ref:`custom request/response headers - ` with mixed static and dynamic values. -* router: added support for :ref:`direct responses `. + ` with mixed static and dynamic values. +* router: added support for :ref:`direct responses `. I.e., sending a preconfigured HTTP response without proxying anywhere. * router: added support for :ref:`HTTPS redirects - ` on specific routes. + ` on specific routes. * router: added support for :ref:`prefix_rewrite - ` for redirects. + ` for redirects. * router: added support for :ref:`stripping the query string - ` for redirects. + ` for redirects. * router: added support for downstream request/upstream response - :ref:`header manipulation ` in :ref:`weighted - cluster `. + :ref:`header manipulation ` in :ref:`weighted + cluster `. * router: added support for :ref:`range based header matching - ` for request routing. -* squash: added support for the :ref:`Squash microservices debugger `. + ` for request routing. +* squash: added support for the :ref:`Squash microservices debugger `. Allows debugging an incoming request to a microservice in the mesh. * stats: added metrics service API implementation. -* stats: added native :ref:`DogStatsd ` support. +* stats: added native :ref:`DogStatsd ` support. * stats: added support for :ref:`fixed stats tag values - ` which will be added to all metrics. + ` which will be added to all metrics. * tcp proxy: added support for specifying a :ref:`metadata matcher - ` for upstream + ` for upstream clusters in the tcp filter. * tcp proxy: improved TCP proxy to correctly proxy TCP half-close. * tcp proxy: added :ref:`idle timeout - `. + `. * tcp proxy: access logs now bring an IP address without a port when using DOWNSTREAM_ADDRESS. - Use :ref:`DOWNSTREAM_REMOTE_ADDRESS ` instead. + Use :ref:`DOWNSTREAM_REMOTE_ADDRESS ` instead. * tracing: added support for dynamically loading an :ref:`OpenTracing tracer - `. + `. * tracing: when using the Zipkin tracer, it is now possible for clients to specify the sampling - decision (using the :ref:`x-b3-sampled ` header) and + decision (using the :ref:`x-b3-sampled ` header) and have the decision propagated through to subsequently invoked services. * tracing: when using the Zipkin tracer, it is no longer necessary to propagate the - :ref:`x-ot-span-context ` header. - See more on trace context propagation :ref:`here `. + :ref:`x-ot-span-context ` header. + See more on trace context propagation :ref:`here `. * transport sockets: added transport socket interface to allow custom implementations of transport sockets. A transport socket provides read and write logic with buffer encryption and decryption (if applicable). 
The existing TLS implementation has been refactored with the interface. * upstream: added support for specifying an :ref:`alternate stats name - ` while emitting stats for clusters. + ` while emitting stats for clusters. * Many small bug fixes and performance improvements not listed. Deprecated diff --git a/docs/root/version_history/v1.7.0.rst b/docs/root/version_history/v1.7.0.rst index 8be132e66643..3776551a1195 100644 --- a/docs/root/version_history/v1.7.0.rst +++ b/docs/root/version_history/v1.7.0.rst @@ -6,154 +6,154 @@ Changes * access log: added ability to log response trailers. * access log: added ability to format START_TIME. -* access log: added DYNAMIC_METADATA :ref:`access log formatter `. -* access log: added :ref:`HeaderFilter ` +* access log: added DYNAMIC_METADATA :ref:`access log formatter `. +* access log: added :ref:`HeaderFilter ` to filter logs based on request headers. * access log: added ``%([1-9])?f`` as one of START_TIME specifiers to render subseconds. * access log: gRPC Access Log Service (ALS) support added for :ref:`HTTP access logs - `. + `. * access log: improved WebSocket logging. * admin: added :http:get:`/config_dump` for dumping the current configuration and associated xDS version information (if applicable). * admin: added :http:get:`/clusters?format=json` for outputing a JSON-serialized proto detailing the current status of all clusters. * admin: added :http:get:`/stats/prometheus` as an alternative endpoint for getting stats in prometheus format. -* admin: added :ref:`/runtime_modify endpoint ` to add or change runtime values. +* admin: added :ref:`/runtime_modify endpoint ` to add or change runtime values. * admin: mutations must be sent as POSTs, rather than GETs. Mutations include: :http:post:`/cpuprofiler`, :http:post:`/healthcheck/fail`, :http:post:`/healthcheck/ok`, :http:post:`/logging`, :http:post:`/quitquitquit`, :http:post:`/reset_counters`, :http:post:`/runtime_modify?key1=value1&key2=value2&keyN=valueN`. -* admin: removed ``/routes`` endpoint; route configs can now be found at the :ref:`/config_dump endpoint `. +* admin: removed ``/routes`` endpoint; route configs can now be found at the :ref:`/config_dump endpoint `. * buffer filter: the buffer filter can be optionally - :ref:`disabled ` or - :ref:`overridden ` with + :ref:`disabled ` or + :ref:`overridden ` with route-local configuration. * cli: added --config-yaml flag to the Envoy binary. When set its value is interpreted as a yaml representation of the bootstrap config and overrides --config-path. -* cluster: added :ref:`option ` +* cluster: added :ref:`option ` to close tcp_proxy upstream connections when health checks fail. -* cluster: added :ref:`option ` to drain +* cluster: added :ref:`option ` to drain connections from hosts after they are removed from service discovery, regardless of health status. * cluster: fixed bug preventing the deletion of all endpoints in a priority * debug: added symbolized stack traces (where supported) * ext-authz filter: added support to raw HTTP authorization. * ext-authz filter: added support to gRPC responses to carry HTTP attributes. * grpc: support added for the full set of :ref:`Google gRPC call credentials - `. -* gzip filter: added :ref:`stats ` to the filter. + `. +* gzip filter: added :ref:`stats ` to the filter. * gzip filter: sending *accept-encoding* header as *identity* no longer compresses the payload. * health check: added ability to set :ref:`additional HTTP headers - ` for HTTP health check. + ` for HTTP health check. 
* health check: added support for EDS delivered :ref:`endpoint health status - `. + `. * health check: added interval overrides for health state transitions from :ref:`healthy to unhealthy - `, :ref:`unhealthy to healthy - ` and for subsequent checks on - :ref:`unhealthy hosts `. -* health check: added support for :ref:`custom health check `. + `, :ref:`unhealthy to healthy + ` and for subsequent checks on + :ref:`unhealthy hosts `. +* health check: added support for :ref:`custom health check `. * health check: health check connections can now be configured to use http/2. * health check http filter: added - :ref:`generic header matching ` + :ref:`generic header matching ` to trigger health check response. Deprecated the endpoint option. * http: filters can now optionally support - :ref:`virtual host `, - :ref:`route `, and - :ref:`weighted cluster ` + :ref:`virtual host `, + :ref:`route `, and + :ref:`weighted cluster ` local configuration. * http: added the ability to pass DNS type Subject Alternative Names of the client certificate in the - :ref:`v1.7.0:config_http_conn_man_headers_x-forwarded-client-cert` header. + :ref:`v1.7:config_http_conn_man_headers_x-forwarded-client-cert` header. * http: local responses to gRPC requests are now sent as trailers-only gRPC responses instead of plain HTTP responses. Notably the HTTP response code is always "200" in this case, and the gRPC error code is carried in "grpc-status" header, optionally accompanied with a text message in "grpc-message" header. * http: added support for :ref:`via header - ` + ` append. * http: added a :ref:`configuration option - ` + ` to elide *x-forwarded-for* header modifications. * http: fixed a bug in inline headers where addCopy and addViaMove didn't add header values when encountering inline headers with multiple instances. -* listeners: added :ref:`tcp_fast_open_queue_length ` option. -* listeners: added the ability to match :ref:`FilterChain ` using - :ref:`application_protocols ` +* listeners: added :ref:`tcp_fast_open_queue_length ` option. +* listeners: added the ability to match :ref:`FilterChain ` using + :ref:`application_protocols ` (e.g. ALPN for TLS protocol). -* listeners: ``sni_domains`` has been deprecated/renamed to :ref:`server_names `. +* listeners: ``sni_domains`` has been deprecated/renamed to :ref:`server_names `. * listeners: removed restriction on all filter chains having identical filters. * load balancer: added :ref:`weighted round robin - ` support. The round robin + ` support. The round robin scheduler now respects endpoint weights and also has improved fidelity across picks. * load balancer: :ref:`locality weighted load balancing - ` is now supported. + ` is now supported. * load balancer: ability to configure zone aware load balancer settings :ref:`through the API - `. + `. * load balancer: the :ref:`weighted least request - ` load balancing algorithm has been improved + ` load balancing algorithm has been improved to have better balance when operating in weighted mode. * logger: added the ability to optionally set the log format via the :option:`--log-format` option. -* logger: all :ref:`logging levels ` can be configured +* logger: all :ref:`logging levels ` can be configured at run-time: trace debug info warning error critical. -* rbac http filter: a :ref:`role-based access control http filter ` has been added. +* rbac http filter: a :ref:`role-based access control http filter ` has been added. 
* router: the behavior of per-try timeouts have changed in the case where a portion of the response has already been proxied downstream when the timeout occurs. Previously, the response would be reset leading to either an HTTP/2 reset or an HTTP/1 closed connection and a partial response. Now, the timeout will be ignored and the response will continue to proxy up to the global request timeout. -* router: changed the behavior of :ref:`source IP routing ` +* router: changed the behavior of :ref:`source IP routing ` to ignore the source port. -* router: added an :ref:`prefix_match ` match type +* router: added an :ref:`prefix_match ` match type to explicitly match based on the prefix of a header value. -* router: added an :ref:`suffix_match ` match type +* router: added an :ref:`suffix_match ` match type to explicitly match based on the suffix of a header value. -* router: added an :ref:`present_match ` match type +* router: added an :ref:`present_match ` match type to explicitly match based on a header's presence. -* router: added an :ref:`invert_match ` config option +* router: added an :ref:`invert_match ` config option which supports inverting all other match types to match based on headers which are not a desired value. -* router: allow :ref:`cookie routing ` to +* router: allow :ref:`cookie routing ` to generate session cookies. * router: added START_TIME as one of supported variables in :ref:`header - formatters `. -* router: added a :ref:`max_grpc_timeout ` + formatters `. +* router: added a :ref:`max_grpc_timeout ` config option to specify the maximum allowable value for timeouts decoded from gRPC header field ``grpc-timeout``. * router: added a :ref:`configuration option - ` to disable *x-envoy-* + ` to disable *x-envoy-* header generation. * router: added 'unavailable' to the retriable gRPC status codes that can be specified - through :ref:`x-envoy-retry-grpc-on `. -* sockets: added :ref:`tap transport socket extension ` to support + through :ref:`x-envoy-retry-grpc-on `. +* sockets: added :ref:`tap transport socket extension ` to support recording plain text traffic and PCAP generation. * sockets: added ``IP_FREEBIND`` socket option support for :ref:`listeners - ` and upstream connections via + ` and upstream connections via :ref:`cluster manager wide - ` and - :ref:`cluster specific ` options. + ` and + :ref:`cluster specific ` options. * sockets: added ``IP_TRANSPARENT`` socket option support for :ref:`listeners - `. + `. * sockets: added ``SO_KEEPALIVE`` socket option for upstream connections - :ref:`per cluster `. + :ref:`per cluster `. * stats: added support for histograms. -* stats: added :ref:`option to configure the statsd prefix `. +* stats: added :ref:`option to configure the statsd prefix `. * stats: updated stats sink interface to flush through a single call. * tls: added support for - :ref:`verify_certificate_spki `. + :ref:`verify_certificate_spki `. * tls: added support for multiple - :ref:`verify_certificate_hash ` + :ref:`verify_certificate_hash ` values. * tls: added support for using - :ref:`verify_certificate_spki ` - and :ref:`verify_certificate_hash ` - without :ref:`trusted_ca `. + :ref:`verify_certificate_spki ` + and :ref:`verify_certificate_hash ` + without :ref:`trusted_ca `. * tls: added support for allowing expired certificates with - :ref:`allow_expired_certificate `. -* tls: added support for :ref:`renegotiation ` + :ref:`allow_expired_certificate `. +* tls: added support for :ref:`renegotiation ` when acting as a client. 
* tls: removed support for legacy SHA-2 CBC cipher suites. * tracing: the sampling decision is now delegated to the tracers, allowing the tracer to decide when and if - to use it. For example, if the :ref:`x-b3-sampled ` header + to use it. For example, if the :ref:`x-b3-sampled ` header is supplied with the client request, its value will override any sampling decision made by the Envoy proxy. * websocket: support configuring idle_timeout and max_connect_attempts. -* upstream: added support for host override for a request in :ref:`Original destination host request header `. -* header to metadata: added :ref:`HTTP Header to Metadata filter `. +* upstream: added support for host override for a request in :ref:`Original destination host request header `. +* header to metadata: added :ref:`HTTP Header to Metadata filter `. Deprecated ---------- diff --git a/docs/root/version_history/v1.8.0.rst b/docs/root/version_history/v1.8.0.rst index 4f1d07b22a87..d6cf45d5703d 100644 --- a/docs/root/version_history/v1.8.0.rst +++ b/docs/root/version_history/v1.8.0.rst @@ -4,72 +4,72 @@ Changes ------- -* access log: added :ref:`response flag filter ` +* access log: added :ref:`response flag filter ` to filter based on the presence of Envoy response flags. * access log: added RESPONSE_DURATION and RESPONSE_TX_DURATION. * access log: added REQUESTED_SERVER_NAME for SNI to tcp_proxy and http * admin: added :http:get:`/hystrix_event_stream` as an endpoint for monitoring envoy's statistics through `Hystrix dashboard `_. -* cli: added support for :ref:`component log level ` command line option for configuring log levels of individual components. -* cluster: added :ref:`option ` to merge +* cli: added support for :ref:`component log level ` command line option for configuring log levels of individual components. +* cluster: added :ref:`option ` to merge health check/weight/metadata updates within the given duration. * config: regex validation added to limit to a maximum of 1024 characters. * config: v1 disabled by default. v1 support remains available until October via flipping --v2-config-only=false. * config: v1 disabled by default. v1 support remains available until October via deprecated flag --allow-deprecated-v1-api. -* config: fixed stat inconsistency between xDS and ADS implementation. :ref:`update_failure ` - stat is incremented in case of network failure and :ref:`update_rejected ` stat is incremented +* config: fixed stat inconsistency between xDS and ADS implementation. :ref:`update_failure ` + stat is incremented in case of network failure and :ref:`update_rejected ` stat is incremented in case of schema/validation error. -* config: added a stat :ref:`connected_state ` that indicates current connected state of Envoy with +* config: added a stat :ref:`connected_state ` that indicates current connected state of Envoy with management server. -* ext_authz: added support for configuring additional :ref:`authorization headers ` +* ext_authz: added support for configuring additional :ref:`authorization headers ` to be sent from Envoy to the authorization service. -* fault: added support for fractional percentages in :ref:`FaultDelay ` - and in :ref:`FaultAbort `. +* fault: added support for fractional percentages in :ref:`FaultDelay ` + and in :ref:`FaultAbort `. * grpc-json: added support for building HTTP response from `google.api.HttpBody `_. -* health check: added support for :ref:`custom health check `. -* health check: added support for :ref:`specifying jitter as a percentage `. 
-* health_check: added support for :ref:`health check event logging `. -* health_check: added :ref:`timestamp ` - to the :ref:`health check event ` definition. -* health_check: added support for specifying :ref:`custom request headers ` +* health check: added support for :ref:`custom health check `. +* health check: added support for :ref:`specifying jitter as a percentage `. +* health_check: added support for :ref:`health check event logging `. +* health_check: added :ref:`timestamp ` + to the :ref:`health check event ` definition. +* health_check: added support for specifying :ref:`custom request headers ` to HTTP health checker requests. * http: added support for a :ref:`per-stream idle timeout - `. This applies at both :ref:`connection manager - ` - and :ref:`per-route granularity `. The timeout + `. This applies at both :ref:`connection manager + ` + and :ref:`per-route granularity `. The timeout defaults to 5 minutes; if you have other timeouts (e.g. connection idle timeout, upstream response per-retry) that are longer than this in duration, you may want to consider setting a non-default per-stream idle timeout. -* http: added upstream_rq_completed counter for :ref:`total requests completed ` to dynamic HTTP counters. -* http: added downstream_rq_completed counter for :ref:`total requests completed `, including on a :ref:`per-listener basis `. +* http: added upstream_rq_completed counter for :ref:`total requests completed ` to dynamic HTTP counters. +* http: added downstream_rq_completed counter for :ref:`total requests completed `, including on a :ref:`per-listener basis `. * http: added generic :ref:`Upgrade support - `. + `. * http: better handling of HEAD requests. Now sending transfer-encoding: chunked rather than content-length: 0. * http: fixed missing support for appending to predefined inline headers, e.g. *authorization*, in features that interact with request and response headers, e.g. :ref:`request_headers_to_add - `. For example, a + `. For example, a request header *authorization: token1* will appear as *authorization: token1,token2*, after having :ref:`request_headers_to_add - ` with *authorization: + ` with *authorization: token2* applied. * http: response filters not applied to early error paths such as http_parser generated 400s. * http: restrictions added to reject *:*-prefixed pseudo-headers in :ref:`custom - request headers `. -* http: :ref:`hpack_table_size ` now controls + request headers `. +* http: :ref:`hpack_table_size ` now controls dynamic table size of both: encoder and decoder. * http: added support for removing request headers using :ref:`request_headers_to_remove - `. -* http: added support for a :ref:`delayed close timeout ` to mitigate race conditions when closing connections to downstream HTTP clients. The timeout defaults to 1 second. + `. +* http: added support for a :ref:`delayed close timeout ` to mitigate race conditions when closing connections to downstream HTTP clients. The timeout defaults to 1 second. * jwt-authn filter: add support for per route JWT requirements. -* listeners: added the ability to match :ref:`FilterChain ` using - :ref:`destination_port ` and - :ref:`prefix_ranges `. -* lua: added :ref:`connection() ` wrapper and *ssl()* API. -* lua: added :ref:`streamInfo() ` wrapper and *protocol()* API. -* lua: added :ref:`streamInfo():dynamicMetadata() ` API. 
-* network: introduced :ref:`sni_cluster ` network filter that forwards connections to the +* listeners: added the ability to match :ref:`FilterChain ` using + :ref:`destination_port ` and + :ref:`prefix_ranges `. +* lua: added :ref:`connection() ` wrapper and *ssl()* API. +* lua: added :ref:`streamInfo() ` wrapper and *protocol()* API. +* lua: added :ref:`streamInfo():dynamicMetadata() ` API. +* network: introduced :ref:`sni_cluster ` network filter that forwards connections to the upstream cluster specified by the SNI value presented by the client during a TLS handshake. * proxy_protocol: added support for HAProxy Proxy Protocol v2 (AF_INET/AF_INET6 only). * ratelimit: added support for :repo:`api/envoy/service/ratelimit/v2/rls.proto`. @@ -77,25 +77,25 @@ Changes Envoy can use either proto to send client requests to a ratelimit server with the use of the ``use_data_plane_proto`` boolean flag in the ratelimit configuration. Support for the legacy proto ``source/common/ratelimit/ratelimit.proto`` is deprecated and will be removed at the start of the 1.9.0 release cycle. -* ratelimit: added :ref:`failure_mode_deny ` option to control traffic flow in +* ratelimit: added :ref:`failure_mode_deny ` option to control traffic flow in case of rate limit service error. -* rbac config: added a :ref:`principal_name ` field and +* rbac config: added a :ref:`principal_name ` field and removed the old ``name`` field to give more flexibility for matching certificate identity. -* rbac network filter: a :ref:`role-based access control network filter ` has been added. -* rest-api: added ability to set the :ref:`request timeout ` for REST API requests. +* rbac network filter: a :ref:`role-based access control network filter ` has been added. +* rest-api: added ability to set the :ref:`request timeout ` for REST API requests. * route checker: added v2 config support and removed support for v1 configs. -* router: added ability to set request/response headers at the :ref:`v1.8.0:envoy_api_msg_route.Route` level. -* stats: added :ref:`option to configure the DogStatsD metric name prefix ` to DogStatsdSink. -* tcp_proxy: added support for :ref:`weighted clusters `. +* router: added ability to set request/response headers at the :ref:`v1.8:envoy_api_msg_route.Route` level. +* stats: added :ref:`option to configure the DogStatsD metric name prefix ` to DogStatsdSink. +* tcp_proxy: added support for :ref:`weighted clusters `. * thrift_proxy: introduced thrift routing, moved configuration to correct location * thrift_proxy: introduced thrift configurable decoder filters -* tls: implemented :ref:`Secret Discovery Service `. +* tls: implemented :ref:`Secret Discovery Service `. * tracing: added support for configuration of :ref:`tracing sampling - `. + `. * upstream: added configuration option to the subset load balancer to take locality weights into account when selecting a host from a subset. -* upstream: require opt-in to use the :ref:`x-envoy-original-dst-host ` header - for overriding destination address when using the :ref:`Original Destination ` +* upstream: require opt-in to use the :ref:`x-envoy-original-dst-host ` header + for overriding destination address when using the :ref:`Original Destination ` load balancing policy. 
Deprecated diff --git a/docs/root/version_history/v1.9.0.rst b/docs/root/version_history/v1.9.0.rst index d9056fb3aeb4..5ef601694542 100644 --- a/docs/root/version_history/v1.9.0.rst +++ b/docs/root/version_history/v1.9.0.rst @@ -4,31 +4,31 @@ Changes ------- -* access log: added a :ref:`JSON logging mode ` to output access logs in JSON format. +* access log: added a :ref:`JSON logging mode ` to output access logs in JSON format. * access log: added dynamic metadata to access log messages streamed over gRPC. * access log: added DOWNSTREAM_CONNECTION_TERMINATION. * admin: :http:post:`/logging` now responds with 200 while there are no params. -* admin: added support for displaying subject alternate names in :ref:`certs ` end point. +* admin: added support for displaying subject alternate names in :ref:`certs ` end point. * admin: added host weight to the :http:get:`/clusters?format=json` end point response. * admin: :http:get:`/server_info` now responds with a JSON object instead of a single string. * admin: :http:get:`/server_info` now exposes what stage of initialization the server is currently in. * admin: added support for displaying command line options in :http:get:`/server_info` end point. * circuit-breaker: added cx_open, rq_pending_open, rq_open and rq_retry_open gauges to expose live - state via :ref:`circuit breakers statistics `. -* cluster: set a default of 1s for :ref:`option `. + state via :ref:`circuit breakers statistics `. +* cluster: set a default of 1s for :ref:`option `. * config: removed support for the v1 API. -* config: added support for :ref:`rate limiting ` discovery request calls. -* cors: added :ref:`invalid/valid stats ` to filter. +* config: added support for :ref:`rate limiting ` discovery request calls. +* cors: added :ref:`invalid/valid stats ` to filter. * ext-authz: added support for providing per route config - optionally disable the filter and provide context extensions. * fault: removed integer percentage support. * grpc-json: added support for :ref:`ignoring query parameters - `. -* health check: added :ref:`logging health check failure events `. + `. +* health check: added :ref:`logging health check failure events `. * health check: added ability to set :ref:`authority header value - ` for gRPC health check. -* http: added HTTP/2 WebSocket proxying via :ref:`extended CONNECT `. + ` for gRPC health check. +* http: added HTTP/2 WebSocket proxying via :ref:`extended CONNECT `. * http: added limits to the number and length of header modifications in all fields request_headers_to_add and response_headers_to_add. These limits are very high and should only be used as a last-resort safeguard. -* http: added support for a :ref:`request timeout `. The timeout is disabled by default. +* http: added support for a :ref:`request timeout `. The timeout is disabled by default. * http: no longer adding whitespace when appending X-Forwarded-For headers. **Warning**: this is not compatible with 1.7.0 builds prior to `9d3a4eb4ac44be9f0651fcc7f87ad98c538b01ee `_. See `#3611 `_ for details. @@ -36,66 +36,66 @@ Changes value to override the default HTTP to gRPC status mapping. * http: no longer close the TCP connection when a HTTP/1 request is retried due to a response with empty body. -* http: added support for more gRPC content-type headers in :ref:`gRPC bridge filter `, like application/grpc+proto. +* http: added support for more gRPC content-type headers in :ref:`gRPC bridge filter `, like application/grpc+proto. 
* listeners: all listener filters are now governed by the :ref:`listener_filters_timeout - ` setting. The hard coded 15s timeout in - the :ref:`TLS inspector listener filter ` is superseded by + ` setting. The hard coded 15s timeout in + the :ref:`TLS inspector listener filter ` is superseded by this setting. -* listeners: added the ability to match :ref:`FilterChain ` using :ref:`source_type `. -* load balancer: added a `configuration ` option to specify the number of choices made in P2C. +* listeners: added the ability to match :ref:`FilterChain ` using :ref:`source_type `. +* load balancer: added a `configuration ` option to specify the number of choices made in P2C. * logging: added missing [ in log prefix. -* mongo_proxy: added :ref:`dynamic metadata `. +* mongo_proxy: added :ref:`dynamic metadata `. * network: removed the reference to ``FilterState`` in ``Connection`` in favor of ``StreamInfo``. -* rate-limit: added :ref:`configuration ` +* rate-limit: added :ref:`configuration ` to specify whether the ``GrpcStatus`` status returned should be ``RESOURCE_EXHAUSTED`` or ``UNAVAILABLE`` when a gRPC call is rate limited. * rate-limit: removed support for the legacy ratelimit service and made the data-plane-api - :ref:`rls.proto ` based implementation default. -* rate-limit: removed the deprecated cluster_name attribute in :ref:`rate limit service configuration `. -* rate-limit: added :ref:`rate_limit_service ` configuration to filters. + :ref:`rls.proto ` based implementation default. +* rate-limit: removed the deprecated cluster_name attribute in :ref:`rate limit service configuration `. +* rate-limit: added :ref:`rate_limit_service ` configuration to filters. * rbac: added dynamic metadata to the network level filter. -* rbac: added support for permission matching by :ref:`requested server name `. +* rbac: added support for permission matching by :ref:`requested server name `. * redis: static cluster configuration is no longer required. Redis proxy will work with clusters delivered via CDS. -* router: added ability to configure arbitrary :ref:`retriable status codes. ` +* router: added ability to configure arbitrary :ref:`retriable status codes. ` * router: added ability to set attempt count in upstream requests, see :ref:`virtual host's include request - attempt count flag `. -* router: added internal :ref:`grpc-retry-on ` policy. -* router: added :ref:`scheme_redirect ` and - :ref:`port_redirect ` to define the respective + attempt count flag `. +* router: added internal :ref:`grpc-retry-on ` policy. +* router: added :ref:`scheme_redirect ` and + :ref:`port_redirect ` to define the respective scheme and port rewriting RedirectAction. -* router: when :ref:`max_grpc_timeout ` +* router: when :ref:`max_grpc_timeout ` is set, Envoy will now add or update the grpc-timeout header to reflect Envoy's expected timeout. * router: per try timeouts now starts when an upstream stream is ready instead of when the request has been fully decoded by Envoy. -* router: added support for not retrying :ref:`rate limited requests `. Rate limit filter now sets the :ref:`x-envoy-ratelimited ` +* router: added support for not retrying :ref:`rate limited requests `. Rate limit filter now sets the :ref:`x-envoy-ratelimited ` header so the rate limited requests that may have been retried earlier will not be retried with this change. -* router: added support for enabling upgrades on a :ref:`per-route ` basis. +* router: added support for enabling upgrades on a :ref:`per-route ` basis. 
* router: support configuring a default fraction of mirror traffic via - :ref:`runtime_fraction `. -* sandbox: added :ref:`cors sandbox `. + :ref:`runtime_fraction `. +* sandbox: added :ref:`cors sandbox `. * server: added ``SIGINT`` (Ctrl-C) handler to gracefully shutdown Envoy like ``SIGTERM``. -* stats: added :ref:`stats_matcher ` to the bootstrap config for granular control of stat instantiation. +* stats: added :ref:`stats_matcher ` to the bootstrap config for granular control of stat instantiation. * stream: renamed the ``RequestInfo`` namespace to ``StreamInfo`` to better match its behaviour within TCP and HTTP implementations. * stream: renamed ``perRequestState`` to ``filterState`` in ``StreamInfo``. * stream: added ``downstreamDirectRemoteAddress`` to ``StreamInfo``. * thrift_proxy: introduced thrift rate limiter filter. * tls: added ssl.curves., ssl.sigalgs. and ssl.versions. to - :ref:`listener metrics ` to track TLS algorithms and versions in use. -* tls: added support for :ref:`client-side session resumption `. -* tls: added support for CRLs in :ref:`trusted_ca `. -* tls: added support for :ref:`multiple server TLS certificates `. -* tls: added support for :ref:`password encrypted private keys `. -* tls: added the ability to build :ref:`BoringSSL FIPS ` using ``--define boringssl=fips`` Bazel option. + :ref:`listener metrics ` to track TLS algorithms and versions in use. +* tls: added support for :ref:`client-side session resumption `. +* tls: added support for CRLs in :ref:`trusted_ca `. +* tls: added support for :ref:`multiple server TLS certificates `. +* tls: added support for :ref:`password encrypted private keys `. +* tls: added the ability to build :ref:`BoringSSL FIPS ` using ``--define boringssl=fips`` Bazel option. * tls: removed support for ECDSA certificates with curves other than P-256. * tls: removed support for RSA certificates with keys smaller than 2048-bits. -* tracing: added support to the Zipkin tracer for the :ref:`b3 ` single header format. -* tracing: added support for :ref:`Datadog ` tracer. -* upstream: added :ref:`scale_locality_weight ` to enable +* tracing: added support to the Zipkin tracer for the :ref:`b3 ` single header format. +* tracing: added support for :ref:`Datadog ` tracer. +* upstream: added :ref:`scale_locality_weight ` to enable scaling locality weights by number of hosts removed by subset lb predicates. -* upstream: changed how load calculation for :ref:`priority levels ` and :ref:`panic thresholds ` interact. As long as normalized total health is 100% panic thresholds are disregarded. -* upstream: changed the default hash for :ref:`ring hash ` from std::hash to `xxHash `_. +* upstream: changed how load calculation for :ref:`priority levels ` and :ref:`panic thresholds ` interact. As long as normalized total health is 100% panic thresholds are disregarded. +* upstream: changed the default hash for :ref:`ring hash ` from std::hash to `xxHash `_. * upstream: when using active health checking and STRICT_DNS with several addresses that resolve to the same hosts, Envoy will now health check each host independently. diff --git a/docs/root/version_history/v1.9.1.rst b/docs/root/version_history/v1.9.1.rst index 7027026e5094..e7121cdc51fa 100644 --- a/docs/root/version_history/v1.9.1.rst +++ b/docs/root/version_history/v1.9.1.rst @@ -7,5 +7,5 @@ Changes * http: fixed CVE-2019-9900 by rejecting HTTP/1.x headers with embedded NUL characters. * http: fixed CVE-2019-9901 by normalizing HTTP paths prior to routing or L7 data plane processing. 
This defaults off and is configurable via either HTTP connection manager :ref:`normalize_path - ` - or the :ref:`runtime `. + ` + or the :ref:`runtime `. diff --git a/docs/root/version_history/version_history.rst b/docs/root/version_history/version_history.rst index 371bc2c39878..7cca61970b5b 100644 --- a/docs/root/version_history/version_history.rst +++ b/docs/root/version_history/version_history.rst @@ -7,6 +7,7 @@ Version history :titlesonly: current + v1.20.0 v1.19.1 v1.19.0 v1.18.4 diff --git a/docs/v2_mapping.json b/docs/v2_mapping.json index 8d437476dff6..4e9b014276d0 100644 --- a/docs/v2_mapping.json +++ b/docs/v2_mapping.json @@ -56,7 +56,7 @@ "envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto": "envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto", "envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto": "envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto", "envoy/extensions/filters/http/buffer/v3/buffer.proto": "envoy/config/filter/http/buffer/v2/buffer.proto", - "envoy/extensions/filters/http/cache/v3alpha/cache.proto": "envoy/config/filter/http/cache/v2alpha/cache.proto", + "envoy/extensions/filters/http/cache/v3/cache.proto": "envoy/config/filter/http/cache/v2alpha/cache.proto", "envoy/extensions/filters/http/compressor/v3/compressor.proto": "envoy/config/filter/http/compressor/v2/compressor.proto", "envoy/extensions/filters/http/cors/v3/cors.proto": "envoy/config/filter/http/cors/v2/cors.proto", "envoy/extensions/filters/http/csrf/v3/csrf.proto": "envoy/config/filter/http/csrf/v2/csrf.proto", diff --git a/envoy/common/backoff_strategy.h b/envoy/common/backoff_strategy.h index b51880a2e2e0..2288d93beb7b 100644 --- a/envoy/common/backoff_strategy.h +++ b/envoy/common/backoff_strategy.h @@ -22,6 +22,12 @@ class BackOffStrategy { * Resets the intervals so that the back off intervals can start again. */ virtual void reset() PURE; + + /** + * Resets the interval with a (potentially) new starting point. + * @param base_interval the new base interval for the backoff strategy. 
+ */ + virtual void reset(uint64_t base_interval) PURE; }; using BackOffStrategyPtr = std::unique_ptr; diff --git a/envoy/common/platform.h b/envoy/common/platform.h index 96ed53f8562a..e01dff02e140 100644 --- a/envoy/common/platform.h +++ b/envoy/common/platform.h @@ -178,7 +178,9 @@ constexpr bool win32SupportsOriginalDestination() { #include #include #include +#if !defined(DO_NOT_INCLUDE_NETINET_TCP_H) #include +#endif #include // for UDP_GRO #include #include // for mode_t @@ -305,6 +307,14 @@ struct mmsghdr { #endif // __ANDROID_API__ < 24 #endif // ifdef __ANDROID_API__ +// TODO: Remove once bazel supports NDKs > 21 +#define SUPPORTS_CPP_17_CONTIGUOUS_ITERATOR +#ifdef __ANDROID_API__ +#if __ANDROID_API__ < 24 +#undef SUPPORTS_CPP_17_CONTIGUOUS_ITERATOR +#endif // __ANDROID_API__ < 24 +#endif // ifdef __ANDROID_API__ + // https://android.googlesource.com/platform/bionic/+/master/docs/status.md // ``pthread_getname_np`` is introduced in API 26 #define SUPPORTS_PTHREAD_NAMING 0 diff --git a/envoy/config/extension_config_provider.h b/envoy/config/extension_config_provider.h index ce84225eb623..e55ca63543f6 100644 --- a/envoy/config/extension_config_provider.h +++ b/envoy/config/extension_config_provider.h @@ -35,17 +35,20 @@ template class ExtensionConfigProvider { virtual absl::optional config() PURE; }; -template class DynamicExtensionConfigProviderBase { +class DynamicExtensionConfigProviderBase { public: virtual ~DynamicExtensionConfigProviderBase() = default; /** - * Update the provider with a new configuration. - * @param config is an extension factory callback to replace the existing configuration. + * Update the provider with a new configuration. This interface accepts proto rather than a + * factory callback so that it can be generic over factory types. If instantiating the factory + * throws, it should only do so on the main thread, before any changes are applied to workers. + * @param config is the new configuration. It is expected that the configuration has already been + * validated. * @param version_info is the version of the new extension configuration. * @param cb the continuation callback for a completed configuration application on all threads. 
*/ - virtual void onConfigUpdate(FactoryCallback config, const std::string& version_info, + virtual void onConfigUpdate(const Protobuf::Message& config, const std::string& version_info, ConfigAppliedCb applied_on_all_threads) PURE; /** @@ -61,7 +64,7 @@ template class DynamicExtensionConfigProviderBase { }; template -class DynamicExtensionConfigProvider : public DynamicExtensionConfigProviderBase, +class DynamicExtensionConfigProvider : public DynamicExtensionConfigProviderBase, public ExtensionConfigProvider {}; } // namespace Config diff --git a/envoy/config/grpc_mux.h b/envoy/config/grpc_mux.h index f3c5d7c00808..0139cb3d9524 100644 --- a/envoy/config/grpc_mux.h +++ b/envoy/config/grpc_mux.h @@ -105,9 +105,6 @@ class GrpcMux { virtual void requestOnDemandUpdate(const std::string& type_url, const absl::flat_hash_set& for_update) PURE; - - // TODO (dmitri-d) remove this when legacy muxes have been removed - virtual bool isUnified() const { return false; } }; using GrpcMuxPtr = std::unique_ptr; diff --git a/envoy/config/subscription.h b/envoy/config/subscription.h index 8f7ba87c2036..ed7d6ecd8374 100644 --- a/envoy/config/subscription.h +++ b/envoy/config/subscription.h @@ -146,6 +146,7 @@ class UntypedConfigUpdateCallbacks { public: virtual ~UntypedConfigUpdateCallbacks() = default; + // TODO (dmitri-d) remove this method when legacy sotw mux has been removed. /** * Called when a state-of-the-world configuration update is received. (State-of-the-world is * everything other than delta gRPC - filesystem, HTTP, non-delta gRPC). @@ -158,6 +159,17 @@ class UntypedConfigUpdateCallbacks { virtual void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, const std::string& version_info) PURE; + /** + * Called when a non-delta gRPC configuration update is received. + * @param resources vector of fetched resources corresponding to the configuration update. + * @param version_info supplies the version information as supplied by the xDS discovery response. + * @throw EnvoyException with reason if the configuration is rejected. Otherwise the configuration + * is accepted. Accepted configurations have their version_info reflected in subsequent + * requests. + */ + virtual void onConfigUpdate(const std::vector& resources, + const std::string& version_info) PURE; + /** * Called when a delta configuration update is received. * @param added_resources resources newly added since the previous fetch. diff --git a/envoy/event/dispatcher.h b/envoy/event/dispatcher.h index 9b887a0c4b26..94cb73e5c4fb 100644 --- a/envoy/event/dispatcher.h +++ b/envoy/event/dispatcher.h @@ -218,19 +218,6 @@ class Dispatcher : public DispatcherBase, public ScopeTracker { Network::TransportSocketPtr&& transport_socket, const Network::ConnectionSocket::OptionsSharedPtr& options) PURE; - /** - * Creates an async DNS resolver. The resolver should only be used on the thread that runs this - * dispatcher. - * @param resolvers supplies the addresses of DNS resolvers that this resolver should use. If left - * empty, it will not use any specific resolvers, but use defaults (/etc/resolv.conf) - * @param dns_resolver_options supplies the aggregated area options flags needed for dns resolver - * init. - * @return Network::DnsResolverSharedPtr that is owned by the caller. - */ - virtual Network::DnsResolverSharedPtr - createDnsResolver(const std::vector& resolvers, - const envoy::config::core::v3::DnsResolverOptions& dns_resolver_options) PURE; - /** * @return Filesystem::WatcherPtr a filesystem watcher owned by the caller. 
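A standalone toy model (all types below are local stand-ins, not Envoy's) of the new proto-based ``onConfigUpdate()`` contract on ``DynamicExtensionConfigProviderBase``: the already-validated message is turned into a concrete config once, up front, and the continuation callback fires only after the update has been handed to every worker:

  #include <functional>
  #include <iostream>
  #include <string>

  struct FakeProtoMessage { std::string filter_name; };  // stand-in for Protobuf::Message
  using ConfigAppliedCb = std::function<void()>;

  class ModelDynamicProvider {
  public:
    void onConfigUpdate(const FakeProtoMessage& config, const std::string& version_info,
                        ConfigAppliedCb applied_on_all_threads) {
      // "Main thread" work: build the concrete configuration from the validated
      // proto. If this threw, no worker would have observed a partial update.
      active_config_ = config.filter_name;
      version_ = version_info;
      // Pretend to post the new config to each worker dispatcher.
      for (int worker = 0; worker < 4; ++worker) {
        std::cout << "worker " << worker << " now using " << active_config_ << "\n";
      }
      applied_on_all_threads();
    }

  private:
    std::string active_config_;
    std::string version_;
  };

  int main() {
    ModelDynamicProvider provider;
    provider.onConfigUpdate({"some.filter.config"}, "v42",
                            [] { std::cout << "applied on all threads\n"; });
  }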
*/ diff --git a/envoy/http/BUILD b/envoy/http/BUILD index 3c741ea6b449..b9f7bfe3089b 100644 --- a/envoy/http/BUILD +++ b/envoy/http/BUILD @@ -13,6 +13,7 @@ envoy_cc_library( hdrs = ["alternate_protocols_cache.h"], deps = [ "//envoy/common:time_interface", + "//envoy/event:dispatcher_interface", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/envoy/http/alternate_protocols_cache.h b/envoy/http/alternate_protocols_cache.h index e688fb4417fd..535f40bd982d 100644 --- a/envoy/http/alternate_protocols_cache.h +++ b/envoy/http/alternate_protocols_cache.h @@ -9,6 +9,7 @@ #include "envoy/common/optref.h" #include "envoy/common/time.h" #include "envoy/config/core/v3/protocol.pb.h" +#include "envoy/event/dispatcher.h" #include "absl/strings/string_view.h" @@ -128,9 +129,11 @@ class AlternateProtocolsCacheManager { * Get an alternate protocols cache. * @param config supplies the cache parameters. If a cache exists with the same parameters it * will be returned, otherwise a new one will be created. + * @param dispatcher supplies the current thread's dispatcher, for cache creation. */ virtual AlternateProtocolsCacheSharedPtr - getCache(const envoy::config::core::v3::AlternateProtocolsCacheOptions& config) PURE; + getCache(const envoy::config::core::v3::AlternateProtocolsCacheOptions& config, + Event::Dispatcher& dispatcher) PURE; }; using AlternateProtocolsCacheManagerSharedPtr = std::shared_ptr; diff --git a/envoy/http/codec.h b/envoy/http/codec.h index 023b6a129bed..9a336b267fd6 100644 --- a/envoy/http/codec.h +++ b/envoy/http/codec.h @@ -225,7 +225,7 @@ class RequestDecoder : public virtual StreamDecoder { /** * @return StreamInfo::StreamInfo& the stream_info for this stream. */ - virtual const StreamInfo::StreamInfo& streamInfo() const PURE; + virtual StreamInfo::StreamInfo& streamInfo() PURE; }; /** @@ -356,6 +356,11 @@ class Stream : public StreamResetHandler { * @param the account to assign this stream. */ virtual void setAccount(Buffer::BufferMemoryAccountSharedPtr account) PURE; + + /** + * Get the bytes meter for this stream. + */ + virtual const StreamInfo::BytesMeterSharedPtr& bytesMeter() PURE; }; /** @@ -389,6 +394,14 @@ class ConnectionCallbacks { * @param ReceivedSettings the settings received from the peer. */ virtual void onSettings(ReceivedSettings& settings) { UNREFERENCED_PARAMETER(settings); } + + /** + * Fires when the MAX_STREAMS frame is received from the peer. + * This is an HTTP/3 frame, indicating the new maximum stream ID which can be opened. + * This may occur multiple times across the lifetime of an HTTP/3 connection. + * @param num_streams the number of streams now allowed to be opened. + */ + virtual void onMaxStreamsChanged(uint32_t num_streams) { UNREFERENCED_PARAMETER(num_streams); } }; /** diff --git a/envoy/http/conn_pool.h b/envoy/http/conn_pool.h index 5cab296be468..5ebbd8959c6d 100644 --- a/envoy/http/conn_pool.h +++ b/envoy/http/conn_pool.h @@ -47,6 +47,34 @@ class Callbacks { absl::optional protocol) PURE; }; +class Instance; + +/** + * Pool callbacks invoked to track the lifetime of connections in the pool. + */ +class ConnectionLifetimeCallbacks { +public: + virtual ~ConnectionLifetimeCallbacks() = default; + + /** + * Called when a connection is open for requests in a pool. + * @param pool which the connection is associated with. + * @param hash_key the hash key used for this connection. + * @param connection newly open connection. 
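A small sketch of a consumer of the new ``ConnectionCallbacks::onMaxStreamsChanged()`` hook added to ``envoy/http/codec.h`` above; since the base class provides a no-op default, only callers that care about the HTTP/3 stream budget (e.g. connection pools) need to override it. The observer class below is hypothetical:

  #include <cstdint>
  #include <iostream>

  // Hypothetical observer; in Envoy this would derive from Http::ConnectionCallbacks.
  class StreamBudgetObserver {
  public:
    // Fired each time the peer raises the stream limit via an HTTP/3 MAX_STREAMS frame.
    void onMaxStreamsChanged(uint32_t num_streams) {
      allowed_streams_ = num_streams;
      std::cout << "peer now allows " << allowed_streams_ << " streams\n";
    }

    uint32_t allowedStreams() const { return allowed_streams_; }

  private:
    uint32_t allowed_streams_{0};
  };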
+ */ + virtual void onConnectionOpen(Instance& pool, std::vector& hash_key, + const Network::Connection& connection) PURE; + + /** + * Called when a connection is draining and may no longer be used for requests. + * @param pool which the connection is associated with. + * @param hash_key the hash key used for this connection. + * @param connection newly open connection. + */ + virtual void onConnectionDraining(Instance& pool, std::vector& hash_key, + const Network::Connection& connection) PURE; +}; + /** * An instance of a generic connection pool. */ diff --git a/envoy/http/filter.h b/envoy/http/filter.h index 6dcf6f2f42f3..9a6527ca6519 100644 --- a/envoy/http/filter.h +++ b/envoy/http/filter.h @@ -1043,6 +1043,12 @@ class FilterChainFactoryCallbacks { * @param handler supplies the handler to add. */ virtual void addAccessLogHandler(AccessLog::InstanceSharedPtr handler) PURE; + + /** + * Allows filters to access the thread local dispatcher. + * @param return the worker thread's dispatcher. + */ + virtual Event::Dispatcher& dispatcher() PURE; }; /** diff --git a/envoy/http/query_params.h b/envoy/http/query_params.h index d30ae58b1ab3..e500f93ca3c8 100644 --- a/envoy/http/query_params.h +++ b/envoy/http/query_params.h @@ -2,6 +2,7 @@ #include #include +#include namespace Envoy { namespace Http { @@ -12,6 +13,7 @@ namespace Utility { // https://github.com/apache/incubator-pagespeed-mod/blob/master/pagespeed/kernel/http/query_params.h using QueryParams = std::map; +using QueryParamsVector = std::vector>; } // namespace Utility } // namespace Http diff --git a/envoy/network/BUILD b/envoy/network/BUILD index 3caab27a2aaa..3a292bdd6cd7 100644 --- a/envoy/network/BUILD +++ b/envoy/network/BUILD @@ -63,6 +63,15 @@ envoy_cc_library( deps = ["//envoy/network:address_interface"], ) +envoy_cc_library( + name = "dns_resolver_interface", + hdrs = ["dns_resolver.h"], + deps = [ + "//envoy/api:api_interface", + "//source/common/config:utility_lib", + ], +) + envoy_cc_library( name = "drain_decision_interface", hdrs = ["drain_decision.h"], @@ -177,6 +186,7 @@ envoy_cc_library( ":udp_packet_writer_handler_interface", "//envoy/access_log:access_log_interface", "//envoy/common:resource_interface", + "//envoy/config:typed_metadata_interface", "//envoy/init:manager_interface", "//envoy/stats:stats_interface", "//source/common/common:interval_value", diff --git a/envoy/network/dns.h b/envoy/network/dns.h index 38dd39bf4576..072829c02765 100644 --- a/envoy/network/dns.h +++ b/envoy/network/dns.h @@ -46,7 +46,7 @@ struct DnsResponse { const std::chrono::seconds ttl_; }; -enum class DnsLookupFamily { V4Only, V6Only, Auto, V4Preferred }; +enum class DnsLookupFamily { V4Only, V6Only, Auto, V4Preferred, All }; /** * An asynchronous DNS resolver. diff --git a/envoy/network/dns_resolver.h b/envoy/network/dns_resolver.h new file mode 100644 index 000000000000..2a326f152fe8 --- /dev/null +++ b/envoy/network/dns_resolver.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/api/api.h" +#include "envoy/event/dispatcher.h" +#include "envoy/network/dns.h" + +#include "source/common/config/utility.h" + +namespace Envoy { +namespace Network { + +constexpr absl::string_view CaresDnsResolver = "envoy.network.dns_resolver.cares"; +constexpr absl::string_view AppleDnsResolver = "envoy.network.dns_resolver.apple"; +constexpr absl::string_view DnsResolverCategory = "envoy.network.dns_resolver"; + +class DnsResolverFactory : public Config::TypedFactory { +public: + /* + * @returns a DnsResolver object. 
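A standalone model of the ``ConnectionLifetimeCallbacks`` interface introduced in ``envoy/http/conn_pool.h`` above: a tracker that records which connections are currently usable per hash key, the kind of bookkeeping a connection-reusing load balancer needs. The pool and connection types are local stand-ins, and a byte-vector hash key is assumed here:

  #include <cstdint>
  #include <map>
  #include <set>
  #include <vector>

  struct FakePool {};
  struct FakeConnection { uint64_t id; };

  // Models Http::ConnectionPool::ConnectionLifetimeCallbacks.
  class LifetimeTracker {
  public:
    void onConnectionOpen(FakePool&, const std::vector<uint8_t>& hash_key,
                          const FakeConnection& connection) {
      usable_[hash_key].insert(connection.id);
    }

    void onConnectionDraining(FakePool&, const std::vector<uint8_t>& hash_key,
                              const FakeConnection& connection) {
      usable_[hash_key].erase(connection.id);
    }

    bool hasUsableConnection(const std::vector<uint8_t>& hash_key) const {
      auto it = usable_.find(hash_key);
      return it != usable_.end() && !it->second.empty();
    }

  private:
    std::map<std::vector<uint8_t>, std::set<uint64_t>> usable_;
  };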
+ * @param dispatcher: the local dispatcher thread + * @param api: API interface to interact with system resources + * @param typed_dns_resolver_config: the typed DNS resolver config + */ + virtual DnsResolverSharedPtr createDnsResolver( + Event::Dispatcher& dispatcher, Api::Api& api, + const envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) const PURE; + + std::string category() const override { return std::string(DnsResolverCategory); } +}; + +} // namespace Network +} // namespace Envoy diff --git a/envoy/network/filter.h b/envoy/network/filter.h index f14a11bf53c8..1328d335f367 100644 --- a/envoy/network/filter.h +++ b/envoy/network/filter.h @@ -444,15 +444,17 @@ class UdpListenerReadFilter { /** * Called when a new data packet is received on a UDP listener. * @param data supplies the read data which may be modified. + * @return status used by the filter manager to manage further filter iteration. */ - virtual void onData(UdpRecvData& data) PURE; + virtual FilterStatus onData(UdpRecvData& data) PURE; /** * Called when there is an error event in the receive data path. * * @param error_code supplies the received error on the listener. + * @return status used by the filter manager to manage further filter iteration. */ - virtual void onReceiveError(Api::IoError::IoErrorCode error_code) PURE; + virtual FilterStatus onReceiveError(Api::IoError::IoErrorCode error_code) PURE; protected: /** diff --git a/envoy/network/io_handle.h b/envoy/network/io_handle.h index 644228762fc5..a960887a56a1 100644 --- a/envoy/network/io_handle.h +++ b/envoy/network/io_handle.h @@ -108,6 +108,7 @@ class IoHandle { * Send a message to the address. * @param slices points to the location of data to be sent. * @param num_slice indicates number of slices |slices| contains. + * @param flags flags to pass to the underlying sendmsg function (see man 2 sendmsg). * @param self_ip is the source address whose port should be ignored. Nullptr * if caller wants kernel to select source address. * @param peer_address is the destination address. @@ -202,7 +203,6 @@ class IoHandle { /** * Bind to address. The handle should have been created with a call to socket() * @param address address to bind to. - * @param addrlen address length * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call * is successful, errno_ shouldn't be used. */ @@ -220,7 +220,6 @@ class IoHandle { * Accept on listening handle * @param addr remote address to be returned * @param addrlen remote address length - * @param flags flags to be applied to accepted session * @return accepted IoHandlePtr */ virtual std::unique_ptr accept(struct sockaddr* addr, socklen_t* addrlen) PURE; @@ -229,7 +228,6 @@ class IoHandle { * Connect to address. The handle should have been created with a call to socket() * on this object. * @param address remote address to connect to. - * @param addrlen remote address length * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call * is successful, errno_ shouldn't be used. 
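A simplified sketch of a UDP listener read filter under the new contract in ``envoy/network/filter.h`` above, where ``onData()`` and ``onReceiveError()`` return a ``FilterStatus`` so the filter manager can stop iteration; the enum and data types below are local stand-ins for the Envoy ones:

  #include <string>

  enum class FilterStatus { Continue, StopIteration };
  struct UdpRecvData { std::string payload; };
  enum class IoErrorCode { Again, Permission, Other };

  // Models Network::UdpListenerReadFilter.
  class DropEmptyPacketsFilter {
  public:
    FilterStatus onData(UdpRecvData& data) {
      // Drop empty datagrams and stop the remaining filters from seeing them.
      return data.payload.empty() ? FilterStatus::StopIteration : FilterStatus::Continue;
    }

    FilterStatus onReceiveError(IoErrorCode) {
      // Errors are observed but do not block the rest of the chain.
      return FilterStatus::Continue;
    }
  };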
*/ diff --git a/envoy/network/listener.h b/envoy/network/listener.h index ad2b69a6c750..2e4598de0451 100644 --- a/envoy/network/listener.h +++ b/envoy/network/listener.h @@ -10,6 +10,7 @@ #include "envoy/common/resource.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/listener/v3/udp_listener_config.pb.h" +#include "envoy/config/typed_metadata.h" #include "envoy/init/manager.h" #include "envoy/network/connection.h" #include "envoy/network/connection_balancer.h" @@ -454,5 +455,10 @@ class UdpListenerWorkerRouter { using UdpListenerWorkerRouterPtr = std::unique_ptr; +/** + * Base class for all listener typed metadata factories. + */ +class ListenerTypedMetadataFactory : public Envoy::Config::TypedMetadataFactory {}; + } // namespace Network } // namespace Envoy diff --git a/envoy/network/socket.h b/envoy/network/socket.h index 225b76297a06..e57899b7f9ea 100644 --- a/envoy/network/socket.h +++ b/envoy/network/socket.h @@ -299,8 +299,18 @@ class Socket { virtual absl::optional
getOptionDetails(const Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const PURE; + + /** + * Whether the socket implementation is supported. Real implementations should typically return + * true. Placeholder implementations may indicate such by returning false. Note this does NOT + * inherently prevent an option from being applied if it's passed to socket/connection + * interfaces. + * @return Whether this is a supported socket option. + */ + virtual bool isSupported() const PURE; }; + using OptionConstPtr = std::unique_ptr; using OptionConstSharedPtr = std::shared_ptr; using Options = std::vector; using OptionsSharedPtr = std::shared_ptr; diff --git a/envoy/protobuf/message_validator.h b/envoy/protobuf/message_validator.h index 6a2f02c274c7..c31921832796 100644 --- a/envoy/protobuf/message_validator.h +++ b/envoy/protobuf/message_validator.h @@ -56,6 +56,12 @@ class ValidationVisitor { * throw an exception. */ virtual void onDeprecatedField(absl::string_view description, bool soft_deprecation) PURE; + + /** + * Called when a message or field is marked as work in progress or a message is contained in a + * proto file marked as work in progress. + */ + virtual void onWorkInProgress(absl::string_view description) PURE; }; class ValidationContext { diff --git a/envoy/router/router.h b/envoy/router/router.h index b7d8f8db2f3a..c829262d1403 100644 --- a/envoy/router/router.h +++ b/envoy/router/router.h @@ -1299,7 +1299,6 @@ class GenericUpstream { virtual void readDisable(bool disable) PURE; /** * Reset the stream. No events will fire beyond this point. - * @param reason supplies the reset reason. */ virtual void resetStream() PURE; @@ -1308,6 +1307,11 @@ class GenericUpstream { * @param the account to assign the generic upstream. */ virtual void setAccount(Buffer::BufferMemoryAccountSharedPtr account) PURE; + + /** + * Get the bytes meter for this stream. + */ + virtual const StreamInfo::BytesMeterSharedPtr& bytesMeter() PURE; }; using GenericConnPoolPtr = std::unique_ptr; diff --git a/envoy/server/BUILD b/envoy/server/BUILD index ccd2478deea7..5ca3f4c61567 100644 --- a/envoy/server/BUILD +++ b/envoy/server/BUILD @@ -184,6 +184,7 @@ envoy_cc_library( "//envoy/access_log:access_log_interface", "//envoy/api:api_interface", "//envoy/config:typed_config_interface", + "//envoy/config:typed_metadata_interface", "//envoy/grpc:context_interface", "//envoy/http:codes_interface", "//envoy/http:context_interface", diff --git a/envoy/server/factory_context.h b/envoy/server/factory_context.h index 7a05d09b6ac2..6a02ca65dfca 100644 --- a/envoy/server/factory_context.h +++ b/envoy/server/factory_context.h @@ -8,6 +8,7 @@ #include "envoy/common/random_generator.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/typed_config.h" +#include "envoy/config/typed_metadata.h" #include "envoy/grpc/context.h" #include "envoy/http/codes.h" #include "envoy/http/context.h" @@ -89,6 +90,11 @@ class FactoryContextBase { */ virtual Stats::Scope& scope() PURE; + /** + * @return Stats::Scope& the server wide stats scope. + */ + virtual Stats::Scope& serverScope() PURE; + /** * @return ThreadLocal::SlotAllocator& the thread local storage engine for the server. This is * used to allow runtime lockless updates to configuration, etc. across multiple threads. 
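A toy validation visitor showing one plausible use of the new ``onWorkInProgress()`` hook added to ``envoy/protobuf/message_validator.h`` above, simply collecting warnings when config touches work-in-progress protos; this is an illustration, not Envoy's actual visitor behavior:

  #include <string>
  #include <vector>

  // Hypothetical visitor; in Envoy this would derive from the ValidationVisitor interface.
  class WipWarningCollector {
  public:
    void onWorkInProgress(const std::string& description) {
      warnings_.push_back("work-in-progress config used: " + description);
    }

    const std::vector<std::string>& warnings() const { return warnings_; }

  private:
    std::vector<std::string> warnings_;
  };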
@@ -225,6 +231,12 @@ class FactoryContext : public virtual CommonFactoryContext { */ virtual const envoy::config::core::v3::Metadata& listenerMetadata() const PURE; + /** + * @return const Envoy::Config::TypedMetadata& return the typed metadata provided in the config + * for this listener. + */ + virtual const Envoy::Config::TypedMetadata& listenerTypedMetadata() const PURE; + /** * @return OverloadManager& the overload manager for the server. */ diff --git a/envoy/singleton/manager.h b/envoy/singleton/manager.h index 995c333f8744..d2f235bdf067 100644 --- a/envoy/singleton/manager.h +++ b/envoy/singleton/manager.h @@ -73,6 +73,15 @@ class Manager { return std::dynamic_pointer_cast(get(name, cb)); } + /** + * This is a non-constructing getter. Use when the caller can deal with instances where + * the singleton being accessed may not have been constructed previously. + * @return InstancePtr the singleton. nullptr if the singleton does not exist. + */ + template std::shared_ptr getTyped(const std::string& name) { + return std::dynamic_pointer_cast(get(name, [] { return nullptr; })); + } + /** * Get a singleton and create it if it does not exist. * @param name supplies the singleton name. Must be registered via RegistrationImpl. diff --git a/envoy/ssl/BUILD b/envoy/ssl/BUILD index d9234abdfafe..3862d837cf75 100644 --- a/envoy/ssl/BUILD +++ b/envoy/ssl/BUILD @@ -85,5 +85,6 @@ envoy_cc_library( "//envoy/network:connection_interface", "//envoy/network:post_io_action_interface", "//envoy/protobuf:message_validator_interface", + "//envoy/server:options_interface", ], ) diff --git a/envoy/ssl/handshaker.h b/envoy/ssl/handshaker.h index 2a9fa737b118..1ed93d46c1a8 100644 --- a/envoy/ssl/handshaker.h +++ b/envoy/ssl/handshaker.h @@ -5,6 +5,7 @@ #include "envoy/network/connection.h" #include "envoy/network/post_io_action.h" #include "envoy/protobuf/message_validator.h" +#include "envoy/server/options.h" #include "openssl/ssl.h" @@ -63,6 +64,11 @@ class HandshakerFactoryContext { public: virtual ~HandshakerFactoryContext() = default; + /** + * @return reference to the server options + */ + virtual const Server::Options& options() const PURE; + /** * @return reference to the Api object */ diff --git a/envoy/stats/histogram.h b/envoy/stats/histogram.h index aa87d59e7360..154d4a83f66c 100644 --- a/envoy/stats/histogram.h +++ b/envoy/stats/histogram.h @@ -104,8 +104,16 @@ class Histogram : public Metric { Bytes, Microseconds, Milliseconds, + Percent, // A percent value stored as fixed-point, where the stored value is divided by + // PercentScale to get the actual value, eg a value of 100% (or 1.0) is encoded as + // PercentScale, 50% is encoded as PercentScale * 0.5. Encoding as fixed-point allows + // enough dynamic range, without needing to support floating-point values in + // histograms. }; + // The scaling factor for Unit::Percent. + static constexpr uint64_t PercentScale = 1000000; + ~Histogram() override = default; /** diff --git a/envoy/stream_info/stream_info.h b/envoy/stream_info/stream_info.h index 1a48d18e2c94..5c4c2b473b61 100644 --- a/envoy/stream_info/stream_info.h +++ b/envoy/stream_info/stream_info.h @@ -238,6 +238,26 @@ struct UpstreamTiming { absl::optional last_upstream_rx_byte_received_; }; +// Measure the number of bytes sent and received for a stream. 
+struct BytesMeter { + uint64_t wireBytesSent() const { return wire_bytes_sent_; } + uint64_t wireBytesReceived() const { return wire_bytes_received_; } + uint64_t headerBytesSent() const { return header_bytes_sent_; } + uint64_t headerBytesReceived() const { return header_bytes_received_; } + void addHeaderBytesSent(uint64_t added_bytes) { header_bytes_sent_ += added_bytes; } + void addHeaderBytesReceived(uint64_t added_bytes) { header_bytes_received_ += added_bytes; } + void addWireBytesSent(uint64_t added_bytes) { wire_bytes_sent_ += added_bytes; } + void addWireBytesReceived(uint64_t added_bytes) { wire_bytes_received_ += added_bytes; } + +private: + uint64_t header_bytes_sent_{}; + uint64_t header_bytes_received_{}; + uint64_t wire_bytes_sent_{}; + uint64_t wire_bytes_received_{}; +}; + +using BytesMeterSharedPtr = std::shared_ptr; + /** * Additional information about a completed request for logging. */ @@ -290,13 +310,14 @@ class StreamInfo { * @return std::string& the name of the route. */ virtual const std::string& getRouteName() const PURE; + /** * @param bytes_received denotes number of bytes to add to total received bytes. */ virtual void addBytesReceived(uint64_t bytes_received) PURE; /** - * @return the number of body bytes received in the request. + * @return the number of body bytes received by the stream. */ virtual uint64_t bytesReceived() const PURE; @@ -604,6 +625,32 @@ class StreamInfo { * was never attempted upstream. */ virtual absl::optional attemptCount() const PURE; + + /** + * @return the bytes meter for upstream http stream. + */ + virtual const BytesMeterSharedPtr& getUpstreamBytesMeter() const PURE; + + /** + * @return the bytes meter for downstream http stream. + */ + virtual const BytesMeterSharedPtr& getDownstreamBytesMeter() const PURE; + + /** + * @param upstream_bytes_meter, the bytes meter for upstream http stream. + */ + virtual void setUpstreamBytesMeter(const BytesMeterSharedPtr& upstream_bytes_meter) PURE; + + /** + * @param downstream_bytes_meter, the bytes meter for downstream http stream. + */ + virtual void setDownstreamBytesMeter(const BytesMeterSharedPtr& downstream_bytes_meter) PURE; + + static void syncUpstreamAndDownstreamBytesMeter(StreamInfo& downstream_info, + StreamInfo& upstream_info) { + downstream_info.setUpstreamBytesMeter(upstream_info.getUpstreamBytesMeter()); + upstream_info.setDownstreamBytesMeter(downstream_info.getDownstreamBytesMeter()); + } }; } // namespace StreamInfo diff --git a/envoy/upstream/cluster_factory.h b/envoy/upstream/cluster_factory.h index 9440e374c0ed..a6f5d70e6132 100644 --- a/envoy/upstream/cluster_factory.h +++ b/envoy/upstream/cluster_factory.h @@ -75,6 +75,8 @@ class ClusterFactoryContext : public Server::Configuration::FactoryContextBase { // Server::Configuration::FactoryContextBase Stats::Scope& scope() override { return stats(); } + + Stats::Scope& serverScope() override { return stats(); } }; /** diff --git a/envoy/upstream/load_balancer.h b/envoy/upstream/load_balancer.h index 109e2296ca14..75252fae82d6 100644 --- a/envoy/upstream/load_balancer.h +++ b/envoy/upstream/load_balancer.h @@ -10,6 +10,11 @@ #include "envoy/upstream/upstream.h" namespace Envoy { +namespace Http { +namespace ConnectionPool { +class ConnectionLifetimeCallbacks; +} // namespace ConnectionPool +} // namespace Http namespace Upstream { /** @@ -104,6 +109,14 @@ class LoadBalancerContext { virtual absl::optional overrideHostToSelect() const PURE; }; +/** + * Identifies a specific connection within a pool. 
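A short usage sketch for the ``BytesMeter`` added to ``envoy/stream_info/stream_info.h`` above: a codec bumps the wire and header counters as bytes hit the socket, and a consumer can derive body bytes as wire minus header. The include path and ``Envoy::StreamInfo`` namespace are assumed from the diff; the ``main()`` driver is only illustrative:

  #include <cstdint>
  #include <iostream>
  #include <memory>

  #include "envoy/stream_info/stream_info.h"  // BytesMeter definition from the diff above

  int main() {
    auto meter = std::make_shared<Envoy::StreamInfo::BytesMeter>();
    meter->addHeaderBytesSent(350);  // serialized request headers
    meter->addWireBytesSent(350);
    meter->addWireBytesSent(2048);   // request body bytes on the wire
    const uint64_t body_bytes = meter->wireBytesSent() - meter->headerBytesSent();
    std::cout << "body bytes sent: " << body_bytes << "\n";  // prints 2048
  }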
+ */ +struct SelectedPoolAndConnection { + Envoy::ConnectionPool::Instance& pool_; + const Network::Connection& connection_; +}; + /** * Abstract load balancing interface. */ @@ -126,6 +139,24 @@ class LoadBalancer { * @param context supplies the context which is used in host selection. */ virtual HostConstSharedPtr peekAnotherHost(LoadBalancerContext* context) PURE; + + /** + * Returns connection lifetime callbacks that may be used to inform the load balancer of + * connection events. Load balancers which do not intend to track connection lifetime events + * will return nullopt. + * @return optional lifetime callbacks for this load balancer. + */ + virtual OptRef<Http::ConnectionPool::ConnectionLifetimeCallbacks> lifetimeCallbacks() PURE; + + /** + * Returns a specific pool and existing connection to be used for the specified host. + * + * @return selected pool and connection to be used, or nullopt if no selection is made, + * for example if no matching connection is found. + */ + virtual absl::optional<SelectedPoolAndConnection> + selectExistingConnection(LoadBalancerContext* context, const Host& host, + std::vector<uint8_t>& hash_key) PURE; }; using LoadBalancerPtr = std::unique_ptr<LoadBalancer>;
diff --git a/envoy/upstream/retry.h b/envoy/upstream/retry.h index 9e1a8de57995..7d897cd63738 100644 --- a/envoy/upstream/retry.h +++ b/envoy/upstream/retry.h @@ -101,7 +101,7 @@ class RetryOptionsPredicate { public: struct UpdateOptionsParameters { // Stream info for the previous request attempt that is about to be retried. - const StreamInfo::StreamInfo& retriable_request_stream_info_; + StreamInfo::StreamInfo& retriable_request_stream_info_; // The current upstream socket options that were used for connection pool selection on the // previous attempt, or the result of an updated set of options from a previously run // retry options predicate.
diff --git a/envoy/upstream/upstream.h b/envoy/upstream/upstream.h index caebef0f4e88..c3274068c417 100644 --- a/envoy/upstream/upstream.h +++ b/envoy/upstream/upstream.h @@ -553,6 +553,7 @@ class PrioritySet { COUNTER(upstream_cx_http2_total) \ COUNTER(upstream_cx_http3_total) \ COUNTER(upstream_cx_idle_timeout) \ + COUNTER(upstream_cx_max_duration_reached) \ COUNTER(upstream_cx_max_requests) \ COUNTER(upstream_cx_none_healthy) \ COUNTER(upstream_cx_overflow) \ @@ -744,6 +745,11 @@ class ClusterInfo { */ virtual const absl::optional<std::chrono::milliseconds> idleTimeout() const PURE; + /** + * @return optional maximum connection duration timeout for manager connections. + */ + virtual const absl::optional<std::chrono::milliseconds> maxConnectionDuration() const PURE; + /** * @return how many streams should be anticipated per each current stream. */ @@ -835,6 +841,12 @@ class ClusterInfo { virtual const absl::optional<envoy::config::cluster::v3::Cluster::CustomClusterType>& clusterType() const PURE; + /** + * @return configuration for round robin load balancing, only used if LB type is round robin. + */ + virtual const absl::optional<envoy::config::cluster::v3::Cluster::RoundRobinLbConfig>& + lbRoundRobinConfig() const PURE; + /** * @return configuration for least request load balancing, only used if LB type is least request.
*/ diff --git a/examples/cache/front-envoy.yaml b/examples/cache/front-envoy.yaml index e6111b745eec..4754cbeebfc8 100644 --- a/examples/cache/front-envoy.yaml +++ b/examples/cache/front-envoy.yaml @@ -29,9 +29,9 @@ static_resources: http_filters: - name: "envoy.filters.http.cache" typed_config: - "@type": "type.googleapis.com/envoy.extensions.filters.http.cache.v3alpha.CacheConfig" + "@type": "type.googleapis.com/envoy.extensions.filters.http.cache.v3.CacheConfig" typed_config: - "@type": "type.googleapis.com/envoy.extensions.cache.simple_http_cache.v3alpha.SimpleHttpCacheConfig" + "@type": "type.googleapis.com/envoy.extensions.cache.simple_http_cache.v3.SimpleHttpCacheConfig" - name: envoy.filters.http.router clusters: diff --git a/examples/grpc-bridge/client/Dockerfile b/examples/grpc-bridge/client/Dockerfile index 49425a773bc0..6159167e8e8d 100644 --- a/examples/grpc-bridge/client/Dockerfile +++ b/examples/grpc-bridge/client/Dockerfile @@ -1,11 +1,11 @@ -FROM grpc/python +FROM python:3.8-slim WORKDIR /client COPY requirements.txt /client/requirements.txt # Cache the dependencies -RUN pip install -r /client/requirements.txt +RUN pip install --require-hashes -r /client/requirements.txt # Copy the sources, including the stubs COPY client.py /client/grpc-kv-client.py diff --git a/examples/grpc-bridge/client/client.py b/examples/grpc-bridge/client/client.py index 8bcf29f22cba..e98607cbcfca 100755 --- a/examples/grpc-bridge/client/client.py +++ b/examples/grpc-bridge/client/client.py @@ -19,7 +19,7 @@ """.format(host=HOST) -class KVClient(): +class KVClient: def get(self, key): r = kv.GetRequest(key=key) @@ -40,7 +40,7 @@ def set(self, key, value): return requests.post(HOST + "/kv.KV/Set", data=data, headers=HEADERS) -def run(): +def main(): if len(sys.argv) == 1: print(USAGE) @@ -82,4 +82,4 @@ def run(): if __name__ == '__main__': - run() + main() diff --git a/examples/grpc-bridge/client/requirements.in b/examples/grpc-bridge/client/requirements.in new file mode 100644 index 000000000000..96b06d428c7e --- /dev/null +++ b/examples/grpc-bridge/client/requirements.in @@ -0,0 +1,4 @@ +requests>=2.22.0 +grpcio +grpcio-tools +protobuf>=3.18.0 diff --git a/examples/grpc-bridge/client/requirements.txt b/examples/grpc-bridge/client/requirements.txt index c6c2fd2fa343..0c58c1ec1b48 100644 --- a/examples/grpc-bridge/client/requirements.txt +++ b/examples/grpc-bridge/client/requirements.txt @@ -1,4 +1,155 @@ -requests>=2.22.0 -grpcio -grpcio-tools -protobuf==3.17.3 +# +# This file is autogenerated by pip-compile +# To update, run: +# +# pip-compile --allow-unsafe --generate-hashes requirements.in +# +certifi==2021.5.30 \ + --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \ + --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 + # via requests +charset-normalizer==2.0.6 \ + --hash=sha256:5d209c0a931f215cee683b6445e2d77677e7e75e159f78def0db09d68fafcaa6 \ + --hash=sha256:5ec46d183433dcbd0ab716f2d7f29d8dee50505b3fdb40c6b985c7c4f5a3591f + # via requests +grpcio==1.41.0 \ + --hash=sha256:056806e83eaa09d0af0e452dd353db8f7c90aa2dedcce1112a2d21592550f6b1 \ + --hash=sha256:07594e585a5ba25cf331ddb63095ca51010c34e328a822cb772ffbd5daa62cb5 \ + --hash=sha256:0abd56d90dff3ed566807520de1385126dded21e62d3490a34c180a91f94c1f4 \ + --hash=sha256:15c04d695833c739dbb25c88eaf6abd9a461ec0dbd32f44bc8769335a495cf5a \ + --hash=sha256:1820845e7e6410240eff97742e9f76cd5bf10ca01d36a322e86c0bd5340ac25b \ + 
--hash=sha256:1bcbeac764bbae329bc2cc9e95d0f4d3b0fb456b92cf12e7e06e3e860a4b31cf \ + --hash=sha256:2410000eb57cf76b05b37d2aee270b686f0a7876710850a2bba92b4ed133e026 \ + --hash=sha256:2882b62f74de8c8a4f7b2be066f6230ecc46f4edc8f42db1fb7358200abe3b25 \ + --hash=sha256:297ee755d3c6cd7e7d3770f298f4d4d4b000665943ae6d2888f7407418a9a510 \ + --hash=sha256:39ce785f0cbd07966a9019386b7a054615b2da63da3c7727f371304d000a1890 \ + --hash=sha256:3a92e4df5330cd384984e04804104ae34f521345917813aa86fc0930101a3697 \ + --hash=sha256:3bbeee115b05b22f6a9fa9bc78f9ab8d9d6bb8c16fdfc60401fc8658beae1099 \ + --hash=sha256:4537bb9e35af62c5189493792a8c34d127275a6d175c8ad48b6314cacba4021e \ + --hash=sha256:462178987f0e5c60d6d1b79e4e95803a4cd789db961d6b3f087245906bb5ae04 \ + --hash=sha256:5292a627b44b6d3065de4a364ead23bab3c9d7a7c05416a9de0c0624d0fe03f4 \ + --hash=sha256:5502832b7cec670a880764f51a335a19b10ff5ab2e940e1ded67f39b88aa02b1 \ + --hash=sha256:585847ed190ea9cb4d632eb0ebf58f1d299bbca5e03284bc3d0fa08bab6ea365 \ + --hash=sha256:59645b2d9f19b5ff30cb46ddbcaa09c398f9cd81e4e476b21c7c55ae1e942807 \ + --hash=sha256:5d4b30d068b022e412adcf9b14c0d9bcbc872e9745b91467edc0a4c700a8bba6 \ + --hash=sha256:7033199706526e7ee06a362e38476dfdf2ddbad625c19b67ed30411d1bb25a18 \ + --hash=sha256:7b07cbbd4eea56738e995fcbba3b60e41fd9aa9dac937fb7985c5dcbc7626260 \ + --hash=sha256:7da3f6f6b857399c9ad85bcbffc83189e547a0a1a777ab68f5385154f8bc1ed4 \ + --hash=sha256:83c1e731c2b76f26689ad88534cafefe105dcf385567bead08f5857cb308246b \ + --hash=sha256:9674a9d3f23702e35a89e22504f41b467893cf704f627cc9cdd118cf1dcc8e26 \ + --hash=sha256:9ecd0fc34aa46eeac24f4d20e67bafaf72ca914f99690bf2898674905eaddaf9 \ + --hash=sha256:a0c4bdd1d646365d10ba1468bcf234ea5ad46e8ce2b115983e8563248614910a \ + --hash=sha256:a144f6cecbb61aace12e5920840338a3d246123a41d795e316e2792e9775ad15 \ + --hash=sha256:a3cd7f945d3e3b82ebd2a4c9862eb9891a5ac87f84a7db336acbeafd86e6c402 \ + --hash=sha256:a614224719579044bd7950554d3b4c1793bb5715cbf0f0399b1f21d283c40ef6 \ + --hash=sha256:ace080a9c3c673c42adfd2116875a63fec9613797be01a6105acf7721ed0c693 \ + --hash=sha256:b2de4e7b5a930be04a4d05c9f5fce7e9191217ccdc174b026c2a7928770dca9f \ + --hash=sha256:b6b68c444abbaf4a2b944a61cf35726ab9645f45d416bcc7cf4addc4b2f2d53d \ + --hash=sha256:be3c6ac822edb509aeef41361ca9c8c5ee52cb9e4973e1977d2bb7d6a460fd97 \ + --hash=sha256:c07acd49541f5f6f9984fe0adf162d77bf70e0f58e77f9960c6f571314ff63a4 \ + --hash=sha256:c1e0a4c86d4cbd93059d5eeceed6e1c2e3e1494e1bf40be9b8ab14302c576162 \ + --hash=sha256:c8c5bc498f6506b6041c30afb7a55c57a9fd535d1a0ac7cdba9b5fd791a85633 \ + --hash=sha256:c95dd6e60e059ff770a2ac9f5a202b75dd64d76b0cd0c48f27d58907e43ed6a6 \ + --hash=sha256:ccd2f1cf11768d1f6fbe4e13e8b8fb0ccfe9914ceeff55a367d5571e82eeb543 \ + --hash=sha256:d0cc0393744ce3ce1b237ae773635cc928470ff46fb0d3f677e337a38e5ed4f6 \ + --hash=sha256:d539ebd05a2bbfbf897d41738d37d162d5c3d9f2b1f8ddf2c4f75e2c9cf59907 \ + --hash=sha256:d71aa430b2ac40e18e388504ac34cc91d49d811855ca507c463a21059bf364f0 \ + --hash=sha256:dcb5f324712a104aca4a459e524e535f205f36deb8005feb4f9d3ff0a22b5177 \ + --hash=sha256:e516124010ef60d5fc2e0de0f1f987599249dc55fd529001f17f776a4145767f \ + --hash=sha256:fb64abf0d92134cb0ba4496a3b7ab918588eee42de20e5b3507fe6ee16db97ee + # via + # -r requirements.in + # grpcio-tools +grpcio-tools==1.41.0 \ + --hash=sha256:022ea466300fd8eee03375795c764b8d01aee7ba614c1d7ba198eef9eaebc07a \ + --hash=sha256:05730f1acd3fa70e63a62fe37377297774db7f4794fb6ae3e43f64aa354460f8 \ + --hash=sha256:08654c9f723fa644be52cc8f975c01bb93a99808ab02c2e64a20e9c9e92c9a3b \ 
+ --hash=sha256:0d6489ed1310250f152d6170ee539e84bfc364bbfdffbbe98e8ce9297c4a1550 \ + --hash=sha256:17a759203f627b941086a65a0c3f39c5da41f11d11dc8ca5883e844c055876dd \ + --hash=sha256:2d48309bbbb2d7144117748718ca52eb60f10dd86a0cb8a0a5f952ee08575bee \ + --hash=sha256:3891b1df82369acbc8451d4952cd20755f49a82398dce62437511ad17b47290e \ + --hash=sha256:3c7f6c8559ac6bea6029b8c5d188d24509d30a28816de02c723659f56e862b98 \ + --hash=sha256:3f6c2bff12e2015bd69c600710fb427720446034ed9a237cd6edf7e2452cf826 \ + --hash=sha256:3f860f8a804f6ef6ea545483c1506d184f9bba40f635c6886d79791822c679e3 \ + --hash=sha256:4b48c13dbbf96d36a41e45fd011eeabc1541ec8705f2d533fa4c20634f750885 \ + --hash=sha256:50a9f66502e4868c20bc0b8c1c7d3b21e6b6b2578a7aef6ce7c28294b9eba911 \ + --hash=sha256:51bdc4bd088592d5f52b5cb6d3be072bf0d847a7af92e544f9885acdf5de1252 \ + --hash=sha256:55915c61baae316b607be6ff5be72614efc067e50dfffd389bde95c240a5416e \ + --hash=sha256:57f35fd71366f1eecd4c08b9d8eda1007d371827f092ae916b4235744e9175a6 \ + --hash=sha256:5b1edfcfa4f21c210bfe66534af9fa5ca37374bb0e0d1754018e0d92c8fe4c8e \ + --hash=sha256:5d15f5dd0c01f914ab15e921484b71aff0eff8aa123b22d76e71c76be8d81efc \ + --hash=sha256:5f52f7d8841372a047493ee9722810856a4adfa38330b4a688a1421dd3460518 \ + --hash=sha256:5f85be3053486cc53b41fe888957f61e98d6aab74b0726a54cf35e4a685f2b96 \ + --hash=sha256:602b7dd5e52924794f19f637ec042bc141b7d9dd127ddc662b28c42f8db08e95 \ + --hash=sha256:609f6e4cad800f0b2caa0b46baefbb30444bddfc94d1429b9add02d5e6759001 \ + --hash=sha256:6622feec0a3f326fb86cf01bf1bcbfec23548ae4d80706d88b296d792d816f0e \ + --hash=sha256:7145e9243718bd8a4792547efb1443846cebb3d36d49dca52d5f9edfb81aa256 \ + --hash=sha256:7242b39d16970319b11c13832f3474d09be53cbc88bc05c54140f5394a247184 \ + --hash=sha256:731c78b612ca672af0f4682e68d331d304a3eccd1836f0b89402c332aa653815 \ + --hash=sha256:7f3bf213d7b182628bdfb10854cc7b19d4882e1916786fc3a14f724555a7e824 \ + --hash=sha256:85b4cd4a77c27df984dce5b14eafa29c54abd134335230b59fa8d096c995b877 \ + --hash=sha256:898b032ddcd25a051c6c8892b76779b8821e073fc363e6105dc08efd95857bcd \ + --hash=sha256:8cf6ab58c14b7bd4cf5b4d652e2bfafc6543d38210d68332ccccff4733bcc615 \ + --hash=sha256:8f7cd5b8eeae570743cfd0ece36f62b32424b995ee3862697cfe94bc9c4fa5fe \ + --hash=sha256:98d9e581bc9ad154697af40c0109221926628d57fab2a52a1fa2cfed401349d5 \ + --hash=sha256:9ff9fdef6df6b3d1e4395158f4bd2bfab58867370bd4b4ed81a1a2ab20de085b \ + --hash=sha256:a111af9732c1ac85b35b894c4b6150127c52349ca220c0708d241d4bb8ee4622 \ + --hash=sha256:a1e2db4c90cb07d6b8f1526346df65da85dce995e7aa7c4db76bcc2a99dcbf43 \ + --hash=sha256:a4e08366f780b439499645fbb0b7788cccd978c06158b19e915726bfbe420031 \ + --hash=sha256:b78a3225302b60e59a922d909413b2c0de2ba19f4dc79273411dfad560e21418 \ + --hash=sha256:b8e9181327b94886f6214cfe2147721c6b60138c111d78313b9070f4068020b5 \ + --hash=sha256:c13b6a37fe3619be603265a14a614f86fa97a95934e6447de2bc9e66f9a35590 \ + --hash=sha256:c93137598d5f2b4d163aff571197be92d3c691a5d82dabb29b1ef467e3c29db6 \ + --hash=sha256:ceefaa88c066c9c779f15e8d58d57d3763efef3d0dbec483be99bc75ae0e2d70 \ + --hash=sha256:db64aa08ae500cb20c9f377e41a66e493c4cba27ab99710852340ef81c7d0e30 \ + --hash=sha256:dc65beee944735d4cb42c8c43e284ff711512d1f7a029bdbaeb0729243f3a702 \ + --hash=sha256:e1814b98a955aad08107eb4c4f068b1cd147cc923a2480bc2fae51007bb7866b \ + --hash=sha256:f4c03f312877e57b47beda2e9db5a39bc3af65ee22b38e85b4c0f94b3b9c26af + # via -r requirements.in +idna==3.2 \ + --hash=sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a \ + 
--hash=sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3 + # via requests +protobuf==3.18.1 \ + --hash=sha256:0851b5b89191e1976d34fa2e8eb8659829dfb45252053224cf9df857fb5f6a45 \ + --hash=sha256:09d9268f6f9da81b7657adcf2fb397524c82f20cdf9e0db3ff4e7567977abd67 \ + --hash=sha256:10544fc7ace885a882623083c24da5b14148c77563acddc3c58d66f6153c09cd \ + --hash=sha256:1c9bb40503751087300dd12ce2e90899d68628977905c76effc48e66d089391e \ + --hash=sha256:387f621bf7295a331f8c8a6962d097ceddeb85356792888cfa6a5c6bfc6886a4 \ + --hash=sha256:3c1644f8a7f19b45c7a4c32278e2a55ae9e7e2f9e5f02d960a61f04a4890d3e6 \ + --hash=sha256:4d19c9cb805fd2be1d59eee39e152367ee92a30167e77bd06c8819f8f0009a4c \ + --hash=sha256:61ca58e14033ca0dfa484a31d57237c1be3b6013454c7f53876a20fc88dd69b1 \ + --hash=sha256:6f714f5de9d40b3bec90ede4a688cce52f637ccdc5403afcda1f67598f4fdcd7 \ + --hash=sha256:7a7be937c319146cc9f2626f0181e6809062c353e1fe449ecd0df374ba1036b2 \ + --hash=sha256:7e2f0677d68ecdd1cfda2abea65873f5bc7c3f5aae199404a3f5c1d1198c1a63 \ + --hash=sha256:8c1c5d3966c856f60a9d8d62f4455d70c31026422acdd5c228edf22b65b16c38 \ + --hash=sha256:93bad12895d8b0ebc66b605c2ef1802311595f881aef032d9f13282b7550e6b2 \ + --hash=sha256:c0e2790c580070cff2921b93d562539ae027064340151c50db6aaf94c33048cd \ + --hash=sha256:c492c217d3f69f4d2d5619571e52ab98538edbf53caf67e53ea92bd0a3b5670f \ + --hash=sha256:d6d927774c0ec746fed15a4faff5f44aad0b7a3421fadb6f3ae5ca1f2f8ae26e \ + --hash=sha256:d76201380f41a2d83fb613a4683059d1fcafbe969518b3e409e279a8788fde2f \ + --hash=sha256:e2ee8b11e3eb2ed38f12137c3c132270a0b1dd509e317228ac47b67f21a583f1 \ + --hash=sha256:e9ac691f7b24e4371dcd3980e4f5d6c840a2010da37986203053fee995786ec5 \ + --hash=sha256:f20f803892f2135e8b96dc58c9a0c6a7ad8436794bf8784af229498d939b4c77 \ + --hash=sha256:fa6d1049d5315566f55c04d0b50c0033415144f96a9d25c820dc542fe2bb7f45 + # via + # -r requirements.in + # grpcio-tools +requests==2.26.0 \ + --hash=sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24 \ + --hash=sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7 + # via -r requirements.in +six==1.16.0 \ + --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ + --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 + # via grpcio +urllib3==1.26.7 \ + --hash=sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece \ + --hash=sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844 + # via requests + +# The following packages are considered to be unsafe in a requirements file: +setuptools==58.2.0 \ + --hash=sha256:2551203ae6955b9876741a26ab3e767bb3242dafe86a32a749ea0d78b6792f11 \ + --hash=sha256:2c55bdb85d5bb460bd2e3b12052b677879cffcf46c0c688f2e5bf51d36001145 + # via grpcio-tools diff --git a/examples/wasm-cc/docker-compose-wasm.yaml b/examples/wasm-cc/docker-compose-wasm.yaml index 2d88bf48f173..537b46d2485e 100644 --- a/examples/wasm-cc/docker-compose-wasm.yaml +++ b/examples/wasm-cc/docker-compose-wasm.yaml @@ -2,7 +2,7 @@ version: "3.7" services: wasm_compile_update: - image: envoyproxy/envoy-build-ubuntu:8ca107a75ee98b255aa59db2ab40fd0800a3ce99 + image: envoyproxy/envoy-build-ubuntu:81a93046060dbe5620d5b3aa92632090a9ee4da6 command: | bash -c "bazel build //examples/wasm-cc:envoy_filter_http_wasm_updated_example.wasm && cp -a bazel-bin/examples/wasm-cc/* /build" working_dir: /source @@ -11,7 +11,7 @@ services: - ./lib:/build wasm_compile: - image: envoyproxy/envoy-build-ubuntu:8ca107a75ee98b255aa59db2ab40fd0800a3ce99 + 
image: envoyproxy/envoy-build-ubuntu:81a93046060dbe5620d5b3aa92632090a9ee4da6 command: | bash -c "bazel build //examples/wasm-cc:envoy_filter_http_wasm_example.wasm && cp -a bazel-bin/examples/wasm-cc/* /build" working_dir: /source diff --git a/generated_api_shadow/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.proto b/generated_api_shadow/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.proto deleted file mode 100644 index aa2d8cd2fb82..000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha/cryptomb.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.private_key_providers.cryptomb.v3alpha; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.private_key_providers.cryptomb.v3alpha"; -option java_outer_classname = "CryptombProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: CryptoMb private key provider] -// [#extension: envoy.tls.key_providers.cryptomb] - -// A CryptoMbPrivateKeyMethodConfig message specifies how the CryptoMb private -// key provider is configured. The private key provider provides `SIMD` -// processing for RSA sign and decrypt operations (ECDSA signing uses regular -// BoringSSL functions). The provider works by gathering the operations into a -// worker-thread specific queue, and processing the queue using `ipp-crypto` -// library when the queue is full or when a timer expires. -// [#extension-category: envoy.tls.key_providers] -message CryptoMbPrivateKeyMethodConfig { - // Private key to use in the private key provider. If set to inline_bytes or - // inline_string, the value needs to be the private key in PEM format. - config.core.v3.DataSource private_key = 1 [(udpa.annotations.sensitive) = true]; - - // How long to wait until the per-thread processing queue should be - // processed. If the processing queue gets full (eight sign or decrypt - // requests are received) it is processed immediately. However, if the - // queue is not filled before the delay has expired, the requests - // already in the queue are processed, even if the queue is not full. - // In effect, this value controls the balance between latency and - // throughput. The duration needs to be set to a non-zero value. 
- google.protobuf.Duration poll_delay = 2 [(validate.rules).duration = { - required: true - gt {} - }]; -}
diff --git a/repokitteh.star b/repokitteh.star index 57c90e3599eb..c5aa3ddf66f1 100644 --- a/repokitteh.star +++ b/repokitteh.star @@ -18,22 +18,22 @@ use( }, { "owner": "envoyproxy/api-shepherds!", - "path": "api/envoy/", + "path": "(api/envoy/|docs/root/api-docs/)", "label": "api", "github_status_label": "any API change", "auto_assign": True, }, { "owner": "envoyproxy/api-watchers", - "path": "api/envoy/", + "path": "(api/envoy/|docs/root/api-docs/)", }, { "owner": "envoyproxy/dependency-shepherds!", "path": "(bazel/.*repos.*\.bzl)|(bazel/dependency_imports\.bzl)|(api/bazel/.*\.bzl)|(.*/requirements\.txt)|(.*\.patch)", "label": "deps", - "allow_global_approval": False, "github_status_label": "any dependency change", + "auto_assign": True, }, ], )
diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h index 12152a927302..7368b4880e1b 100644 --- a/source/common/buffer/buffer_impl.h +++ b/source/common/buffer/buffer_impl.h @@ -397,7 +397,7 @@ class Slice { /** Length of the byte array that base_ points to. This is also the offset in bytes from the start * of the slice to the end of the Reservable section. */ - uint64_t capacity_; + uint64_t capacity_ = 0; /** Backing storage for mutable slices which own their own storage. This storage should never be * accessed directly; access base_ instead. */ @@ -407,11 +407,11 @@ class Slice { uint8_t* base_{nullptr}; /** Offset in bytes from the start of the slice to the start of the Data section. */ - uint64_t data_; + uint64_t data_ = 0; /** Offset in bytes from the start of the slice to the start of the Reservable section which is * also the end of the Data section. */ - uint64_t reservable_; + uint64_t reservable_ = 0; /** Hooks to execute when the slice is destroyed. */ std::list<std::function<void()>> drain_trackers_;
diff --git a/source/common/buffer/watermark_buffer.h b/source/common/buffer/watermark_buffer.h index 95cd369c82c8..ab1590deaa0a 100644 --- a/source/common/buffer/watermark_buffer.h +++ b/source/common/buffer/watermark_buffer.h @@ -67,7 +67,7 @@ class WatermarkBuffer : public OwnedImpl { uint32_t low_watermark_{0}; uint32_t overflow_watermark_{0}; // Tracks the latest state of watermark callbacks. - // True between the time above_high_watermark_ has been called until above_high_watermark_ has + // True between the time above_high_watermark_ has been called until below_low_watermark_ has // been called. bool above_high_watermark_called_{false}; // Set to true when above_overflow_watermark_ is called (and isn't cleared).
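The envoy/stream_info/stream_info.h hunk earlier in this diff only declares the new byte-accounting interface. As a rough illustration only (a minimal standalone sketch, not code from this diff or from Envoy), the snippet below mimics how the BytesMeter counters are meant to be fed and shared: header bytes are tracked separately from total wire bytes, and because the meters live behind shared_ptr, syncUpstreamAndDownstreamBytesMeter() can make the downstream and upstream StreamInfo report from the same counters simply by sharing pointers. The byte figures used are arbitrary placeholders.

#include <cstdint>
#include <iostream>
#include <memory>

// Hypothetical stand-in for the BytesMeter added in envoy/stream_info/stream_info.h;
// only the "sent" counters are reproduced here to keep the sketch short.
struct BytesMeter {
  uint64_t wireBytesSent() const { return wire_bytes_sent_; }
  uint64_t headerBytesSent() const { return header_bytes_sent_; }
  void addWireBytesSent(uint64_t added_bytes) { wire_bytes_sent_ += added_bytes; }
  void addHeaderBytesSent(uint64_t added_bytes) { header_bytes_sent_ += added_bytes; }

private:
  uint64_t wire_bytes_sent_{0};
  uint64_t header_bytes_sent_{0};
};

using BytesMeterSharedPtr = std::shared_ptr<BytesMeter>;

int main() {
  // A codec would typically record the serialized header size first, then the
  // full frame as it is written to the socket.
  BytesMeterSharedPtr upstream_meter = std::make_shared<BytesMeter>();
  upstream_meter->addHeaderBytesSent(350);      // request headers (placeholder size)
  upstream_meter->addWireBytesSent(350 + 1024); // headers plus body on the wire

  // Sharing the same meter object between the downstream and upstream StreamInfo
  // (as syncUpstreamAndDownstreamBytesMeter() does) means both sides observe the
  // same totals without copying counters.
  BytesMeterSharedPtr downstream_view = upstream_meter;
  std::cout << "wire bytes sent: " << downstream_view->wireBytesSent() << "\n"
            << "header bytes sent: " << downstream_view->headerBytesSent() << "\n";
  return 0;
}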
diff --git a/source/common/chromium_url/BUILD b/source/common/chromium_url/BUILD deleted file mode 100644 index 0529a808f139..000000000000 --- a/source/common/chromium_url/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -licenses(["notice"]) # Apache 2 - -envoy_package() - -envoy_cc_library( - name = "chromium_url", - srcs = [ - "url_canon.cc", - "url_canon_internal.cc", - "url_canon_path.cc", - "url_canon_stdstring.cc", - ], - hdrs = [ - "envoy_shim.h", - "url_canon.h", - "url_canon_internal.h", - "url_canon_stdstring.h", - "url_parse.h", - "url_parse_internal.h", - ], - deps = [ - "//source/common/common:assert_lib", - "//source/common/common:mem_block_builder_lib", - ], -) diff --git a/source/common/chromium_url/LICENSE b/source/common/chromium_url/LICENSE deleted file mode 100644 index a32e00ce6be3..000000000000 --- a/source/common/chromium_url/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 The Chromium Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/source/common/chromium_url/README.md b/source/common/chromium_url/README.md deleted file mode 100644 index 32e251c82d4d..000000000000 --- a/source/common/chromium_url/README.md +++ /dev/null @@ -1,16 +0,0 @@ -This is a manually minified variant of -https://chromium.googlesource.com/chromium/src.git/+archive/74.0.3729.15/url.tar.gz, -providing just the parts needed for `url::CanonicalizePath()`. This is intended -to support a security release fix for CVE-2019-9901. Long term we need this to -be moved to absl or QUICHE for upgrades and long-term support. - -Some specific transforms of interest: -* The namespace `url` was changed to `chromium_url`. -* `url_parse.h` is minified to just `Component` and flattened back into the URL - directory. It does not contain any non-Chromium authored code any longer and - so does not have a separate LICENSE. -* `envoy_shim.h` adapts various macros to the Envoy context. 
-* Anything not reachable from `url::CanonicalizePath()` has been dropped. -* Header include paths have changed as needed. -* BUILD was manually written. -* Various clang-tidy and format fixes. diff --git a/source/common/chromium_url/envoy_shim.h b/source/common/chromium_url/envoy_shim.h deleted file mode 100644 index c581e21d45bf..000000000000 --- a/source/common/chromium_url/envoy_shim.h +++ /dev/null @@ -1,17 +0,0 @@ -#pragma once - -#include "source/common/common/assert.h" - -// This is a minimal Envoy adaptation layer for the Chromium URL library. -// NOLINT(namespace-envoy) - -#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ - TypeName(const TypeName&) = delete; \ - TypeName& operator=(const TypeName&) = delete - -#define EXPORT_TEMPLATE_DECLARE(x) -#define EXPORT_TEMPLATE_DEFINE(x) -#define COMPONENT_EXPORT(x) - -#define DCHECK(x) ASSERT(x) -#define NOTREACHED() NOT_REACHED_GCOVR_EXCL_LINE diff --git a/source/common/chromium_url/url_canon.cc b/source/common/chromium_url/url_canon.cc deleted file mode 100644 index 79b36e986ca5..000000000000 --- a/source/common/chromium_url/url_canon.cc +++ /dev/null @@ -1,16 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2017 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "source/common/chromium_url/url_canon.h" - -#include "source/common/chromium_url/envoy_shim.h" - -namespace chromium_url { - -template class EXPORT_TEMPLATE_DEFINE(COMPONENT_EXPORT(URL)) CanonOutputT; - -} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon.h b/source/common/chromium_url/url_canon.h deleted file mode 100644 index d56346d5ee16..000000000000 --- a/source/common/chromium_url/url_canon.h +++ /dev/null @@ -1,187 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef URL_URL_CANON_H_ -#define URL_URL_CANON_H_ - -#include -#include - -#include "source/common/chromium_url/envoy_shim.h" -#include "source/common/chromium_url/url_parse.h" -#include "source/common/common/mem_block_builder.h" - -namespace chromium_url { - -// Canonicalizer output ------------------------------------------------------- - -// Base class for the canonicalizer output, this maintains a buffer and -// supports simple resizing and append operations on it. -// -// It is VERY IMPORTANT that no virtual function calls be made on the common -// code path. We only have two virtual function calls, the destructor and a -// resize function that is called when the existing buffer is not big enough. -// The derived class is then in charge of setting up our buffer which we will -// manage. -template class CanonOutputT { -public: - CanonOutputT() : buffer_(NULL), buffer_len_(0), cur_len_(0) {} - virtual ~CanonOutputT() = default; - - // Implemented to resize the buffer. This function should update the buffer - // pointer to point to the new buffer, and any old data up to |cur_len_| in - // the buffer must be copied over. - // - // The new size |sz| must be larger than buffer_len_. - virtual void Resize(int sz) = 0; - - // Accessor for returning a character at a given position. The input offset - // must be in the valid range. 
- inline T at(int offset) const { return buffer_[offset]; } - - // Sets the character at the given position. The given position MUST be less - // than the length(). - inline void set(int offset, T ch) { buffer_[offset] = ch; } - - // Returns the number of characters currently in the buffer. - inline int length() const { return cur_len_; } - - // Returns the current capacity of the buffer. The length() is the number of - // characters that have been declared to be written, but the capacity() is - // the number that can be written without reallocation. If the caller must - // write many characters at once, it can make sure there is enough capacity, - // write the data, then use set_size() to declare the new length(). - int capacity() const { return buffer_len_; } - - // Called by the user of this class to get the output. The output will NOT - // be NULL-terminated. Call length() to get the - // length. - const T* data() const { return buffer_; } - T* data() { return buffer_; } - - // Shortens the URL to the new length. Used for "backing up" when processing - // relative paths. This can also be used if an external function writes a lot - // of data to the buffer (when using the "Raw" version below) beyond the end, - // to declare the new length. - // - // This MUST NOT be used to expand the size of the buffer beyond capacity(). - void set_length(int new_len) { cur_len_ = new_len; } - - // This is the most performance critical function, since it is called for - // every character. - void push_back(T ch) { - // In VC2005, putting this common case first speeds up execution - // dramatically because this branch is predicted as taken. - if (cur_len_ < buffer_len_) { - buffer_[cur_len_] = ch; - cur_len_++; - return; - } - - // Grow the buffer to hold at least one more item. Hopefully we won't have - // to do this very often. - if (!Grow(1)) - return; - - // Actually do the insertion. - buffer_[cur_len_] = ch; - cur_len_++; - } - - // Appends the given string to the output. - void Append(const T* str, int str_len) { - if (cur_len_ + str_len > buffer_len_) { - if (!Grow(cur_len_ + str_len - buffer_len_)) - return; - } - for (int i = 0; i < str_len; i++) - buffer_[cur_len_ + i] = str[i]; - cur_len_ += str_len; - } - - void ReserveSizeIfNeeded(int estimated_size) { - // Reserve a bit extra to account for escaped chars. - if (estimated_size > buffer_len_) - Resize(estimated_size + 8); - } - -protected: - // Grows the given buffer so that it can fit at least |min_additional| - // characters. Returns true if the buffer could be resized, false on OOM. - bool Grow(int min_additional) { - static const int kMinBufferLen = 16; - int new_len = (buffer_len_ == 0) ? kMinBufferLen : buffer_len_; - do { - if (new_len >= (1 << 30)) // Prevent overflow below. - return false; - new_len *= 2; - } while (new_len < buffer_len_ + min_additional); - Resize(new_len); - return true; - } - - T* buffer_; - int buffer_len_; - - // Used characters in the buffer. - int cur_len_; -}; - -// Simple implementation of the CanonOutput using new[]. This class -// also supports a static buffer so if it is allocated on the stack, most -// URLs can be canonicalized with no heap allocations. 
-template class RawCanonOutputT : public CanonOutputT { -public: - RawCanonOutputT() : CanonOutputT() { - this->buffer_ = fixed_buffer_; - this->buffer_len_ = fixed_capacity; - } - ~RawCanonOutputT() override { - if (this->buffer_ != fixed_buffer_) - delete[] this->buffer_; - } - - void Resize(int sz) override { - Envoy::MemBlockBuilder new_buf(sz); - new_buf.appendData(absl::Span(this->buffer, std::min(this->cur_len_, sz))); - if (this->buffer_ != fixed_buffer_) - delete[] this->buffer_; - this->buffer_ = new_buf.releasePointer(); - this->buffer_len_ = sz; - } - -protected: - T fixed_buffer_[fixed_capacity]; -}; - -// Explicitly instantiate commonly used instantiations. -extern template class EXPORT_TEMPLATE_DECLARE(COMPONENT_EXPORT(URL)) CanonOutputT; - -// Normally, all canonicalization output is in narrow characters. We support -// the templates so it can also be used internally if a wide buffer is -// required. -using CanonOutput = CanonOutputT; - -template -class RawCanonOutput : public RawCanonOutputT {}; - -// Path. If the input does not begin in a slash (including if the input is -// empty), we'll prepend a slash to the path to make it canonical. -// -// The 8-bit version assumes UTF-8 encoding, but does not verify the validity -// of the UTF-8 (i.e., you can have invalid UTF-8 sequences, invalid -// characters, etc.). Normally, URLs will come in as UTF-16, so this isn't -// an issue. Somebody giving us an 8-bit path is responsible for generating -// the path that the server expects (we'll escape high-bit characters), so -// if something is invalid, it's their problem. -COMPONENT_EXPORT(URL) -bool CanonicalizePath(const char* spec, const Component& path, CanonOutput* output, - Component* out_path); - -} // namespace chromium_url - -#endif // URL_URL_CANON_H_ diff --git a/source/common/chromium_url/url_canon_internal.cc b/source/common/chromium_url/url_canon_internal.cc deleted file mode 100644 index ee29b04ef162..000000000000 --- a/source/common/chromium_url/url_canon_internal.cc +++ /dev/null @@ -1,295 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "source/common/chromium_url/url_canon_internal.h" - -namespace chromium_url { - -// See the header file for this array's declaration. -const unsigned char kSharedCharTypeTable[0x100] = { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0x00 - 0x0f - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0x10 - 0x1f - 0, // 0x20 ' ' (escape spaces in queries) - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x21 ! - 0, // 0x22 " - 0, // 0x23 # (invalid in query since it marks the ref) - CHAR_QUERY | CHAR_USERINFO, // 0x24 $ - CHAR_QUERY | CHAR_USERINFO, // 0x25 % - CHAR_QUERY | CHAR_USERINFO, // 0x26 & - 0, // 0x27 ' (Try to prevent XSS.) - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x28 ( - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x29 ) - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x2a * - CHAR_QUERY | CHAR_USERINFO, // 0x2b + - CHAR_QUERY | CHAR_USERINFO, // 0x2c , - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x2d - - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x2e . 
- CHAR_QUERY, // 0x2f / - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x30 0 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x31 1 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x32 2 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x33 3 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x34 4 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x35 5 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x36 6 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x37 7 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_COMPONENT, // 0x38 8 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_COMPONENT, // 0x39 9 - CHAR_QUERY, // 0x3a : - CHAR_QUERY, // 0x3b ; - 0, // 0x3c < (Try to prevent certain types of XSS.) - CHAR_QUERY, // 0x3d = - 0, // 0x3e > (Try to prevent certain types of XSS.) - CHAR_QUERY, // 0x3f ? - CHAR_QUERY, // 0x40 @ - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x41 A - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x42 B - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x43 C - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x44 D - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x45 E - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x46 F - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x47 G - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x48 H - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x49 I - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4a J - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4b K - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4c L - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4d M - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4e N - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4f O - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x50 P - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x51 Q - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x52 R - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x53 S - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x54 T - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x55 U - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x56 V - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x57 W - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x58 X - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x59 Y - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x5a Z - CHAR_QUERY, // 0x5b [ - CHAR_QUERY, // 0x5c '\' - CHAR_QUERY, // 0x5d ] - CHAR_QUERY, // 0x5e ^ - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x5f _ - CHAR_QUERY, // 0x60 ` - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x61 a - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x62 b - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x63 c - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x64 d - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x65 e - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x66 
f - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x67 g - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x68 h - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x69 i - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6a j - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6b k - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6c l - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6d m - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6e n - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6f o - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x70 p - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x71 q - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x72 r - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x73 s - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x74 t - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x75 u - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x76 v - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x77 w - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x78 x - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x79 y - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x7a z - CHAR_QUERY, // 0x7b { - CHAR_QUERY, // 0x7c | - CHAR_QUERY, // 0x7d } - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x7e ~ - 0, // 0x7f - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0x80 - 0x8f - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0x90 - 0x9f - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xa0 - 0xaf - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xb0 - 0xbf - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xc0 - 0xcf - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xd0 - 0xdf - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xe0 - 0xef - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xf0 - 0xff -}; - -const char kHexCharLookup[0x10] = { - '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', -}; - -const char kCharToHexLookup[8] = { - 0, // 0x00 - 0x1f - '0', // 0x20 - 0x3f: digits 0 - 9 are 0x30 - 0x39 - 'A' - 10, // 0x40 - 0x5f: letters A - F are 0x41 - 0x46 - 'a' - 10, // 0x60 - 0x7f: letters a - f are 0x61 - 0x66 - 0, // 0x80 - 0x9F - 0, // 0xA0 - 0xBF - 0, // 0xC0 - 0xDF - 0, // 0xE0 - 0xFF -}; - -} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_internal.h b/source/common/chromium_url/url_canon_internal.h deleted file mode 100644 index 84c761a6a068..000000000000 --- a/source/common/chromium_url/url_canon_internal.h +++ /dev/null @@ -1,204 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef URL_URL_CANON_INTERNAL_H_ -#define URL_URL_CANON_INTERNAL_H_ - -// This file is intended to be included in another C++ file where the character -// types are defined. This allows us to write mostly generic code, but not have -// template bloat because everything is inlined when anybody calls any of our -// functions. 
- -#include -#include - -#include "source/common/chromium_url/envoy_shim.h" -#include "source/common/chromium_url/url_canon.h" - -namespace chromium_url { - -// Character type handling ----------------------------------------------------- - -// Bits that identify different character types. These types identify different -// bits that are set for each 8-bit character in the kSharedCharTypeTable. -enum SharedCharTypes { - // Characters that do not require escaping in queries. Characters that do - // not have this flag will be escaped; see url_canon_query.cc - CHAR_QUERY = 1, - - // Valid in the username/password field. - CHAR_USERINFO = 2, - - // Valid in a IPv4 address (digits plus dot and 'x' for hex). - CHAR_IPV4 = 4, - - // Valid in an ASCII-representation of a hex digit (as in %-escaped). - CHAR_HEX = 8, - - // Valid in an ASCII-representation of a decimal digit. - CHAR_DEC = 16, - - // Valid in an ASCII-representation of an octal digit. - CHAR_OCT = 32, - - // Characters that do not require escaping in encodeURIComponent. Characters - // that do not have this flag will be escaped; see url_util.cc. - CHAR_COMPONENT = 64, -}; - -// This table contains the flags in SharedCharTypes for each 8-bit character. -// Some canonicalization functions have their own specialized lookup table. -// For those with simple requirements, we have collected the flags in one -// place so there are fewer lookup tables to load into the CPU cache. -// -// Using an unsigned char type has a small but measurable performance benefit -// over using a 32-bit number. -extern const unsigned char kSharedCharTypeTable[0x100]; - -// More readable wrappers around the character type lookup table. -inline bool IsCharOfType(unsigned char c, SharedCharTypes type) { - return !!(kSharedCharTypeTable[c] & type); -} -inline bool IsQueryChar(unsigned char c) { return IsCharOfType(c, CHAR_QUERY); } -inline bool IsIPv4Char(unsigned char c) { return IsCharOfType(c, CHAR_IPV4); } -inline bool IsHexChar(unsigned char c) { return IsCharOfType(c, CHAR_HEX); } -inline bool IsComponentChar(unsigned char c) { return IsCharOfType(c, CHAR_COMPONENT); } - -// Maps the hex numerical values 0x0 to 0xf to the corresponding ASCII digit -// that will be used to represent it. -COMPONENT_EXPORT(URL) extern const char kHexCharLookup[0x10]; - -// This lookup table allows fast conversion between ASCII hex letters and their -// corresponding numerical value. The 8-bit range is divided up into 8 -// regions of 0x20 characters each. Each of the three character types (numbers, -// uppercase, lowercase) falls into different regions of this range. The table -// contains the amount to subtract from characters in that range to get at -// the corresponding numerical value. -// -// See HexDigitToValue for the lookup. -extern const char kCharToHexLookup[8]; - -// Assumes the input is a valid hex digit! Call IsHexChar before using this. -inline unsigned char HexCharToValue(unsigned char c) { return c - kCharToHexLookup[c / 0x20]; } - -// Indicates if the given character is a dot or dot equivalent, returning the -// number of characters taken by it. This will be one for a literal dot, 3 for -// an escaped dot. If the character is not a dot, this will return 0. 
-template inline int IsDot(const CHAR* spec, int offset, int end) { - if (spec[offset] == '.') { - return 1; - } else if (spec[offset] == '%' && offset + 3 <= end && spec[offset + 1] == '2' && - (spec[offset + 2] == 'e' || spec[offset + 2] == 'E')) { - // Found "%2e" - return 3; - } - return 0; -} - -// Write a single character, escaped, to the output. This always escapes: it -// does no checking that thee character requires escaping. -// Escaping makes sense only 8 bit chars, so code works in all cases of -// input parameters (8/16bit). -template -inline void AppendEscapedChar(UINCHAR ch, CanonOutputT* output) { - output->push_back('%'); - output->push_back(kHexCharLookup[(ch >> 4) & 0xf]); - output->push_back(kHexCharLookup[ch & 0xf]); -} - -// UTF-8 functions ------------------------------------------------------------ - -// Generic To-UTF-8 converter. This will call the given append method for each -// character that should be appended, with the given output method. Wrappers -// are provided below for escaped and non-escaped versions of this. -// -// The char_value must have already been checked that it's a valid Unicode -// character. -template -inline void DoAppendUTF8(unsigned char_value, Output* output) { - if (char_value <= 0x7f) { - Appender(static_cast(char_value), output); - } else if (char_value <= 0x7ff) { - // 110xxxxx 10xxxxxx - Appender(static_cast(0xC0 | (char_value >> 6)), output); - Appender(static_cast(0x80 | (char_value & 0x3f)), output); - } else if (char_value <= 0xffff) { - // 1110xxxx 10xxxxxx 10xxxxxx - Appender(static_cast(0xe0 | (char_value >> 12)), output); - Appender(static_cast(0x80 | ((char_value >> 6) & 0x3f)), output); - Appender(static_cast(0x80 | (char_value & 0x3f)), output); - } else if (char_value <= 0x10FFFF) { // Max Unicode code point. - // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - Appender(static_cast(0xf0 | (char_value >> 18)), output); - Appender(static_cast(0x80 | ((char_value >> 12) & 0x3f)), output); - Appender(static_cast(0x80 | ((char_value >> 6) & 0x3f)), output); - Appender(static_cast(0x80 | (char_value & 0x3f)), output); - } else { - // Invalid UTF-8 character (>20 bits). - NOTREACHED(); - } -} - -// Helper used by AppendUTF8Value below. We use an unsigned parameter so there -// are no funny sign problems with the input, but then have to convert it to -// a regular char for appending. -inline void AppendCharToOutput(unsigned char ch, CanonOutput* output) { - output->push_back(static_cast(ch)); -} - -// Writes the given character to the output as UTF-8. This does NO checking -// of the validity of the Unicode characters; the caller should ensure that -// the value it is appending is valid to append. -inline void AppendUTF8Value(unsigned char_value, CanonOutput* output) { - DoAppendUTF8(char_value, output); -} - -// Writes the given character to the output as UTF-8, escaping ALL -// characters (even when they are ASCII). This does NO checking of the -// validity of the Unicode characters; the caller should ensure that the value -// it is appending is valid to append. -inline void AppendUTF8EscapedValue(unsigned char_value, CanonOutput* output) { - DoAppendUTF8(char_value, output); -} - -// Given a '%' character at |*begin| in the string |spec|, this will decode -// the escaped value and put it into |*unescaped_value| on success (returns -// true). On failure, this will return false, and will not write into -// |*unescaped_value|. 
-// -// |*begin| will be updated to point to the last character of the escape -// sequence so that when called with the index of a for loop, the next time -// through it will point to the next character to be considered. On failure, -// |*begin| will be unchanged. -inline bool Is8BitChar(char /*c*/) { - return true; // this case is specialized to avoid a warning -} - -template -inline bool DecodeEscaped(const CHAR* spec, int* begin, int end, unsigned char* unescaped_value) { - if (*begin + 3 > end || !Is8BitChar(spec[*begin + 1]) || !Is8BitChar(spec[*begin + 2])) { - // Invalid escape sequence because there's not enough room, or the - // digits are not ASCII. - return false; - } - - unsigned char first = static_cast(spec[*begin + 1]); - unsigned char second = static_cast(spec[*begin + 2]); - if (!IsHexChar(first) || !IsHexChar(second)) { - // Invalid hex digits, fail. - return false; - } - - // Valid escape sequence. - *unescaped_value = (HexCharToValue(first) << 4) + HexCharToValue(second); - *begin += 2; - return true; -} - -} // namespace chromium_url - -#endif // URL_URL_CANON_INTERNAL_H_ diff --git a/source/common/chromium_url/url_canon_path.cc b/source/common/chromium_url/url_canon_path.cc deleted file mode 100644 index 17eec73510db..000000000000 --- a/source/common/chromium_url/url_canon_path.cc +++ /dev/null @@ -1,413 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include - -#include "source/common/chromium_url/url_canon.h" -#include "source/common/chromium_url/url_canon_internal.h" -#include "source/common/chromium_url/url_parse_internal.h" - -namespace chromium_url { - -namespace { - -enum CharacterFlags { - // Pass through unchanged, whether escaped or unescaped. This doesn't - // actually set anything so you can't OR it to check, it's just to make the - // table below more clear when neither ESCAPE or UNESCAPE is set. - PASS = 0, - - // This character requires special handling in DoPartialPath. Doing this test - // first allows us to filter out the common cases of regular characters that - // can be directly copied. - SPECIAL = 1, - - // This character must be escaped in the canonical output. Note that all - // escaped chars also have the "special" bit set so that the code that looks - // for this is triggered. Not valid with PASS or ESCAPE - ESCAPE_BIT = 2, - ESCAPE = ESCAPE_BIT | SPECIAL, - - // This character must be unescaped in canonical output. Not valid with - // ESCAPE or PASS. We DON'T set the SPECIAL flag since if we encounter these - // characters unescaped, they should just be copied. - UNESCAPE = 4, - - // This character is disallowed in URLs. Note that the "special" bit is also - // set to trigger handling. - INVALID_BIT = 8, - INVALID = INVALID_BIT | SPECIAL, -}; - -// This table contains one of the above flag values. Note some flags are more -// than one bits because they also turn on the "special" flag. Special is the -// only flag that may be combined with others. -// -// This table is designed to match exactly what IE does with the characters. -// -// Dot is even more special, and the escaped version is handled specially by -// IsDot. Therefore, we don't need the "escape" flag, and even the "unescape" -// bit is never handled (we just need the "special") bit. 
-const unsigned char kPathCharLookup[0x100] = { - // NULL control chars... - INVALID, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, - // control chars... - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, - // ' ' ! " # $ % & ' ( ) * - // + , - . / - ESCAPE, PASS, ESCAPE, ESCAPE, PASS, ESCAPE, PASS, PASS, PASS, PASS, PASS, PASS, PASS, UNESCAPE, - SPECIAL, PASS, - // 0 1 2 3 4 5 6 7 8 9 : - // ; < = > ? - UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - UNESCAPE, PASS, PASS, ESCAPE, PASS, ESCAPE, ESCAPE, - // @ A B C D E F G H I J - // K L M N O - PASS, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - // P Q R S T U V W X Y Z - // [ \ ] ^ _ - UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - UNESCAPE, UNESCAPE, PASS, ESCAPE, PASS, ESCAPE, UNESCAPE, - // ` a b c d e f g h i j - // k l m n o - ESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - // p q r s t u v w x y z - // { | } ~ - UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - UNESCAPE, UNESCAPE, ESCAPE, ESCAPE, ESCAPE, UNESCAPE, ESCAPE, - // ...all the high-bit characters are escaped - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE}; - -enum DotDisposition { - // The given dot is just part of a filename and is not special. - NOT_A_DIRECTORY, - - // The given dot is the current directory. - DIRECTORY_CUR, - - // The given dot is the first of a double dot that should take us up one. - DIRECTORY_UP -}; - -// When the path resolver finds a dot, this function is called with the -// character following that dot to see what it is. The return value -// indicates what type this dot is (see above). This code handles the case -// where the dot is at the end of the input. -// -// |*consumed_len| will contain the number of characters in the input that -// express what we found. -// -// If the input is "../foo", |after_dot| = 1, |end| = 6, and -// at the end, |*consumed_len| = 2 for the "./" this function consumed. The -// original dot length should be handled by the caller. 
-template -DotDisposition ClassifyAfterDot(const CHAR* spec, int after_dot, int end, int* consumed_len) { - if (after_dot == end) { - // Single dot at the end. - *consumed_len = 0; - return DIRECTORY_CUR; - } - if (IsURLSlash(spec[after_dot])) { - // Single dot followed by a slash. - *consumed_len = 1; // Consume the slash - return DIRECTORY_CUR; - } - - int second_dot_len = IsDot(spec, after_dot, end); - if (second_dot_len) { - int after_second_dot = after_dot + second_dot_len; - if (after_second_dot == end) { - // Double dot at the end. - *consumed_len = second_dot_len; - return DIRECTORY_UP; - } - if (IsURLSlash(spec[after_second_dot])) { - // Double dot followed by a slash. - *consumed_len = second_dot_len + 1; - return DIRECTORY_UP; - } - } - - // The dots are followed by something else, not a directory. - *consumed_len = 0; - return NOT_A_DIRECTORY; -} - -// Rewinds the output to the previous slash. It is assumed that the output -// ends with a slash and this doesn't count (we call this when we are -// appending directory paths, so the previous path component has and ending -// slash). -// -// This will stop at the first slash (assumed to be at position -// |path_begin_in_output| and not go any higher than that. Some web pages -// do ".." too many times, so we need to handle that brokenness. -// -// It searches for a literal slash rather than including a backslash as well -// because it is run only on the canonical output. -// -// The output is guaranteed to end in a slash when this function completes. -void BackUpToPreviousSlash(int path_begin_in_output, CanonOutput* output) { - DCHECK(output->length() > 0); - - int i = output->length() - 1; - DCHECK(output->at(i) == '/'); - if (i == path_begin_in_output) - return; // We're at the first slash, nothing to do. - - // Now back up (skipping the trailing slash) until we find another slash. - i--; - while (output->at(i) != '/' && i > path_begin_in_output) - i--; - - // Now shrink the output to just include that last slash we found. - output->set_length(i + 1); -} - -// Looks for problematic nested escape sequences and escapes the output as -// needed to ensure they can't be misinterpreted. -// -// Our concern is that in input escape sequence that's invalid because it -// contains nested escape sequences might look valid once those are unescaped. -// For example, "%%300" is not a valid escape sequence, but after unescaping the -// inner "%30" this becomes "%00" which is valid. Leaving this in the output -// string can result in callers re-canonicalizing the string and unescaping this -// sequence, thus resulting in something fundamentally different than the -// original input here. This can cause a variety of problems. -// -// This function is called after we've just unescaped a sequence that's within -// two output characters of a previous '%' that we know didn't begin a valid -// escape sequence in the input string. We look for whether the output is going -// to turn into a valid escape sequence, and if so, convert the initial '%' into -// an escaped "%25" so the output can't be misinterpreted. -// -// |spec| is the input string we're canonicalizing. -// |next_input_index| is the index of the next unprocessed character in |spec|. -// |input_len| is the length of |spec|. -// |last_invalid_percent_index| is the index in |output| of a previously-seen -// '%' character. The caller knows this '%' character isn't followed by a valid -// escape sequence in the input string. -// |output| is the canonicalized output thus far. 
The caller guarantees this -// ends with a '%' followed by one or two characters, and the '%' is the one -// pointed to by |last_invalid_percent_index|. The last character in the string -// was just unescaped. -template -void CheckForNestedEscapes(const CHAR* spec, int next_input_index, int input_len, - int last_invalid_percent_index, CanonOutput* output) { - const int length = output->length(); - const char last_unescaped_char = output->at(length - 1); - - // If |output| currently looks like "%c", we need to try appending the next - // input character to see if this will result in a problematic escape - // sequence. Note that this won't trigger on the first nested escape of a - // two-escape sequence like "%%30%30" -- we'll allow the conversion to - // "%0%30" -- but the second nested escape will be caught by this function - // when it's called again in that case. - const bool append_next_char = last_invalid_percent_index == length - 2; - if (append_next_char) { - // If the input doesn't contain a 7-bit character next, this case won't be a - // problem. - if ((next_input_index == input_len) || (spec[next_input_index] >= 0x80)) - return; - output->push_back(static_cast(spec[next_input_index])); - } - - // Now output ends like "%cc". Try to unescape this. - int begin = last_invalid_percent_index; - unsigned char temp; - if (DecodeEscaped(output->data(), &begin, output->length(), &temp)) { - // New escape sequence found. Overwrite the characters following the '%' - // with "25", and push_back() the one or two characters that were following - // the '%' when we were called. - if (!append_next_char) - output->push_back(output->at(last_invalid_percent_index + 1)); - output->set(last_invalid_percent_index + 1, '2'); - output->set(last_invalid_percent_index + 2, '5'); - output->push_back(last_unescaped_char); - } else if (append_next_char) { - // Not a valid escape sequence, but we still need to undo appending the next - // source character so the caller can process it normally. - output->set_length(length); - } -} - -// Appends the given path to the output. It assumes that if the input path -// starts with a slash, it should be copied to the output. If no path has -// already been appended to the output (the case when not resolving -// relative URLs), the path should begin with a slash. -// -// If there are already path components (this mode is used when appending -// relative paths for resolving), it assumes that the output already has -// a trailing slash and that if the input begins with a slash, it should be -// copied to the output. -// -// We do not collapse multiple slashes in a row to a single slash. It seems -// no web browsers do this, and we don't want incompatibilities, even though -// it would be correct for most systems. -template -bool DoPartialPath(const CHAR* spec, const Component& path, int path_begin_in_output, - CanonOutput* output) { - int end = path.end(); - - // We use this variable to minimize the amount of work done when unescaping -- - // we'll only call CheckForNestedEscapes() when this points at one of the last - // couple of characters in |output|. - int last_invalid_percent_index = INT_MIN; - - bool success = true; - for (int i = path.begin; i < end; i++) { - UCHAR uch = static_cast(spec[i]); - // Chromium UTF8 logic is unneeded, as the missing templated result - // refers only to char const* (single-byte) characters at this time. - // This only trips up MSVC, since linux gcc seems to optimize it away. 
- // Indention is to avoid gratuitous diffs to origin source - { - unsigned char out_ch = static_cast(uch); - unsigned char flags = kPathCharLookup[out_ch]; - if (flags & SPECIAL) { - // Needs special handling of some sort. - int dotlen; - if ((dotlen = IsDot(spec, i, end)) > 0) { - // See if this dot was preceded by a slash in the output. We - // assume that when canonicalizing paths, they will always - // start with a slash and not a dot, so we don't have to - // bounds check the output. - // - // Note that we check this in the case of dots so we don't have to - // special case slashes. Since slashes are much more common than - // dots, this actually increases performance measurably (though - // slightly). - DCHECK(output->length() > path_begin_in_output); - if (output->length() > path_begin_in_output && output->at(output->length() - 1) == '/') { - // Slash followed by a dot, check to see if this is means relative - int consumed_len; - switch (ClassifyAfterDot(spec, i + dotlen, end, &consumed_len)) { - case NOT_A_DIRECTORY: - // Copy the dot to the output, it means nothing special. - output->push_back('.'); - i += dotlen - 1; - break; - case DIRECTORY_CUR: // Current directory, just skip the input. - i += dotlen + consumed_len - 1; - break; - case DIRECTORY_UP: - BackUpToPreviousSlash(path_begin_in_output, output); - i += dotlen + consumed_len - 1; - break; - } - } else { - // This dot is not preceded by a slash, it is just part of some - // file name. - output->push_back('.'); - i += dotlen - 1; - } - - } else if (out_ch == '\\') { - // Convert backslashes to forward slashes - output->push_back('/'); - - } else if (out_ch == '%') { - // Handle escape sequences. - unsigned char unescaped_value; - if (DecodeEscaped(spec, &i, end, &unescaped_value)) { - // Valid escape sequence, see if we keep, reject, or unescape it. - // Note that at this point DecodeEscape() will have advanced |i| to - // the last character of the escape sequence. - char unescaped_flags = kPathCharLookup[unescaped_value]; - - if (unescaped_flags & UNESCAPE) { - // This escaped value shouldn't be escaped. Try to copy it. - output->push_back(unescaped_value); - // If we just unescaped a value within 2 output characters of the - // '%' from a previously-detected invalid escape sequence, we - // might have an input string with problematic nested escape - // sequences; detect and fix them. - if (last_invalid_percent_index >= (output->length() - 3)) { - CheckForNestedEscapes(spec, i + 1, end, last_invalid_percent_index, output); - } - } else { - // Either this is an invalid escaped character, or it's a valid - // escaped character we should keep escaped. In the first case we - // should just copy it exactly and remember the error. In the - // second we also copy exactly in case the server is sensitive to - // changing the case of any hex letters. - output->push_back('%'); - output->push_back(static_cast(spec[i - 1])); - output->push_back(static_cast(spec[i])); - if (unescaped_flags & INVALID_BIT) - success = false; - } - } else { - // Invalid escape sequence. IE7+ rejects any URLs with such - // sequences, while other browsers pass them through unchanged. We - // use the permissive behavior. - // TODO(brettw): Consider testing IE's strict behavior, which would - // allow removing the code to handle nested escapes above. - last_invalid_percent_index = output->length(); - output->push_back('%'); - } - - } else if (flags & INVALID_BIT) { - // For NULLs, etc. fail. 
- AppendEscapedChar(out_ch, output); - success = false; - - } else if (flags & ESCAPE_BIT) { - // This character should be escaped. - AppendEscapedChar(out_ch, output); - } - } else { - // Nothing special about this character, just append it. - output->push_back(out_ch); - } - } - } - return success; -} - -template -bool DoPath(const CHAR* spec, const Component& path, CanonOutput* output, Component* out_path) { - bool success = true; - out_path->begin = output->length(); - if (path.len > 0) { - // Write out an initial slash if the input has none. If we just parse a URL - // and then canonicalize it, it will of course have a slash already. This - // check is for the replacement and relative URL resolving cases of file - // URLs. - if (!IsURLSlash(spec[path.begin])) - output->push_back('/'); - - success = DoPartialPath(spec, path, out_path->begin, output); - } else { - // No input, canonical path is a slash. - output->push_back('/'); - } - out_path->len = output->length() - out_path->begin; - return success; -} - -} // namespace - -bool CanonicalizePath(const char* spec, const Component& path, CanonOutput* output, - Component* out_path) { - return DoPath(spec, path, output, out_path); -} - -} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_stdstring.cc b/source/common/chromium_url/url_canon_stdstring.cc deleted file mode 100644 index 0d62cf576424..000000000000 --- a/source/common/chromium_url/url_canon_stdstring.cc +++ /dev/null @@ -1,33 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "source/common/chromium_url/url_canon_stdstring.h" - -namespace chromium_url { - -StdStringCanonOutput::StdStringCanonOutput(std::string* str) : CanonOutput(), str_(str) { - cur_len_ = static_cast(str_->size()); // Append to existing data. - buffer_ = str_->empty() ? NULL : &(*str_)[0]; - buffer_len_ = static_cast(str_->size()); -} - -StdStringCanonOutput::~StdStringCanonOutput() { - // Nothing to do, we don't own the string. -} - -void StdStringCanonOutput::Complete() { - str_->resize(cur_len_); - buffer_len_ = cur_len_; -} - -void StdStringCanonOutput::Resize(int sz) { - str_->resize(sz); - buffer_ = str_->empty() ? NULL : &(*str_)[0]; - buffer_len_ = sz; -} - -} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_stdstring.h b/source/common/chromium_url/url_canon_stdstring.h deleted file mode 100644 index 6292c4e61dae..000000000000 --- a/source/common/chromium_url/url_canon_stdstring.h +++ /dev/null @@ -1,58 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef URL_URL_CANON_STDSTRING_H_ -#define URL_URL_CANON_STDSTRING_H_ - -// This header file defines a canonicalizer output method class for STL -// strings. Because the canonicalizer tries not to be dependent on the STL, -// we have segregated it here. 
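For context on how this removed snapshot was typically driven, here is a hedged usage sketch built only from the API visible in these files; the wrapper function name is illustrative and error handling is omitted.

#include <string>

#include "source/common/chromium_url/url_canon.h"
#include "source/common/chromium_url/url_canon_stdstring.h"
#include "source/common/chromium_url/url_parse.h"

// Normalizes an already-parsed path component, e.g. "/a/b/../c" -> "/a/c".
std::string normalizePath(const std::string& path) {
  std::string canonical;
  chromium_url::StdStringCanonOutput output(&canonical);
  chromium_url::Component in_path(0, static_cast<int>(path.size()));
  chromium_url::Component out_path;
  // Returns false when the input contained bytes that had to be escaped as invalid.
  chromium_url::CanonicalizePath(path.c_str(), in_path, &output, &out_path);
  output.Complete(); // Trim the over-allocated buffer before using |canonical|.
  return canonical;
}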
- -#include - -#include "source/common/chromium_url/envoy_shim.h" -#include "source/common/chromium_url/url_canon.h" - -#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ - TypeName(const TypeName&) = delete; \ - TypeName& operator=(const TypeName&) = delete - -namespace chromium_url { - -// Write into a std::string given in the constructor. This object does not own -// the string itself, and the user must ensure that the string stays alive -// throughout the lifetime of this object. -// -// The given string will be appended to; any existing data in the string will -// be preserved. -// -// Note that when canonicalization is complete, the string will likely have -// unused space at the end because we make the string very big to start out -// with (by |initial_size|). This ends up being important because resize -// operations are slow, and because the base class needs to write directly -// into the buffer. -// -// Therefore, the user should call Complete() before using the string that -// this class wrote into. -class COMPONENT_EXPORT(URL) StdStringCanonOutput : public CanonOutput { -public: - StdStringCanonOutput(std::string* str); - ~StdStringCanonOutput() override; - - // Must be called after writing has completed but before the string is used. - void Complete(); - - void Resize(int sz) override; - -protected: - std::string* str_; - DISALLOW_COPY_AND_ASSIGN(StdStringCanonOutput); -}; - -} // namespace chromium_url - -#endif // URL_URL_CANON_STDSTRING_H_ diff --git a/source/common/chromium_url/url_parse.h b/source/common/chromium_url/url_parse.h deleted file mode 100644 index b840af60438d..000000000000 --- a/source/common/chromium_url/url_parse.h +++ /dev/null @@ -1,49 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef URL_PARSE_H_ -#define URL_PARSE_H_ - -namespace chromium_url { - -// Component ------------------------------------------------------------------ - -// Represents a substring for URL parsing. -struct Component { - Component() : begin(0), len(-1) {} - - // Normal constructor: takes an offset and a length. - Component(int b, int l) : begin(b), len(l) {} - - int end() const { return begin + len; } - - // Returns true if this component is valid, meaning the length is given. Even - // valid components may be empty to record the fact that they exist. - bool is_valid() const { return (len != -1); } - - // Returns true if the given component is specified on false, the component - // is either empty or invalid. - bool is_nonempty() const { return (len > 0); } - - void reset() { - begin = 0; - len = -1; - } - - bool operator==(const Component& other) const { return begin == other.begin && len == other.len; } - - int begin; // Byte offset in the string of this component. - int len; // Will be -1 if the component is unspecified. -}; - -// Helper that returns a component created with the given begin and ending -// points. The ending point is non-inclusive. 
-inline Component MakeRange(int begin, int end) { return Component(begin, end - begin); } - -} // namespace chromium_url - -#endif // URL_PARSE_H_ diff --git a/source/common/chromium_url/url_parse_internal.h b/source/common/chromium_url/url_parse_internal.h deleted file mode 100644 index 0ca47bc48846..000000000000 --- a/source/common/chromium_url/url_parse_internal.h +++ /dev/null @@ -1,18 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef URL_URL_PARSE_INTERNAL_H_ -#define URL_URL_PARSE_INTERNAL_H_ - -namespace chromium_url { - -// We treat slashes and backslashes the same for IE compatibility. -inline bool IsURLSlash(char ch) { return ch == '/' || ch == '\\'; } - -} // namespace chromium_url - -#endif // URL_URL_PARSE_INTERNAL_H_ diff --git a/source/common/common/BUILD b/source/common/common/BUILD index 3ca8dfa105c9..4adfe5c622a5 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -83,6 +83,18 @@ envoy_cc_library( hdrs = ["compiler_requirements.h"], ) +envoy_cc_library( + name = "dns_utils_lib", + srcs = ["dns_utils.cc"], + hdrs = ["dns_utils.h"], + deps = [ + ":assert_lib", + "//envoy/network:dns_interface", + "//source/common/network:utility_lib", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + ], +) + envoy_cc_library( name = "documentation_url_lib", hdrs = ["documentation_url.h"], @@ -372,8 +384,8 @@ envoy_cc_library( hdrs = ["thread.h"], external_deps = ["abseil_synchronization"], deps = envoy_cc_platform_dep("thread_impl_lib") + [ + ":macros", ":non_copyable", - "//source/common/singleton:threadsafe_singleton", ], ) diff --git a/source/common/common/backoff_strategy.h b/source/common/common/backoff_strategy.h index cb47e9c6279b..e642c2e8ca21 100644 --- a/source/common/common/backoff_strategy.h +++ b/source/common/common/backoff_strategy.h @@ -29,9 +29,13 @@ class JitteredExponentialBackOffStrategy : public BackOffStrategy { // BackOffStrategy methods uint64_t nextBackOffMs() override; void reset() override; + void reset(uint64_t base_interval) override { + base_interval_ = base_interval; + reset(); + } private: - const uint64_t base_interval_; + uint64_t base_interval_; const uint64_t max_interval_{}; uint64_t next_interval_; Random::RandomGenerator& random_; @@ -53,9 +57,10 @@ class JitteredLowerBoundBackOffStrategy : public BackOffStrategy { // BackOffStrategy methods uint64_t nextBackOffMs() override; void reset() override {} + void reset(uint64_t min_interval) override { min_interval_ = min_interval; } private: - const uint64_t min_interval_; + uint64_t min_interval_; Random::RandomGenerator& random_; }; @@ -74,9 +79,10 @@ class FixedBackOffStrategy : public BackOffStrategy { // BackOffStrategy methods. 
uint64_t nextBackOffMs() override; void reset() override {} + void reset(uint64_t interval_ms) override { interval_ms_ = interval_ms; } private: - const uint64_t interval_ms_; + uint64_t interval_ms_; }; } // namespace Envoy diff --git a/source/common/common/dns_utils.cc b/source/common/common/dns_utils.cc new file mode 100644 index 000000000000..cbcce39380b9 --- /dev/null +++ b/source/common/common/dns_utils.cc @@ -0,0 +1,62 @@ +#include "source/common/common/dns_utils.h" + +#include "source/common/common/assert.h" +#include "source/common/network/utility.h" +#include "source/common/runtime/runtime_features.h" + +namespace Envoy { +namespace DnsUtils { + +Network::DnsLookupFamily +getDnsLookupFamilyFromCluster(const envoy::config::cluster::v3::Cluster& cluster) { + return getDnsLookupFamilyFromEnum(cluster.dns_lookup_family()); +} + +Network::DnsLookupFamily +getDnsLookupFamilyFromEnum(envoy::config::cluster::v3::Cluster::DnsLookupFamily family) { + switch (family) { + case envoy::config::cluster::v3::Cluster::V6_ONLY: + return Network::DnsLookupFamily::V6Only; + case envoy::config::cluster::v3::Cluster::V4_ONLY: + return Network::DnsLookupFamily::V4Only; + case envoy::config::cluster::v3::Cluster::AUTO: + return Network::DnsLookupFamily::Auto; + case envoy::config::cluster::v3::Cluster::V4_PREFERRED: + return Network::DnsLookupFamily::V4Preferred; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +std::vector +generateAddressList(const std::list& responses, uint32_t port) { + std::vector addresses; + if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_multiple_dns_addresses")) { + return addresses; + } + for (const auto& response : responses) { + auto address = Network::Utility::getAddressWithPort(*(response.address_), port); + if (address) { + addresses.push_back(address); + } + } + return addresses; +} + +bool listChanged(const std::vector& list1, + const std::vector& list2) { + if (list1.size() != list2.size()) { + return true; + } + // Eventually we could rewrite this to not count a change to the order of + // addresses as a functional change. + for (size_t i = 0; i < list1.size(); ++i) { + if (*list1[i] != *list2[i]) { + return true; + } + } + return false; +} + +} // namespace DnsUtils +} // namespace Envoy diff --git a/source/common/common/dns_utils.h b/source/common/common/dns_utils.h new file mode 100644 index 000000000000..869a91d2a236 --- /dev/null +++ b/source/common/common/dns_utils.h @@ -0,0 +1,25 @@ +#pragma once + +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/network/dns.h" + +namespace Envoy { +namespace DnsUtils { + +/** + * Utility function to get Dns from cluster/enum. + */ +Network::DnsLookupFamily +getDnsLookupFamilyFromCluster(const envoy::config::cluster::v3::Cluster& cluster); +Network::DnsLookupFamily +getDnsLookupFamilyFromEnum(envoy::config::cluster::v3::Cluster::DnsLookupFamily family); + +// Generates a list of InstanceConstSharedPtr from the DNS responses provided. +std::vector +generateAddressList(const std::list& responses, uint32_t port); + +// Returns true if list1 differs from list2, false otherwise. 
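Stepping back to the backoff change above: the new reset(interval) overloads let callers re-seed an existing strategy instead of constructing a new one. A usage sketch, assuming the existing (base, max, random) constructor of JitteredExponentialBackOffStrategy and the RandomGeneratorImpl header location (both assumptions, not shown in this diff):

#include "source/common/common/backoff_strategy.h"
#include "source/common/common/random_generator.h" // assumed location of RandomGeneratorImpl

uint64_t demoBackoffReset() {
  Envoy::Random::RandomGeneratorImpl random;
  Envoy::JitteredExponentialBackOffStrategy backoff(
      /*base_interval_ms=*/500, /*max_interval_ms=*/30000, random);
  uint64_t delay_ms = backoff.nextBackOffMs(); // Jittered, grows toward the configured max.
  // New in this change: re-seed the schedule when the configured interval changes
  // (e.g. a retry policy is re-delivered) without constructing a new strategy.
  backoff.reset(/*base_interval=*/1000); // Stores the new base, then restarts via reset().
  delay_ms = backoff.nextBackOffMs();
  return delay_ms;
}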
+bool listChanged(const std::vector& list1, + const std::vector& list2); +} // namespace DnsUtils +} // namespace Envoy diff --git a/source/common/common/logger.h b/source/common/common/logger.h index 284ca290df5d..73425c4c827a 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -47,6 +47,7 @@ namespace Logger { FUNCTION(filter) \ FUNCTION(forward_proxy) \ FUNCTION(grpc) \ + FUNCTION(happy_eyeballs) \ FUNCTION(hc) \ FUNCTION(health_checker) \ FUNCTION(http) \ diff --git a/source/common/common/stl_helpers.h b/source/common/common/stl_helpers.h index 9c1ab9498b33..c1f14b09df7a 100644 --- a/source/common/common/stl_helpers.h +++ b/source/common/common/stl_helpers.h @@ -59,4 +59,10 @@ template std::ostream& operator<<(std::ostream& out, const std::vector return out; } +// Overload std::operator<< to output a pair. +template +std::ostream& operator<<(std::ostream& out, const std::pair& v) { + out << "pair(" << v.first << ", " << v.second << ")"; + return out; +} } // namespace std diff --git a/source/common/common/thread.cc b/source/common/common/thread.cc index e6ee78c8e520..63cac85f6ca0 100644 --- a/source/common/common/thread.cc +++ b/source/common/common/thread.cc @@ -1,36 +1,120 @@ #include "source/common/common/thread.h" +#include + +#include "source/common/common/assert.h" +#include "source/common/common/macros.h" + namespace Envoy { namespace Thread { -bool MainThread::isMainThread() { - // If threading is off, only main thread is running. - auto main_thread_singleton = MainThreadSingleton::getExisting(); - if (main_thread_singleton == nullptr) { - return true; +namespace { + +// Singleton structure capturing which thread is the main dispatcher thread, and +// which is the test thread. This info is used for assertions around catching +// exceptions and accessing data structures which are not mutex-protected, and +// are expected only from the main thread. +// +// TODO(jmarantz): avoid the singleton and instead have this object owned +// by the ThreadFactory. That will require plumbing the API::API into all +// call-sites for isMainThread(), which might be a bit of work, but will make +// tests more hermetic. +struct ThreadIds { + // Determines whether we are currently running on the main-thread or + // test-thread. We need to allow for either one because we don't establish + // the full threading model in all unit tests. + bool inMainOrTestThread() const { + // We don't take the lock when testing the thread IDs, as they are atomic, + // and are cleared when being released. All possible thread orderings + // result in the correct result even without a lock. + std::thread::id id = std::this_thread::get_id(); + return main_thread_id_ == id || test_thread_id_ == id; + } + + bool isMainThreadActive() const { + absl::MutexLock lock(&mutex_); + return main_thread_use_count_ != 0; + } + + // Returns a singleton instance of this. The instance is never freed. + static ThreadIds& get() { MUTABLE_CONSTRUCT_ON_FIRST_USE(ThreadIds); } + + // Call this when the MainThread exits. Nested semantics are supported, so + // that if multiple MainThread instances are declared, we unwind them + // properly. + void releaseMainThread() { + absl::MutexLock lock(&mutex_); + ASSERT(main_thread_use_count_ > 0); + ASSERT(std::this_thread::get_id() == main_thread_id_); + if (--main_thread_use_count_ == 0) { + // Clearing the thread ID when its use-count goes to zero allows us + // to read the atomic without taking a lock. 
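Returning to the DnsUtils helpers introduced above, this is a sketch of the intended call pattern from a DNS-based cluster's resolution callback. The surrounding class and member names are assumptions, and the element types are inferred from the implementation above.

#include <list>
#include <vector>

#include "source/common/common/dns_utils.h"

class DnsClusterSketch {
public:
  void onResolved(const std::list<Envoy::Network::DnsResponse>& responses) {
    // Behind the "envoy.reloadable_features.allow_multiple_dns_addresses" runtime flag this
    // returns every resolved address with the configured port attached; otherwise it is empty.
    auto new_addresses = Envoy::DnsUtils::generateAddressList(responses, /*port=*/443);
    if (Envoy::DnsUtils::listChanged(current_addresses_, new_addresses)) {
      current_addresses_ = std::move(new_addresses);
      // ... rebuild hosts / notify the priority set here ...
    }
  }

private:
  std::vector<Envoy::Network::Address::InstanceConstSharedPtr> current_addresses_;
};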
+ main_thread_id_ = std::thread::id{}; + } + } + + // Call this when the TestThread exits. Nested semantics are supported, so + // that if multiple TestThread instances are declared, we unwind them + // properly. + void releaseTestThread() { + absl::MutexLock lock(&mutex_); + ASSERT(test_thread_use_count_ > 0); + ASSERT(std::this_thread::get_id() == test_thread_id_); + if (--test_thread_use_count_ == 0) { + // Clearing the thread ID when its use-count goes to zero allows us + // to read the atomic without taking a lock. + test_thread_id_ = std::thread::id{}; + } } - // When threading is on, compare thread id with main thread id. - return main_thread_singleton->inMainThread() || main_thread_singleton->inTestThread(); -} - -void MainThread::clear() { - delete MainThreadSingleton::getExisting(); - MainThreadSingleton::clear(); -} - -void MainThread::initTestThread() { - if (!initialized()) { - MainThreadSingleton::initialize(new MainThread()); + + // Declares current thread as the main one, or verifies that the current + // thread matches any previous declarations. + void registerMainThread() { + absl::MutexLock lock(&mutex_); + if (++main_thread_use_count_ > 1) { + ASSERT(std::this_thread::get_id() == main_thread_id_); + } else { + main_thread_id_ = std::this_thread::get_id(); + } } - MainThreadSingleton::get().registerTestThread(); -} -void MainThread::initMainThread() { - if (!initialized()) { - MainThreadSingleton::initialize(new MainThread()); + // Declares current thread as the test thread, or verifies that the current + // thread matches any previous declarations. + void registerTestThread() { + absl::MutexLock lock(&mutex_); + if (++test_thread_use_count_ > 1) { + ASSERT(std::this_thread::get_id() == test_thread_id_); + } else { + test_thread_id_ = std::this_thread::get_id(); + } } - MainThreadSingleton::get().registerMainThread(); -} + +private: + // The atomic thread IDs can be read without a mutex, but they are written + // under a mutex so that they are consistent with their use_counts. this + // avoids the possibility of two threads racing to claim being the main/test + // thread. 
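A short sketch of how the TestThread/MainThread guards built on this ThreadIds singleton are meant to be used; the call sites shown are illustrative.

#include "source/common/common/assert.h"
#include "source/common/common/thread.h"

// In a test binary's main(): mark the current thread so main-thread-only asserts pass.
int main(int argc, char** argv) {
  Envoy::Thread::TestThread test_thread;
  // ... run the tests ...
  return 0;
}

// In the server, declared where the dispatcher thread starts up.
void mainDispatchLoopSketch() {
  Envoy::Thread::MainThread main_thread;
  ASSERT(Envoy::Thread::MainThread::isMainOrTestThread());
  ASSERT(Envoy::Thread::MainThread::isMainThreadActive());
  // Nested MainThread instances on the same thread are allowed; the registration is
  // released (and the stored thread id cleared) when the outermost instance is destroyed.
}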
+ std::atomic main_thread_id_; + std::atomic test_thread_id_; + + int32_t main_thread_use_count_ GUARDED_BY(mutex_) = 0; + int32_t test_thread_use_count_ GUARDED_BY(mutex_) = 0; + mutable absl::Mutex mutex_; +}; + +} // namespace + +bool MainThread::isMainOrTestThread() { return ThreadIds::get().inMainOrTestThread(); } + +bool MainThread::isMainThreadActive() { return ThreadIds::get().isMainThreadActive(); } + +TestThread::TestThread() { ThreadIds::get().registerTestThread(); } + +TestThread::~TestThread() { ThreadIds::get().releaseTestThread(); } + +MainThread::MainThread() { ThreadIds::get().registerMainThread(); } + +MainThread::~MainThread() { ThreadIds::get().releaseMainThread(); } } // namespace Thread } // namespace Envoy diff --git a/source/common/common/thread.h b/source/common/common/thread.h index 1ade49a3d1b9..f1f415cf04d5 100644 --- a/source/common/common/thread.h +++ b/source/common/common/thread.h @@ -8,7 +8,6 @@ #include "envoy/thread/thread.h" #include "source/common/common/non_copyable.h" -#include "source/common/singleton/threadsafe_singleton.h" #include "absl/synchronization/mutex.h" @@ -169,35 +168,43 @@ class AtomicPtr : private AtomicPtrArray { T* get(const MakeObject& make_object) { return BaseClass::get(0, make_object); } }; -struct MainThread { - using MainThreadSingleton = InjectableSingleton; - bool inMainThread() const { return main_thread_id_ == std::this_thread::get_id(); } - bool inTestThread() const { - return test_thread_id_.has_value() && (test_thread_id_.value() == std::this_thread::get_id()); - } - void registerTestThread() { test_thread_id_ = std::this_thread::get_id(); } - void registerMainThread() { main_thread_id_ = std::this_thread::get_id(); } - static bool initialized() { return MainThreadSingleton::getExisting() != nullptr; } - /* - * Register the main thread id, should be called in main thread before threading is on. Currently - * called in ThreadLocal::InstanceImpl(). - */ - static void initMainThread(); - /* - * Register the test thread id, should be called in test thread before threading is on. Allow - * some main thread only code to be executed on test thread. - */ - static void initTestThread(); - /* - * Delete the main thread singleton, should be called in main thread after threading - * has been shut down. Currently called in ~ThreadLocal::InstanceImpl(). +// RAII object to declare the TestThread. This should be declared in main() or +// equivalent for any test binaries. +// +// Generally we expect TestThread to be instantiated only once on main() for +// each test binary, though nested instantiations are allowed as long as the +// thread ID does not change. +class TestThread { +public: + TestThread(); + ~TestThread(); +}; + +// RAII object to declare the MainThread. This should be declared in the thread +// function or equivalent. +// +// Generally we expect MainThread to be instantiated only once or twice. It has +// to be instantiated prior to OptionsImpl being created, so it needs to be in +// instantiated from main_common(). In addition, it is instantiated by +// ThreadLocal implementation to get the correct behavior for tests that do not +// instantiate main. +// +// In general, nested instantiations are allowed as long as the thread ID does +// not change. +class MainThread { +public: + MainThread(); + ~MainThread(); + + /** + * @return whether the current thread is the main thread or test thread. 
*/ - static void clear(); - static bool isMainThread(); + static bool isMainOrTestThread(); -private: - std::thread::id main_thread_id_; - absl::optional test_thread_id_; + /** + * @return whether a MainThread has been instantiated. + */ + static bool isMainThreadActive(); }; // To improve exception safety in data plane, we plan to forbid the use of raw try in the core code @@ -205,7 +212,7 @@ struct MainThread { // worker thread. #define TRY_ASSERT_MAIN_THREAD \ try { \ - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); #define END_TRY } diff --git a/source/common/config/BUILD b/source/common/config/BUILD index 2fd7a8c75748..b7a0a5f14d29 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -86,8 +86,16 @@ envoy_cc_library( envoy_cc_library( name = "delta_subscription_state_lib", - srcs = ["delta_subscription_state.cc"], - hdrs = ["delta_subscription_state.h"], + srcs = [ + "delta_subscription_state.cc", + "new_delta_subscription_state.cc", + "old_delta_subscription_state.cc", + ], + hdrs = [ + "delta_subscription_state.h", + "new_delta_subscription_state.h", + "old_delta_subscription_state.h", + ], deps = [ ":api_version_lib", ":pausable_ack_queue_lib", @@ -316,6 +324,7 @@ envoy_cc_library( "//envoy/config:subscription_interface", "//envoy/upstream:cluster_manager_interface", "//source/common/common:minimal_logger_lib", + "//source/common/config/xds_mux:grpc_mux_lib", "//source/common/http:utility_lib", "//source/common/protobuf", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", @@ -387,6 +396,7 @@ envoy_cc_library( "//source/common/stats:stats_matcher_lib", "//source/common/stats:tag_producer_lib", "@com_github_cncf_udpa//udpa/type/v1:pkg_cc_proto", + "@com_github_cncf_udpa//xds/type/v3:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", @@ -400,6 +410,7 @@ envoy_cc_library( hdrs = ["watch_map.h"], deps = [ ":decoded_resource_lib", + ":utility_lib", ":xds_resource_lib", "//envoy/config:subscription_interface", "//source/common/common:assert_lib", diff --git a/source/common/config/context_provider_impl.h b/source/common/config/context_provider_impl.h index 3123b2066cef..590b9c4eff2b 100644 --- a/source/common/config/context_provider_impl.h +++ b/source/common/config/context_provider_impl.h @@ -20,7 +20,7 @@ class ContextProviderImpl : public ContextProvider { const xds::core::v3::ContextParams& nodeContext() const override { return node_context_; } const xds::core::v3::ContextParams& dynamicContext(absl::string_view resource_type_url) const override { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); auto it = dynamic_context_.find(resource_type_url); if (it != dynamic_context_.end()) { return it->second; @@ -29,7 +29,7 @@ class ContextProviderImpl : public ContextProvider { }; void setDynamicContextParam(absl::string_view resource_type_url, absl::string_view key, absl::string_view value) override { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); (*dynamic_context_[resource_type_url] .mutable_params())[toStdStringView(key)] = // NOLINT(std::string_view) toStdStringView(value); // NOLINT(std::string_view) @@ -37,14 +37,14 @@ class ContextProviderImpl : public ContextProvider { } void unsetDynamicContextParam(absl::string_view resource_type_url, absl::string_view key) override { - 
ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); dynamic_context_[resource_type_url].mutable_params()->erase( toStdStringView(key)); // NOLINT(std::string_view) update_cb_helper_.runCallbacks(resource_type_url); } ABSL_MUST_USE_RESULT Common::CallbackHandlePtr addDynamicContextUpdateCallback(UpdateNotificationCb callback) const override { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); return update_cb_helper_.add(callback); }; diff --git a/source/common/config/datasource.cc b/source/common/config/datasource.cc index 19a4b82e6a9d..d39e517aa289 100644 --- a/source/common/config/datasource.cc +++ b/source/common/config/datasource.cc @@ -26,6 +26,15 @@ std::string read(const envoy::config::core::v3::DataSource& source, bool allow_e case envoy::config::core::v3::DataSource::SpecifierCase::kInlineString: data = source.inline_string(); break; + case envoy::config::core::v3::DataSource::SpecifierCase::kEnvironmentVariable: { + const char* environment_variable = std::getenv(source.environment_variable().c_str()); + if (environment_variable == nullptr) { + throw EnvoyException( + fmt::format("Environment variable doesn't exist: {}", source.environment_variable())); + } + data = environment_variable; + break; + } default: if (!allow_empty) { throw EnvoyException( diff --git a/source/common/config/delta_subscription_state.cc b/source/common/config/delta_subscription_state.cc index b1b162d3f327..39429f88b4a5 100644 --- a/source/common/config/delta_subscription_state.cc +++ b/source/common/config/delta_subscription_state.cc @@ -1,239 +1,103 @@ #include "source/common/config/delta_subscription_state.h" -#include "envoy/event/dispatcher.h" -#include "envoy/service/discovery/v3/discovery.pb.h" - -#include "source/common/common/assert.h" -#include "source/common/common/hash.h" -#include "source/common/config/utility.h" #include "source/common/runtime/runtime_features.h" namespace Envoy { namespace Config { +namespace { + +DeltaSubscriptionStateVariant getState(std::string type_url, + UntypedConfigUpdateCallbacks& watch_map, + const LocalInfo::LocalInfo& local_info, + Event::Dispatcher& dispatcher) { + if (Runtime::runtimeFeatureEnabled("envoy.restart_features.explicit_wildcard_resource")) { + return DeltaSubscriptionStateVariant(absl::in_place_type, + std::move(type_url), watch_map, local_info, dispatcher); + } else { + return DeltaSubscriptionStateVariant(absl::in_place_type, + std::move(type_url), watch_map, local_info, dispatcher); + } +} + +} // namespace DeltaSubscriptionState::DeltaSubscriptionState(std::string type_url, UntypedConfigUpdateCallbacks& watch_map, const LocalInfo::LocalInfo& local_info, - Event::Dispatcher& dispatcher, const bool wildcard) - // TODO(snowp): Hard coding VHDS here is temporary until we can move it away from relying on - // empty resources as updates. 
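The datasource.cc hunk above adds an environment_variable specifier. Below is a standalone sketch of the same resolution logic; the helper is illustrative, not the actual read() entry point (whose full signature is not shown here), and it throws std::runtime_error where the real code throws EnvoyException.

#include <cstdlib>
#include <stdexcept>
#include <string>

// Mirrors the new kEnvironmentVariable branch: resolve the variable or fail loudly,
// matching the "Environment variable doesn't exist" error in the real code.
std::string readEnvironmentVariableSketch(const std::string& name) {
  const char* value = std::getenv(name.c_str());
  if (value == nullptr) {
    throw std::runtime_error("Environment variable doesn't exist: " + name);
  }
  return std::string(value);
}

A DataSource that sets environment_variable therefore resolves at config load time from Envoy's own environment, alongside the existing specifiers such as inline_string shown above.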
- : supports_heartbeats_(type_url != "envoy.config.route.v3.VirtualHost"), - ttl_( - [this](const auto& expired) { - Protobuf::RepeatedPtrField removed_resources; - for (const auto& resource : expired) { - setResourceWaitingForServer(resource); - removed_resources.Add(std::string(resource)); - } - - watch_map_.onConfigUpdate({}, removed_resources, ""); - }, - dispatcher, dispatcher.timeSource()), - type_url_(std::move(type_url)), wildcard_(wildcard), watch_map_(watch_map), - local_info_(local_info), dispatcher_(dispatcher) {} + Event::Dispatcher& dispatcher) + : state_(getState(std::move(type_url), watch_map, local_info, dispatcher)) {} void DeltaSubscriptionState::updateSubscriptionInterest( const absl::flat_hash_set& cur_added, const absl::flat_hash_set& cur_removed) { - for (const auto& a : cur_added) { - setResourceWaitingForServer(a); - // If interest in a resource is removed-then-added (all before a discovery request - // can be sent), we must treat it as a "new" addition: our user may have forgotten its - // copy of the resource after instructing us to remove it, and need to be reminded of it. - names_removed_.erase(a); - names_added_.insert(a); - } - for (const auto& r : cur_removed) { - removeResourceState(r); - // Ideally, when interest in a resource is added-then-removed in between requests, - // we would avoid putting a superfluous "unsubscribe [resource that was never subscribed]" - // in the request. However, the removed-then-added case *does* need to go in the request, - // and due to how we accomplish that, it's difficult to distinguish remove-add-remove from - // add-remove (because "remove-add" has to be treated as equivalent to just "add"). - names_added_.erase(r); - names_removed_.insert(r); + if (auto* state = absl::get_if(&state_); state != nullptr) { + state->updateSubscriptionInterest(cur_added, cur_removed); + return; } + auto& state = absl::get(state_); + state.updateSubscriptionInterest(cur_added, cur_removed); } -// Not having sent any requests yet counts as an "update pending" since you're supposed to resend -// the entirety of your interest at the start of a stream, even if nothing has changed. -bool DeltaSubscriptionState::subscriptionUpdatePending() const { - return !names_added_.empty() || !names_removed_.empty() || - !any_request_sent_yet_in_current_stream_ || must_send_discovery_request_; +void DeltaSubscriptionState::setMustSendDiscoveryRequest() { + if (auto* state = absl::get_if(&state_); state != nullptr) { + state->setMustSendDiscoveryRequest(); + return; + } + auto& state = absl::get(state_); + state.setMustSendDiscoveryRequest(); } -UpdateAck DeltaSubscriptionState::handleResponse( - const envoy::service::discovery::v3::DeltaDiscoveryResponse& message) { - // We *always* copy the response's nonce into the next request, even if we're going to make that - // request a NACK by setting error_detail. 
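The forwarding methods above all follow the same variant-dispatch shape. Here is a self-contained sketch of that pattern with toy implementations standing in for the old and new subscription states; all names in the sketch are illustrative.

#include "absl/types/variant.h"

struct OldImplSketch { void markStreamFresh() { /* legacy wildcard behavior */ } };
struct NewImplSketch { void markStreamFresh() { /* explicit wildcard behavior */ } };
using StateVariantSketch = absl::variant<OldImplSketch, NewImplSketch>;

// Construction picks the implementation once, e.g. from a runtime feature flag,
// as getState() does above.
StateVariantSketch makeState(bool explicit_wildcard) {
  if (explicit_wildcard) {
    return StateVariantSketch(absl::in_place_type<NewImplSketch>);
  }
  return StateVariantSketch(absl::in_place_type<OldImplSketch>);
}

// Each public method then forwards: try the new implementation first, otherwise
// fall back to the old one, mirroring the methods above.
void markStreamFresh(StateVariantSketch& state) {
  if (auto* s = absl::get_if<NewImplSketch>(&state); s != nullptr) {
    s->markStreamFresh();
    return;
  }
  absl::get<OldImplSketch>(state).markStreamFresh();
}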
- UpdateAck ack(message.nonce(), type_url_); - TRY_ASSERT_MAIN_THREAD { handleGoodResponse(message); } - END_TRY - catch (const EnvoyException& e) { - handleBadResponse(e, ack); +bool DeltaSubscriptionState::subscriptionUpdatePending() const { + if (auto* state = absl::get_if(&state_); state != nullptr) { + return state->subscriptionUpdatePending(); } - return ack; + auto& state = absl::get(state_); + return state.subscriptionUpdatePending(); } -bool DeltaSubscriptionState::isHeartbeatResponse( - const envoy::service::discovery::v3::Resource& resource) const { - if (!supports_heartbeats_ && - !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.vhds_heartbeats")) { - return false; - } - const auto itr = resource_state_.find(resource.name()); - if (itr == resource_state_.end()) { - return false; +void DeltaSubscriptionState::markStreamFresh() { + if (auto* state = absl::get_if(&state_); state != nullptr) { + state->markStreamFresh(); + return; } - - return !resource.has_resource() && !itr->second.waitingForServer() && - resource.version() == itr->second.version(); + auto& state = absl::get(state_); + state.markStreamFresh(); } -void DeltaSubscriptionState::handleGoodResponse( +UpdateAck DeltaSubscriptionState::handleResponse( const envoy::service::discovery::v3::DeltaDiscoveryResponse& message) { - absl::flat_hash_set names_added_removed; - Protobuf::RepeatedPtrField non_heartbeat_resources; - for (const auto& resource : message.resources()) { - if (!names_added_removed.insert(resource.name()).second) { - throw EnvoyException( - fmt::format("duplicate name {} found among added/updated resources", resource.name())); - } - if (isHeartbeatResponse(resource)) { - continue; - } - non_heartbeat_resources.Add()->CopyFrom(resource); - // DeltaDiscoveryResponses for unresolved aliases don't contain an actual resource - if (!resource.has_resource() && resource.aliases_size() > 0) { - continue; - } - if (message.type_url() != resource.resource().type_url()) { - throw EnvoyException(fmt::format("type URL {} embedded in an individual Any does not match " - "the message-wide type URL {} in DeltaDiscoveryResponse {}", - resource.resource().type_url(), message.type_url(), - message.DebugString())); - } - } - for (const auto& name : message.removed_resources()) { - if (!names_added_removed.insert(name).second) { - throw EnvoyException( - fmt::format("duplicate name {} found in the union of added+removed resources", name)); - } - } - - { - const auto scoped_update = ttl_.scopedTtlUpdate(); - for (const auto& resource : message.resources()) { - addResourceState(resource); - } + if (auto* state = absl::get_if(&state_); state != nullptr) { + return state->handleResponse(message); } - - watch_map_.onConfigUpdate(non_heartbeat_resources, message.removed_resources(), - message.system_version_info()); - - // If a resource is gone, there is no longer a meaningful version for it that makes sense to - // provide to the server upon stream reconnect: either it will continue to not exist, in which - // case saying nothing is fine, or the server will bring back something new, which we should - // receive regardless (which is the logic that not specifying a version will get you). - // - // So, leave the version map entry present but blank. It will be left out of - // initial_resource_versions messages, but will remind us to explicitly tell the server "I'm - // cancelling my subscription" when we lose interest. 
- for (const auto& resource_name : message.removed_resources()) { - if (resource_names_.find(resource_name) != resource_names_.end()) { - setResourceWaitingForServer(resource_name); - } - } - ENVOY_LOG(debug, "Delta config for {} accepted with {} resources added, {} removed", type_url_, - message.resources().size(), message.removed_resources().size()); -} - -void DeltaSubscriptionState::handleBadResponse(const EnvoyException& e, UpdateAck& ack) { - // Note that error_detail being set is what indicates that a DeltaDiscoveryRequest is a NACK. - ack.error_detail_.set_code(Grpc::Status::WellKnownGrpcStatus::Internal); - ack.error_detail_.set_message(Config::Utility::truncateGrpcStatusMessage(e.what())); - ENVOY_LOG(warn, "delta config for {} rejected: {}", type_url_, e.what()); - watch_map_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e); + auto& state = absl::get(state_); + return state.handleResponse(message); } void DeltaSubscriptionState::handleEstablishmentFailure() { - watch_map_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, - nullptr); + if (auto* state = absl::get_if(&state_); state != nullptr) { + state->handleEstablishmentFailure(); + return; + } + auto& state = absl::get(state_); + state.handleEstablishmentFailure(); } envoy::service::discovery::v3::DeltaDiscoveryRequest DeltaSubscriptionState::getNextRequestAckless() { - envoy::service::discovery::v3::DeltaDiscoveryRequest request; - must_send_discovery_request_ = false; - if (!any_request_sent_yet_in_current_stream_) { - any_request_sent_yet_in_current_stream_ = true; - // initial_resource_versions "must be populated for first request in a stream". - // Also, since this might be a new server, we must explicitly state *all* of our subscription - // interest. - for (auto const& [resource_name, resource_state] : resource_state_) { - // Populate initial_resource_versions with the resource versions we currently have. - // Resources we are interested in, but are still waiting to get any version of from the - // server, do not belong in initial_resource_versions. (But do belong in new subscriptions!) - if (!resource_state.waitingForServer()) { - (*request.mutable_initial_resource_versions())[resource_name] = resource_state.version(); - } - // As mentioned above, fill resource_names_subscribe with everything, including names we - // have yet to receive any resource for unless this is a wildcard subscription, for which - // the first request on a stream must be without any resource names. - if (!wildcard_) { - names_added_.insert(resource_name); - } - } - // Wildcard subscription initial requests must have no resource_names_subscribe. 
- if (wildcard_) { - names_added_.clear(); - } - names_removed_.clear(); + if (auto* state = absl::get_if(&state_); state != nullptr) { + return state->getNextRequestAckless(); } - std::copy(names_added_.begin(), names_added_.end(), - Protobuf::RepeatedFieldBackInserter(request.mutable_resource_names_subscribe())); - std::copy(names_removed_.begin(), names_removed_.end(), - Protobuf::RepeatedFieldBackInserter(request.mutable_resource_names_unsubscribe())); - names_added_.clear(); - names_removed_.clear(); - - request.set_type_url(type_url_); - request.mutable_node()->MergeFrom(local_info_.node()); - return request; + auto& state = absl::get(state_); + return state.getNextRequestAckless(); } envoy::service::discovery::v3::DeltaDiscoveryRequest DeltaSubscriptionState::getNextRequestWithAck(const UpdateAck& ack) { - envoy::service::discovery::v3::DeltaDiscoveryRequest request = getNextRequestAckless(); - request.set_response_nonce(ack.nonce_); - if (ack.error_detail_.code() != Grpc::Status::WellKnownGrpcStatus::Ok) { - // Don't needlessly make the field present-but-empty if status is ok. - request.mutable_error_detail()->CopyFrom(ack.error_detail_); + if (auto* state = absl::get_if(&state_); state != nullptr) { + return state->getNextRequestWithAck(ack); } - return request; -} - -void DeltaSubscriptionState::addResourceState( - const envoy::service::discovery::v3::Resource& resource) { - if (resource.has_ttl()) { - ttl_.add(std::chrono::milliseconds(DurationUtil::durationToMilliseconds(resource.ttl())), - resource.name()); - } else { - ttl_.clear(resource.name()); - } - - resource_state_[resource.name()] = ResourceState(resource); - resource_names_.insert(resource.name()); -} - -void DeltaSubscriptionState::setResourceWaitingForServer(const std::string& resource_name) { - resource_state_[resource_name] = ResourceState(); - resource_names_.insert(resource_name); -} - -void DeltaSubscriptionState::removeResourceState(const std::string& resource_name) { - resource_state_.erase(resource_name); - resource_names_.erase(resource_name); + auto& state = absl::get(state_); + return state.getNextRequestWithAck(ack); } } // namespace Config diff --git a/source/common/config/delta_subscription_state.h b/source/common/config/delta_subscription_state.h index 9765a6736dc3..6b613ade0b4f 100644 --- a/source/common/config/delta_subscription_state.h +++ b/source/common/config/delta_subscription_state.h @@ -1,123 +1,42 @@ #pragma once #include "envoy/config/subscription.h" -#include "envoy/event/dispatcher.h" -#include "envoy/grpc/status.h" #include "envoy/local_info/local_info.h" #include "envoy/service/discovery/v3/discovery.pb.h" -#include "source/common/common/assert.h" #include "source/common/common/logger.h" -#include "source/common/config/api_version.h" -#include "source/common/config/pausable_ack_queue.h" -#include "source/common/config/ttl.h" -#include "source/common/config/watch_map.h" +#include "source/common/config/new_delta_subscription_state.h" +#include "source/common/config/old_delta_subscription_state.h" -#include "absl/container/node_hash_map.h" +#include "absl/container/flat_hash_set.h" +#include "absl/types/variant.h" namespace Envoy { namespace Config { -// Tracks the xDS protocol state of an individual ongoing delta xDS session, i.e. a single type_url. -// There can be multiple DeltaSubscriptionStates active. They will always all be -// blissfully unaware of each other's existence, even when their messages are -// being multiplexed together by ADS. 
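To make the request-building rules described above concrete, here is a hedged sketch of what the first DeltaDiscoveryRequest on a fresh stream ends up carrying; the resource names and versions are illustrative.

#include "envoy/service/discovery/v3/discovery.pb.h"

envoy::service::discovery::v3::DeltaDiscoveryRequest buildInitialRequestSketch() {
  envoy::service::discovery::v3::DeltaDiscoveryRequest request;
  request.set_type_url("type.googleapis.com/envoy.config.cluster.v3.Cluster");
  // Resources we already hold a server-provided version for are listed in
  // initial_resource_versions; resources still waiting for the server are not.
  (*request.mutable_initial_resource_versions())["cluster_a"] = "version-3";
  // For a non-wildcard subscription, *all* current interest is restated, including
  // names that have never received a resource yet.
  request.add_resource_names_subscribe("cluster_a");
  request.add_resource_names_subscribe("cluster_b"); // subscribed, no version received yet
  // A wildcard subscription's first request instead leaves resource_names_subscribe empty.
  return request;
}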
+using DeltaSubscriptionStateVariant = + absl::variant; + class DeltaSubscriptionState : public Logger::Loggable { public: DeltaSubscriptionState(std::string type_url, UntypedConfigUpdateCallbacks& watch_map, - const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, - const bool wildcard); + const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher); - // Update which resources we're interested in subscribing to. void updateSubscriptionInterest(const absl::flat_hash_set& cur_added, const absl::flat_hash_set& cur_removed); - void addAliasesToResolve(const absl::flat_hash_set& aliases); - void setMustSendDiscoveryRequest() { must_send_discovery_request_ = true; } - - // Whether there was a change in our subscription interest we have yet to inform the server of. + void setMustSendDiscoveryRequest(); bool subscriptionUpdatePending() const; - - void markStreamFresh() { any_request_sent_yet_in_current_stream_ = false; } - + void markStreamFresh(); UpdateAck handleResponse(const envoy::service::discovery::v3::DeltaDiscoveryResponse& message); - void handleEstablishmentFailure(); - - // Returns the next gRPC request proto to be sent off to the server, based on this object's - // understanding of the current protocol state, and new resources that Envoy wants to request. envoy::service::discovery::v3::DeltaDiscoveryRequest getNextRequestAckless(); - - // The WithAck version first calls the Ack-less version, then adds in the passed-in ack. envoy::service::discovery::v3::DeltaDiscoveryRequest getNextRequestWithAck(const UpdateAck& ack); DeltaSubscriptionState(const DeltaSubscriptionState&) = delete; DeltaSubscriptionState& operator=(const DeltaSubscriptionState&) = delete; private: - bool isHeartbeatResponse(const envoy::service::discovery::v3::Resource& resource) const; - void handleGoodResponse(const envoy::service::discovery::v3::DeltaDiscoveryResponse& message); - void handleBadResponse(const EnvoyException& e, UpdateAck& ack); - - class ResourceState { - public: - ResourceState(const envoy::service::discovery::v3::Resource& resource) - : version_(resource.version()) {} - - // Builds a ResourceState in the waitingForServer state. - ResourceState() = default; - - // If true, we currently have no version of this resource - we are waiting for the server to - // provide us with one. - bool waitingForServer() const { return version_ == absl::nullopt; } - - // Must not be called if waitingForServer() == true. - std::string version() const { - ASSERT(version_.has_value()); - return version_.value_or(""); - } - - private: - absl::optional version_; - }; - - // Use these helpers to ensure resource_state_ and resource_names_ get updated together. - void addResourceState(const envoy::service::discovery::v3::Resource& resource); - void setResourceWaitingForServer(const std::string& resource_name); - void removeResourceState(const std::string& resource_name); - - void populateDiscoveryRequest(envoy::service::discovery::v3::DeltaDiscoveryResponse& request); - - // A map from resource name to per-resource version. The keys of this map are exactly the resource - // names we are currently interested in. Those in the waitingForServer state currently don't have - // any version for that resource: we need to inform the server if we lose interest in them, but we - // also need to *not* include them in the initial_resource_versions map upon a reconnect. 
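A small sketch of the per-resource bookkeeping this map held, illustrating the "leave the entry present but blank" rule from the comments above. The types are inferred from the removed ResourceState class; the names are illustrative.

#include <string>

#include "absl/container/node_hash_map.h"
#include "absl/types/optional.h"

void demoResourceStateSketch() {
  // absl::nullopt plays the role of ResourceState::waitingForServer(): we are interested
  // in the resource but have no version to report back to the server yet.
  absl::node_hash_map<std::string, absl::optional<std::string>> resource_versions;
  resource_versions["route_a"] = "v7";          // version received from the server
  resource_versions["route_b"] = absl::nullopt; // subscribed, nothing received yet
  // When the server removes route_a, the entry is kept but blanked, so a later reconnect
  // omits it from initial_resource_versions while still reminding us to send an explicit
  // unsubscribe if we eventually lose interest.
  resource_versions["route_a"] = absl::nullopt;
}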
- absl::node_hash_map resource_state_; - - // Not all xDS resources supports heartbeats due to there being specific information encoded in - // an empty response, which is indistinguishable from a heartbeat in some cases. For now we just - // disable heartbeats for these resources (currently only VHDS). - const bool supports_heartbeats_; - TtlManager ttl_; - // The keys of resource_versions_. Only tracked separately because std::map does not provide an - // iterator into just its keys. - absl::flat_hash_set resource_names_; - - const std::string type_url_; - // Is the subscription is for a wildcard request. - const bool wildcard_; - UntypedConfigUpdateCallbacks& watch_map_; - const LocalInfo::LocalInfo& local_info_; - Event::Dispatcher& dispatcher_; - std::chrono::milliseconds init_fetch_timeout_; - - bool any_request_sent_yet_in_current_stream_{}; - bool must_send_discovery_request_{}; - - // Tracks changes in our subscription interest since the previous DeltaDiscoveryRequest we sent. - // TODO: Can't use absl::flat_hash_set due to ordering issues in gTest expectation matching. - // Feel free to change to an unordered container once we figure out how to make it work. - std::set names_added_; - std::set names_removed_; + DeltaSubscriptionStateVariant state_; }; } // namespace Config diff --git a/source/common/config/grpc_stream.h b/source/common/config/grpc_stream.h index 23ee9a0d287b..d7469f786045 100644 --- a/source/common/config/grpc_stream.h +++ b/source/common/config/grpc_stream.h @@ -169,24 +169,31 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, return; } - const uint64_t ms_since_first_close = std::chrono::duration_cast( - time_source_.monotonicTime() - last_close_time_) - .count(); + const auto duration_since_first_close = time_source_.monotonicTime() - last_close_time_; + const uint64_t seconds_since_first_close = + std::chrono::duration_cast(duration_since_first_close).count(); const Grpc::Status::GrpcStatus close_status = last_close_status_.value(); if (status != close_status) { // This is a different failure. Warn on both statuses and remember the new one. - ENVOY_LOG(warn, "{} gRPC config stream closed: {}, {} (previously {}, {} since {}ms ago)", + ENVOY_LOG(warn, "{} gRPC config stream closed: {}, {} (previously {}, {} since {}s ago)", service_method_.name(), status, message, close_status, last_close_message_, - ms_since_first_close); + seconds_since_first_close); setCloseStatus(status, message); return; } + // #18508: The error message may have changed. + // To reduce noise, do not update the last close time, or use the message to distinguish the + // error in the previous condition. + last_close_message_ = message; + + const uint64_t ms_since_first_close = + std::chrono::duration_cast(duration_since_first_close).count(); if (ms_since_first_close > RetryMaxDelayMs) { // Warn if we are over the time limit. 
- ENVOY_LOG(warn, "{} gRPC config stream closed since {}ms ago: {}, {}", service_method_.name(), - ms_since_first_close, close_status, last_close_message_); + ENVOY_LOG(warn, "{} gRPC config stream closed since {}s ago: {}, {}", service_method_.name(), + seconds_since_first_close, close_status, message); return; } diff --git a/source/common/config/new_delta_subscription_state.cc b/source/common/config/new_delta_subscription_state.cc new file mode 100644 index 000000000000..94f25ac952eb --- /dev/null +++ b/source/common/config/new_delta_subscription_state.cc @@ -0,0 +1,408 @@ +#include "source/common/config/new_delta_subscription_state.h" + +#include "envoy/event/dispatcher.h" +#include "envoy/service/discovery/v3/discovery.pb.h" + +#include "source/common/common/assert.h" +#include "source/common/common/hash.h" +#include "source/common/config/utility.h" +#include "source/common/runtime/runtime_features.h" + +namespace Envoy { +namespace Config { + +NewDeltaSubscriptionState::NewDeltaSubscriptionState(std::string type_url, + UntypedConfigUpdateCallbacks& watch_map, + const LocalInfo::LocalInfo& local_info, + Event::Dispatcher& dispatcher) + // TODO(snowp): Hard coding VHDS here is temporary until we can move it away from relying on + // empty resources as updates. + : supports_heartbeats_(type_url != "envoy.config.route.v3.VirtualHost"), + ttl_( + [this](const auto& expired) { + Protobuf::RepeatedPtrField removed_resources; + for (const auto& resource : expired) { + if (auto maybe_resource = getRequestedResourceState(resource); + maybe_resource.has_value()) { + maybe_resource->setAsWaitingForServer(); + removed_resources.Add(std::string(resource)); + } else if (const auto erased_count = wildcard_resource_state_.erase(resource) + + ambiguous_resource_state_.erase(resource); + erased_count > 0) { + removed_resources.Add(std::string(resource)); + } + } + + watch_map_.onConfigUpdate({}, removed_resources, ""); + }, + dispatcher, dispatcher.timeSource()), + type_url_(std::move(type_url)), watch_map_(watch_map), local_info_(local_info) {} + +void NewDeltaSubscriptionState::updateSubscriptionInterest( + const absl::flat_hash_set& cur_added, + const absl::flat_hash_set& cur_removed) { + for (const auto& a : cur_added) { + if (in_initial_legacy_wildcard_ && a != Wildcard) { + in_initial_legacy_wildcard_ = false; + } + // If the requested resource existed as a wildcard resource, + // transition it to requested. Otherwise mark it as a resource + // waiting for the server to receive the version. + if (auto it = wildcard_resource_state_.find(a); it != wildcard_resource_state_.end()) { + requested_resource_state_.insert_or_assign(a, ResourceState::withVersion(it->second)); + wildcard_resource_state_.erase(it); + } else if (it = ambiguous_resource_state_.find(a); it != ambiguous_resource_state_.end()) { + requested_resource_state_.insert_or_assign(a, ResourceState::withVersion(it->second)); + ambiguous_resource_state_.erase(it); + } else { + requested_resource_state_.insert_or_assign(a, ResourceState::waitingForServer()); + } + ASSERT(requested_resource_state_.contains(a)); + ASSERT(!wildcard_resource_state_.contains(a)); + ASSERT(!ambiguous_resource_state_.contains(a)); + // If interest in a resource is removed-then-added (all before a discovery request + // can be sent), we must treat it as a "new" addition: our user may have forgotten its + // copy of the resource after instructing us to remove it, and need to be reminded of it. 
+ names_removed_.erase(a); + names_added_.insert(a); + } + for (const auto& r : cur_removed) { + auto actually_erased = false; + // The resource we have lost the interest in could also come from our wildcard subscription. We + // just don't know it at this point. Instead of removing it outright, mark the resource as not + // interesting to us any more and the server will send us an update. If we don't have a wildcard + // subscription then there is no ambiguity and just drop the resource. + if (requested_resource_state_.contains(Wildcard)) { + if (auto it = requested_resource_state_.find(r); it != requested_resource_state_.end()) { + // Wildcard resources always have a version. If our requested resource has no version, it + // won't be a wildcard resource then. If r is Wildcard itself, then it never has a version + // attached to it, so it will not be moved to ambiguous category. + if (!it->second.isWaitingForServer()) { + ambiguous_resource_state_.insert({it->first, it->second.version()}); + } + requested_resource_state_.erase(it); + actually_erased = true; + } + } else { + actually_erased = (requested_resource_state_.erase(r) > 0); + } + ASSERT(!requested_resource_state_.contains(r)); + // Ideally, when interest in a resource is added-then-removed in between requests, + // we would avoid putting a superfluous "unsubscribe [resource that was never subscribed]" + // in the request. However, the removed-then-added case *does* need to go in the request, + // and due to how we accomplish that, it's difficult to distinguish remove-add-remove from + // add-remove (because "remove-add" has to be treated as equivalent to just "add"). + names_added_.erase(r); + if (actually_erased) { + names_removed_.insert(r); + in_initial_legacy_wildcard_ = false; + } + } + // If we unsubscribe from wildcard resource, drop all the resources that came from wildcard from + // cache. Also drop the ambiguous resources - we aren't interested in those, but we didn't know if + // those came from wildcard subscription or not, but now it's not important any more. + if (cur_removed.contains(Wildcard)) { + wildcard_resource_state_.clear(); + ambiguous_resource_state_.clear(); + } +} + +// Not having sent any requests yet counts as an "update pending" since you're supposed to resend +// the entirety of your interest at the start of a stream, even if nothing has changed. +bool NewDeltaSubscriptionState::subscriptionUpdatePending() const { + if (!names_added_.empty() || !names_removed_.empty()) { + return true; + } + // At this point, we have no new resources to subscribe to or any + // resources to unsubscribe from. + if (!any_request_sent_yet_in_current_stream_) { + // If we haven't sent anything on the current stream, but we are actually interested in some + // resource then we obviously need to let the server know about those. + if (!requested_resource_state_.empty()) { + return true; + } + // So there are no new names and we are interested in nothing. This may either mean that we want + // the legacy wildcard subscription to kick in or we actually unsubscribed from everything. If + // the latter is true, then we should not be sending any requests. In such case the initial + // wildcard mode will be false. Otherwise it means that the legacy wildcard request should be + // sent. 
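+    // For illustration only (the type URL and values below are hypothetical examples, not taken
+    // from this code): in legacy wildcard mode the first DeltaDiscoveryRequest on a fresh stream
+    // carries no resource names at all,
+    //
+    //   type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster"
+    //   # resource_names_subscribe left empty - the server interprets this as a wildcard
+    //
+    // whereas an explicit wildcard subscription names the special "*" resource:
+    //
+    //   type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster"
+    //   resource_names_subscribe: "*"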
+    return in_initial_legacy_wildcard_;
+  }
+
+  // At this point, we have no changes in subscription resources and this isn't a first request in
+  // the stream, so even if there are no resources we are interested in, we can send the request,
+  // because even if it's empty, it won't be interpreted as a legacy wildcard subscription, which
+  // can only happen for the first request in the stream. So sending an empty request at this
+  // point should be harmless.
+  return must_send_discovery_request_;
+}
+
+UpdateAck NewDeltaSubscriptionState::handleResponse(
+    const envoy::service::discovery::v3::DeltaDiscoveryResponse& message) {
+  // We *always* copy the response's nonce into the next request, even if we're going to make that
+  // request a NACK by setting error_detail.
+  UpdateAck ack(message.nonce(), type_url_);
+  TRY_ASSERT_MAIN_THREAD { handleGoodResponse(message); }
+  END_TRY
+  catch (const EnvoyException& e) {
+    handleBadResponse(e, ack);
+  }
+  return ack;
+}
+
+bool NewDeltaSubscriptionState::isHeartbeatResponse(
+    const envoy::service::discovery::v3::Resource& resource) const {
+  if (!supports_heartbeats_ &&
+      !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.vhds_heartbeats")) {
+    return false;
+  }
+  if (resource.has_resource()) {
+    return false;
+  }
+
+  if (const auto maybe_resource = getRequestedResourceState(resource.name());
+      maybe_resource.has_value()) {
+    return !maybe_resource->isWaitingForServer() && resource.version() == maybe_resource->version();
+  }
+
+  if (const auto itr = wildcard_resource_state_.find(resource.name());
+      itr != wildcard_resource_state_.end()) {
+    return resource.version() == itr->second;
+  }
+
+  if (const auto itr = ambiguous_resource_state_.find(resource.name());
+      itr != ambiguous_resource_state_.end()) {
+    // In theory we should move the ambiguous resource to wildcard, because probably we shouldn't
+    // be getting heartbeat responses about resources that we are not interested in, but the
+    // server could have sent this heartbeat before it learned about our lack of interest in the
+    // resource.
+ return resource.version() == itr->second; + } + + return false; +} + +void NewDeltaSubscriptionState::handleGoodResponse( + const envoy::service::discovery::v3::DeltaDiscoveryResponse& message) { + absl::flat_hash_set names_added_removed; + Protobuf::RepeatedPtrField non_heartbeat_resources; + for (const auto& resource : message.resources()) { + if (!names_added_removed.insert(resource.name()).second) { + throw EnvoyException( + fmt::format("duplicate name {} found among added/updated resources", resource.name())); + } + if (isHeartbeatResponse(resource)) { + continue; + } + non_heartbeat_resources.Add()->CopyFrom(resource); + // DeltaDiscoveryResponses for unresolved aliases don't contain an actual resource + if (!resource.has_resource() && resource.aliases_size() > 0) { + continue; + } + if (message.type_url() != resource.resource().type_url()) { + throw EnvoyException(fmt::format("type URL {} embedded in an individual Any does not match " + "the message-wide type URL {} in DeltaDiscoveryResponse {}", + resource.resource().type_url(), message.type_url(), + message.DebugString())); + } + } + for (const auto& name : message.removed_resources()) { + if (!names_added_removed.insert(name).second) { + throw EnvoyException( + fmt::format("duplicate name {} found in the union of added+removed resources", name)); + } + } + + { + const auto scoped_update = ttl_.scopedTtlUpdate(); + if (requested_resource_state_.contains(Wildcard)) { + for (const auto& resource : message.resources()) { + addResourceStateFromServer(resource); + } + } else { + // We are not subscribed to wildcard, so we only take resources that we explicitly requested + // and ignore the others. + for (const auto& resource : message.resources()) { + if (requested_resource_state_.contains(resource.name())) { + addResourceStateFromServer(resource); + } + } + } + } + + watch_map_.onConfigUpdate(non_heartbeat_resources, message.removed_resources(), + message.system_version_info()); + + // If a resource is gone, there is no longer a meaningful version for it that makes sense to + // provide to the server upon stream reconnect: either it will continue to not exist, in which + // case saying nothing is fine, or the server will bring back something new, which we should + // receive regardless (which is the logic that not specifying a version will get you). + // + // So, leave the version map entry present but blank if we are still interested in the resource. + // It will be left out of initial_resource_versions messages, but will remind us to explicitly + // tell the server "I'm cancelling my subscription" when we lose interest. In case of resources + // received as a part of the wildcard subscription or resources we already lost interest in, we + // just drop them. + for (const auto& resource_name : message.removed_resources()) { + if (auto maybe_resource = getRequestedResourceState(resource_name); + maybe_resource.has_value()) { + maybe_resource->setAsWaitingForServer(); + } else if (const auto erased_count = ambiguous_resource_state_.erase(resource_name); + erased_count == 0) { + wildcard_resource_state_.erase(resource_name); + } + } + ENVOY_LOG(debug, "Delta config for {} accepted with {} resources added, {} removed", type_url_, + message.resources().size(), message.removed_resources().size()); +} + +void NewDeltaSubscriptionState::handleBadResponse(const EnvoyException& e, UpdateAck& ack) { + // Note that error_detail being set is what indicates that a DeltaDiscoveryRequest is a NACK. 
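+  // For illustration only (the nonce and message are hypothetical), the NACK built here ends up
+  // on the wire looking roughly like:
+  //
+  //   response_nonce: "42"
+  //   error_detail { code: 13 message: "duplicate name foo found among added/updated resources" }
+  //
+  // while an ACK is the same request with error_detail left unset.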
+ ack.error_detail_.set_code(Grpc::Status::WellKnownGrpcStatus::Internal); + ack.error_detail_.set_message(Config::Utility::truncateGrpcStatusMessage(e.what())); + ENVOY_LOG(warn, "delta config for {} rejected: {}", type_url_, e.what()); + watch_map_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e); +} + +void NewDeltaSubscriptionState::handleEstablishmentFailure() { + watch_map_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, + nullptr); +} + +envoy::service::discovery::v3::DeltaDiscoveryRequest +NewDeltaSubscriptionState::getNextRequestAckless() { + envoy::service::discovery::v3::DeltaDiscoveryRequest request; + must_send_discovery_request_ = false; + if (!any_request_sent_yet_in_current_stream_) { + any_request_sent_yet_in_current_stream_ = true; + const bool is_legacy_wildcard = isInitialRequestForLegacyWildcard(); + // initial_resource_versions "must be populated for first request in a stream". + // Also, since this might be a new server, we must explicitly state *all* of our subscription + // interest. + for (auto const& [resource_name, resource_state] : requested_resource_state_) { + // Populate initial_resource_versions with the resource versions we currently have. + // Resources we are interested in, but are still waiting to get any version of from the + // server, do not belong in initial_resource_versions. (But do belong in new subscriptions!) + if (!resource_state.isWaitingForServer()) { + (*request.mutable_initial_resource_versions())[resource_name] = resource_state.version(); + } + // We are going over a list of resources that we are interested in, so add them to + // resource_names_subscribe. + names_added_.insert(resource_name); + } + for (auto const& [resource_name, resource_version] : wildcard_resource_state_) { + (*request.mutable_initial_resource_versions())[resource_name] = resource_version; + } + for (auto const& [resource_name, resource_version] : ambiguous_resource_state_) { + (*request.mutable_initial_resource_versions())[resource_name] = resource_version; + } + // If this is a legacy wildcard request, then make sure that the resource_names_subscribe is + // empty. + if (is_legacy_wildcard) { + names_added_.clear(); + } + names_removed_.clear(); + } + std::copy(names_added_.begin(), names_added_.end(), + Protobuf::RepeatedFieldBackInserter(request.mutable_resource_names_subscribe())); + std::copy(names_removed_.begin(), names_removed_.end(), + Protobuf::RepeatedFieldBackInserter(request.mutable_resource_names_unsubscribe())); + names_added_.clear(); + names_removed_.clear(); + + request.set_type_url(type_url_); + request.mutable_node()->MergeFrom(local_info_.node()); + return request; +} + +bool NewDeltaSubscriptionState::isInitialRequestForLegacyWildcard() { + if (in_initial_legacy_wildcard_) { + requested_resource_state_.insert_or_assign(Wildcard, ResourceState::waitingForServer()); + ASSERT(requested_resource_state_.contains(Wildcard)); + ASSERT(!wildcard_resource_state_.contains(Wildcard)); + ASSERT(!ambiguous_resource_state_.contains(Wildcard)); + return true; + } + + // If we are here, this means that we lost our initial wildcard mode, because we subscribed to + // something in the past. We could still be in the situation now that all we are subscribed to now + // is wildcard resource, so in such case try to send a legacy wildcard subscription request + // anyway. For this to happen, two conditions need to apply: + // + // 1. No change in interest. + // 2. 
The only requested resource is Wildcard resource. + // + // The invariant of the code here is that this code is executed only when + // subscriptionUpdatePending actually returns true, which in our case can only happen if the + // requested resources state_ isn't empty. + ASSERT(!requested_resource_state_.empty()); + + // If our subscription interest didn't change then the first condition for using legacy wildcard + // subscription is met. + if (!names_added_.empty() || !names_removed_.empty()) { + return false; + } + // If we requested only a wildcard resource then the second condition for using legacy wildcard + // condition is met. + return requested_resource_state_.size() == 1 && + requested_resource_state_.begin()->first == Wildcard; +} + +envoy::service::discovery::v3::DeltaDiscoveryRequest +NewDeltaSubscriptionState::getNextRequestWithAck(const UpdateAck& ack) { + envoy::service::discovery::v3::DeltaDiscoveryRequest request = getNextRequestAckless(); + request.set_response_nonce(ack.nonce_); + if (ack.error_detail_.code() != Grpc::Status::WellKnownGrpcStatus::Ok) { + // Don't needlessly make the field present-but-empty if status is ok. + request.mutable_error_detail()->CopyFrom(ack.error_detail_); + } + return request; +} + +void NewDeltaSubscriptionState::addResourceStateFromServer( + const envoy::service::discovery::v3::Resource& resource) { + if (resource.has_ttl()) { + ttl_.add(std::chrono::milliseconds(DurationUtil::durationToMilliseconds(resource.ttl())), + resource.name()); + } else { + ttl_.clear(resource.name()); + } + + if (auto maybe_resource = getRequestedResourceState(resource.name()); + maybe_resource.has_value()) { + // It is a resource that we requested. + maybe_resource->setVersion(resource.version()); + ASSERT(requested_resource_state_.contains(resource.name())); + ASSERT(!wildcard_resource_state_.contains(resource.name())); + ASSERT(!ambiguous_resource_state_.contains(resource.name())); + } else { + // It is a resource that is a part of our wildcard request. + wildcard_resource_state_.insert({resource.name(), resource.version()}); + // The resource could be ambiguous before, but now the ambiguity + // is resolved. 
+ ambiguous_resource_state_.erase(resource.name()); + ASSERT(!requested_resource_state_.contains(resource.name())); + ASSERT(wildcard_resource_state_.contains(resource.name())); + ASSERT(!ambiguous_resource_state_.contains(resource.name())); + } +} + +OptRef +NewDeltaSubscriptionState::getRequestedResourceState(absl::string_view resource_name) { + auto itr = requested_resource_state_.find(resource_name); + if (itr == requested_resource_state_.end()) { + return {}; + } + return {itr->second}; +} + +OptRef +NewDeltaSubscriptionState::getRequestedResourceState(absl::string_view resource_name) const { + auto itr = requested_resource_state_.find(resource_name); + if (itr == requested_resource_state_.end()) { + return {}; + } + return {itr->second}; +} + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/new_delta_subscription_state.h b/source/common/config/new_delta_subscription_state.h new file mode 100644 index 000000000000..9ef841cffb22 --- /dev/null +++ b/source/common/config/new_delta_subscription_state.h @@ -0,0 +1,179 @@ +#pragma once + +#include "envoy/config/subscription.h" +#include "envoy/event/dispatcher.h" +#include "envoy/grpc/status.h" +#include "envoy/local_info/local_info.h" +#include "envoy/service/discovery/v3/discovery.pb.h" + +#include "source/common/common/assert.h" +#include "source/common/common/logger.h" +#include "source/common/config/api_version.h" +#include "source/common/config/pausable_ack_queue.h" +#include "source/common/config/ttl.h" +#include "source/common/config/watch_map.h" + +#include "absl/container/node_hash_map.h" + +namespace Envoy { +namespace Config { + +// Tracks the xDS protocol state of an individual ongoing delta xDS session, i.e. a single type_url. +// There can be multiple NewDeltaSubscriptionStates active. They will always all be blissfully +// unaware of each other's existence, even when their messages are being multiplexed together by +// ADS. +// +// There are two scenarios which affect how NewDeltaSubscriptionState manages the resources. First +// scenario is when we are subscribed to a wildcard resource, and other scenario is when we are not. +// +// Delta subscription state also divides the resources it cached into three categories: requested, +// wildcard and ambiguous. +// +// The "requested" category is for resources that we have explicitly asked for (either through the +// initial set of resources or through the on-demand mechanism). Resources in this category are in +// one of two states: "complete" and "waiting for server". +// +// "Complete" resources are resources about which the server sent us the information we need (for +// now - just resource version). +// +// The "waiting for server" state is either for resources that we have just requested, but we still +// didn't receive any version information from the server, or for the "complete" resources that, +// according to the server, are gone, but we are still interested in them - in such case we strip +// the information from the resource. +// +// The "wildcard" category is for resources that we are not explicitly interested in, but we are +// indirectly interested through the subscription to the wildcard resource. +// +// The "ambiguous" category is for resources that we stopped being interested in, but we may still +// be interested indirectly through the wildcard subscription. 
This situation happens because of the
+// xDS protocol limitation - the server isn't able to tell us that the resource we subscribed to is
+// also a part of our wildcard subscription. So when we unsubscribe from the resource, we need to
+// receive a confirmation from the server whether to keep the resource (which means that it was a
+// part of our wildcard subscription) or to drop it.
+//
+// Please refer to the drawings (non-wildcard-resource-state-machine.png and
+// wildcard-resource-state-machine.png) for visual depictions of the resource state machine.
+//
+// In the "no wildcard subscription" scenario all the cached resources should be in the "requested"
+// category. Resources are added to the category upon an explicit request and dropped when we
+// explicitly unsubscribe from them. Transitions between "complete" and "waiting for server" happen
+// when we receive messages from the server - if a resource in the message is in the "added
+// resources" list (and thus contains version information), the resource becomes "complete". If the
+// resource in the message is in the "removed resources" list, it changes into the "waiting for
+// server" state. If a server sends us a resource that we didn't request, it's going to be ignored.
+//
+// In the "wildcard subscription" scenario, the "requested" category is the same as in the "no
+// wildcard subscription" scenario, with one exception - an unsubscribed "complete" resource is not
+// removed from the cache, but is moved to the "ambiguous" resources instead. At this point we are
+// waiting for the server to tell us whether this resource should be moved to the "wildcard"
+// resources or dropped. Resources in the "wildcard" category are only added there or dropped from
+// there by the server. Resources from both the "wildcard" and "ambiguous" categories can become
+// "requested" "complete" resources if we subscribe to them again.
+//
+// The delta subscription state transitions between the two scenarios depending on whether we are
+// subscribed to the wildcard resource or not. Nothing special happens when we transition from the
+// "no wildcard subscription" scenario to the "wildcard subscription" scenario, but when
+// transitioning in the other direction, we drop all the resources in the "wildcard" and
+// "ambiguous" categories.
+class NewDeltaSubscriptionState : public Logger::Loggable<Logger::Id::config> {
+public:
+  NewDeltaSubscriptionState(std::string type_url, UntypedConfigUpdateCallbacks& watch_map,
+                            const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher);
+
+  // Update which resources we're interested in subscribing to.
+  void updateSubscriptionInterest(const absl::flat_hash_set<std::string>& cur_added,
+                                  const absl::flat_hash_set<std::string>& cur_removed);
+  void setMustSendDiscoveryRequest() { must_send_discovery_request_ = true; }
+
+  // Whether there was a change in our subscription interest we have yet to inform the server of.
+  bool subscriptionUpdatePending() const;
+
+  void markStreamFresh() { any_request_sent_yet_in_current_stream_ = false; }
+
+  UpdateAck handleResponse(const envoy::service::discovery::v3::DeltaDiscoveryResponse& message);
+
+  void handleEstablishmentFailure();
+
+  // Returns the next gRPC request proto to be sent off to the server, based on this object's
+  // understanding of the current protocol state, and new resources that Envoy wants to request.
+  envoy::service::discovery::v3::DeltaDiscoveryRequest getNextRequestAckless();
+
+  // The WithAck version first calls the Ack-less version, then adds in the passed-in ack.
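+  // A minimal usage sketch (the caller, stream and response objects are assumed here purely for
+  // illustration; they are not part of this class):
+  //
+  //   state.updateSubscriptionInterest({"foo"}, {});
+  //   if (state.subscriptionUpdatePending()) {
+  //     stream.sendMessage(state.getNextRequestAckless());
+  //   }
+  //   UpdateAck ack = state.handleResponse(response);
+  //   stream.sendMessage(state.getNextRequestWithAck(ack));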
+  envoy::service::discovery::v3::DeltaDiscoveryRequest getNextRequestWithAck(const UpdateAck& ack);
+
+  NewDeltaSubscriptionState(const NewDeltaSubscriptionState&) = delete;
+  NewDeltaSubscriptionState& operator=(const NewDeltaSubscriptionState&) = delete;
+
+private:
+  bool isHeartbeatResponse(const envoy::service::discovery::v3::Resource& resource) const;
+  void handleGoodResponse(const envoy::service::discovery::v3::DeltaDiscoveryResponse& message);
+  void handleBadResponse(const EnvoyException& e, UpdateAck& ack);
+
+  class ResourceState {
+  public:
+    // Builds a ResourceState in the waitingForServer state.
+    ResourceState() = default;
+    // Builds a ResourceState with a specific version.
+    ResourceState(absl::string_view version) : version_(version) {}
+    // Self-documenting alias of the default constructor.
+    static ResourceState waitingForServer() { return ResourceState(); }
+    // Self-documenting alias of the constructor with a version.
+    static ResourceState withVersion(absl::string_view version) { return ResourceState(version); }
+
+    // If true, we currently have no version of this resource - we are waiting for the server to
+    // provide us with one.
+    bool isWaitingForServer() const { return version_ == absl::nullopt; }
+
+    void setAsWaitingForServer() { version_ = absl::nullopt; }
+    void setVersion(absl::string_view version) { version_ = std::string(version); }
+
+    // Must not be called if isWaitingForServer() == true.
+    std::string version() const {
+      ASSERT(version_.has_value());
+      return version_.value_or("");
+    }
+
+  private:
+    absl::optional<std::string> version_;
+  };
+
+  void addResourceStateFromServer(const envoy::service::discovery::v3::Resource& resource);
+  OptRef<ResourceState> getRequestedResourceState(absl::string_view resource_name);
+  OptRef<const ResourceState> getRequestedResourceState(absl::string_view resource_name) const;
+
+  bool isInitialRequestForLegacyWildcard();
+
+  // A map from resource name to per-resource version. The keys of this map are exactly the
+  // resource names we are currently interested in. Those in the waitingForServer state currently
+  // don't have any version for that resource: we need to inform the server if we lose interest in
+  // them, but we also need to *not* include them in the initial_resource_versions map upon a
+  // reconnect.
+  absl::node_hash_map<std::string, ResourceState> requested_resource_state_;
+  // A map from resource name to per-resource version. The keys of this map are resource names we
+  // have received as a part of the wildcard subscription.
+  absl::node_hash_map<std::string, std::string> wildcard_resource_state_;
+  // Used for storing resources that we lost interest in, but that could
+  // also be a part of the wildcard subscription.
+  absl::node_hash_map<std::string, std::string> ambiguous_resource_state_;
+
+  // Not all xDS resources support heartbeats due to there being specific information encoded in
+  // an empty response, which is indistinguishable from a heartbeat in some cases. For now we just
+  // disable heartbeats for these resources (currently only VHDS).
+  const bool supports_heartbeats_;
+  TtlManager ttl_;
+
+  const std::string type_url_;
+  UntypedConfigUpdateCallbacks& watch_map_;
+  const LocalInfo::LocalInfo& local_info_;
+
+  bool in_initial_legacy_wildcard_{true};
+  bool any_request_sent_yet_in_current_stream_{};
+  bool must_send_discovery_request_{};
+
+  // Tracks changes in our subscription interest since the previous DeltaDiscoveryRequest we sent.
+  // TODO: Can't use absl::flat_hash_set due to ordering issues in gTest expectation matching.
+  // Feel free to change to an unordered container once we figure out how to make it work.
+ std::set names_added_; + std::set names_removed_; +}; + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/new_grpc_mux_impl.cc b/source/common/config/new_grpc_mux_impl.cc index 085edec3150d..d05cfdd1a35d 100644 --- a/source/common/config/new_grpc_mux_impl.cc +++ b/source/common/config/new_grpc_mux_impl.cc @@ -158,7 +158,7 @@ GrpcMuxWatchPtr NewGrpcMuxImpl::addWatch(const std::string& type_url, auto entry = subscriptions_.find(type_url); if (entry == subscriptions_.end()) { // We don't yet have a subscription for type_url! Make one! - addSubscription(type_url, options.use_namespace_matching_, resources.empty()); + addSubscription(type_url, options.use_namespace_matching_); return addWatch(type_url, resources, callbacks, resource_decoder, options); } @@ -230,11 +230,10 @@ void NewGrpcMuxImpl::removeWatch(const std::string& type_url, Watch* watch) { entry->second->watch_map_.removeWatch(watch); } -void NewGrpcMuxImpl::addSubscription(const std::string& type_url, const bool use_namespace_matching, - const bool wildcard) { - subscriptions_.emplace(type_url, std::make_unique(type_url, local_info_, - use_namespace_matching, - dispatcher_, wildcard)); +void NewGrpcMuxImpl::addSubscription(const std::string& type_url, + const bool use_namespace_matching) { + subscriptions_.emplace(type_url, std::make_unique( + type_url, local_info_, use_namespace_matching, dispatcher_)); subscription_ordering_.emplace_back(type_url); } diff --git a/source/common/config/new_grpc_mux_impl.h b/source/common/config/new_grpc_mux_impl.h index 5c2940ebfe8e..84e5d223df63 100644 --- a/source/common/config/new_grpc_mux_impl.h +++ b/source/common/config/new_grpc_mux_impl.h @@ -81,10 +81,9 @@ class NewGrpcMuxImpl struct SubscriptionStuff { SubscriptionStuff(const std::string& type_url, const LocalInfo::LocalInfo& local_info, - const bool use_namespace_matching, Event::Dispatcher& dispatcher, - const bool wildcard) + const bool use_namespace_matching, Event::Dispatcher& dispatcher) : watch_map_(use_namespace_matching), - sub_state_(type_url, watch_map_, local_info, dispatcher, wildcard) {} + sub_state_(type_url, watch_map_, local_info, dispatcher) {} WatchMap watch_map_; DeltaSubscriptionState sub_state_; @@ -138,8 +137,7 @@ class NewGrpcMuxImpl const SubscriptionOptions& options); // Adds a subscription for the type_url to the subscriptions map and order list. 
- void addSubscription(const std::string& type_url, bool use_namespace_matching, - const bool wildcard); + void addSubscription(const std::string& type_url, bool use_namespace_matching); void trySendDiscoveryRequests(); diff --git a/source/common/config/non-wildcard-resource-state-machine.png b/source/common/config/non-wildcard-resource-state-machine.png new file mode 100644 index 000000000000..999814f6d142 Binary files /dev/null and b/source/common/config/non-wildcard-resource-state-machine.png differ diff --git a/source/common/config/old_delta_subscription_state.cc b/source/common/config/old_delta_subscription_state.cc new file mode 100644 index 000000000000..8a4b9272c30e --- /dev/null +++ b/source/common/config/old_delta_subscription_state.cc @@ -0,0 +1,248 @@ +#include "source/common/config/old_delta_subscription_state.h" + +#include "envoy/event/dispatcher.h" +#include "envoy/service/discovery/v3/discovery.pb.h" + +#include "source/common/common/assert.h" +#include "source/common/common/hash.h" +#include "source/common/config/utility.h" +#include "source/common/runtime/runtime_features.h" + +namespace Envoy { +namespace Config { + +OldDeltaSubscriptionState::OldDeltaSubscriptionState(std::string type_url, + UntypedConfigUpdateCallbacks& watch_map, + const LocalInfo::LocalInfo& local_info, + Event::Dispatcher& dispatcher) + // TODO(snowp): Hard coding VHDS here is temporary until we can move it away from relying on + // empty resources as updates. + : supports_heartbeats_(type_url != "envoy.config.route.v3.VirtualHost"), + ttl_( + [this](const auto& expired) { + Protobuf::RepeatedPtrField removed_resources; + for (const auto& resource : expired) { + setResourceWaitingForServer(resource); + removed_resources.Add(std::string(resource)); + } + + watch_map_.onConfigUpdate({}, removed_resources, ""); + }, + dispatcher, dispatcher.timeSource()), + type_url_(std::move(type_url)), watch_map_(watch_map), local_info_(local_info), + dispatcher_(dispatcher) {} + +void OldDeltaSubscriptionState::updateSubscriptionInterest( + const absl::flat_hash_set& cur_added, + const absl::flat_hash_set& cur_removed) { + if (!wildcard_set_) { + wildcard_set_ = true; + wildcard_ = cur_added.empty() && cur_removed.empty(); + } + for (const auto& a : cur_added) { + setResourceWaitingForServer(a); + // If interest in a resource is removed-then-added (all before a discovery request + // can be sent), we must treat it as a "new" addition: our user may have forgotten its + // copy of the resource after instructing us to remove it, and need to be reminded of it. + names_removed_.erase(a); + names_added_.insert(a); + } + for (const auto& r : cur_removed) { + removeResourceState(r); + // Ideally, when interest in a resource is added-then-removed in between requests, + // we would avoid putting a superfluous "unsubscribe [resource that was never subscribed]" + // in the request. However, the removed-then-added case *does* need to go in the request, + // and due to how we accomplish that, it's difficult to distinguish remove-add-remove from + // add-remove (because "remove-add" has to be treated as equivalent to just "add"). + names_added_.erase(r); + names_removed_.insert(r); + } +} + +// Not having sent any requests yet counts as an "update pending" since you're supposed to resend +// the entirety of your interest at the start of a stream, even if nothing has changed. 
+bool OldDeltaSubscriptionState::subscriptionUpdatePending() const { + return !names_added_.empty() || !names_removed_.empty() || + !any_request_sent_yet_in_current_stream_ || must_send_discovery_request_; +} + +UpdateAck OldDeltaSubscriptionState::handleResponse( + const envoy::service::discovery::v3::DeltaDiscoveryResponse& message) { + // We *always* copy the response's nonce into the next request, even if we're going to make that + // request a NACK by setting error_detail. + UpdateAck ack(message.nonce(), type_url_); + TRY_ASSERT_MAIN_THREAD { handleGoodResponse(message); } + END_TRY + catch (const EnvoyException& e) { + handleBadResponse(e, ack); + } + return ack; +} + +bool OldDeltaSubscriptionState::isHeartbeatResponse( + const envoy::service::discovery::v3::Resource& resource) const { + if (!supports_heartbeats_ && + !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.vhds_heartbeats")) { + return false; + } + const auto itr = resource_state_.find(resource.name()); + if (itr == resource_state_.end()) { + return false; + } + + return !resource.has_resource() && !itr->second.waitingForServer() && + resource.version() == itr->second.version(); +} + +void OldDeltaSubscriptionState::handleGoodResponse( + const envoy::service::discovery::v3::DeltaDiscoveryResponse& message) { + absl::flat_hash_set names_added_removed; + Protobuf::RepeatedPtrField non_heartbeat_resources; + for (const auto& resource : message.resources()) { + if (!names_added_removed.insert(resource.name()).second) { + throw EnvoyException( + fmt::format("duplicate name {} found among added/updated resources", resource.name())); + } + if (isHeartbeatResponse(resource)) { + continue; + } + non_heartbeat_resources.Add()->CopyFrom(resource); + // DeltaDiscoveryResponses for unresolved aliases don't contain an actual resource + if (!resource.has_resource() && resource.aliases_size() > 0) { + continue; + } + if (message.type_url() != resource.resource().type_url()) { + throw EnvoyException(fmt::format("type URL {} embedded in an individual Any does not match " + "the message-wide type URL {} in DeltaDiscoveryResponse {}", + resource.resource().type_url(), message.type_url(), + message.DebugString())); + } + } + for (const auto& name : message.removed_resources()) { + if (!names_added_removed.insert(name).second) { + throw EnvoyException( + fmt::format("duplicate name {} found in the union of added+removed resources", name)); + } + } + + { + const auto scoped_update = ttl_.scopedTtlUpdate(); + for (const auto& resource : message.resources()) { + if (wildcard_ || resource_state_.contains(resource.name())) { + // Only consider tracked resources. + // NOTE: This is not gonna work for xdstp resources with glob resource matching. + addResourceState(resource); + } + } + } + + watch_map_.onConfigUpdate(non_heartbeat_resources, message.removed_resources(), + message.system_version_info()); + + // If a resource is gone, there is no longer a meaningful version for it that makes sense to + // provide to the server upon stream reconnect: either it will continue to not exist, in which + // case saying nothing is fine, or the server will bring back something new, which we should + // receive regardless (which is the logic that not specifying a version will get you). + // + // So, leave the version map entry present but blank. It will be left out of + // initial_resource_versions messages, but will remind us to explicitly tell the server "I'm + // cancelling my subscription" when we lose interest. 
+ for (const auto& resource_name : message.removed_resources()) { + if (resource_names_.find(resource_name) != resource_names_.end()) { + setResourceWaitingForServer(resource_name); + } + } + ENVOY_LOG(debug, "Delta config for {} accepted with {} resources added, {} removed", type_url_, + message.resources().size(), message.removed_resources().size()); +} + +void OldDeltaSubscriptionState::handleBadResponse(const EnvoyException& e, UpdateAck& ack) { + // Note that error_detail being set is what indicates that a DeltaDiscoveryRequest is a NACK. + ack.error_detail_.set_code(Grpc::Status::WellKnownGrpcStatus::Internal); + ack.error_detail_.set_message(Config::Utility::truncateGrpcStatusMessage(e.what())); + ENVOY_LOG(warn, "delta config for {} rejected: {}", type_url_, e.what()); + watch_map_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e); +} + +void OldDeltaSubscriptionState::handleEstablishmentFailure() { + watch_map_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, + nullptr); +} + +envoy::service::discovery::v3::DeltaDiscoveryRequest +OldDeltaSubscriptionState::getNextRequestAckless() { + envoy::service::discovery::v3::DeltaDiscoveryRequest request; + must_send_discovery_request_ = false; + if (!any_request_sent_yet_in_current_stream_) { + any_request_sent_yet_in_current_stream_ = true; + // initial_resource_versions "must be populated for first request in a stream". + // Also, since this might be a new server, we must explicitly state *all* of our subscription + // interest. + for (auto const& [resource_name, resource_state] : resource_state_) { + // Populate initial_resource_versions with the resource versions we currently have. + // Resources we are interested in, but are still waiting to get any version of from the + // server, do not belong in initial_resource_versions. (But do belong in new subscriptions!) + if (!resource_state.waitingForServer()) { + (*request.mutable_initial_resource_versions())[resource_name] = resource_state.version(); + } + // As mentioned above, fill resource_names_subscribe with everything, including names we + // have yet to receive any resource for unless this is a wildcard subscription, for which + // the first request on a stream must be without any resource names. + if (!wildcard_) { + names_added_.insert(resource_name); + } + } + // Wildcard subscription initial requests must have no resource_names_subscribe. + if (wildcard_) { + names_added_.clear(); + } + names_removed_.clear(); + } + std::copy(names_added_.begin(), names_added_.end(), + Protobuf::RepeatedFieldBackInserter(request.mutable_resource_names_subscribe())); + std::copy(names_removed_.begin(), names_removed_.end(), + Protobuf::RepeatedFieldBackInserter(request.mutable_resource_names_unsubscribe())); + names_added_.clear(); + names_removed_.clear(); + + request.set_type_url(type_url_); + request.mutable_node()->MergeFrom(local_info_.node()); + return request; +} + +envoy::service::discovery::v3::DeltaDiscoveryRequest +OldDeltaSubscriptionState::getNextRequestWithAck(const UpdateAck& ack) { + envoy::service::discovery::v3::DeltaDiscoveryRequest request = getNextRequestAckless(); + request.set_response_nonce(ack.nonce_); + if (ack.error_detail_.code() != Grpc::Status::WellKnownGrpcStatus::Ok) { + // Don't needlessly make the field present-but-empty if status is ok. 
+ request.mutable_error_detail()->CopyFrom(ack.error_detail_); + } + return request; +} + +void OldDeltaSubscriptionState::addResourceState( + const envoy::service::discovery::v3::Resource& resource) { + if (resource.has_ttl()) { + ttl_.add(std::chrono::milliseconds(DurationUtil::durationToMilliseconds(resource.ttl())), + resource.name()); + } else { + ttl_.clear(resource.name()); + } + + resource_state_[resource.name()] = ResourceState(resource); + resource_names_.insert(resource.name()); +} + +void OldDeltaSubscriptionState::setResourceWaitingForServer(const std::string& resource_name) { + resource_state_[resource_name] = ResourceState(); + resource_names_.insert(resource_name); +} + +void OldDeltaSubscriptionState::removeResourceState(const std::string& resource_name) { + resource_state_.erase(resource_name); + resource_names_.erase(resource_name); +} + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/old_delta_subscription_state.h b/source/common/config/old_delta_subscription_state.h new file mode 100644 index 000000000000..f8aef137f133 --- /dev/null +++ b/source/common/config/old_delta_subscription_state.h @@ -0,0 +1,123 @@ +#pragma once + +#include "envoy/config/subscription.h" +#include "envoy/event/dispatcher.h" +#include "envoy/grpc/status.h" +#include "envoy/local_info/local_info.h" +#include "envoy/service/discovery/v3/discovery.pb.h" + +#include "source/common/common/assert.h" +#include "source/common/common/logger.h" +#include "source/common/config/api_version.h" +#include "source/common/config/pausable_ack_queue.h" +#include "source/common/config/ttl.h" +#include "source/common/config/watch_map.h" + +#include "absl/container/node_hash_map.h" + +namespace Envoy { +namespace Config { + +// Tracks the xDS protocol state of an individual ongoing delta xDS session, i.e. a single type_url. +// There can be multiple OldDeltaSubscriptionStates active. They will always all be +// blissfully unaware of each other's existence, even when their messages are +// being multiplexed together by ADS. +class OldDeltaSubscriptionState : public Logger::Loggable { +public: + OldDeltaSubscriptionState(std::string type_url, UntypedConfigUpdateCallbacks& watch_map, + const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher); + + // Update which resources we're interested in subscribing to. + void updateSubscriptionInterest(const absl::flat_hash_set& cur_added, + const absl::flat_hash_set& cur_removed); + void setMustSendDiscoveryRequest() { must_send_discovery_request_ = true; } + + // Whether there was a change in our subscription interest we have yet to inform the server of. + bool subscriptionUpdatePending() const; + + void markStreamFresh() { any_request_sent_yet_in_current_stream_ = false; } + + UpdateAck handleResponse(const envoy::service::discovery::v3::DeltaDiscoveryResponse& message); + + void handleEstablishmentFailure(); + + // Returns the next gRPC request proto to be sent off to the server, based on this object's + // understanding of the current protocol state, and new resources that Envoy wants to request. + envoy::service::discovery::v3::DeltaDiscoveryRequest getNextRequestAckless(); + + // The WithAck version first calls the Ack-less version, then adds in the passed-in ack. 
+  envoy::service::discovery::v3::DeltaDiscoveryRequest getNextRequestWithAck(const UpdateAck& ack);
+
+  OldDeltaSubscriptionState(const OldDeltaSubscriptionState&) = delete;
+  OldDeltaSubscriptionState& operator=(const OldDeltaSubscriptionState&) = delete;
+
+private:
+  bool isHeartbeatResponse(const envoy::service::discovery::v3::Resource& resource) const;
+  void handleGoodResponse(const envoy::service::discovery::v3::DeltaDiscoveryResponse& message);
+  void handleBadResponse(const EnvoyException& e, UpdateAck& ack);
+
+  class ResourceState {
+  public:
+    ResourceState(const envoy::service::discovery::v3::Resource& resource)
+        : version_(resource.version()) {}
+
+    // Builds a ResourceState in the waitingForServer state.
+    ResourceState() = default;
+
+    // If true, we currently have no version of this resource - we are waiting for the server to
+    // provide us with one.
+    bool waitingForServer() const { return version_ == absl::nullopt; }
+
+    // Must not be called if waitingForServer() == true.
+    std::string version() const {
+      ASSERT(version_.has_value());
+      return version_.value_or("");
+    }
+
+  private:
+    absl::optional<std::string> version_;
+  };
+
+  // Use these helpers to ensure resource_state_ and resource_names_ get updated together.
+  void addResourceState(const envoy::service::discovery::v3::Resource& resource);
+  void setResourceWaitingForServer(const std::string& resource_name);
+  void removeResourceState(const std::string& resource_name);
+
+  void populateDiscoveryRequest(envoy::service::discovery::v3::DeltaDiscoveryResponse& request);
+
+  // A map from resource name to per-resource version. The keys of this map are exactly the
+  // resource names we are currently interested in. Those in the waitingForServer state currently
+  // don't have any version for that resource: we need to inform the server if we lose interest in
+  // them, but we also need to *not* include them in the initial_resource_versions map upon a
+  // reconnect.
+  absl::node_hash_map<std::string, ResourceState> resource_state_;
+
+  // Not all xDS resources support heartbeats due to there being specific information encoded in
+  // an empty response, which is indistinguishable from a heartbeat in some cases. For now we just
+  // disable heartbeats for these resources (currently only VHDS).
+  const bool supports_heartbeats_;
+  TtlManager ttl_;
+  // The keys of resource_versions_. Only tracked separately because std::map does not provide an
+  // iterator into just its keys.
+  absl::flat_hash_set<std::string> resource_names_;
+
+  const std::string type_url_;
+  // Whether the subscription is for a wildcard request.
+  bool wildcard_set_{};
+  bool wildcard_{};
+  UntypedConfigUpdateCallbacks& watch_map_;
+  const LocalInfo::LocalInfo& local_info_;
+  Event::Dispatcher& dispatcher_;
+  std::chrono::milliseconds init_fetch_timeout_;
+
+  bool any_request_sent_yet_in_current_stream_{};
+  bool must_send_discovery_request_{};
+
+  // Tracks changes in our subscription interest since the previous DeltaDiscoveryRequest we sent.
+  // TODO: Can't use absl::flat_hash_set due to ordering issues in gTest expectation matching.
+  // Feel free to change to an unordered container once we figure out how to make it work.
+ std::set names_added_; + std::set names_removed_; +}; + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/protobuf_link_hacks.h b/source/common/config/protobuf_link_hacks.h index 991291789ef9..c5eff8e5fb8d 100644 --- a/source/common/config/protobuf_link_hacks.h +++ b/source/common/config/protobuf_link_hacks.h @@ -3,6 +3,7 @@ #include "envoy/service/cluster/v3/cds.pb.h" #include "envoy/service/discovery/v3/ads.pb.h" #include "envoy/service/endpoint/v3/eds.pb.h" +#include "envoy/service/endpoint/v3/leds.pb.h" #include "envoy/service/extension/v3/config_discovery.pb.h" #include "envoy/service/health/v3/hds.pb.h" #include "envoy/service/listener/v3/lds.pb.h" @@ -24,6 +25,7 @@ const envoy::service::listener::v3::LdsDummy _lds_dummy_v3; const envoy::service::route::v3::RdsDummy _rds_dummy_v3; const envoy::service::cluster::v3::CdsDummy _cds_dummy_v3; const envoy::service::endpoint::v3::EdsDummy _eds_dummy_v3; +const envoy::service::endpoint::v3::LedsDummy _leds_dummy_v3; const envoy::service::route::v3::SrdsDummy _srds_dummy_v3; const envoy::service::extension::v3::EcdsDummy _ecds_dummy_v3; const envoy::service::runtime::v3::RtdsDummy _rtds_dummy_v3; diff --git a/source/common/config/subscription_factory_impl.cc b/source/common/config/subscription_factory_impl.cc index 12a41c33a428..b36f139c3516 100644 --- a/source/common/config/subscription_factory_impl.cc +++ b/source/common/config/subscription_factory_impl.cc @@ -9,6 +9,7 @@ #include "source/common/config/new_grpc_mux_impl.h" #include "source/common/config/type_to_endpoint.h" #include "source/common/config/utility.h" +#include "source/common/config/xds_mux/grpc_mux_impl.h" #include "source/common/config/xds_resource.h" #include "source/common/http/utility.h" #include "source/common/protobuf/protobuf.h" @@ -55,28 +56,51 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( Utility::apiConfigSourceRequestTimeout(api_config_source), restMethod(type_url), type_url, callbacks, resource_decoder, stats, Utility::configSourceInitialFetchTimeout(config), validation_visitor_); - case envoy::config::core::v3::ApiConfigSource::GRPC: + case envoy::config::core::v3::ApiConfigSource::GRPC: { + GrpcMuxSharedPtr mux; + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.unified_mux")) { + mux = std::make_shared( + Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), api_config_source, + scope, true) + ->createUncachedRawAsyncClient(), + dispatcher_, sotwGrpcMethod(type_url), api_.randomGenerator(), scope, + Utility::parseRateLimitSettings(api_config_source), local_info_, + api_config_source.set_node_on_first_message_only()); + } else { + mux = std::make_shared( + local_info_, + Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), api_config_source, + scope, true) + ->createUncachedRawAsyncClient(), + dispatcher_, sotwGrpcMethod(type_url), api_.randomGenerator(), scope, + Utility::parseRateLimitSettings(api_config_source), + api_config_source.set_node_on_first_message_only()); + } return std::make_unique( - std::make_shared( - local_info_, - Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), - api_config_source, scope, true) - ->createUncachedRawAsyncClient(), - dispatcher_, sotwGrpcMethod(type_url), api_.randomGenerator(), scope, - Utility::parseRateLimitSettings(api_config_source), - api_config_source.set_node_on_first_message_only()), - callbacks, resource_decoder, stats, type_url, dispatcher_, + std::move(mux), callbacks, resource_decoder, stats, 
type_url, dispatcher_, Utility::configSourceInitialFetchTimeout(config), /*is_aggregated*/ false, options); + } case envoy::config::core::v3::ApiConfigSource::DELTA_GRPC: { + GrpcMuxSharedPtr mux; + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.unified_mux")) { + mux = std::make_shared( + Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), api_config_source, + scope, true) + ->createUncachedRawAsyncClient(), + dispatcher_, deltaGrpcMethod(type_url), api_.randomGenerator(), scope, + Utility::parseRateLimitSettings(api_config_source), local_info_, + api_config_source.set_node_on_first_message_only()); + } else { + mux = std::make_shared( + Config::Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), + api_config_source, scope, true) + ->createUncachedRawAsyncClient(), + dispatcher_, deltaGrpcMethod(type_url), api_.randomGenerator(), scope, + Utility::parseRateLimitSettings(api_config_source), local_info_); + } return std::make_unique( - std::make_shared( - Config::Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), - api_config_source, scope, true) - ->createUncachedRawAsyncClient(), - dispatcher_, deltaGrpcMethod(type_url), api_.randomGenerator(), scope, - Utility::parseRateLimitSettings(api_config_source), local_info_), - callbacks, resource_decoder, stats, type_url, dispatcher_, + std::move(mux), callbacks, resource_decoder, stats, type_url, dispatcher_, Utility::configSourceInitialFetchTimeout(config), /*is_aggregated*/ false, options); } default: @@ -115,36 +139,55 @@ SubscriptionPtr SubscriptionFactoryImpl::collectionSubscriptionFromUrl( fmt::format("xdstp:// type does not match {} in {}", resource_type, Config::XdsResourceIdentifier::encodeUrl(collection_locator))); } - const envoy::config::core::v3::ApiConfigSource& api_config_source = config.api_config_source(); - Utility::checkApiConfigSourceSubscriptionBackingCluster(cm_.primaryClusters(), - api_config_source); + switch (config.config_source_specifier_case()) { + case envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kApiConfigSource: { + const envoy::config::core::v3::ApiConfigSource& api_config_source = + config.api_config_source(); + Utility::checkApiConfigSourceSubscriptionBackingCluster(cm_.primaryClusters(), + api_config_source); - SubscriptionOptions options; - // All Envoy collections currently are xDS resource graph roots and require node context - // parameters. - options.add_xdstp_node_context_params_ = true; - switch (api_config_source.api_type()) { - case envoy::config::core::v3::ApiConfigSource::DELTA_GRPC: { - const std::string type_url = TypeUtil::descriptorFullNameToTypeUrl(resource_type); - return std::make_unique( - collection_locator, - std::make_shared( - Config::Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), - api_config_source, scope, true) - ->createUncachedRawAsyncClient(), - dispatcher_, deltaGrpcMethod(type_url), api_.randomGenerator(), scope, - Utility::parseRateLimitSettings(api_config_source), local_info_), - callbacks, resource_decoder, stats, dispatcher_, - Utility::configSourceInitialFetchTimeout(config), false, options); + SubscriptionOptions options; + // All Envoy collections currently are xDS resource graph roots and require node context + // parameters. 
+ options.add_xdstp_node_context_params_ = true; + switch (api_config_source.api_type()) { + case envoy::config::core::v3::ApiConfigSource::DELTA_GRPC: { + const std::string type_url = TypeUtil::descriptorFullNameToTypeUrl(resource_type); + return std::make_unique( + collection_locator, + std::make_shared( + Config::Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), + api_config_source, scope, true) + ->createUncachedRawAsyncClient(), + dispatcher_, deltaGrpcMethod(type_url), api_.randomGenerator(), scope, + Utility::parseRateLimitSettings(api_config_source), local_info_), + callbacks, resource_decoder, stats, dispatcher_, + Utility::configSourceInitialFetchTimeout(config), false, options); + } + case envoy::config::core::v3::ApiConfigSource::AGGREGATED_DELTA_GRPC: { + return std::make_unique( + collection_locator, cm_.adsMux(), callbacks, resource_decoder, stats, dispatcher_, + Utility::configSourceInitialFetchTimeout(config), false, options); + } + default: + throw EnvoyException(fmt::format("Unknown xdstp:// transport API type in {}", + api_config_source.DebugString())); + } } - case envoy::config::core::v3::ApiConfigSource::AGGREGATED_DELTA_GRPC: { + case envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kAds: { + // TODO(adisuissa): verify that the ADS is set up in delta-xDS mode. + SubscriptionOptions options; + // All Envoy collections currently are xDS resource graph roots and require node context + // parameters. + options.add_xdstp_node_context_params_ = true; return std::make_unique( collection_locator, cm_.adsMux(), callbacks, resource_decoder, stats, dispatcher_, - Utility::configSourceInitialFetchTimeout(config), false, options); + Utility::configSourceInitialFetchTimeout(config), true, options); } default: - throw EnvoyException(fmt::format("Unknown xdstp:// transport API type in {}", - api_config_source.DebugString())); + throw EnvoyException("Missing or not supported config source specifier in " + "envoy::config::core::v3::ConfigSource for a collection. 
Only ADS and " + "gRPC in delta-xDS mode are supported."); } } default: diff --git a/source/common/config/type_to_endpoint.cc b/source/common/config/type_to_endpoint.cc index 655c429f9b4b..4ee233d118ae 100644 --- a/source/common/config/type_to_endpoint.cc +++ b/source/common/config/type_to_endpoint.cc @@ -55,6 +55,7 @@ TypeUrlToV3ServiceMap* buildTypeUrlToServiceMap() { "envoy.service.secret.v3.SecretDiscoveryService", "envoy.service.cluster.v3.ClusterDiscoveryService", "envoy.service.endpoint.v3.EndpointDiscoveryService", + "envoy.service.endpoint.v3.LocalityEndpointDiscoveryService", "envoy.service.listener.v3.ListenerDiscoveryService", "envoy.service.runtime.v3.RuntimeDiscoveryService", "envoy.service.extension.v3.ExtensionConfigDiscoveryService", diff --git a/source/common/config/utility.cc b/source/common/config/utility.cc index 912ea6cb8845..3c97063d91fa 100644 --- a/source/common/config/utility.cc +++ b/source/common/config/utility.cc @@ -254,6 +254,8 @@ void Utility::translateOpaqueConfig(const ProtobufWkt::Any& typed_config, static const std::string struct_type = ProtobufWkt::Struct::default_instance().GetDescriptor()->full_name(); static const std::string typed_struct_type = + xds::type::v3::TypedStruct::default_instance().GetDescriptor()->full_name(); + static const std::string legacy_typed_struct_type = udpa::type::v1::TypedStruct::default_instance().GetDescriptor()->full_name(); if (!typed_config.value().empty()) { @@ -262,6 +264,17 @@ void Utility::translateOpaqueConfig(const ProtobufWkt::Any& typed_config, absl::string_view type = TypeUtil::typeUrlToDescriptorFullName(typed_config.type_url()); if (type == typed_struct_type) { + xds::type::v3::TypedStruct typed_struct; + MessageUtil::unpackTo(typed_config, typed_struct); + // if out_proto is expecting Struct, return directly + if (out_proto.GetDescriptor()->full_name() == struct_type) { + out_proto.CopyFrom(typed_struct.value()); + } else { + // The typed struct might match out_proto, or some earlier version, let + // MessageUtil::jsonConvert sort this out. + MessageUtil::jsonConvert(typed_struct.value(), validation_visitor, out_proto); + } + } else if (type == legacy_typed_struct_type) { udpa::type::v1::TypedStruct typed_struct; MessageUtil::unpackTo(typed_config, typed_struct); // if out_proto is expecting Struct, return directly diff --git a/source/common/config/utility.h b/source/common/config/utility.h index 78db555b2cd7..bdbf9f64b9c5 100644 --- a/source/common/config/utility.h +++ b/source/common/config/utility.h @@ -31,10 +31,13 @@ #include "source/common/singleton/const_singleton.h" #include "udpa/type/v1/typed_struct.pb.h" +#include "xds/type/v3/typed_struct.pb.h" namespace Envoy { namespace Config { +constexpr absl::string_view Wildcard = "*"; + /** * Constant Api Type Values, used by envoy::config::core::v3::ApiConfigSource. 
*/ @@ -191,7 +194,7 @@ class Utility { */ template static void checkTransportVersion(const Proto& api_config_source) { const auto transport_api_version = api_config_source.transport_api_version(); - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); if (transport_api_version == envoy::config::core::v3::ApiVersion::AUTO || transport_api_version == envoy::config::core::v3::ApiVersion::V2) { Runtime::LoaderSingleton::getExisting()->countDeprecatedFeatureUse(); @@ -329,11 +332,18 @@ class Utility { */ static std::string getFactoryType(const ProtobufWkt::Any& typed_config) { static const std::string& typed_struct_type = + xds::type::v3::TypedStruct::default_instance().GetDescriptor()->full_name(); + static const std::string& legacy_typed_struct_type = udpa::type::v1::TypedStruct::default_instance().GetDescriptor()->full_name(); // Unpack methods will only use the fully qualified type name after the last '/'. // https://github.com/protocolbuffers/protobuf/blob/3.6.x/src/google/protobuf/any.proto#L87 auto type = std::string(TypeUtil::typeUrlToDescriptorFullName(typed_config.type_url())); if (type == typed_struct_type) { + xds::type::v3::TypedStruct typed_struct; + MessageUtil::unpackTo(typed_config, typed_struct); + // Not handling nested structs or typed structs in typed structs + return std::string(TypeUtil::typeUrlToDescriptorFullName(typed_struct.type_url())); + } else if (type == legacy_typed_struct_type) { udpa::type::v1::TypedStruct typed_struct; MessageUtil::unpackTo(typed_config, typed_struct); // Not handling nested structs or typed structs in typed structs diff --git a/source/common/config/watch_map.cc b/source/common/config/watch_map.cc index 992fdc35393c..1fc4ed842f86 100644 --- a/source/common/config/watch_map.cc +++ b/source/common/config/watch_map.cc @@ -5,6 +5,7 @@ #include "source/common/common/cleanup.h" #include "source/common/common/utility.h" #include "source/common/config/decoded_resource_impl.h" +#include "source/common/config/utility.h" #include "source/common/config/xds_resource.h" namespace Envoy { @@ -49,7 +50,7 @@ void WatchMap::removeDeferredWatches() { AddedRemoved WatchMap::updateWatchInterest(Watch* watch, const absl::flat_hash_set& update_to_these_names) { - if (update_to_these_names.empty()) { + if (update_to_these_names.empty() || update_to_these_names.contains(Wildcard)) { wildcard_watches_.insert(watch); } else { wildcard_watches_.erase(watch); @@ -110,7 +111,7 @@ absl::flat_hash_set WatchMap::watchesInterestedIn(const std::string& res return ret; } -void WatchMap::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, +void WatchMap::onConfigUpdate(const std::vector& resources, const std::string& version_info) { if (watches_.empty()) { return; @@ -123,15 +124,11 @@ void WatchMap::onConfigUpdate(const Protobuf::RepeatedPtrField // Build a map from watches, to the set of updated resources that each watch cares about. Each // entry in the map is then a nice little bundle that can be fed directly into the individual // onConfigUpdate()s. 
- std::vector decoded_resources; absl::flat_hash_map> per_watch_updates; for (const auto& r : resources) { - decoded_resources.emplace_back( - DecodedResourceImpl::fromResource((*watches_.begin())->resource_decoder_, r, version_info)); - const absl::flat_hash_set& interested_in_r = - watchesInterestedIn(decoded_resources.back()->name()); + const absl::flat_hash_set& interested_in_r = watchesInterestedIn(r->name()); for (const auto& interested_watch : interested_in_r) { - per_watch_updates[interested_watch].emplace_back(*decoded_resources.back()); + per_watch_updates[interested_watch].emplace_back(*r); } } @@ -161,6 +158,21 @@ void WatchMap::onConfigUpdate(const Protobuf::RepeatedPtrField } } +void WatchMap::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::string& version_info) { + if (watches_.empty()) { + return; + } + + std::vector decoded_resources; + for (const auto& r : resources) { + decoded_resources.emplace_back( + DecodedResourceImpl::fromResource((*watches_.begin())->resource_decoder_, r, version_info)); + } + + onConfigUpdate(decoded_resources, version_info); +} + void WatchMap::onConfigUpdate( const Protobuf::RepeatedPtrField& added_resources, const Protobuf::RepeatedPtrField& removed_resources, diff --git a/source/common/config/watch_map.h b/source/common/config/watch_map.h index d1139e23e0af..c7a9f14fef0e 100644 --- a/source/common/config/watch_map.h +++ b/source/common/config/watch_map.h @@ -82,6 +82,10 @@ class WatchMap : public UntypedConfigUpdateCallbacks, public Logger::Loggable& resources, const std::string& version_info) override; + + void onConfigUpdate(const std::vector& resources, + const std::string& version_info) override; + void onConfigUpdate( const Protobuf::RepeatedPtrField& added_resources, const Protobuf::RepeatedPtrField& removed_resources, diff --git a/source/common/config/wildcard-resource-state-machine.png b/source/common/config/wildcard-resource-state-machine.png new file mode 100644 index 000000000000..7447a9caaa76 Binary files /dev/null and b/source/common/config/wildcard-resource-state-machine.png differ diff --git a/source/common/config/xds_mux/delta_subscription_state.cc b/source/common/config/xds_mux/delta_subscription_state.cc index dd3b8e686cb7..244149a92ca3 100644 --- a/source/common/config/xds_mux/delta_subscription_state.cc +++ b/source/common/config/xds_mux/delta_subscription_state.cc @@ -13,11 +13,11 @@ namespace XdsMux { DeltaSubscriptionState::DeltaSubscriptionState(std::string type_url, UntypedConfigUpdateCallbacks& watch_map, - Event::Dispatcher& dispatcher, const bool wildcard) + Event::Dispatcher& dispatcher) : BaseSubscriptionState(std::move(type_url), watch_map, dispatcher), // TODO(snowp): Hard coding VHDS here is temporary until we can move it away from relying on // empty resources as updates. - supports_heartbeats_(type_url_ != "envoy.config.route.v3.VirtualHost"), wildcard_(wildcard) {} + supports_heartbeats_(type_url_ != "envoy.config.route.v3.VirtualHost") {} DeltaSubscriptionState::~DeltaSubscriptionState() = default; @@ -25,7 +25,24 @@ void DeltaSubscriptionState::updateSubscriptionInterest( const absl::flat_hash_set& cur_added, const absl::flat_hash_set& cur_removed) { for (const auto& a : cur_added) { - resource_state_[a] = ResourceState::waitingForServer(); + if (in_initial_legacy_wildcard_ && a != Wildcard) { + in_initial_legacy_wildcard_ = false; + } + // If the requested resource existed as a wildcard resource, + // transition it to requested. 
Otherwise mark it as a resource + // waiting for the server to receive the version. + if (auto it = wildcard_resource_state_.find(a); it != wildcard_resource_state_.end()) { + requested_resource_state_.insert_or_assign(a, ResourceState::withVersion(it->second)); + wildcard_resource_state_.erase(it); + } else if (it = ambiguous_resource_state_.find(a); it != ambiguous_resource_state_.end()) { + requested_resource_state_.insert_or_assign(a, ResourceState::withVersion(it->second)); + ambiguous_resource_state_.erase(it); + } else { + requested_resource_state_.insert_or_assign(a, ResourceState::waitingForServer()); + } + ASSERT(requested_resource_state_.contains(a)); + ASSERT(!wildcard_resource_state_.contains(a)); + ASSERT(!ambiguous_resource_state_.contains(a)); // If interest in a resource is removed-then-added (all before a discovery request // can be sent), we must treat it as a "new" addition: our user may have forgotten its // copy of the resource after instructing us to remove it, and need to be reminded of it. @@ -33,22 +50,73 @@ void DeltaSubscriptionState::updateSubscriptionInterest( names_added_.insert(a); } for (const auto& r : cur_removed) { - resource_state_.erase(r); + auto actually_erased = false; + // The resource we have lost the interest in could also come from our wildcard subscription. We + // just don't know it at this point. Instead of removing it outright, mark the resource as not + // interesting to us any more and the server will send us an update. If we don't have a wildcard + // subscription then there is no ambiguity and just drop the resource. + if (requested_resource_state_.contains(Wildcard)) { + if (auto it = requested_resource_state_.find(r); it != requested_resource_state_.end()) { + // Wildcard resources always have a version. If our requested resource has no version, it + // won't be a wildcard resource then. If r is Wildcard itself, then it never has a version + // attached to it, so it will not be moved to ambiguous category. + if (!it->second.isWaitingForServer()) { + ambiguous_resource_state_.insert({it->first, it->second.version()}); + } + requested_resource_state_.erase(it); + actually_erased = true; + } + } else { + actually_erased = (requested_resource_state_.erase(r) > 0); + } + ASSERT(!requested_resource_state_.contains(r)); // Ideally, when interest in a resource is added-then-removed in between requests, // we would avoid putting a superfluous "unsubscribe [resource that was never subscribed]" // in the request. However, the removed-then-added case *does* need to go in the request, // and due to how we accomplish that, it's difficult to distinguish remove-add-remove from // add-remove (because "remove-add" has to be treated as equivalent to just "add"). names_added_.erase(r); - names_removed_.insert(r); + if (actually_erased) { + names_removed_.insert(r); + in_initial_legacy_wildcard_ = false; + } + } + // If we unsubscribe from wildcard resource, drop all the resources that came from wildcard from + // cache. + if (cur_removed.contains(Wildcard)) { + wildcard_resource_state_.clear(); + ambiguous_resource_state_.clear(); } } // Not having sent any requests yet counts as an "update pending" since you're supposed to resend // the entirety of your interest at the start of a stream, even if nothing has changed. 
bool DeltaSubscriptionState::subscriptionUpdatePending() const { - return !names_added_.empty() || !names_removed_.empty() || - !any_request_sent_yet_in_current_stream_ || dynamicContextChanged(); + if (!names_added_.empty() || !names_removed_.empty()) { + return true; + } + // At this point, we have no new resources to subscribe to or any + // resources to unsubscribe from. + if (!any_request_sent_yet_in_current_stream_) { + // If we haven't sent anything on the current stream, but we are actually interested in some + // resource then we obviously need to let the server know about those. + if (!requested_resource_state_.empty()) { + return true; + } + // So there are no new names and we are interested in nothing. This may either mean that we want + // the legacy wildcard subscription to kick in or we actually unsubscribed from everything. If + // the latter is true, then we should not be sending any requests. In such case the initial + // wildcard mode will be false. Otherwise it means that the legacy wildcard request should be + // sent. + return in_initial_legacy_wildcard_; + } + + // At this point, we have no changes in subscription resources and this isn't a first request in + // the stream, so even if there are no resources we are interested in, we can send the request, + // because even if it's empty, it won't be interpreted as legacy wildcard subscription, which can + // only for the first request in the stream. So sending an empty request at this point should be + // harmless. + return dynamicContextChanged(); } bool DeltaSubscriptionState::isHeartbeatResource( @@ -57,13 +125,29 @@ bool DeltaSubscriptionState::isHeartbeatResource( !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.vhds_heartbeats")) { return false; } - const auto itr = resource_state_.find(resource.name()); - if (itr == resource_state_.end()) { + if (resource.has_resource()) { return false; } - return !resource.has_resource() && !itr->second.isWaitingForServer() && - resource.version() == itr->second.version(); + if (const auto maybe_resource = getRequestedResourceState(resource.name()); + maybe_resource.has_value()) { + return !maybe_resource->isWaitingForServer() && resource.version() == maybe_resource->version(); + } + + if (const auto itr = wildcard_resource_state_.find(resource.name()); + itr != wildcard_resource_state_.end()) { + return resource.version() == itr->second; + } + + if (const auto itr = ambiguous_resource_state_.find(resource.name()); + itr != wildcard_resource_state_.end()) { + // In theory we should move the ambiguous resource to wildcard, because probably we shouldn't be + // getting heartbeat responses about resources that we are not interested in, but the server + // could have sent this heartbeat before it learned about our lack of interest in the resource. + return resource.version() == itr->second; + } + + return false; } void DeltaSubscriptionState::handleGoodResponse( @@ -101,8 +185,19 @@ void DeltaSubscriptionState::handleGoodResponse( { const auto scoped_update = ttl_.scopedTtlUpdate(); - for (const auto& resource : message.resources()) { - addResourceState(resource); + if (requested_resource_state_.contains(Wildcard)) { + for (const auto& resource : message.resources()) { + addResourceStateFromServer(resource); + } + } else { + // We are not subscribed to wildcard, so we only take resources that we explicitly requested + // and ignore the others. + // NOTE: This is not gonna work for xdstp resources with glob resource matching. 
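Editor's note: as the NOTE above says, without a wildcard subscription handleGoodResponse() now ignores resources that were never explicitly requested. A simplified sketch of that filter follows, using std containers and hypothetical types (IncomingResource, acceptResources) in place of the protobuf response; the real code additionally routes wildcard-delivered resources into a separate bucket.

// Illustrative sketch: accept a delta response's resources only if we
// subscribed to "*" or asked for them by name. Hypothetical names.
#include <map>
#include <set>
#include <string>
#include <vector>

struct IncomingResource {
  std::string name;
  std::string version;
};

void acceptResources(const std::vector<IncomingResource>& resources,
                     const std::set<std::string>& requested,
                     std::map<std::string, std::string>& accepted_versions) {
  const bool wildcard = requested.count("*") != 0;
  for (const auto& r : resources) {
    // Without a wildcard subscription, silently ignore anything we did not ask for.
    if (!wildcard && requested.count(r.name) == 0) {
      continue;
    }
    accepted_versions[r.name] = r.version;
  }
}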
+ for (const auto& resource : message.resources()) { + if (requested_resource_state_.contains(resource.name())) { + addResourceStateFromServer(resource); + } + } } } @@ -114,12 +209,18 @@ void DeltaSubscriptionState::handleGoodResponse( // case saying nothing is fine, or the server will bring back something new, which we should // receive regardless (which is the logic that not specifying a version will get you). // - // So, leave the version map entry present but blank. It will be left out of - // initial_resource_versions messages, but will remind us to explicitly tell the server "I'm - // cancelling my subscription" when we lose interest. + // So, leave the version map entry present but blank if we are still interested in the resource. + // It will be left out of initial_resource_versions messages, but will remind us to explicitly + // tell the server "I'm cancelling my subscription" when we lose interest. In case of resources + // received as a part of the wildcard subscription or resources we already lost interest in, we + // just drop them. for (const auto& resource_name : message.removed_resources()) { - if (resource_state_.find(resource_name) != resource_state_.end()) { - resource_state_[resource_name] = ResourceState::waitingForServer(); + if (auto maybe_resource = getRequestedResourceState(resource_name); + maybe_resource.has_value()) { + maybe_resource->setAsWaitingForServer(); + } else if (const auto erased_count = ambiguous_resource_state_.erase(resource_name); + erased_count == 0) { + wildcard_resource_state_.erase(resource_name); } } ENVOY_LOG(debug, "Delta config for {} accepted with {} resources added, {} removed", typeUrl(), @@ -132,25 +233,30 @@ DeltaSubscriptionState::getNextRequestInternal() { request->set_type_url(typeUrl()); if (!any_request_sent_yet_in_current_stream_) { any_request_sent_yet_in_current_stream_ = true; + const bool is_legacy_wildcard = isInitialRequestForLegacyWildcard(); // initial_resource_versions "must be populated for first request in a stream". // Also, since this might be a new server, we must explicitly state *all* of our subscription // interest. - for (auto const& [resource_name, resource_state] : resource_state_) { + for (auto const& [resource_name, resource_state] : requested_resource_state_) { // Populate initial_resource_versions with the resource versions we currently have. // Resources we are interested in, but are still waiting to get any version of from the // server, do not belong in initial_resource_versions. (But do belong in new subscriptions!) if (!resource_state.isWaitingForServer()) { (*request->mutable_initial_resource_versions())[resource_name] = resource_state.version(); } - // As mentioned above, fill resource_names_subscribe with everything, including names we - // have yet to receive any resource for unless this is a wildcard subscription, for which - // the first request on a stream must be without any resource names. - if (!wildcard_) { - names_added_.insert(resource_name); - } + // We are going over a list of resources that we are interested in, so add them to + // resource_names_subscribe. + names_added_.insert(resource_name); + } + for (auto const& [resource_name, resource_version] : wildcard_resource_state_) { + (*request->mutable_initial_resource_versions())[resource_name] = resource_version; } - // Wildcard subscription initial requests must have no resource_names_subscribe. 
- if (wildcard_) { + for (auto const& [resource_name, resource_version] : ambiguous_resource_state_) { + (*request->mutable_initial_resource_versions())[resource_name] = resource_version; + } + // If this is a legacy wildcard request, then make sure that the resource_names_subscribe is + // empty. + if (is_legacy_wildcard) { names_added_.clear(); } names_removed_.clear(); @@ -166,10 +272,60 @@ DeltaSubscriptionState::getNextRequestInternal() { return request; } -void DeltaSubscriptionState::addResourceState( +bool DeltaSubscriptionState::isInitialRequestForLegacyWildcard() { + if (in_initial_legacy_wildcard_) { + requested_resource_state_.insert_or_assign(Wildcard, ResourceState::waitingForServer()); + ASSERT(requested_resource_state_.contains(Wildcard)); + ASSERT(!wildcard_resource_state_.contains(Wildcard)); + ASSERT(!ambiguous_resource_state_.contains(Wildcard)); + return true; + } + + // If we are here, this means that we lost our initial wildcard mode, because we subscribed to + // something in the past. We could still be in the situation now that all we are subscribed to now + // is wildcard resource, so in such case try to send a legacy wildcard subscription request + // anyway. For this to happen, two conditions need to apply: + // + // 1. No change in interest. + // 2. The only requested resource is Wildcard resource. + // + // The invariant of the code here is that this code is executed only when + // subscriptionUpdatePending actually returns true, which in our case can only happen if the + // requested resources state_ isn't empty. + ASSERT(!requested_resource_state_.empty()); + + // If our subscription interest didn't change then the first condition for using legacy wildcard + // subscription is met. + if (!names_added_.empty() || !names_removed_.empty()) { + return false; + } + // If we requested only a wildcard resource then the second condition for using legacy wildcard + // condition is met. + return requested_resource_state_.size() == 1 && + requested_resource_state_.begin()->first == Wildcard; +} + +void DeltaSubscriptionState::addResourceStateFromServer( const envoy::service::discovery::v3::Resource& resource) { setResourceTtl(resource); - resource_state_[resource.name()] = ResourceState(resource.version()); + + if (auto maybe_resource = getRequestedResourceState(resource.name()); + maybe_resource.has_value()) { + // It is a resource that we requested. + maybe_resource->setVersion(resource.version()); + ASSERT(requested_resource_state_.contains(resource.name())); + ASSERT(!wildcard_resource_state_.contains(resource.name())); + ASSERT(!ambiguous_resource_state_.contains(resource.name())); + } else { + // It is a resource that is a part of our wildcard request. + wildcard_resource_state_.insert({resource.name(), resource.version()}); + // The resource could be ambiguous before, but now the ambiguity + // is resolved. 
+ ambiguous_resource_state_.erase(resource.name()); + ASSERT(!requested_resource_state_.contains(resource.name())); + ASSERT(wildcard_resource_state_.contains(resource.name())); + ASSERT(!ambiguous_resource_state_.contains(resource.name())); + } } void DeltaSubscriptionState::setResourceTtl( @@ -185,12 +341,36 @@ void DeltaSubscriptionState::setResourceTtl( void DeltaSubscriptionState::ttlExpiryCallback(const std::vector& expired) { Protobuf::RepeatedPtrField removed_resources; for (const auto& resource : expired) { - resource_state_[resource] = ResourceState::waitingForServer(); - removed_resources.Add(std::string(resource)); + if (auto maybe_resource = getRequestedResourceState(resource); maybe_resource.has_value()) { + maybe_resource->setAsWaitingForServer(); + removed_resources.Add(std::string(resource)); + } else if (const auto erased_count = wildcard_resource_state_.erase(resource) + + ambiguous_resource_state_.erase(resource); + erased_count > 0) { + removed_resources.Add(std::string(resource)); + } } callbacks().onConfigUpdate({}, removed_resources, ""); } +OptRef +DeltaSubscriptionState::getRequestedResourceState(absl::string_view resource_name) { + auto itr = requested_resource_state_.find(resource_name); + if (itr == requested_resource_state_.end()) { + return {}; + } + return {itr->second}; +} + +OptRef +DeltaSubscriptionState::getRequestedResourceState(absl::string_view resource_name) const { + auto itr = requested_resource_state_.find(resource_name); + if (itr == requested_resource_state_.end()) { + return {}; + } + return {itr->second}; +} + } // namespace XdsMux } // namespace Config } // namespace Envoy diff --git a/source/common/config/xds_mux/delta_subscription_state.h b/source/common/config/xds_mux/delta_subscription_state.h index ced0c9fd52f0..f290f2186852 100644 --- a/source/common/config/xds_mux/delta_subscription_state.h +++ b/source/common/config/xds_mux/delta_subscription_state.h @@ -20,7 +20,7 @@ class DeltaSubscriptionState envoy::service::discovery::v3::DeltaDiscoveryRequest> { public: DeltaSubscriptionState(std::string type_url, UntypedConfigUpdateCallbacks& watch_map, - Event::Dispatcher& dispatcher, const bool wildcard); + Event::Dispatcher& dispatcher); ~DeltaSubscriptionState() override; @@ -46,20 +46,26 @@ class DeltaSubscriptionState bool isHeartbeatResource(const envoy::service::discovery::v3::Resource& resource) const; void handleGoodResponse(const envoy::service::discovery::v3::DeltaDiscoveryResponse& message) override; - void addResourceState(const envoy::service::discovery::v3::Resource& resource); + void addResourceStateFromServer(const envoy::service::discovery::v3::Resource& resource); class ResourceState { public: - explicit ResourceState(absl::string_view version) : version_(version) {} // Builds a ResourceVersion in the waitingForServer state. ResourceState() = default; + // Builds a ResourceState with a specific version + ResourceState(absl::string_view version) : version_(version) {} // Self-documenting alias of default constructor. static ResourceState waitingForServer() { return ResourceState(); } + // Self-documenting alias of constructor with version. + static ResourceState withVersion(absl::string_view version) { return ResourceState(version); } // If true, we currently have no version of this resource - we are waiting for the server to // provide us with one. 
bool isWaitingForServer() const { return version_ == absl::nullopt; } + void setAsWaitingForServer() { version_ = absl::nullopt; } + void setVersion(absl::string_view version) { version_ = std::string(version); } + // Must not be called if waitingForServer() == true. std::string version() const { ASSERT(version_.has_value()); @@ -70,20 +76,29 @@ class DeltaSubscriptionState absl::optional version_; }; + OptRef getRequestedResourceState(absl::string_view resource_name); + OptRef getRequestedResourceState(absl::string_view resource_name) const; + + bool isInitialRequestForLegacyWildcard(); + // Not all xDS resources support heartbeats due to there being specific information encoded in // an empty response, which is indistinguishable from a heartbeat in some cases. For now we just // disable heartbeats for these resources (currently only VHDS). const bool supports_heartbeats_; - // Is the subscription is for a wildcard request. - const bool wildcard_; - // A map from resource name to per-resource version. The keys of this map are exactly the resource // names we are currently interested in. Those in the waitingForServer state currently don't have // any version for that resource: we need to inform the server if we lose interest in them, but we // also need to *not* include them in the initial_resource_versions map upon a reconnect. - absl::node_hash_map resource_state_; - + absl::node_hash_map requested_resource_state_; + // A map from resource name to per-resource version. The keys of this map are resource names we + // have received as a part of the wildcard subscription. + absl::node_hash_map wildcard_resource_state_; + // Used for storing resources that we lost interest in, but could + // also be a part of wildcard subscription. + absl::node_hash_map ambiguous_resource_state_; + + bool in_initial_legacy_wildcard_{true}; bool any_request_sent_yet_in_current_stream_{}; // Tracks changes in our subscription interest since the previous DeltaDiscoveryRequest we sent. 
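Editor's note: a resource name lives in at most one of the three maps above (requested_resource_state_, wildcard_resource_state_, ambiguous_resource_state_; see wildcard-resource-state-machine.png). The following compact sketch shows how names move between those buckets when interest is added or removed, with std containers and hypothetical names standing in for the Envoy types.

// Minimal sketch of the three-bucket bookkeeping; not the Envoy class itself.
#include <map>
#include <optional>
#include <string>

struct ResourceBuckets {
  // Explicitly requested; nullopt means "waiting for the server".
  std::map<std::string, std::optional<std::string>> requested;
  // Known only via the wildcard subscription (always versioned).
  std::map<std::string, std::string> wildcard;
  // Unsubscribed locally, but possibly still covered by the wildcard.
  std::map<std::string, std::string> ambiguous;

  void subscribe(const std::string& name) {
    // A name previously known via wildcard (or left ambiguous) keeps its
    // version; otherwise we wait for the server to send one.
    if (auto wit = wildcard.find(name); wit != wildcard.end()) {
      requested[name] = wit->second;
      wildcard.erase(wit);
    } else if (auto ait = ambiguous.find(name); ait != ambiguous.end()) {
      requested[name] = ait->second;
      ambiguous.erase(ait);
    } else {
      requested[name] = std::nullopt;
    }
  }

  void unsubscribe(const std::string& name) {
    auto it = requested.find(name);
    if (it == requested.end()) {
      return;
    }
    // With an active wildcard subscription the server may still serve this
    // resource to us, so park its version as ambiguous instead of dropping it.
    const bool has_wildcard = requested.count("*") != 0;
    if (has_wildcard && it->second.has_value()) {
      ambiguous[name] = *it->second;
    }
    requested.erase(it);
  }
};

Unsubscribing from "*" itself additionally clears the wildcard and ambiguous buckets, as updateSubscriptionInterest() does above.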
@@ -99,8 +114,8 @@ class DeltaSubscriptionStateFactory : public SubscriptionStateFactory makeSubscriptionState(const std::string& type_url, UntypedConfigUpdateCallbacks& callbacks, - OpaqueResourceDecoder&, const bool wildcard) override { - return std::make_unique(type_url, callbacks, dispatcher_, wildcard); + OpaqueResourceDecoder&) override { + return std::make_unique(type_url, callbacks, dispatcher_); } private: diff --git a/source/common/config/xds_mux/grpc_mux_impl.cc b/source/common/config/xds_mux/grpc_mux_impl.cc index c2eba80b4ad8..c3e2d496b7f0 100644 --- a/source/common/config/xds_mux/grpc_mux_impl.cc +++ b/source/common/config/xds_mux/grpc_mux_impl.cc @@ -39,7 +39,6 @@ template GrpcMuxImpl::GrpcMuxImpl(std::unique_ptr subscription_state_factory, bool skip_subsequent_node, const LocalInfo::LocalInfo& local_info, - envoy::config::core::v3::ApiVersion transport_api_version, Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, @@ -52,8 +51,7 @@ GrpcMuxImpl::GrpcMuxImpl(std::unique_ptr subscription_state_fac dynamic_update_callback_handle_(local_info.contextProvider().addDynamicContextUpdateCallback( [this](absl::string_view resource_type_url) { onDynamicContextUpdate(resource_type_url); - })), - transport_api_version_(transport_api_version) { + })) { Config::Utility::checkLocalInfo("ads", local_info); AllMuxes::get().insert(this); } @@ -88,9 +86,8 @@ Config::GrpcMuxWatchPtr GrpcMuxImpl::addWatch( watch_map = watch_maps_.emplace(type_url, std::make_unique(options.use_namespace_matching_)) .first; - subscriptions_.emplace( - type_url, subscription_state_factory_->makeSubscriptionState( - type_url, *watch_maps_[type_url], resource_decoder, resources.empty())); + subscriptions_.emplace(type_url, subscription_state_factory_->makeSubscriptionState( + type_url, *watch_maps_[type_url], resource_decoder)); subscription_ordering_.emplace_back(type_url); } @@ -360,13 +357,12 @@ template class GrpcMuxImpl(dispatcher), skip_subsequent_node, - local_info, transport_api_version, std::move(async_client), dispatcher, - service_method, random, scope, rate_limit_settings) {} + local_info, std::move(async_client), dispatcher, service_method, random, scope, + rate_limit_settings) {} // GrpcStreamCallbacks for GrpcMuxDelta void GrpcMuxDelta::requestOnDemandUpdate(const std::string& type_url, @@ -381,13 +377,12 @@ void GrpcMuxDelta::requestOnDemandUpdate(const std::string& type_url, GrpcMuxSotw::GrpcMuxSotw(Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, - envoy::config::core::v3::ApiVersion transport_api_version, Random::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, const LocalInfo::LocalInfo& local_info, bool skip_subsequent_node) : GrpcMuxImpl(std::make_unique(dispatcher), skip_subsequent_node, - local_info, transport_api_version, std::move(async_client), dispatcher, - service_method, random, scope, rate_limit_settings) {} + local_info, std::move(async_client), dispatcher, service_method, random, scope, + rate_limit_settings) {} Config::GrpcMuxWatchPtr NullGrpcMuxImpl::addWatch(const std::string&, const absl::flat_hash_set&, diff --git a/source/common/config/xds_mux/grpc_mux_impl.h b/source/common/config/xds_mux/grpc_mux_impl.h index a1ec7f2332dd..ee60d0020621 100644 --- a/source/common/config/xds_mux/grpc_mux_impl.h +++ b/source/common/config/xds_mux/grpc_mux_impl.h @@ -57,11 +57,10 @@ class GrpcMuxImpl : public 
GrpcStreamCallbacks, Logger::Loggable { public: GrpcMuxImpl(std::unique_ptr subscription_state_factory, bool skip_subsequent_node, - const LocalInfo::LocalInfo& local_info, - envoy::config::core::v3::ApiVersion transport_api_version, - Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, - const Protobuf::MethodDescriptor& service_method, Random::RandomGenerator& random, - Stats::Scope& scope, const RateLimitSettings& rate_limit_settings); + const LocalInfo::LocalInfo& local_info, Grpc::RawAsyncClientPtr&& async_client, + Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, + Random::RandomGenerator& random, Stats::Scope& scope, + const RateLimitSettings& rate_limit_settings); ~GrpcMuxImpl() override; @@ -73,6 +72,7 @@ class GrpcMuxImpl : public GrpcStreamCallbacks, static void shutdownAll(); void shutdown() override { shutdown_ = true; } + bool isShutdown() { return shutdown_; } // TODO (dmitri-d) return a naked pointer instead of the wrapper once the legacy mux has been // removed and the mux interface can be changed @@ -92,7 +92,6 @@ class GrpcMuxImpl : public GrpcStreamCallbacks, const absl::flat_hash_map>& subscriptions() const { return subscriptions_; } - bool isUnified() const override { return true; } // GrpcStreamCallbacks void onStreamEstablished() override { handleEstablishedStream(); } @@ -151,9 +150,6 @@ class GrpcMuxImpl : public GrpcStreamCallbacks, any_request_sent_yet_in_current_stream_ = value; } const LocalInfo::LocalInfo& localInfo() const { return local_info_; } - const envoy::config::core::v3::ApiVersion& transportApiVersion() const { - return transport_api_version_; - } private: // Checks whether external conditions allow sending a DeltaDiscoveryRequest. (Does not check @@ -204,7 +200,6 @@ class GrpcMuxImpl : public GrpcStreamCallbacks, // this one is up to GrpcMux. const LocalInfo::LocalInfo& local_info_; Common::CallbackHandlePtr dynamic_update_callback_handle_; - const envoy::config::core::v3::ApiVersion transport_api_version_; // True iff Envoy is shutting down; no messages should be sent on the `grpc_stream_` when this is // true because it may contain dangling pointers. 
@@ -216,11 +211,9 @@ class GrpcMuxDelta : public GrpcMuxImpl { public: GrpcMuxDelta(Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, - const Protobuf::MethodDescriptor& service_method, - envoy::config::core::v3::ApiVersion transport_api_version, - Random::RandomGenerator& random, Stats::Scope& scope, - const RateLimitSettings& rate_limit_settings, const LocalInfo::LocalInfo& local_info, - bool skip_subsequent_node); + const Protobuf::MethodDescriptor& service_method, Random::RandomGenerator& random, + Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, + const LocalInfo::LocalInfo& local_info, bool skip_subsequent_node); // GrpcStreamCallbacks void requestOnDemandUpdate(const std::string& type_url, @@ -232,11 +225,9 @@ class GrpcMuxSotw : public GrpcMuxImpl { public: GrpcMuxSotw(Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, - const Protobuf::MethodDescriptor& service_method, - envoy::config::core::v3::ApiVersion transport_api_version, - Random::RandomGenerator& random, Stats::Scope& scope, - const RateLimitSettings& rate_limit_settings, const LocalInfo::LocalInfo& local_info, - bool skip_subsequent_node); + const Protobuf::MethodDescriptor& service_method, Random::RandomGenerator& random, + Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, + const LocalInfo::LocalInfo& local_info, bool skip_subsequent_node); // GrpcStreamCallbacks void requestOnDemandUpdate(const std::string&, const absl::flat_hash_set&) override { @@ -259,7 +250,6 @@ class NullGrpcMuxImpl : public GrpcMux { SubscriptionCallbacks&, OpaqueResourceDecoder&, const SubscriptionOptions&) override; - // legacy mux interface not implemented by unified mux. void requestOnDemandUpdate(const std::string&, const absl::flat_hash_set&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } diff --git a/source/common/config/xds_mux/sotw_subscription_state.cc b/source/common/config/xds_mux/sotw_subscription_state.cc index 90be3a318a2a..77e2446e149d 100644 --- a/source/common/config/xds_mux/sotw_subscription_state.cc +++ b/source/common/config/xds_mux/sotw_subscription_state.cc @@ -44,7 +44,7 @@ void SotwSubscriptionState::markStreamFresh() { void SotwSubscriptionState::handleGoodResponse( const envoy::service::discovery::v3::DiscoveryResponse& message) { - Protobuf::RepeatedPtrField non_heartbeat_resources; + std::vector non_heartbeat_resources; std::vector resources_with_ttl( message.resources().size()); @@ -65,7 +65,7 @@ void SotwSubscriptionState::handleGoodResponse( if (isHeartbeatResource(*decoded_resource, message.version_info())) { continue; } - non_heartbeat_resources.Add()->CopyFrom(any); + non_heartbeat_resources.push_back(std::move(decoded_resource)); } } diff --git a/source/common/config/xds_mux/sotw_subscription_state.h b/source/common/config/xds_mux/sotw_subscription_state.h index 86063198f5a7..0376a22c58a7 100644 --- a/source/common/config/xds_mux/sotw_subscription_state.h +++ b/source/common/config/xds_mux/sotw_subscription_state.h @@ -68,7 +68,7 @@ class SotwSubscriptionStateFactory : public SubscriptionStateFactory makeSubscriptionState(const std::string& type_url, UntypedConfigUpdateCallbacks& callbacks, - OpaqueResourceDecoder& resource_decoder, const bool) override { + OpaqueResourceDecoder& resource_decoder) override { return std::make_unique(type_url, callbacks, dispatcher_, resource_decoder); } diff --git a/source/common/config/xds_mux/subscription_state.h b/source/common/config/xds_mux/subscription_state.h index 9f9b48cd7723..a440b8a5a889 
100644 --- a/source/common/config/xds_mux/subscription_state.h +++ b/source/common/config/xds_mux/subscription_state.h @@ -123,8 +123,7 @@ template class SubscriptionStateFactory { // Note that, outside of tests, we expect callbacks to always be a WatchMap. virtual std::unique_ptr makeSubscriptionState(const std::string& type_url, UntypedConfigUpdateCallbacks& callbacks, - OpaqueResourceDecoder& resource_decoder, - const bool wildcard) PURE; + OpaqueResourceDecoder& resource_decoder) PURE; }; } // namespace XdsMux diff --git a/source/common/conn_pool/conn_pool_base.cc b/source/common/conn_pool/conn_pool_base.cc index 113836ab1364..096f27a20683 100644 --- a/source/common/conn_pool/conn_pool_base.cc +++ b/source/common/conn_pool/conn_pool_base.cc @@ -175,18 +175,23 @@ void ConnPoolImplBase::attachStreamToClient(Envoy::ConnectionPool::ActiveClient& } ENVOY_CONN_LOG(debug, "creating stream", client); + // Latch capacity before updating remaining streams. + uint64_t capacity = client.currentUnusedCapacity(); client.remaining_streams_--; if (client.remaining_streams_ == 0) { ENVOY_CONN_LOG(debug, "maximum streams per connection, DRAINING", client); host_->cluster().stats().upstream_cx_max_requests_.inc(); transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::DRAINING); - } else if (client.numActiveStreams() + 1 >= client.concurrent_stream_limit_) { + } else if (capacity == 1) { // As soon as the new stream is created, the client will be maxed out. transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::BUSY); } // Decrement the capacity, as there's one less stream available for serving. - state_.decrConnectingAndConnectedStreamCapacity(1); + // For HTTP/3, the capacity is updated in newStreamEncoder. + if (trackStreamCapacity()) { + state_.decrConnectingAndConnectedStreamCapacity(1); + } // Track the new active stream. state_.incrActiveStreams(1); num_active_streams_++; @@ -213,14 +218,17 @@ void ConnPoolImplBase::onStreamClosed(Envoy::ConnectionPool::ActiveClient& clien // If the effective client capacity was limited by concurrency, increase connecting capacity. // If the effective client capacity was limited by max total streams, this will not result in an // increment as no capacity is freed up. - if (client.remaining_streams_ > client.concurrent_stream_limit_ - client.numActiveStreams() - 1 || - had_negative_capacity) { + // We don't update the capacity for HTTP/3 as the stream count should only + // increase when a MAX_STREAMS frame is received. + if (trackStreamCapacity() && (client.remaining_streams_ > client.concurrent_stream_limit_ - + client.numActiveStreams() - 1 || + had_negative_capacity)) { state_.incrConnectingAndConnectedStreamCapacity(1); } if (client.state() == ActiveClient::State::DRAINING && client.numActiveStreams() == 0) { // Close out the draining client if we no longer have active streams. 
client.close(); - } else if (client.state() == ActiveClient::State::BUSY) { + } else if (client.state() == ActiveClient::State::BUSY && client.currentUnusedCapacity() != 0) { transitionActiveClientState(client, ActiveClient::State::READY); if (!delay_attaching_stream) { onUpstreamReady(); @@ -296,6 +304,9 @@ void ConnPoolImplBase::onUpstreamReady() { state_.decrPendingStreams(1); pending_streams_.pop_back(); } + if (!pending_streams_.empty()) { + tryCreateNewConnections(); + } } std::list& ConnPoolImplBase::owningList(ActiveClient::State state) { @@ -457,6 +468,16 @@ void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view // this forces part of its cleanup to happen now. client.releaseResources(); + // Again, since we know this object is going to be deferredDelete'd(), we take + // this opportunity to disable and reset the connection duration timer so that + // it doesn't trigger while on the deferred delete list. In theory it is safe + // to handle the CLOSED state in onConnectionDurationTimeout, but we handle + // it here for simplicity and safety anyway. + if (client.connection_duration_timer_) { + client.connection_duration_timer_->disableTimer(); + client.connection_duration_timer_.reset(); + } + dispatcher_.deferredDelete(client.removeFromList(owningList(client.state()))); checkForIdleAndCloseIdleConnsIfDraining(); @@ -473,6 +494,15 @@ void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view ASSERT(client.state() == ActiveClient::State::CONNECTING); transitionActiveClientState(client, ActiveClient::State::READY); + // Now that the active client is ready, set up a timer for max connection duration. + const absl::optional max_connection_duration = + client.parent_.host()->cluster().maxConnectionDuration(); + if (max_connection_duration.has_value()) { + client.connection_duration_timer_ = client.parent_.dispatcher().createTimer( + [&client]() { client.onConnectionDurationTimeout(); }); + client.connection_duration_timer_->enableTimer(max_connection_duration.value()); + } + // At this point, for the mixed ALPN pool, the client may be deleted. Do not // refer to client after this point. onConnected(client); @@ -562,7 +592,7 @@ ActiveClient::ActiveClient(ConnPoolImplBase& parent, uint32_t lifetime_stream_li uint32_t concurrent_stream_limit) : parent_(parent), remaining_streams_(translateZeroToUnlimited(lifetime_stream_limit)), concurrent_stream_limit_(translateZeroToUnlimited(concurrent_stream_limit)), - connect_timer_(parent_.dispatcher().createTimer([this]() -> void { onConnectTimeout(); })) { + connect_timer_(parent_.dispatcher().createTimer([this]() { onConnectTimeout(); })) { conn_connect_ms_ = std::make_unique( parent_.host()->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher().timeSource()); conn_length_ = std::make_unique( @@ -577,10 +607,6 @@ ActiveClient::ActiveClient(ConnPoolImplBase& parent, uint32_t lifetime_stream_li ActiveClient::~ActiveClient() { releaseResourcesBase(); } -void ActiveClient::onEvent(Network::ConnectionEvent event) { - parent_.onConnectionEvent(*this, "", event); -} - void ActiveClient::releaseResourcesBase() { if (!resources_released_) { resources_released_ = true; @@ -600,6 +626,34 @@ void ActiveClient::onConnectTimeout() { close(); } +void ActiveClient::onConnectionDurationTimeout() { + // The connection duration timer should only have started after we left the CONNECTING state. 
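Editor's note: onConnectionDurationTimeout() reduces to a small state decision: ignore the timer for connecting, closed, or draining clients; otherwise start draining, and close immediately when there are no active streams, since no later onStreamClosed() will do it. A sketch of that decision as a pure function over a hypothetical state enum:

// Illustrative sketch of the max-connection-duration decision; names are
// hypothetical stand-ins for the ActiveClient state handling above.
#include <cstdint>

enum class ClientState { Connecting, Ready, Busy, Draining, Closed };
enum class Action { None, Drain, DrainAndClose };

Action onMaxDurationReached(ClientState state, uint64_t active_streams) {
  // Connecting/closed clients should never see this timer (the real code
  // guards those cases with ENVOY_BUG); draining clients need no further work.
  if (state == ClientState::Connecting || state == ClientState::Closed ||
      state == ClientState::Draining) {
    return Action::None;
  }
  // Otherwise drain; with no active streams there is no onStreamClosed() to
  // finish the job later, so close right away.
  return active_streams == 0 ? Action::DrainAndClose : Action::Drain;
}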
+ ENVOY_BUG(state_ != ActiveClient::State::CONNECTING, + "max connection duration reached while connecting"); + + // The connection duration timer should have been disabled and reset in onConnectionEvent + // for closing connections. + ENVOY_BUG(state_ != ActiveClient::State::CLOSED, "max connection duration reached while closed"); + + // There's nothing to do if the client is connecting, closed or draining. + // Two of these cases are bugs (see above), but it is safe to no-op either way. + if (state_ == ActiveClient::State::CONNECTING || state_ == ActiveClient::State::CLOSED || + state_ == ActiveClient::State::DRAINING) { + return; + } + + ENVOY_CONN_LOG(debug, "max connection duration reached, DRAINING", *this); + parent_.host()->cluster().stats().upstream_cx_max_duration_reached_.inc(); + parent_.transitionActiveClientState(*this, Envoy::ConnectionPool::ActiveClient::State::DRAINING); + + // Close out the draining client if we no longer have active streams. + // We have to do this here because there won't be an onStreamClosed (because there are + // no active streams) to do it for us later. + if (numActiveStreams() == 0) { + close(); + } +} + void ActiveClient::drain() { if (currentUnusedCapacity() <= 0) { return; diff --git a/source/common/conn_pool/conn_pool_base.h b/source/common/conn_pool/conn_pool_base.h index dafc845f8891..29a714410f8c 100644 --- a/source/common/conn_pool/conn_pool_base.h +++ b/source/common/conn_pool/conn_pool_base.h @@ -38,13 +38,15 @@ class ActiveClient : public LinkedObject, void releaseResourcesBase(); // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override; void onAboveWriteBufferHighWatermark() override {} void onBelowWriteBufferLowWatermark() override {} // Called if the connection does not complete within the cluster's connectTimeout() void onConnectTimeout(); + // Called if the maximum connection duration is reached. + void onConnectionDurationTimeout(); + // Returns the concurrent stream limit, accounting for if the total stream limit // is less than the concurrent stream limit. uint32_t effectiveConcurrentStreamLimit() const { @@ -54,7 +56,7 @@ class ActiveClient : public LinkedObject, // Returns the application protocol, or absl::nullopt for TCP. virtual absl::optional protocol() const PURE; - int64_t currentUnusedCapacity() const { + virtual int64_t currentUnusedCapacity() const { int64_t remaining_concurrent_streams = static_cast(concurrent_stream_limit_) - numActiveStreams(); @@ -100,12 +102,18 @@ class ActiveClient : public LinkedObject, virtual void drain(); ConnPoolImplBase& parent_; + // The count of remaining streams allowed for this connection. + // This will start out as the total number of streams per connection if capped + // by configuration, or it will be set to std::numeric_limits::max() to be + // (functionally) unlimited. + // TODO: this could be moved to an optional to make it actually unlimited. uint32_t remaining_streams_; uint32_t concurrent_stream_limit_; Upstream::HostDescriptionConstSharedPtr real_host_description_; Stats::TimespanPtr conn_connect_ms_; Stats::TimespanPtr conn_length_; Event::TimerPtr connect_timer_; + Event::TimerPtr connection_duration_timer_; bool resources_released_{false}; bool timed_out_{false}; @@ -145,6 +153,10 @@ class ConnPoolImplBase : protected Logger::Loggable { virtual ~ConnPoolImplBase(); void deleteIsPendingImpl(); + // By default, the connection pool will track connected and connecting stream + // capacity as streams are created and destroyed. 
QUIC does custom stream + // accounting so will override this to false. + virtual bool trackStreamCapacity() { return true; } // A helper function to get the specific context type from the base class context. template T& typedContext(AttachContext& context) { @@ -231,6 +243,9 @@ class ConnPoolImplBase : protected Logger::Loggable { void decrClusterStreamCapacity(uint32_t delta) { state_.decrConnectingAndConnectedStreamCapacity(delta); } + void incrClusterStreamCapacity(uint32_t delta) { + state_.incrConnectingAndConnectedStreamCapacity(delta); + } void dumpState(std::ostream& os, int indent_level = 0) const { const char* spaces = spacesForLevel(indent_level); os << spaces << "ConnPoolImplBase " << this << DUMP_MEMBER(ready_clients_.size()) @@ -252,6 +267,9 @@ class ConnPoolImplBase : protected Logger::Loggable { connecting_stream_capacity_ -= delta; } + // Called when an upstream is ready to serve pending streams. + void onUpstreamReady(); + protected: virtual void onConnected(Envoy::ConnectionPool::ActiveClient&) {} @@ -262,7 +280,6 @@ class ConnPoolImplBase : protected Logger::Loggable { NoConnectionRateLimited, CreatedButRateLimited, }; - // Creates up to 3 connections, based on the preconnect ratio. // Returns the ConnectionResult of the last attempt. ConnectionResult tryCreateNewConnections(); @@ -339,7 +356,6 @@ class ConnPoolImplBase : protected Logger::Loggable { // True iff this object is in the deferred delete list. bool deferred_deleting_{false}; - void onUpstreamReady(); Event::SchedulableCallbackPtr upstream_ready_cb_; }; diff --git a/source/common/event/BUILD b/source/common/event/BUILD index 11ba35646e9a..22efdc958ac2 100644 --- a/source/common/event/BUILD +++ b/source/common/event/BUILD @@ -34,8 +34,8 @@ envoy_cc_library( ":dispatcher_includes", ":libevent_scheduler_lib", ":real_time_system_lib", - ":signal_lib", ":scaled_range_timer_manager_lib", + ":signal_lib", "//envoy/common:scope_tracker_interface", "//envoy/common:time_interface", "//envoy/event:signal_interface", @@ -44,14 +44,10 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/common:thread_lib", "//source/common/filesystem:watcher_lib", - "//source/common/network:dns_lib", "//source/common/network:connection_lib", "//source/common/network:listener_lib", "@envoy_api//envoy/config/overload/v3:pkg_cc_proto", - ] + select({ - "//bazel:apple": ["//source/common/network:apple_dns_lib"], - "//conditions:default": [], - }), + ], ) envoy_cc_library( diff --git a/source/common/event/dispatcher_impl.cc b/source/common/event/dispatcher_impl.cc index a7a30838aee9..1f28e9a24fb8 100644 --- a/source/common/event/dispatcher_impl.cc +++ b/source/common/event/dispatcher_impl.cc @@ -23,7 +23,6 @@ #include "source/common/event/timer_impl.h" #include "source/common/filesystem/watcher_impl.h" #include "source/common/network/connection_impl.h" -#include "source/common/network/dns_impl.h" #include "source/common/network/tcp_listener_impl.h" #include "source/common/network/udp_listener_impl.h" #include "source/common/runtime/runtime_features.h" @@ -34,10 +33,6 @@ #include "source/common/signal/signal_action.h" #endif -#ifdef __APPLE__ -#include "source/common/network/apple_dns_impl.h" -#endif - namespace Envoy { namespace Event { @@ -157,30 +152,6 @@ DispatcherImpl::createClientConnection(Network::Address::InstanceConstSharedPtr std::move(transport_socket), options); } -Network::DnsResolverSharedPtr DispatcherImpl::createDnsResolver( - const std::vector& resolvers, - const 
envoy::config::core::v3::DnsResolverOptions& dns_resolver_options) { - ASSERT(isThreadSafe()); -#ifdef __APPLE__ - static bool use_apple_api_for_dns_lookups = - Runtime::runtimeFeatureEnabled("envoy.restart_features.use_apple_api_for_dns_lookups"); - if (use_apple_api_for_dns_lookups) { - RELEASE_ASSERT( - resolvers.empty(), - "defining custom resolvers is not possible when using Apple APIs for DNS resolution. " - "Apple's API only allows overriding DNS resolvers via system settings. Delete resolvers " - "config or disable the envoy.restart_features.use_apple_api_for_dns_lookups runtime " - "feature."); - RELEASE_ASSERT(!dns_resolver_options.use_tcp_for_dns_lookups(), - "using TCP for DNS lookups is not possible when using Apple APIs for DNS " - "resolution. Apple' API only uses UDP for DNS resolution. Use UDP or disable " - "the envoy.restart_features.use_apple_api_for_dns_lookups runtime feature."); - return std::make_shared(*this, api_.rootScope()); - } -#endif - return std::make_shared(*this, resolvers, dns_resolver_options); -} - FileEventPtr DispatcherImpl::createFileEvent(os_fd_t fd, FileReadyCb cb, FileTriggerType trigger, uint32_t events) { ASSERT(isThreadSafe()); diff --git a/source/common/event/dispatcher_impl.h b/source/common/event/dispatcher_impl.h index d98b5267c1e5..513ebc9d1b2b 100644 --- a/source/common/event/dispatcher_impl.h +++ b/source/common/event/dispatcher_impl.h @@ -66,9 +66,6 @@ class DispatcherImpl : Logger::Loggable, Network::Address::InstanceConstSharedPtr source_address, Network::TransportSocketPtr&& transport_socket, const Network::ConnectionSocket::OptionsSharedPtr& options) override; - Network::DnsResolverSharedPtr createDnsResolver( - const std::vector& resolvers, - const envoy::config::core::v3::DnsResolverOptions& dns_resolver_options) override; FileEventPtr createFileEvent(os_fd_t fd, FileReadyCb cb, FileTriggerType trigger, uint32_t events) override; Filesystem::WatcherPtr createFilesystemWatcher() override; diff --git a/source/common/event/file_event_impl.cc b/source/common/event/file_event_impl.cc index 5b83e694d548..4c68e6193fc4 100644 --- a/source/common/event/file_event_impl.cc +++ b/source/common/event/file_event_impl.cc @@ -55,6 +55,9 @@ void FileEventImpl::activate(uint32_t events) { void FileEventImpl::assignEvents(uint32_t events, event_base* base) { ASSERT(dispatcher_.isThreadSafe()); ASSERT(base != nullptr); + // TODO(antoniovicente) remove this once ConnectionImpl can + // handle Read and Close events delivered together. 
+ ASSERT(!((events & FileReadyType::Read) && (events & FileReadyType::Closed))); enabled_events_ = events; event_assign( &raw_event_, base, fd_, @@ -120,7 +123,6 @@ void FileEventImpl::unregisterEventIfEmulatedEdge(uint32_t event) { ASSERT(dispatcher_.isThreadSafe()); // This constexpr if allows the compiler to optimize away the function on POSIX if constexpr (PlatformDefaultTriggerType == FileTriggerType::EmulatedEdge) { - ASSERT((event & (FileReadyType::Read | FileReadyType::Write)) == event); if (trigger_ == FileTriggerType::EmulatedEdge) { auto new_event_mask = enabled_events_ & ~event; updateEvents(new_event_mask); @@ -156,7 +158,6 @@ void FileEventImpl::mergeInjectedEventsAndRunCb(uint32_t events) { injected_activation_events_ = injected_activation_events_ & ~FileReadyType::Read; } } - events |= injected_activation_events_; injected_activation_events_ = 0; activation_cb_->cancel(); diff --git a/source/common/filter/config_discovery_impl.cc b/source/common/filter/config_discovery_impl.cc index 74acc6b8383e..5b694d63b427 100644 --- a/source/common/filter/config_discovery_impl.cc +++ b/source/common/filter/config_discovery_impl.cc @@ -132,19 +132,17 @@ void FilterConfigSubscription::onConfigUpdate( for (auto* provider : filter_config_providers_) { provider->validateTerminalFilter(filter_config_name_, factory.name(), is_terminal_filter); } - Envoy::Http::FilterFactoryCb factory_callback = - factory.createFilterFactoryFromProto(*message, stat_prefix_, factory_context_); ENVOY_LOG(debug, "Updating filter config {}", filter_config_name_); Common::applyToAllWithCleanup( filter_config_providers_, - [&factory_callback, &version_info](DynamicFilterConfigProviderImplBase* provider, - std::shared_ptr cleanup) { - provider->onConfigUpdate(factory_callback, version_info, [cleanup] {}); + [&message, &version_info](DynamicFilterConfigProviderImplBase* provider, + std::shared_ptr cleanup) { + provider->onConfigUpdate(*message, version_info, [cleanup] {}); }, [this]() { stats_.config_reload_.inc(); }); last_config_hash_ = new_hash; - last_config_ = factory_callback; + last_config_ = std::move(message); last_type_url_ = type_url; last_version_info_ = version_info; last_filter_name_ = factory.name(); @@ -165,7 +163,7 @@ void FilterConfigSubscription::onConfigUpdate( [this]() { stats_.config_reload_.inc(); }); last_config_hash_ = 0; - last_config_ = absl::nullopt; + last_config_ = nullptr; last_type_url_ = ""; last_filter_is_terminal_ = false; last_filter_name_ = ""; @@ -222,7 +220,7 @@ void FilterConfigProviderManagerImplBase::applyLastOrDefaultConfig( // update arrives first. In this case, use the default config, increment a metric, // and the applied config eventually converges once ECDS update arrives. 
bool last_config_valid = false; - if (subscription->lastConfig().has_value()) { + if (subscription->lastConfig()) { TRY_ASSERT_MAIN_THREAD { provider.validateTypeUrl(subscription->lastTypeUrl()); provider.validateTerminalFilter(filter_config_name, subscription->lastFilterName(), @@ -235,7 +233,7 @@ void FilterConfigProviderManagerImplBase::applyLastOrDefaultConfig( subscription->incrementConflictCounter(); } if (last_config_valid) { - provider.onConfigUpdate(subscription->lastConfig().value(), subscription->lastVersionInfo(), + provider.onConfigUpdate(*subscription->lastConfig(), subscription->lastVersionInfo(), nullptr); } } @@ -265,16 +263,20 @@ DynamicFilterConfigProviderPtr FilterConfigProviderManagerImpl::createDynamicFil require_type_urls.emplace(factory_type_url); } - Envoy::Http::FilterFactoryCb default_config = nullptr; + ProtobufTypes::MessagePtr default_config; if (config_source.has_default_config()) { - default_config = getDefaultConfig(config_source.default_config(), filter_config_name, - factory_context, stat_prefix, last_filter_in_filter_chain, - filter_chain_type, require_type_urls); + default_config = + getDefaultConfig(config_source.default_config(), filter_config_name, factory_context, + last_filter_in_filter_chain, filter_chain_type, require_type_urls); } auto provider = std::make_unique( - subscription, require_type_urls, factory_context, default_config, last_filter_in_filter_chain, - filter_chain_type); + subscription, require_type_urls, factory_context, std::move(default_config), + last_filter_in_filter_chain, filter_chain_type, + [this, stat_prefix, + &factory_context](const Protobuf::Message& message) -> Envoy::Http::FilterFactoryCb { + return instantiateFilterFactory(message, stat_prefix, factory_context); + }); // Ensure the subscription starts if it has not already. 
if (config_source.apply_default_config_without_warming()) { @@ -284,11 +286,11 @@ DynamicFilterConfigProviderPtr FilterConfigProviderManagerImpl::createDynamicFil return provider; } -Http::FilterFactoryCb HttpFilterConfigProviderManagerImpl::getDefaultConfig( +ProtobufTypes::MessagePtr HttpFilterConfigProviderManagerImpl::getDefaultConfig( const ProtobufWkt::Any& proto_config, const std::string& filter_config_name, - Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix, - bool last_filter_in_filter_chain, const std::string& filter_chain_type, - const absl::flat_hash_set require_type_urls) const { + Server::Configuration::FactoryContext& factory_context, bool last_filter_in_filter_chain, + const std::string& filter_chain_type, + const absl::flat_hash_set& require_type_urls) const { auto* default_factory = Config::Utility::getFactoryByType( proto_config); @@ -304,7 +306,15 @@ Http::FilterFactoryCb HttpFilterConfigProviderManagerImpl::getDefaultConfig( filter_config_name, default_factory->name(), filter_chain_type, default_factory->isTerminalFilterByProto(*message, factory_context), last_filter_in_filter_chain); - return default_factory->createFilterFactoryFromProto(*message, stat_prefix, factory_context); + return message; +} + +Http::FilterFactoryCb HttpFilterConfigProviderManagerImpl::instantiateFilterFactory( + const Protobuf::Message& message, const std::string& stat_prefix, + Server::Configuration::FactoryContext& factory_context) const { + auto* factory = Registry::FactoryRegistry< + Server::Configuration::NamedHttpFilterConfigFactory>::getFactoryByType(message.GetTypeName()); + return factory->createFilterFactoryFromProto(message, stat_prefix, factory_context); } } // namespace Filter diff --git a/source/common/filter/config_discovery_impl.h b/source/common/filter/config_discovery_impl.h index 7646f4217699..4183ffe41fbb 100644 --- a/source/common/filter/config_discovery_impl.h +++ b/source/common/filter/config_discovery_impl.h @@ -31,8 +31,7 @@ using FilterConfigSubscriptionSharedPtr = std::shared_ptr { +class DynamicFilterConfigProviderImplBase : public Config::DynamicExtensionConfigProviderBase { public: DynamicFilterConfigProviderImplBase(FilterConfigSubscriptionSharedPtr& subscription, const absl::flat_hash_set& require_type_urls, @@ -66,17 +65,17 @@ class DynamicFilterConfigProviderImplBase class DynamicFilterConfigProviderImpl : public DynamicFilterConfigProviderImplBase, public DynamicFilterConfigProvider { public: - DynamicFilterConfigProviderImpl(FilterConfigSubscriptionSharedPtr& subscription, - const absl::flat_hash_set& require_type_urls, - Server::Configuration::FactoryContext& factory_context, - Envoy::Http::FilterFactoryCb default_config, - bool last_filter_in_filter_chain, - const std::string& filter_chain_type) + DynamicFilterConfigProviderImpl( + FilterConfigSubscriptionSharedPtr& subscription, + const absl::flat_hash_set& require_type_urls, + Server::Configuration::FactoryContext& factory_context, + ProtobufTypes::MessagePtr&& default_config, bool last_filter_in_filter_chain, + const std::string& filter_chain_type, + std::function factory_cb_fn) : DynamicFilterConfigProviderImplBase(subscription, require_type_urls, last_filter_in_filter_chain, filter_chain_type), - default_configuration_(default_config ? 
absl::make_optional(default_config) - : absl::nullopt), - tls_(factory_context.threadLocal()) { + default_configuration_(std::move(default_config)), tls_(factory_context.threadLocal()), + factory_cb_fn_(factory_cb_fn) { tls_.set([](Event::Dispatcher&) { return std::make_shared(); }); }; @@ -84,9 +83,10 @@ class DynamicFilterConfigProviderImpl : public DynamicFilterConfigProviderImplBa const std::string& name() override { return DynamicFilterConfigProviderImplBase::name(); } absl::optional config() override { return tls_->config_; } - // Config::DynamicExtensionConfigProvider - void onConfigUpdate(Envoy::Http::FilterFactoryCb config, const std::string&, + // Config::DynamicExtensionConfigProviderBase + void onConfigUpdate(const Protobuf::Message& message, const std::string&, Config::ConfigAppliedCb cb) override { + const Envoy::Http::FilterFactoryCb config = factory_cb_fn_(message); tls_.runOnAllThreads( [config, cb](OptRef tls) { tls->config_ = config; @@ -102,12 +102,15 @@ class DynamicFilterConfigProviderImpl : public DynamicFilterConfigProviderImplBa } void onConfigRemoved(Config::ConfigAppliedCb applied_on_all_threads) override { + const absl::optional default_config = + default_configuration_ ? absl::make_optional(factory_cb_fn_(*default_configuration_)) + : absl::nullopt; tls_.runOnAllThreads( - [config = default_configuration_](OptRef tls) { tls->config_ = config; }, - [this, applied_on_all_threads]() { + [config = default_config](OptRef tls) { tls->config_ = config; }, + [this, default_config, applied_on_all_threads]() { // This happens after all workers have discarded the previous config so it can be safely // deleted on the main thread by an update with the new config. - this->current_config_ = default_configuration_; + this->current_config_ = default_config; if (applied_on_all_threads) { applied_on_all_threads(); } @@ -129,8 +132,9 @@ class DynamicFilterConfigProviderImpl : public DynamicFilterConfigProviderImplBa // Currently applied configuration to ensure that the main thread deletes the last reference to // it. 
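Editor's note: the provider now stores the last accepted configuration as a proto message and turns it into a FilterFactoryCb on demand through the injected factory_cb_fn_, so instantiation happens per provider (with its own stat prefix and factory context) rather than once in the subscription. A rough sketch of that deferred-instantiation shape, with hypothetical stand-in types:

// Sketch of "store the config, instantiate per provider". Hypothetical types.
#include <functional>
#include <memory>
#include <string>
#include <vector>

struct Config {            // stand-in for the opaque proto message
  std::string payload;
};
using FilterFactoryCb = std::function<void()>;
using InstantiateFn = std::function<FilterFactoryCb(const Config&)>;

struct Provider {
  InstantiateFn instantiate; // injected; knows its own stat prefix/context
  FilterFactoryCb current;   // currently applied factory
  void onConfigUpdate(const Config& c) { current = instantiate(c); }
};

struct Subscription {
  std::shared_ptr<Config> last_config; // kept as a message, not a factory
  std::vector<Provider*> providers;
  void onConfigUpdate(std::shared_ptr<Config> c) {
    last_config = std::move(c);
    for (Provider* p : providers) {
      p->onConfigUpdate(*last_config); // each provider builds its own callback
    }
  }
};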
absl::optional current_config_{absl::nullopt}; - const absl::optional default_configuration_; + const ProtobufTypes::MessagePtr default_configuration_; ThreadLocal::TypedSlot tls_; + const std::function factory_cb_fn_; }; /** @@ -168,7 +172,7 @@ class FilterConfigSubscription const Init::SharedTargetImpl& initTarget() { return init_target_; } const std::string& name() { return filter_config_name_; } - const absl::optional& lastConfig() { return last_config_; } + const Protobuf::Message* lastConfig() { return last_config_.get(); } const std::string& lastTypeUrl() { return last_type_url_; } const std::string& lastVersionInfo() { return last_version_info_; } const std::string& lastFilterName() { return last_filter_name_; } @@ -189,7 +193,7 @@ class FilterConfigSubscription const std::string filter_config_name_; uint64_t last_config_hash_{0ul}; - absl::optional last_config_{absl::nullopt}; + ProtobufTypes::MessagePtr last_config_; std::string last_type_url_; std::string last_version_info_; std::string last_filter_name_; @@ -271,22 +275,27 @@ class FilterConfigProviderManagerImpl : public FilterConfigProviderManagerImplBa } protected: - virtual Http::FilterFactoryCb + virtual ProtobufTypes::MessagePtr getDefaultConfig(const ProtobufWkt::Any& proto_config, const std::string& filter_config_name, Server::Configuration::FactoryContext& factory_context, - const std::string& stat_prefix, bool last_filter_in_filter_chain, - const std::string& filter_chain_type, - const absl::flat_hash_set require_type_urls) const PURE; + bool last_filter_in_filter_chain, const std::string& filter_chain_type, + const absl::flat_hash_set& require_type_urls) const PURE; + + virtual Http::FilterFactoryCb + instantiateFilterFactory(const Protobuf::Message& message, const std::string& stat_prefix, + Server::Configuration::FactoryContext& factory_context) const PURE; }; class HttpFilterConfigProviderManagerImpl : public FilterConfigProviderManagerImpl { protected: - Http::FilterFactoryCb + ProtobufTypes::MessagePtr getDefaultConfig(const ProtobufWkt::Any& proto_config, const std::string& filter_config_name, Server::Configuration::FactoryContext& factory_context, - const std::string& stat_prefix, bool last_filter_in_filter_chain, - const std::string& filter_chain_type, - const absl::flat_hash_set require_type_urls) const override; + bool last_filter_in_filter_chain, const std::string& filter_chain_type, + const absl::flat_hash_set& require_type_urls) const override; + Http::FilterFactoryCb + instantiateFilterFactory(const Protobuf::Message& message, const std::string& stat_prefix, + Server::Configuration::FactoryContext& factory_context) const override; }; } // namespace Filter diff --git a/source/common/formatter/substitution_formatter.cc b/source/common/formatter/substitution_formatter.cc index cc11f6cf0d3e..51da65e89e36 100644 --- a/source/common/formatter/substitution_formatter.cc +++ b/source/common/formatter/substitution_formatter.cc @@ -716,6 +716,26 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { } else if (field_name == "BYTES_RECEIVED") { field_extractor_ = std::make_unique( [](const StreamInfo::StreamInfo& stream_info) { return stream_info.bytesReceived(); }); + } else if (field_name == "UPSTREAM_WIRE_BYTES_RECEIVED") { + field_extractor_ = std::make_unique( + [](const StreamInfo::StreamInfo& stream_info) { + return stream_info.getUpstreamBytesMeter()->wireBytesReceived(); + }); + } else if (field_name == "UPSTREAM_HEADER_BYTES_RECEIVED") { + field_extractor_ = std::make_unique( + 
[](const StreamInfo::StreamInfo& stream_info) { + return stream_info.getUpstreamBytesMeter()->headerBytesReceived(); + }); + } else if (field_name == "DOWNSTREAM_WIRE_BYTES_RECEIVED") { + field_extractor_ = std::make_unique( + [](const StreamInfo::StreamInfo& stream_info) { + return stream_info.getDownstreamBytesMeter()->wireBytesReceived(); + }); + } else if (field_name == "DOWNSTREAM_HEADER_BYTES_RECEIVED") { + field_extractor_ = std::make_unique( + [](const StreamInfo::StreamInfo& stream_info) { + return stream_info.getDownstreamBytesMeter()->headerBytesReceived(); + }); } else if (field_name == "PROTOCOL") { field_extractor_ = std::make_unique( [](const StreamInfo::StreamInfo& stream_info) { @@ -739,6 +759,26 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { } else if (field_name == "BYTES_SENT") { field_extractor_ = std::make_unique( [](const StreamInfo::StreamInfo& stream_info) { return stream_info.bytesSent(); }); + } else if (field_name == "UPSTREAM_WIRE_BYTES_SENT") { + field_extractor_ = std::make_unique( + [](const StreamInfo::StreamInfo& stream_info) { + return stream_info.getUpstreamBytesMeter()->wireBytesSent(); + }); + } else if (field_name == "UPSTREAM_HEADER_BYTES_SENT") { + field_extractor_ = std::make_unique( + [](const StreamInfo::StreamInfo& stream_info) { + return stream_info.getUpstreamBytesMeter()->headerBytesSent(); + }); + } else if (field_name == "DOWNSTREAM_WIRE_BYTES_SENT") { + field_extractor_ = std::make_unique( + [](const StreamInfo::StreamInfo& stream_info) { + return stream_info.getDownstreamBytesMeter()->wireBytesSent(); + }); + } else if (field_name == "DOWNSTREAM_HEADER_BYTES_SENT") { + field_extractor_ = std::make_unique( + [](const StreamInfo::StreamInfo& stream_info) { + return stream_info.getDownstreamBytesMeter()->headerBytesSent(); + }); } else if (field_name == "DURATION") { field_extractor_ = std::make_unique( [](const StreamInfo::StreamInfo& stream_info) { return stream_info.requestComplete(); }); diff --git a/source/common/grpc/async_client_impl.cc b/source/common/grpc/async_client_impl.cc index dd63a6bac88f..9bb41f34f850 100644 --- a/source/common/grpc/async_client_impl.cc +++ b/source/common/grpc/async_client_impl.cc @@ -9,6 +9,8 @@ #include "source/common/http/header_map_impl.h" #include "source/common/http/utility.h" +#include "absl/strings/str_cat.h" + namespace Envoy { namespace Grpc { @@ -226,10 +228,14 @@ AsyncRequestImpl::AsyncRequestImpl(AsyncClientImpl& parent, absl::string_view se : AsyncStreamImpl(parent, service_full_name, method_name, *this, options), request_(std::move(request)), callbacks_(callbacks) { - current_span_ = parent_span.spawnChild(Tracing::EgressConfig::get(), - "async " + parent.remote_cluster_name_ + " egress", - parent.time_source_.systemTime()); + current_span_ = + parent_span.spawnChild(Tracing::EgressConfig::get(), + absl::StrCat("async ", service_full_name, ".", method_name, " egress"), + parent.time_source_.systemTime()); current_span_->setTag(Tracing::Tags::get().UpstreamCluster, parent.remote_cluster_name_); + current_span_->setTag(Tracing::Tags::get().UpstreamAddress, parent.host_name_.empty() + ? 
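The formatter additions above register new %...% command operators that read from the per-stream byte meters. A rough illustration of the name-to-extractor dispatch they extend, using illustrative types rather than Envoy's StreamInfoFormatter:

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

struct BytesMeterSketch {
  uint64_t wire_bytes_received = 0, header_bytes_received = 0;
  uint64_t wire_bytes_sent = 0, header_bytes_sent = 0;
};
struct StreamInfoSketch { BytesMeterSketch upstream, downstream; };

using Extractor = std::function<uint64_t(const StreamInfoSketch&)>;

const std::unordered_map<std::string, Extractor>& extractors() {
  static const auto* map = new std::unordered_map<std::string, Extractor>{
      {"UPSTREAM_WIRE_BYTES_RECEIVED", [](const auto& s) { return s.upstream.wire_bytes_received; }},
      {"UPSTREAM_HEADER_BYTES_RECEIVED", [](const auto& s) { return s.upstream.header_bytes_received; }},
      {"DOWNSTREAM_WIRE_BYTES_SENT", [](const auto& s) { return s.downstream.wire_bytes_sent; }},
      {"DOWNSTREAM_HEADER_BYTES_SENT", [](const auto& s) { return s.downstream.header_bytes_sent; }},
  };
  return *map;
}

int main() {
  StreamInfoSketch info;
  info.upstream.wire_bytes_received = 1234;
  std::cout << extractors().at("UPSTREAM_WIRE_BYTES_RECEIVED")(info) << "\n";  // 1234
}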
parent.remote_cluster_name_ + : parent.host_name_); current_span_->setTag(Tracing::Tags::get().Component, Tracing::Tags::get().Proxy); } diff --git a/source/common/grpc/google_async_client_impl.cc b/source/common/grpc/google_async_client_impl.cc index 8e4539536cdc..dea6cc94e869 100644 --- a/source/common/grpc/google_async_client_impl.cc +++ b/source/common/grpc/google_async_client_impl.cc @@ -13,6 +13,7 @@ #include "source/common/router/header_parser.h" #include "source/common/tracing/http_tracer_impl.h" +#include "absl/strings/str_cat.h" #include "grpcpp/support/proto_buffer_reader.h" namespace Envoy { @@ -80,7 +81,7 @@ GoogleAsyncClientImpl::GoogleAsyncClientImpl(Event::Dispatcher& dispatcher, const envoy::config::core::v3::GrpcService& config, Api::Api& api, const StatNames& stat_names) : dispatcher_(dispatcher), tls_(tls), stat_prefix_(config.google_grpc().stat_prefix()), - scope_(scope), + target_uri_(config.google_grpc().target_uri()), scope_(scope), per_stream_buffer_limit_bytes_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( config.google_grpc(), per_stream_buffer_limit_bytes, DefaultBufferLimitBytes)), metadata_parser_( @@ -425,10 +426,12 @@ GoogleAsyncRequestImpl::GoogleAsyncRequestImpl( Tracing::Span& parent_span, const Http::AsyncClient::RequestOptions& options) : GoogleAsyncStreamImpl(parent, service_full_name, method_name, *this, options), request_(std::move(request)), callbacks_(callbacks) { - current_span_ = parent_span.spawnChild(Tracing::EgressConfig::get(), - "async " + parent.stat_prefix_ + " egress", - parent.timeSource().systemTime()); + current_span_ = + parent_span.spawnChild(Tracing::EgressConfig::get(), + absl::StrCat("async ", service_full_name, ".", method_name, " egress"), + parent.timeSource().systemTime()); current_span_->setTag(Tracing::Tags::get().UpstreamCluster, parent.stat_prefix_); + current_span_->setTag(Tracing::Tags::get().UpstreamAddress, parent.target_uri_); current_span_->setTag(Tracing::Tags::get().Component, Tracing::Tags::get().Proxy); } diff --git a/source/common/grpc/google_async_client_impl.h b/source/common/grpc/google_async_client_impl.h index 4977ed219132..bd43b1103e9f 100644 --- a/source/common/grpc/google_async_client_impl.h +++ b/source/common/grpc/google_async_client_impl.h @@ -198,6 +198,7 @@ class GoogleAsyncClientImpl final : public RawAsyncClient, Logger::Loggable active_streams_; const std::string stat_prefix_; + const std::string target_uri_; Stats::ScopeSharedPtr scope_; GoogleAsyncClientStats stats_; uint64_t per_stream_buffer_limit_bytes_; diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 38eef19b2dcb..6298fe50b3b2 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -496,7 +496,6 @@ envoy_cc_library( "abseil_optional", ], deps = [ - ":legacy_path_canonicalizer", "//envoy/http:header_map_interface", "//source/common/common:logger_lib", "//source/common/runtime:runtime_features_lib", @@ -504,13 +503,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "legacy_path_canonicalizer", - srcs = ["legacy_path_canonicalizer.cc"], - hdrs = ["legacy_path_canonicalizer.h"], - deps = ["//source/common/chromium_url"], -) - envoy_cc_library( name = "request_id_extension_lib", srcs = [ diff --git a/source/common/http/alternate_protocols_cache_impl.cc b/source/common/http/alternate_protocols_cache_impl.cc index afbc37f75ca2..c1c2d4bf1892 100644 --- a/source/common/http/alternate_protocols_cache_impl.cc +++ b/source/common/http/alternate_protocols_cache_impl.cc @@ -3,14 +3,38 @@ #include 
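The tracing changes above rename the client span from the cluster or stat prefix to the gRPC service and method, and tag the upstream address with the host name or target URI, falling back to the cluster name when none is set. A tiny sketch of those two conventions, using plain std::string concatenation in place of absl::StrCat:

#include <iostream>
#include <string>

std::string spanName(const std::string& service, const std::string& method) {
  return "async " + service + "." + method + " egress";
}

std::string upstreamAddressTag(const std::string& host_name, const std::string& cluster_name) {
  // Fall back to the cluster name when no explicit host name is configured.
  return host_name.empty() ? cluster_name : host_name;
}

int main() {
  std::cout << spanName("helloworld.Greeter", "SayHello") << "\n";  // async helloworld.Greeter.SayHello egress
  std::cout << upstreamAddressTag("", "grpc_backend") << "\n";      // grpc_backend
}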
"source/common/common/logger.h" #include "quiche/spdy/core/spdy_alt_svc_wire_format.h" +#include "re2/re2.h" namespace Envoy { namespace Http { namespace { -std::string originToString(const AlternateProtocolsCache::Origin& origin) { + +struct RegexHolder { + RegexHolder() : origin_regex("(.*)://(.*):(\\d+)") {} + + const re2::RE2 origin_regex; +}; + +using ConstRegexHolder = ConstSingleton; + +} // namespace + +std::string +AlternateProtocolsCacheImpl::originToString(const AlternateProtocolsCache::Origin& origin) { return absl::StrCat(origin.scheme_, "://", origin.hostname_, ":", origin.port_); } -} // namespace + +absl::optional +AlternateProtocolsCacheImpl::stringToOrigin(const std::string& str) { + const re2::RE2& origin_regex = ConstRegexHolder::get().origin_regex; + std::string scheme; + std::string hostname; + int port = 0; + if (re2::RE2::FullMatch(str.c_str(), origin_regex, &scheme, &hostname, &port)) { + return AlternateProtocolsCache::Origin(scheme, hostname, port); + } + return {}; +} std::string AlternateProtocolsCacheImpl::protocolsToStringForCache( const std::vector& protocols, TimeSource& /*time_source*/) { @@ -66,12 +90,37 @@ AlternateProtocolsCacheImpl::protocolsFromString(absl::string_view alt_svc_strin AlternateProtocolsCacheImpl::AlternateProtocolsCacheImpl( TimeSource& time_source, std::unique_ptr&& key_value_store, size_t max_entries) : time_source_(time_source), key_value_store_(std::move(key_value_store)), - max_entries_(max_entries > 0 ? max_entries : 1024) {} + max_entries_(max_entries > 0 ? max_entries : 1024) { + if (key_value_store_) { + KeyValueStore::ConstIterateCb load = [this](const std::string& key, const std::string& value) { + absl::optional> protocols = + protocolsFromString(value, time_source_, true); + absl::optional origin = stringToOrigin(key); + if (protocols.has_value() && origin.has_value()) { + setAlternativesImpl(origin.value(), protocols.value()); + } else { + ENVOY_LOG(warn, + fmt::format("Unable to parse cache entry with key: {} value: {}", key, value)); + } + return KeyValueStore::Iterate::Continue; + }; + key_value_store_->iterate(load); + } +} AlternateProtocolsCacheImpl::~AlternateProtocolsCacheImpl() = default; void AlternateProtocolsCacheImpl::setAlternatives(const Origin& origin, std::vector& protocols) { + setAlternativesImpl(origin, protocols); + if (key_value_store_) { + key_value_store_->addOrUpdate(originToString(origin), + protocolsToStringForCache(protocols, time_source_)); + } +} + +void AlternateProtocolsCacheImpl::setAlternativesImpl(const Origin& origin, + std::vector& protocols) { static const size_t max_protocols = 10; if (protocols.size() > max_protocols) { ENVOY_LOG_MISC(trace, "Too many alternate protocols: {}, truncating", protocols.size()); @@ -83,10 +132,6 @@ void AlternateProtocolsCacheImpl::setAlternatives(const Origin& origin, protocols_.erase(iter); } protocols_[origin] = protocols; - if (key_value_store_) { - key_value_store_->addOrUpdate(originToString(origin), - protocolsToStringForCache(protocols, time_source_)); - } } OptRef> @@ -95,7 +140,6 @@ AlternateProtocolsCacheImpl::findAlternatives(const Origin& origin) { if (entry_it == protocols_.end()) { return makeOptRefFromPtr>(nullptr); } - std::vector& protocols = entry_it->second; auto original_size = protocols.size(); diff --git a/source/common/http/alternate_protocols_cache_impl.h b/source/common/http/alternate_protocols_cache_impl.h index fe8d3de89fea..108cbcdf761d 100644 --- a/source/common/http/alternate_protocols_cache_impl.h +++ 
b/source/common/http/alternate_protocols_cache_impl.h @@ -27,6 +27,11 @@ class AlternateProtocolsCacheImpl : public AlternateProtocolsCache, size_t max_entries); ~AlternateProtocolsCacheImpl() override; + // Converts an Origin to a string which can be parsed by stringToOrigin. + static std::string originToString(const AlternateProtocolsCache::Origin& origin); + // Converts a string from originToString back to structured format. + static absl::optional stringToOrigin(const std::string& str); + // Convert an AlternateProtocol vector to a string to cache to the key value // store. Note that in order to determine the lifetime of entries, this // function will serialize ma= as absolute time from the epoch rather than @@ -50,6 +55,7 @@ class AlternateProtocolsCacheImpl : public AlternateProtocolsCache, size_t size() const override; private: + void setAlternativesImpl(const Origin& origin, std::vector& protocols); // Time source used to check expiration of entries. TimeSource& time_source_; diff --git a/source/common/http/alternate_protocols_cache_manager_impl.cc b/source/common/http/alternate_protocols_cache_manager_impl.cc index c0bf2016258c..b5ea969e00f8 100644 --- a/source/common/http/alternate_protocols_cache_manager_impl.cc +++ b/source/common/http/alternate_protocols_cache_manager_impl.cc @@ -22,7 +22,8 @@ AlternateProtocolsCacheManagerImpl::AlternateProtocolsCacheManagerImpl( } AlternateProtocolsCacheSharedPtr AlternateProtocolsCacheManagerImpl::getCache( - const envoy::config::core::v3::AlternateProtocolsCacheOptions& options) { + const envoy::config::core::v3::AlternateProtocolsCacheOptions& options, + Event::Dispatcher& dispatcher) { if (options.has_key_value_store_config() && data_.concurrency_ != 1) { throw EnvoyException( fmt::format("options has key value store but Envoy has concurrency = {} : {}", @@ -46,12 +47,12 @@ AlternateProtocolsCacheSharedPtr AlternateProtocolsCacheManagerImpl::getCache( MessageUtil::anyConvertAndValidate(options.key_value_store_config().typed_config(), kv_config, data_.validation_visitor_); auto& factory = Config::Utility::getAndCheckFactory(kv_config.config()); - store = factory.createStore(kv_config, data_.validation_visitor_, data_.dispatcher_, - data_.file_system_); + store = + factory.createStore(kv_config, data_.validation_visitor_, dispatcher, data_.file_system_); } AlternateProtocolsCacheSharedPtr new_cache = std::make_shared( - data_.dispatcher_.timeSource(), std::move(store), options.max_entries().value()); + dispatcher.timeSource(), std::move(store), options.max_entries().value()); (*slot_).caches_.emplace(options.name(), CacheWithOptions{options, new_cache}); return new_cache; } diff --git a/source/common/http/alternate_protocols_cache_manager_impl.h b/source/common/http/alternate_protocols_cache_manager_impl.h index 11746c935c1c..966a8ec35c09 100644 --- a/source/common/http/alternate_protocols_cache_manager_impl.h +++ b/source/common/http/alternate_protocols_cache_manager_impl.h @@ -30,7 +30,8 @@ class AlternateProtocolsCacheManagerImpl : public AlternateProtocolsCacheManager // AlternateProtocolsCacheManager AlternateProtocolsCacheSharedPtr - getCache(const envoy::config::core::v3::AlternateProtocolsCacheOptions& options) override; + getCache(const envoy::config::core::v3::AlternateProtocolsCacheOptions& options, + Event::Dispatcher& dispatcher) override; private: // Contains a cache and the options associated with it. 
diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index e2d242f54326..0b7b4e3a115f 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -110,16 +110,9 @@ void CodecClient::onEvent(Network::ConnectionEvent event) { if (connected_) { reason = StreamResetReason::ConnectionTermination; if (protocol_error_) { - if (Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.return_502_for_upstream_protocol_errors")) { - reason = StreamResetReason::ProtocolError; - connection_->streamInfo().setResponseFlag( - StreamInfo::ResponseFlag::UpstreamProtocolError); - } + reason = StreamResetReason::ProtocolError; + connection_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamProtocolError); } - } else { - ENVOY_CONN_LOG(warn, "Connection is closed by {} during connecting.", *connection_, - (event == Network::ConnectionEvent::RemoteClose ? "peer" : "self")); } while (!active_requests_.empty()) { // Fake resetting all active streams so that reset() callbacks get invoked. diff --git a/source/common/http/codec_client.h b/source/common/http/codec_client.h index 684c962380f3..093e28304402 100644 --- a/source/common/http/codec_client.h +++ b/source/common/http/codec_client.h @@ -158,6 +158,11 @@ class CodecClient : protected Logger::Loggable, codec_callbacks_->onSettings(settings); } } + void onMaxStreamsChanged(uint32_t num_streams) override { + if (codec_callbacks_) { + codec_callbacks_->onMaxStreamsChanged(num_streams); + } + } void onIdleTimeout() { host_->cluster().stats().upstream_cx_idle_timeout_.inc(); diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 90f6be301b63..5a27e44a5f32 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -300,6 +300,7 @@ RequestDecoder& ConnectionManagerImpl::newStream(ResponseEncoder& response_encod new_stream->response_encoder_ = &response_encoder; new_stream->response_encoder_->getStream().addCallbacks(*new_stream); new_stream->response_encoder_->getStream().setFlushTimeout(new_stream->idle_timeout_ms_); + new_stream->streamInfo().setDownstreamBytesMeter(response_encoder.getStream().bytesMeter()); // If the network connection is backed up, the stream should be made aware of it on creation. // Both HTTP/1.x and HTTP/2 codecs handle this in StreamCallbackHelper::addCallbacksHelper. ASSERT(read_callbacks_->connection().aboveHighWatermark() == false || @@ -704,10 +705,6 @@ void ConnectionManagerImpl::ActiveStream::completeRequest() { filter_manager_.streamInfo().setResponseFlag( StreamInfo::ResponseFlag::DownstreamConnectionTermination); } - // TODO(danzh) bring HTTP/3 to parity here. 
- if (connection_manager_.codec_->protocol() != Protocol::Http3) { - ASSERT(filter_manager_.streamInfo().responseCodeDetails().has_value()); - } connection_manager_.stats_.named_.downstream_rq_active_.dec(); if (filter_manager_.streamInfo().healthCheck()) { connection_manager_.config_.tracingStats().health_check_.inc(); diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index b83e0aa264a0..c9a5b9eb5e1e 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -190,9 +190,7 @@ class ConnectionManagerImpl : Logger::Loggable, // Http::RequestDecoder void decodeHeaders(RequestHeaderMapPtr&& headers, bool end_stream) override; void decodeTrailers(RequestTrailerMapPtr&& trailers) override; - const StreamInfo::StreamInfo& streamInfo() const override { - return filter_manager_.streamInfo(); - } + StreamInfo::StreamInfo& streamInfo() override { return filter_manager_.streamInfo(); } void sendLocalReply(Code code, absl::string_view body, const std::function& modify_headers, const absl::optional grpc_status, diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index 41df7a465e8c..c75401a9ede5 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -187,8 +187,7 @@ ConnectionManagerUtility::MutateRequestHeadersResult ConnectionManagerUtility::m // If :scheme is not set, sets :scheme based on X-Forwarded-Proto if a valid scheme, // else encryption level. // X-Forwarded-Proto and :scheme may still differ if different values are sent from downstream. - if (!request_headers.Scheme() && - Runtime::runtimeFeatureEnabled("envoy.reloadable_features.add_and_validate_scheme_header")) { + if (!request_headers.Scheme()) { request_headers.setScheme( getScheme(request_headers.getForwardedProtoValue(), connection.ssl() != nullptr)); } diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 8bc119a38f87..295220d79db9 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -113,8 +113,7 @@ void MultiplexedActiveClientBase::onGoAway(Http::GoAwayErrorCode) { // not considering http/2 connections connected until the SETTINGS frame is // received, but that would result in a latency penalty instead. void MultiplexedActiveClientBase::onSettings(ReceivedSettings& settings) { - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.improved_stream_limit_handling") && - settings.maxConcurrentStreams().has_value() && + if (settings.maxConcurrentStreams().has_value() && settings.maxConcurrentStreams().value() < concurrent_stream_limit_) { int64_t old_unused_capacity = currentUnusedCapacity(); // Given config limits old_unused_capacity should never exceed int32_t. diff --git a/source/common/http/conn_pool_grid.cc b/source/common/http/conn_pool_grid.cc index 8a7ff29aefb6..7ed48ce6c54b 100644 --- a/source/common/http/conn_pool_grid.cc +++ b/source/common/http/conn_pool_grid.cc @@ -23,7 +23,6 @@ ConnectivityGrid::WrapperCallbacks::WrapperCallbacks(ConnectivityGrid& grid, grid_.dispatcher_.createTimer([this]() -> void { tryAnotherConnection(); })), current_(pool_it) {} -// TODO(#15649) add trace logging. 
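With the runtime guard removed above, the pool always shrinks a connection's stream capacity when the peer's SETTINGS frame advertises a lower max concurrent streams than the configured limit. A simplified model of that capacity adjustment (not the pool's actual bookkeeping):

#include <algorithm>
#include <cstdint>
#include <iostream>

struct ConnectionCapacitySketch {
  int64_t concurrent_stream_limit;  // configured per-connection limit
  int64_t active_streams = 0;

  int64_t currentUnusedCapacity() const { return concurrent_stream_limit - active_streams; }

  // Returns how much cluster-wide capacity is lost by applying the peer's advertised limit.
  int64_t onSettingsMaxStreams(int64_t peer_max_concurrent_streams) {
    if (peer_max_concurrent_streams >= concurrent_stream_limit) {
      return 0;  // peer allows at least as much as we do; nothing changes
    }
    const int64_t old_unused = currentUnusedCapacity();
    concurrent_stream_limit = peer_max_concurrent_streams;
    return std::max<int64_t>(0, old_unused - currentUnusedCapacity());
  }
};

int main() {
  ConnectionCapacitySketch c{100, 10};
  std::cout << c.onSettingsMaxStreams(20) << "\n";  // 80 units of unused capacity released
}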
ConnectivityGrid::WrapperCallbacks::ConnectionAttemptCallbacks::ConnectionAttemptCallbacks( WrapperCallbacks& parent, PoolIterator it) : parent_(parent), pool_it_(it), cancellable_(nullptr) {} @@ -54,7 +53,6 @@ void ConnectivityGrid::WrapperCallbacks::ConnectionAttemptCallbacks::onPoolFailu void ConnectivityGrid::WrapperCallbacks::onConnectionAttemptFailed( ConnectionAttemptCallbacks* attempt, ConnectionPool::PoolFailureReason reason, absl::string_view transport_failure_reason, Upstream::HostDescriptionConstSharedPtr host) { - ASSERT(host == grid_.host_); ENVOY_LOG(trace, "{} pool failed to create connection to host '{}'.", describePool(attempt->pool()), host->hostname()); if (grid_.isPoolHttp3(attempt->pool())) { @@ -107,7 +105,6 @@ void ConnectivityGrid::WrapperCallbacks::onConnectionAttemptReady( ConnectionAttemptCallbacks* attempt, RequestEncoder& encoder, Upstream::HostDescriptionConstSharedPtr host, const StreamInfo::StreamInfo& info, absl::optional protocol) { - ASSERT(host == grid_.host_); ENVOY_LOG(trace, "{} pool successfully connected to host '{}'.", describePool(attempt->pool()), host->hostname()); if (!grid_.isPoolHttp3(attempt->pool())) { diff --git a/source/common/http/filter_manager.h b/source/common/http/filter_manager.h index 5f42ef6ee642..633fa6862d3d 100644 --- a/source/common/http/filter_manager.h +++ b/source/common/http/filter_manager.h @@ -687,6 +687,7 @@ class FilterManager : public ScopeTrackedObject, } // Http::FilterChainFactoryCallbacks + Event::Dispatcher& dispatcher() override { return dispatcher_; } void addStreamDecoderFilter(StreamDecoderFilterSharedPtr filter) override { addStreamDecoderFilterWorker(filter, nullptr, false); filters_.push_back(filter.get()); diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index 211aeed6aa1f..5ec89d6228c6 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -377,8 +377,7 @@ bool HeaderUtility::isRemovableHeader(absl::string_view header) { bool HeaderUtility::isModifiableHeader(absl::string_view header) { return (header.empty() || header[0] != ':') && - (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.treat_host_like_authority") || - !absl::EqualsIgnoreCase(header, Headers::get().HostLegacy.get())); + !absl::EqualsIgnoreCase(header, Headers::get().HostLegacy.get()); } HeaderUtility::HeaderValidationResult HeaderUtility::checkHeaderNameForUnderscores( @@ -405,7 +404,7 @@ HeaderUtility::HeaderValidationResult HeaderUtility::checkHeaderNameForUnderscor HeaderUtility::HeaderValidationResult HeaderUtility::validateContentLength(absl::string_view header_value, bool override_stream_error_on_invalid_http_message, - bool& should_close_connection) { + bool& should_close_connection, size_t& content_length_output) { should_close_connection = false; std::vector values = absl::StrSplit(header_value, ','); absl::optional content_length; @@ -430,6 +429,7 @@ HeaderUtility::validateContentLength(absl::string_view header_value, return HeaderValidationResult::REJECT; } } + content_length_output = content_length.value(); return HeaderValidationResult::ACCEPT; } diff --git a/source/common/http/header_utility.h b/source/common/http/header_utility.h index 1053f893c34f..8e1ae6decd1c 100644 --- a/source/common/http/header_utility.h +++ b/source/common/http/header_utility.h @@ -258,13 +258,14 @@ class HeaderUtility { /** * Check if header_value represents a valid value for HTTP content-length header. 
- * Return HeaderValidationResult and populate should_close_connection - * according to override_stream_error_on_invalid_http_message. + * Return HeaderValidationResult and populate content_length_output if the value is valid, + * otherwise populate should_close_connection according to + * override_stream_error_on_invalid_http_message. */ static HeaderValidationResult validateContentLength(absl::string_view header_value, bool override_stream_error_on_invalid_http_message, - bool& should_close_connection); + bool& should_close_connection, size_t& content_length_output); }; } // namespace Http diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 77bc70065697..bcca80fd2582 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -80,16 +80,22 @@ StatefulHeaderKeyFormatterPtr statefulFormatterFromSettings(const Http::Http1Set return nullptr; } +constexpr size_t CRLF_SIZE = 2; + } // namespace const std::string StreamEncoderImpl::CRLF = "\r\n"; // Last chunk as defined here https://tools.ietf.org/html/rfc7230#section-4.1 const std::string StreamEncoderImpl::LAST_CHUNK = "0\r\n"; -StreamEncoderImpl::StreamEncoderImpl(ConnectionImpl& connection) +StreamEncoderImpl::StreamEncoderImpl(ConnectionImpl& connection, + StreamInfo::BytesMeterSharedPtr&& bytes_meter) : connection_(connection), disable_chunk_encoding_(false), chunk_encoding_(true), connect_request_(false), is_tcp_tunneling_(false), is_response_to_head_request_(false), - is_response_to_connect_request_(false) { + is_response_to_connect_request_(false), bytes_meter_(bytes_meter) { + if (!bytes_meter_) { + bytes_meter_ = std::make_shared(); + } if (connection_.connection().aboveHighWatermark()) { runHighWatermarkCallbacks(); } @@ -99,12 +105,13 @@ void StreamEncoderImpl::encodeHeader(const char* key, uint32_t key_size, const c uint32_t value_size) { ASSERT(key_size > 0); - + const uint64_t old_buffer_length = connection_.buffer().length(); connection_.copyToBuffer(key, key_size); connection_.addCharToBuffer(':'); connection_.addCharToBuffer(' '); connection_.copyToBuffer(value, value_size); connection_.addToBuffer(CRLF); + bytes_meter_->addHeaderBytesSent(connection_.buffer().length() - old_buffer_length); } void StreamEncoderImpl::encodeHeader(absl::string_view key, absl::string_view value) { this->encodeHeader(key.data(), key.size(), value.data(), value.size()); @@ -189,9 +196,7 @@ void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& head // Also do not add content length for requests which should not have a // body, per https://tools.ietf.org/html/rfc7230#section-3.3.2 if (!status || (*status >= 200 && *status != 204)) { - if (!bodiless_request || - !Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.dont_add_content_length_for_bodiless_requests")) { + if (!bodiless_request) { encodeFormattedHeader(header_values.ContentLength.get(), "0", formatter); } } @@ -231,7 +236,7 @@ void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& head if (end_stream) { endEncode(); } else { - connection_.flushOutput(); + flushOutput(); } } @@ -240,7 +245,8 @@ void StreamEncoderImpl::encodeData(Buffer::Instance& data, bool end_stream) { // actually write the zero length buffer out. 
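The validateContentLength() change above adds an output parameter so callers also receive the parsed length when the header is accepted. A standalone sketch of that contract, accepting only when every comma-separated value parses to the same non-negative integer; whitespace trimming and the should_close_connection handling of the real method are omitted here:

#include <cstdint>
#include <iostream>
#include <optional>
#include <sstream>
#include <string>

enum class HeaderValidationResult { ACCEPT, REJECT };

HeaderValidationResult validateContentLength(const std::string& header_value,
                                             size_t& content_length_output) {
  std::optional<uint64_t> content_length;
  std::stringstream ss(header_value);
  std::string token;
  while (std::getline(ss, token, ',')) {
    // Only plain non-negative integers are acceptable values.
    if (token.empty() || token.find_first_not_of("0123456789") != std::string::npos) {
      return HeaderValidationResult::REJECT;
    }
    const uint64_t parsed = std::stoull(token);
    if (content_length.has_value() && parsed != *content_length) {
      return HeaderValidationResult::REJECT;  // duplicated values must agree
    }
    content_length = parsed;
  }
  if (!content_length.has_value()) {
    return HeaderValidationResult::REJECT;
  }
  content_length_output = static_cast<size_t>(*content_length);
  return HeaderValidationResult::ACCEPT;
}

int main() {
  size_t len = 0;
  const bool ok = validateContentLength("42,42", len) == HeaderValidationResult::ACCEPT;
  std::cout << ok << " " << len << "\n";  // 1 42
}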
if (data.length() > 0) { if (chunk_encoding_) { - connection_.buffer().add(absl::StrCat(absl::Hex(data.length()), CRLF)); + std::string chunk_header = absl::StrCat(absl::Hex(data.length()), CRLF); + connection_.buffer().add(std::move(chunk_header)); } connection_.buffer().move(data); @@ -253,10 +259,15 @@ void StreamEncoderImpl::encodeData(Buffer::Instance& data, bool end_stream) { if (end_stream) { endEncode(); } else { - connection_.flushOutput(); + flushOutput(); } } +void StreamEncoderImpl::flushOutput(bool end_encode) { + auto encoded_bytes = connection_.flushOutput(end_encode); + bytes_meter_->addWireBytesSent(encoded_bytes); +} + void StreamEncoderImpl::encodeTrailersBase(const HeaderMap& trailers) { if (!connection_.enableTrailers()) { return endEncode(); @@ -274,11 +285,11 @@ void StreamEncoderImpl::encodeTrailersBase(const HeaderMap& trailers) { return HeaderMap::Iterate::Continue; }); - connection_.flushOutput(); + flushOutput(); connection_.buffer().add(CRLF); } - connection_.flushOutput(); + flushOutput(); connection_.onEncodeComplete(); } @@ -292,7 +303,7 @@ void StreamEncoderImpl::endEncode() { connection_.buffer().add(CRLF); } - connection_.flushOutput(true); + flushOutput(true); connection_.onEncodeComplete(); // With CONNECT or TCP tunneling, half-closing the connection is used to signal end stream. if (connect_request_ || is_tcp_tunneling_) { @@ -326,14 +337,16 @@ Status ServerConnectionImpl::doFloodProtectionChecks() const { return okStatus(); } -void ConnectionImpl::flushOutput(bool end_encode) { +uint64_t ConnectionImpl::flushOutput(bool end_encode) { if (end_encode) { // If this is an HTTP response in ServerConnectionImpl, track outbound responses for flood // protection maybeAddSentinelBufferFragment(*output_buffer_); } + const uint64_t bytes_encoded = output_buffer_->length(); connection().write(*output_buffer_, false); ASSERT(0UL == output_buffer_->length()); + return bytes_encoded; } void ConnectionImpl::addToBuffer(absl::string_view data) { output_buffer_->add(data); } @@ -497,10 +510,13 @@ Status ConnectionImpl::completeLastHeader() { ASSERT(dispatching_); ENVOY_CONN_LOG(trace, "completed header: key={} value={}", connection_, current_header_field_.getStringView(), current_header_value_.getStringView()); + auto& headers_or_trailers = headersOrTrailers(); + + // Account for ":" and "\r\n" bytes between the header key value pair. + getBytesMeter().addHeaderBytesReceived(CRLF_SIZE + 1); // TODO(10646): Switch to use HeaderUtility::checkHeaderNameForUnderscores(). RETURN_IF_ERROR(checkHeaderNameForUnderscores()); - auto& headers_or_trailers = headersOrTrailers(); if (!current_header_field_.empty()) { // Strip trailing whitespace of the current header value if any. Leading whitespace was trimmed // in ConnectionImpl::onHeaderValue. 
http_parser does not strip leading or trailing whitespace @@ -563,6 +579,10 @@ bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) { return true; } +void ConnectionImpl::onDispatch(const Buffer::Instance& data) { + getBytesMeter().addWireBytesReceived(data.length()); +} + Http::Status ClientConnectionImpl::dispatch(Buffer::Instance& data) { Http::Status status = ConnectionImpl::dispatch(data); if (status.ok() && data.length() > 0) { @@ -585,6 +605,7 @@ Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { ASSERT(buffered_body_.length() == 0); dispatching_ = true; + onDispatch(data); if (maybeDirectDispatch(data)) { return Http::okStatus(); } @@ -655,6 +676,9 @@ Envoy::StatusOr ConnectionImpl::dispatchSlice(const char* slice, size_t Status ConnectionImpl::onHeaderField(const char* data, size_t length) { ASSERT(dispatching_); + + getBytesMeter().addHeaderBytesReceived(length); + // We previously already finished up the headers, these headers are // now trailers. if (header_parsing_state_ == HeaderParsingState::Done) { @@ -677,6 +701,9 @@ Status ConnectionImpl::onHeaderField(const char* data, size_t length) { Status ConnectionImpl::onHeaderValue(const char* data, size_t length) { ASSERT(dispatching_); + + getBytesMeter().addHeaderBytesReceived(length); + if (header_parsing_state_ == HeaderParsingState::Done && !enableTrailers()) { // Ignore trailers. return okStatus(); @@ -1024,8 +1051,7 @@ Status ServerConnectionImpl::handlePath(RequestHeaderMap& headers, absl::string_ headers.setHost(absolute_url.hostAndPort()); // Add the scheme and validate to ensure no https:// // requests are accepted over unencrypted connections by front-line Envoys. - if (!is_connect && - Runtime::runtimeFeatureEnabled("envoy.reloadable_features.add_and_validate_scheme_header")) { + if (!is_connect) { headers.setScheme(absolute_url.scheme()); if (!HeaderUtility::schemeIsValid(absolute_url.scheme())) { RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidScheme)); @@ -1116,7 +1142,7 @@ Envoy::StatusOr ServerConnectionImpl::onHeadersCompleteBase() { Status ServerConnectionImpl::onMessageBeginBase() { if (!resetStreamCalled()) { ASSERT(!active_request_.has_value()); - active_request_.emplace(*this); + active_request_.emplace(*this, std::move(bytes_meter_before_stream_)); auto& active_request = active_request_.value(); if (resetStreamCalled()) { return codecClientError("cannot create new streams after calling reset"); @@ -1274,7 +1300,7 @@ RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decode ASSERT(!pending_response_.has_value()); ASSERT(pending_response_done_); - pending_response_.emplace(*this, &response_decoder); + pending_response_.emplace(*this, std::move(bytes_meter_before_stream_), &response_decoder); pending_response_done_ = false; return pending_response_.value().encoder_; } diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index b6f32a16d5f6..44df405a0a2a 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -83,8 +83,10 @@ class StreamEncoderImpl : public virtual StreamEncoder, void clearReadDisableCallsForTests() { read_disable_calls_ = 0; } + const StreamInfo::BytesMeterSharedPtr& bytesMeter() override { return bytes_meter_; } + protected: - StreamEncoderImpl(ConnectionImpl& connection); + StreamEncoderImpl(ConnectionImpl& connection, StreamInfo::BytesMeterSharedPtr&& bytes_meter); void encodeHeadersBase(const RequestOrResponseHeaderMap& headers, 
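The HTTP/1 codec hunks above meter wire bytes at dispatch and flush boundaries, and header bytes as the parser emits key/value fragments plus the ":" and CRLF separators on header completion. A rough model (not the real StreamInfo::BytesMeter API) showing the two counters side by side; note they can legitimately differ, e.g. the optional space after ":" lands in wire bytes but is not attributed to header bytes in this scheme:

#include <cstdint>
#include <iostream>
#include <string>

constexpr size_t CRLF_SIZE = 2;

class BytesMeterSketch {
public:
  void addWireBytesReceived(uint64_t n) { wire_bytes_received_ += n; }
  void addHeaderBytesReceived(uint64_t n) { header_bytes_received_ += n; }
  uint64_t wireBytesReceived() const { return wire_bytes_received_; }
  uint64_t headerBytesReceived() const { return header_bytes_received_; }

private:
  uint64_t wire_bytes_received_ = 0;
  uint64_t header_bytes_received_ = 0;
};

int main() {
  BytesMeterSketch meter;
  const std::string field = "Host";
  const std::string value = "example.com";
  meter.addWireBytesReceived(field.size() + 2 + value.size() + CRLF_SIZE);  // raw "Host: example.com\r\n"
  meter.addHeaderBytesReceived(field.size());   // onHeaderField callback
  meter.addHeaderBytesReceived(value.size());   // onHeaderValue callback
  meter.addHeaderBytesReceived(CRLF_SIZE + 1);  // ':' and CRLF accounted at header completion
  std::cout << meter.wireBytesReceived() << " " << meter.headerBytesReceived() << "\n";  // 19 18
}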
absl::optional status, bool end_stream, bool bodiless_request); void encodeTrailersBase(const HeaderMap& headers); @@ -127,7 +129,10 @@ class StreamEncoderImpl : public virtual StreamEncoder, void encodeFormattedHeader(absl::string_view key, absl::string_view value, HeaderKeyFormatterOptConstRef formatter); + void flushOutput(bool end_encode = false); + absl::string_view details_; + StreamInfo::BytesMeterSharedPtr bytes_meter_; }; /** @@ -135,8 +140,9 @@ class StreamEncoderImpl : public virtual StreamEncoder, */ class ResponseEncoderImpl : public StreamEncoderImpl, public ResponseEncoder { public: - ResponseEncoderImpl(ConnectionImpl& connection, bool stream_error_on_invalid_http_message) - : StreamEncoderImpl(connection), + ResponseEncoderImpl(ConnectionImpl& connection, StreamInfo::BytesMeterSharedPtr&& bytes_meter, + bool stream_error_on_invalid_http_message) + : StreamEncoderImpl(connection, std::move(bytes_meter)), stream_error_on_invalid_http_message_(stream_error_on_invalid_http_message) {} ~ResponseEncoderImpl() override { @@ -175,7 +181,8 @@ class ResponseEncoderImpl : public StreamEncoderImpl, public ResponseEncoder { */ class RequestEncoderImpl : public StreamEncoderImpl, public RequestEncoder { public: - RequestEncoderImpl(ConnectionImpl& connection) : StreamEncoderImpl(connection) {} + RequestEncoderImpl(ConnectionImpl& connection, StreamInfo::BytesMeterSharedPtr&& bytes_meter) + : StreamEncoderImpl(connection, std::move(bytes_meter)) {} bool upgradeRequest() const { return upgrade_request_; } bool headRequest() const { return head_request_; } bool connectRequest() const { return connect_request_; } @@ -210,6 +217,8 @@ class ConnectionImpl : public virtual Connection, */ virtual void onEncodeComplete() PURE; + virtual StreamInfo::BytesMeter& getBytesMeter() PURE; + /** * Called when resetStream() has been called on an active stream. In HTTP/1.1 the only * valid operation after this point is for the connection to get blown away, but we will not @@ -220,7 +229,7 @@ class ConnectionImpl : public virtual Connection, /** * Flush all pending output from encoding. */ - void flushOutput(bool end_encode = false); + uint64_t flushOutput(bool end_encode = false); void addToBuffer(absl::string_view data); void addCharToBuffer(char c); @@ -311,6 +320,7 @@ class ConnectionImpl : public virtual Connection, bool dispatching_ : 1; bool dispatching_slice_already_drained_ : 1; const bool no_chunked_encoding_header_for_304_ : 1; + StreamInfo::BytesMeterSharedPtr bytes_meter_before_stream_; private: enum class HeaderParsingState { Field, Value, Done }; @@ -356,6 +366,8 @@ class ConnectionImpl : public virtual Connection, */ Envoy::StatusOr dispatchSlice(const char* slice, size_t len); + void onDispatch(const Buffer::Instance& data); + // ParserCallbacks. Status onHeaderField(const char* data, size_t length) override; Status onHeaderValue(const char* data, size_t length) override; @@ -456,8 +468,8 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { * An active HTTP/1.1 request. 
*/ struct ActiveRequest { - ActiveRequest(ServerConnectionImpl& connection) - : response_encoder_(connection, + ActiveRequest(ServerConnectionImpl& connection, StreamInfo::BytesMeterSharedPtr&& bytes_meter) + : response_encoder_(connection, std::move(bytes_meter), connection.codec_settings_.stream_error_on_invalid_http_message_) {} void dumpState(std::ostream& os, int indent_level) const; @@ -488,6 +500,15 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { Status onUrl(const char* data, size_t length) override; // ConnectionImpl void onEncodeComplete() override; + StreamInfo::BytesMeter& getBytesMeter() override { + if (active_request_.has_value()) { + return *(active_request_->response_encoder_.getStream().bytesMeter()); + } + if (bytes_meter_before_stream_ == nullptr) { + bytes_meter_before_stream_ = std::make_shared(); + } + return *bytes_meter_before_stream_; + } Status onMessageBeginBase() override; Envoy::StatusOr onHeadersCompleteBase() override; // If upgrade behavior is not allowed, the HCM will have sanitized the headers out. @@ -524,6 +545,7 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { void releaseOutboundResponse(const Buffer::OwnedBufferFragmentImpl* fragment); void maybeAddSentinelBufferFragment(Buffer::Instance& output_buffer) override; + Status doFloodProtectionChecks() const; Status checkHeaderNameForUnderscores() override; @@ -559,9 +581,9 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { private: struct PendingResponse { - PendingResponse(ConnectionImpl& connection, ResponseDecoder* decoder) - : encoder_(connection), decoder_(decoder) {} - + PendingResponse(ConnectionImpl& connection, StreamInfo::BytesMeterSharedPtr&& bytes_meter, + ResponseDecoder* decoder) + : encoder_(connection, std::move(bytes_meter)), decoder_(decoder) {} RequestEncoderImpl encoder_; ResponseDecoder* decoder_; }; @@ -573,6 +595,15 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { // ConnectionImpl Http::Status dispatch(Buffer::Instance& data) override; void onEncodeComplete() override {} + StreamInfo::BytesMeter& getBytesMeter() override { + if (pending_response_.has_value()) { + return *(pending_response_->encoder_.getStream().bytesMeter()); + } + if (bytes_meter_before_stream_ == nullptr) { + bytes_meter_before_stream_ = std::make_shared(); + } + return *bytes_meter_before_stream_; + } Status onMessageBeginBase() override { return okStatus(); } Envoy::StatusOr onHeadersCompleteBase() override; bool upgradeAllowed() const override; diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index bf948b8c8845..9ae282779949 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -276,8 +276,7 @@ void ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) { // waiting on window updates. We need to save the trailers so that we can emit them later. // However, for empty trailers, we don't need to to save the trailers. 
ASSERT(!pending_trailers_to_encode_); - const bool skip_encoding_empty_trailers = - trailers.empty() && parent_.skip_encoding_empty_trailers_; + const bool skip_encoding_empty_trailers = trailers.empty(); if (!skip_encoding_empty_trailers) { pending_trailers_to_encode_ = cloneTrailers(trailers); onLocalEndStream(); @@ -401,8 +400,7 @@ void ConnectionImpl::StreamImpl::saveHeader(HeaderString&& name, HeaderString&& void ConnectionImpl::StreamImpl::submitTrailers(const HeaderMap& trailers) { ASSERT(local_end_stream_); - const bool skip_encoding_empty_trailers = - trailers.empty() && parent_.skip_encoding_empty_trailers_; + const bool skip_encoding_empty_trailers = trailers.empty(); if (skip_encoding_empty_trailers) { ENVOY_CONN_LOG(debug, "skipping submitting trailers", parent_.connection_); @@ -451,12 +449,11 @@ void ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t // In this callback we are writing out a raw DATA frame without copying. nghttp2 assumes that we // "just know" that the frame header is 9 bytes. // https://nghttp2.org/documentation/types.html#c.nghttp2_send_data_callback - static const uint64_t FRAME_HEADER_SIZE = 9; parent_.protocol_constraints_.incrementOutboundDataFrameCount(); Buffer::OwnedImpl output; - parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE); + parent_.addOutboundFrameFragment(output, framehd, H2_FRAME_HEADER_SIZE); if (!parent_.protocol_constraints_.checkOutboundFrameLimits().ok()) { ENVOY_CONN_LOG(debug, "error sending data frame: Too many frames in the outbound queue", parent_.connection_); @@ -617,8 +614,6 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat stream_error_on_invalid_http_messaging_( http2_options.override_stream_error_on_invalid_http_message().value()), protocol_constraints_(stats, http2_options), - skip_encoding_empty_trailers_(Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.http2_skip_encoding_empty_trailers")), skip_dispatching_frames_for_closed_connection_(Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.skip_dispatching_frames_for_closed_connection")), dispatching_(false), raised_goaway_(false), random_(random_generator), @@ -816,7 +811,6 @@ Status ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) { connection_.state() == Network::Connection::State::Open); current_stream_id_ = hd->stream_id; - // Track all the frames without padding here, since this is the only callback we receive // for some of them (e.g. CONTINUATION frame, frames sent on closed streams, etc.). // HEADERS frame is tracked in onBeginHeaders(), DATA frame is tracked in onFrameReceived(). @@ -883,6 +877,14 @@ Status ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { return okStatus(); } + // Track bytes sent and received. + if (frame->hd.type != METADATA_FRAME_TYPE) { + stream->bytes_meter_->addWireBytesReceived(frame->hd.length + H2_FRAME_HEADER_SIZE); + } + if (frame->hd.type == NGHTTP2_HEADERS || frame->hd.type == NGHTTP2_CONTINUATION) { + stream->bytes_meter_->addHeaderBytesReceived(frame->hd.length + H2_FRAME_HEADER_SIZE); + } + switch (frame->hd.type) { case NGHTTP2_HEADERS: { stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; @@ -948,7 +950,17 @@ int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) { // data from our peer. Sometimes it raises the invalid frame callback, and sometimes it does not. // In all cases however it will attempt to send a GOAWAY frame with an error status. 
If we see // an outgoing frame of this type, we will return an error code so that we can abort execution. - ENVOY_CONN_LOG(trace, "sent frame type={}", connection_, static_cast(frame->hd.type)); + ENVOY_CONN_LOG(trace, "sent frame type={}, stream_id={}, length={}", connection_, + static_cast(frame->hd.type), frame->hd.stream_id, frame->hd.length); + StreamImpl* stream = getStream(frame->hd.stream_id); + if (stream != nullptr) { + if (frame->hd.type != METADATA_FRAME_TYPE) { + stream->bytes_meter_->addWireBytesSent(frame->hd.length + H2_FRAME_HEADER_SIZE); + } + if (frame->hd.type == NGHTTP2_HEADERS || frame->hd.type == NGHTTP2_CONTINUATION) { + stream->bytes_meter_->addHeaderBytesSent(frame->hd.length + H2_FRAME_HEADER_SIZE); + } + } switch (frame->hd.type) { case NGHTTP2_GOAWAY: { ENVOY_CONN_LOG(debug, "sent goaway code={}", connection_, frame->goaway.error_code); @@ -1516,8 +1528,7 @@ void ConnectionImpl::dumpState(std::ostream& os, int indent_level) const { os << spaces << "Http2::ConnectionImpl " << this << DUMP_MEMBER(max_headers_kb_) << DUMP_MEMBER(max_headers_count_) << DUMP_MEMBER(per_stream_buffer_limit_) << DUMP_MEMBER(allow_metadata_) << DUMP_MEMBER(stream_error_on_invalid_http_messaging_) - << DUMP_MEMBER(is_outbound_flood_monitored_control_frame_) - << DUMP_MEMBER(skip_encoding_empty_trailers_) << DUMP_MEMBER(dispatching_) + << DUMP_MEMBER(is_outbound_flood_monitored_control_frame_) << DUMP_MEMBER(dispatching_) << DUMP_MEMBER(raised_goaway_) << DUMP_MEMBER(pending_deferred_reset_streams_.size()) << '\n'; // Dump the protocol constraints @@ -1728,14 +1739,9 @@ ClientConnectionImpl::trackOutboundFrames(bool is_outbound_flood_monitored_contr } StreamResetReason ClientConnectionImpl::getMessagingErrorResetReason() const { - StreamResetReason reason = StreamResetReason::LocalReset; - if (Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.return_502_for_upstream_protocol_errors")) { - reason = StreamResetReason::ProtocolError; - connection_.streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamProtocolError); - } + connection_.streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamProtocolError); - return reason; + return StreamResetReason::ProtocolError; } ServerConnectionImpl::ServerConnectionImpl( diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index 9b3363e740b9..9a8bfc7b7c6b 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -44,6 +44,7 @@ class Http2CodecImplTestFixture; // This is not the full client magic, but it's the smallest size that should be able to // differentiate between HTTP/1 and HTTP/2. 
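The HTTP/2 hunks above attribute each non-METADATA frame's payload plus the fixed 9-byte frame header to wire bytes, with HEADERS and CONTINUATION frames additionally counted as header bytes. A standalone sketch of that accounting; the HEADERS/CONTINUATION constants are the standard HTTP/2 type codes, and the METADATA value is assumed here to stand in for Envoy's extension frame type:

#include <cstdint>
#include <iostream>

constexpr uint64_t H2_FRAME_HEADER_SIZE = 9;
constexpr uint8_t FRAME_TYPE_HEADERS = 0x1;
constexpr uint8_t FRAME_TYPE_CONTINUATION = 0x9;
constexpr uint8_t FRAME_TYPE_METADATA = 0x4d;  // assumed extension type, excluded from metering

struct StreamBytes {
  uint64_t wire = 0;
  uint64_t header = 0;

  void onFrame(uint8_t type, uint64_t payload_length) {
    if (type != FRAME_TYPE_METADATA) {
      wire += payload_length + H2_FRAME_HEADER_SIZE;
    }
    if (type == FRAME_TYPE_HEADERS || type == FRAME_TYPE_CONTINUATION) {
      header += payload_length + H2_FRAME_HEADER_SIZE;
    }
  }
};

int main() {
  StreamBytes bytes;
  bytes.onFrame(FRAME_TYPE_HEADERS, 120);  // HEADERS frame with a 120-byte payload
  bytes.onFrame(0x0 /* DATA */, 4096);     // DATA frame with a 4096-byte payload
  std::cout << bytes.wire << " " << bytes.header << "\n";  // 4234 129
}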
const std::string CLIENT_MAGIC_PREFIX = "PRI * HTTP/2"; +constexpr uint64_t H2_FRAME_HEADER_SIZE = 9; class ReceivedSettingsImpl : public ReceivedSettings { public: @@ -282,10 +283,12 @@ class ConnectionImpl : public virtual Connection, void encodeDataHelper(Buffer::Instance& data, bool end_stream, bool skip_encoding_empty_trailers); + const StreamInfo::BytesMeterSharedPtr& bytesMeter() override { return bytes_meter_; } ConnectionImpl& parent_; int32_t stream_id_{-1}; uint32_t unconsumed_bytes_{0}; uint32_t read_disable_count_{0}; + StreamInfo::BytesMeterSharedPtr bytes_meter_{std::make_shared()}; Buffer::BufferMemoryAccountSharedPtr buffer_memory_account_; // Note that in current implementation the watermark callbacks of the pending_recv_data_ are @@ -522,12 +525,6 @@ class ConnectionImpl : public virtual Connection, // nghttp2 library will keep calling this callback to write the rest of the frame. ssize_t onSend(const uint8_t* data, size_t length); - // Some browsers (e.g. WebKit-based browsers: https://bugs.webkit.org/show_bug.cgi?id=210108) have - // a problem with processing empty trailers (END_STREAM | END_HEADERS with zero length HEADERS) of - // an HTTP/2 response as reported here: https://github.com/envoyproxy/envoy/issues/10514. This is - // controlled by "envoy.reloadable_features.http2_skip_encoding_empty_trailers" runtime feature - // flag. - const bool skip_encoding_empty_trailers_; const bool skip_dispatching_frames_for_closed_connection_; // dumpState helper method. diff --git a/source/common/http/http3/codec_stats.h b/source/common/http/http3/codec_stats.h index 9e7ca5859b2f..aed84aeafe93 100644 --- a/source/common/http/http3/codec_stats.h +++ b/source/common/http/http3/codec_stats.h @@ -18,6 +18,7 @@ namespace Http3 { COUNTER(rx_reset) \ COUNTER(tx_reset) \ COUNTER(metadata_not_supported_error) \ + COUNTER(quic_version_h3_29) \ COUNTER(quic_version_rfc_v1) \ COUNTER(tx_flush_timeout) diff --git a/source/common/http/http3/conn_pool.cc b/source/common/http/http3/conn_pool.cc index 9bba3fa0221c..c4a53797c747 100644 --- a/source/common/http/http3/conn_pool.cc +++ b/source/common/http/http3/conn_pool.cc @@ -15,18 +15,36 @@ namespace Envoy { namespace Http { namespace Http3 { +namespace { + +uint32_t getMaxStreams(const Upstream::ClusterInfo& cluster) { + return PROTOBUF_GET_WRAPPED_OR_DEFAULT(cluster.http3Options().quic_protocol_options(), + max_concurrent_streams, 100); +} + +} // namespace + +ActiveClient::ActiveClient(Envoy::Http::HttpConnPoolImplBase& parent, + Upstream::Host::CreateConnectionData& data) + : MultiplexedActiveClientBase(parent, getMaxStreams(parent.host()->cluster()), + parent.host()->cluster().stats().upstream_cx_http3_total_, data) { +} + +void ActiveClient::onMaxStreamsChanged(uint32_t num_streams) { + updateCapacity(num_streams); + if (state() == ActiveClient::State::BUSY && currentUnusedCapacity() != 0) { + parent_.transitionActiveClientState(*this, ActiveClient::State::READY); + // If there's waiting streams, make sure the pool will now serve them. 
+ parent_.onUpstreamReady(); + } +} void Http3ConnPoolImpl::setQuicConfigFromClusterConfig(const Upstream::ClusterInfo& cluster, quic::QuicConfig& quic_config) { + Quic::convertQuicConfig(cluster.http3Options().quic_protocol_options(), quic_config); quic::QuicTime::Delta crypto_timeout = quic::QuicTime::Delta::FromMilliseconds(cluster.connectTimeout().count()); quic_config.set_max_time_before_crypto_handshake(crypto_timeout); - int32_t max_streams = - cluster.http3Options().quic_protocol_options().max_concurrent_streams().value(); - quic_config.SetMaxBidirectionalStreamsToSend(max_streams); - quic_config.SetMaxUnidirectionalStreamsToSend(max_streams); - Quic::configQuicInitialFlowControlWindow(cluster.http3Options().quic_protocol_options(), - quic_config); } Http3ConnPoolImpl::Http3ConnPoolImpl( diff --git a/source/common/http/http3/conn_pool.h b/source/common/http/http3/conn_pool.h index 0886d4ddaf7a..2db0426630d6 100644 --- a/source/common/http/http3/conn_pool.h +++ b/source/common/http/http3/conn_pool.h @@ -22,16 +22,66 @@ namespace Http3 { class ActiveClient : public MultiplexedActiveClientBase { public: ActiveClient(Envoy::Http::HttpConnPoolImplBase& parent, - Upstream::Host::CreateConnectionData& data) - : MultiplexedActiveClientBase(parent, - parent.host() - ->cluster() - .http3Options() - .quic_protocol_options() - .max_concurrent_streams() - .value(), - parent.host()->cluster().stats().upstream_cx_http3_total_, - data) {} + Upstream::Host::CreateConnectionData& data); + + // Http::ConnectionCallbacks + void onMaxStreamsChanged(uint32_t num_streams) override; + + RequestEncoder& newStreamEncoder(ResponseDecoder& response_decoder) override { + ASSERT(quiche_capacity_ != 0); + // Each time a quic stream is allocated the quic capacity needs to get + // decremented. See comments by quiche_capacity_. + updateCapacity(quiche_capacity_ - 1); + return MultiplexedActiveClientBase::newStreamEncoder(response_decoder); + } + + // Overload the default capacity calculations to return the quic capacity + // (modified by any stream limits in Envoy config) + int64_t currentUnusedCapacity() const override { + return std::min(quiche_capacity_, effectiveConcurrentStreamLimit()); + } + + void updateCapacity(uint64_t new_quiche_capacity) { + // Each time we update the capacity make sure to reflect the update in the + // connection pool. + // + // Due to interplay between the max number of concurrent streams Envoy will + // allow and the max number of streams per connection this is not as simple + // as just updating based on the delta between quiche_capacity_ and + // new_quiche_capacity, so we use the delta between the actual calculated + // capacity before and after the update. + uint64_t old_capacity = currentUnusedCapacity(); + quiche_capacity_ = new_quiche_capacity; + uint64_t new_capacity = currentUnusedCapacity(); + + if (new_capacity < old_capacity) { + parent_.decrClusterStreamCapacity(old_capacity - new_capacity); + } else if (old_capacity < new_capacity) { + parent_.incrClusterStreamCapacity(new_capacity - old_capacity); + } + } + + // Unlike HTTP/2 and HTTP/1, rather than having a cap on the number of active + // streams, QUIC has a fixed number of streams available which is updated via + // the MAX_STREAMS frame. + // + // As such each time we create a new stream for QUIC, the capacity goes down + // by one, but unlike the other two codecs it is _not_ restored on stream + // closure. 
+ // + // We track the QUIC capacity here, and overload currentUnusedCapacity so the + // connection pool can accurately keep track of when it is safe to create new + // streams. + // + // Though HTTP/3 should arguably start out with 0 stream capacity until the + // initial handshake is complete and MAX_STREAMS frame has been received, + // assume optimistically it will get ~100 streams, so that the connection pool + // won't fetch a connection for each incoming stream but will assume that the + // first connection will likely be able to serve 100. + // This number will be updated to the correct value before the connection is + // deemed connected, at which point further connections will be established if + // necessary. + uint64_t quiche_capacity_ = 100; }; // Http3 subclass of FixedHttpConnPoolImpl which exists to store quic data. @@ -54,6 +104,9 @@ class Http3ConnPoolImpl : public FixedHttpConnPoolImpl { quic::QuicConfig& quic_config); Quic::PersistentQuicInfoImpl& quicInfo() { return *quic_info_; } + // For HTTP/3 the base connection pool does not track stream capacity, rather + // the HTTP3 active client does. + bool trackStreamCapacity() override { return false; } private: // Store quic helpers which can be shared between connections and must live diff --git a/source/common/http/legacy_path_canonicalizer.cc b/source/common/http/legacy_path_canonicalizer.cc deleted file mode 100644 index e1798b8ec80c..000000000000 --- a/source/common/http/legacy_path_canonicalizer.cc +++ /dev/null @@ -1,25 +0,0 @@ -#include "source/common/http/legacy_path_canonicalizer.h" - -#include "source/common/chromium_url/url_canon.h" -#include "source/common/chromium_url/url_canon_stdstring.h" - -namespace Envoy { -namespace Http { - -absl::optional -LegacyPathCanonicalizer::canonicalizePath(absl::string_view original_path) { - std::string canonical_path; - chromium_url::Component in_component(0, original_path.size()); - chromium_url::Component out_component; - chromium_url::StdStringCanonOutput output(&canonical_path); - if (!chromium_url::CanonicalizePath(original_path.data(), in_component, &output, - &out_component)) { - return absl::nullopt; - } else { - output.Complete(); - return absl::make_optional(std::move(canonical_path)); - } -} - -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/legacy_path_canonicalizer.h b/source/common/http/legacy_path_canonicalizer.h deleted file mode 100644 index 9b0543309d86..000000000000 --- a/source/common/http/legacy_path_canonicalizer.h +++ /dev/null @@ -1,21 +0,0 @@ -#pragma once - -#include - -#include "absl/strings/string_view.h" -#include "absl/types/optional.h" - -namespace Envoy { -namespace Http { - -/** - * Path canonicalizer based on //source/common/chromium_url. - */ -class LegacyPathCanonicalizer { -public: - // Returns the canonicalized path if successful. 
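The long comment above explains that QUIC stream capacity is a budget granted by MAX_STREAMS rather than a per-stream cap, so the HTTP/3 active client tracks it itself and reports only the effective delta to the connection pool. A standalone model of that bookkeeping under the same assumptions (an optimistic initial budget of 100, capped by the configured stream limit, never restored on stream closure):

#include <algorithm>
#include <cstdint>
#include <iostream>

struct Http3CapacitySketch {
  uint64_t quiche_capacity = 100;   // optimistic guess until MAX_STREAMS arrives
  int64_t configured_stream_limit;  // Envoy-side cap on concurrent streams
  int64_t cluster_capacity = 0;     // what the pool believes this connection can still serve

  int64_t currentUnusedCapacity() const {
    return std::min<int64_t>(quiche_capacity, configured_stream_limit);
  }

  // Mirrors updateCapacity(): report the delta in *effective* capacity, not the raw
  // change in the QUIC budget, since the configured limit may be the binding constraint.
  void updateCapacity(uint64_t new_quiche_capacity) {
    const int64_t old_capacity = currentUnusedCapacity();
    quiche_capacity = new_quiche_capacity;
    cluster_capacity += currentUnusedCapacity() - old_capacity;
  }

  void newStream() { updateCapacity(quiche_capacity - 1); }  // budget is consumed permanently
};

int main() {
  Http3CapacitySketch c{100, 64, 64};
  c.updateCapacity(10);  // MAX_STREAMS allows only 10 streams
  c.newStream();         // each new stream spends one unit
  std::cout << c.currentUnusedCapacity() << " " << c.cluster_capacity << "\n";  // 9 9
}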
- static absl::optional canonicalizePath(absl::string_view original_path); -}; - -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/match_wrapper/config.cc b/source/common/http/match_wrapper/config.cc index 30c70adb2fac..2292ca4e2d6f 100644 --- a/source/common/http/match_wrapper/config.cc +++ b/source/common/http/match_wrapper/config.cc @@ -52,6 +52,7 @@ struct DelegatingFactoryCallbacks : public Envoy::Http::FilterChainFactoryCallba Matcher::MatchTreeSharedPtr match_tree) : delegated_callbacks_(delegated_callbacks), match_tree_(std::move(match_tree)) {} + Event::Dispatcher& dispatcher() override { return delegated_callbacks_.dispatcher(); } void addStreamDecoderFilter(Envoy::Http::StreamDecoderFilterSharedPtr filter) override { delegated_callbacks_.addStreamDecoderFilter(std::move(filter), match_tree_); } diff --git a/source/common/http/path_utility.cc b/source/common/http/path_utility.cc index 4afc26cd6ed2..ce5a88b1d037 100644 --- a/source/common/http/path_utility.cc +++ b/source/common/http/path_utility.cc @@ -1,7 +1,6 @@ #include "source/common/http/path_utility.h" #include "source/common/common/logger.h" -#include "source/common/http/legacy_path_canonicalizer.h" #include "source/common/runtime/runtime_features.h" #include "absl/strings/str_join.h" @@ -15,19 +14,15 @@ namespace Http { namespace { absl::optional canonicalizePath(absl::string_view original_path) { - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.remove_forked_chromium_url")) { - std::string canonical_path; - url::Component in_component(0, original_path.size()); - url::Component out_component; - url::StdStringCanonOutput output(&canonical_path); - if (!url::CanonicalizePath(original_path.data(), in_component, &output, &out_component)) { - return absl::nullopt; - } else { - output.Complete(); - return absl::make_optional(std::move(canonical_path)); - } + std::string canonical_path; + url::Component in_component(0, original_path.size()); + url::Component out_component; + url::StdStringCanonOutput output(&canonical_path); + if (!url::CanonicalizePath(original_path.data(), in_component, &output, &out_component)) { + return absl::nullopt; } - return LegacyPathCanonicalizer::canonicalizePath(original_path); + output.Complete(); + return absl::make_optional(std::move(canonical_path)); } void unescapeInPath(std::string& path, absl::string_view escape_sequence, diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index ac822a951598..6f2a55da1056 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -463,6 +463,18 @@ std::string Utility::stripQueryString(const HeaderString& path) { query_offset != path_str.npos ? query_offset : path_str.size()); } +std::string Utility::replaceQueryString(const HeaderString& path, + const Utility::QueryParams& params) { + std::string new_path{Http::Utility::stripQueryString(path)}; + + if (!params.empty()) { + const auto new_query_string = Http::Utility::queryParamsToString(params); + absl::StrAppend(&new_path, new_query_string); + } + + return new_path; +} + std::string Utility::parseCookieValue(const HeaderMap& headers, const std::string& key) { // TODO(wbpcode): Modify the headers parameter type to 'RequestHeaderMap'. 
return parseCookie(headers, key, Http::Headers::get().Cookie); diff --git a/source/common/http/utility.h b/source/common/http/utility.h index 5038ebce1e69..a13922b7aee9 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -248,6 +248,20 @@ absl::string_view findQueryStringStart(const HeaderString& path); */ std::string stripQueryString(const HeaderString& path); +/** + * Replace the query string portion of a given path with a new one. + * + * e.g. replaceQueryString("/foo?key=1", {key:2}) -> "/foo?key=2" + * replaceQueryString("/bar", {hello:there}) -> "/bar?hello=there" + * + * @param path the original path that may or may not contain an existing query string + * @param params the new params whose string representation should be formatted onto + * the `path` above + * @return std::string the new path whose query string has been replaced by `params` and whose path + * portion from `path` remains unchanged. + */ +std::string replaceQueryString(const HeaderString& path, const QueryParams& params); + /** * Parse a particular value out of a cookie * @param headers supplies the headers to get the cookie from. diff --git a/source/common/matcher/matcher.h b/source/common/matcher/matcher.h index 8afa2a0b9639..c81a75a18c5a 100644 --- a/source/common/matcher/matcher.h +++ b/source/common/matcher/matcher.h @@ -25,12 +25,13 @@ namespace Matcher { template class ActionBase : public Action { public: - ActionBase() : type_name_(ProtoType().GetTypeName()) {} + absl::string_view typeUrl() const override { return staticTypeUrl(); } - absl::string_view typeUrl() const override { return type_name_; } + static absl::string_view staticTypeUrl() { + const static std::string typeUrl = ProtoType().GetTypeName(); -private: - const std::string type_name_; + return typeUrl; + } }; struct MaybeMatchResult { @@ -72,9 +73,9 @@ template using DataInputFactoryCb = std::function class MatchTreeFactory { public: MatchTreeFactory(ActionFactoryContext& context, - Server::Configuration::ServerFactoryContext& server_factory_context, + Server::Configuration::ServerFactoryContext& factory_context, MatchTreeValidationVisitor& validation_visitor) - : action_factory_context_(context), server_factory_context_(server_factory_context), + : action_factory_context_(context), server_factory_context_(factory_context), validation_visitor_(validation_visitor) {} // TODO(snowp): Remove this type parameter once we only have one Matcher proto. 
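A short usage sketch for the new replaceQueryString() helper, built on the existing parseQueryString()/queryParamsToString() utilities; the setup below is illustrative, and since QueryParams is an ordered map the resulting parameter ordering is deterministic:

#include <iostream>

#include "source/common/http/utility.h"

int main() {
  Envoy::Http::HeaderString path;
  path.setCopy("/foo?key=1&other=2");

  // Start from the current params and override one entry.
  Envoy::Http::Utility::QueryParams params =
      Envoy::Http::Utility::parseQueryString(path.getStringView());
  params["key"] = "2";

  // Prints "/foo?key=2&other=2": the path portion is untouched and the query is rebuilt.
  std::cout << Envoy::Http::Utility::replaceQueryString(path, params) << "\n";
}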
diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 114142e7040a..d75313240dd6 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -108,46 +108,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "apple_dns_lib", - srcs = select({ - "//bazel:apple": ["apple_dns_impl.cc"], - "//conditions:default": [], - }), - hdrs = select({ - "//bazel:apple": ["apple_dns_impl.h"], - "//conditions:default": [], - }), - deps = [ - ":address_lib", - ":utility_lib", - "//envoy/event:dispatcher_interface", - "//envoy/event:file_event_interface", - "//envoy/event:timer_interface", - "//envoy/network:dns_interface", - "//source/common/common:assert_lib", - "//source/common/common:backoff_lib", - "//source/common/common:linked_object", - "//source/common/singleton:threadsafe_singleton", - ], -) - -envoy_cc_library( - name = "dns_lib", - srcs = ["dns_impl.cc"], - hdrs = ["dns_impl.h"], - external_deps = ["ares"], - deps = [ - ":address_lib", - ":utility_lib", - "//envoy/event:dispatcher_interface", - "//envoy/event:file_event_interface", - "//envoy/network:dns_interface", - "//source/common/common:assert_lib", - "//source/common/common:linked_object", - ], -) - envoy_cc_library( name = "filter_lib", hdrs = ["filter_impl.h"], diff --git a/source/common/network/addr_family_aware_socket_option_impl.cc b/source/common/network/addr_family_aware_socket_option_impl.cc index 8c4c756a1093..a820d012f770 100644 --- a/source/common/network/addr_family_aware_socket_option_impl.cc +++ b/source/common/network/addr_family_aware_socket_option_impl.cc @@ -1,7 +1,7 @@ #include "source/common/network/addr_family_aware_socket_option_impl.h" #include "envoy/common/exception.h" -#include "envoy/common/platform.h" +#include "envoy/common/optref.h" #include "envoy/config/core/v3/base.pb.h" #include "source/common/api/os_sys_calls_impl.h" @@ -14,11 +14,12 @@ namespace Network { namespace { -SocketOptionImplOptRef getOptionForSocket(const Socket& socket, SocketOptionImpl& ipv4_option, - SocketOptionImpl& ipv6_option) { +OptRef getOptionForSocket(const Socket& socket, + const Socket::Option& ipv4_option, + const Socket::Option& ipv6_option) { auto version = socket.ipVersion(); if (!version.has_value()) { - return absl::nullopt; + return {}; } // If the FD is v4, we can only try the IPv4 variant. 
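The getOptionForSocket() change above swaps absl::optional<std::reference_wrapper<SocketOptionImpl>> for OptRef<const Socket::Option>, which can be returned empty with a bare {}. A minimal sketch of the same selection pattern, assuming OptRef's converting constructor from a reference, and using a hypothetical option type rather than the real Socket::Option interface:

#include <iostream>

#include "envoy/common/optref.h"

#include "absl/types/optional.h"

// Hypothetical stand-ins for an IP-versioned socket and its per-family options.
enum class IpVersion { v4, v6 };

struct FakeOption {
  const char* name;
};

// Mirrors getOptionForSocket(): pick the option matching the socket's address family,
// or return an empty OptRef when the socket has no IP version at all.
Envoy::OptRef<const FakeOption> pickOption(const absl::optional<IpVersion>& version,
                                           const FakeOption& v4_option,
                                           const FakeOption& v6_option) {
  if (!version.has_value()) {
    return {}; // Non-IP socket (e.g. a pipe): nothing to apply.
  }
  if (*version == IpVersion::v4) {
    return v4_option;
  }
  return v6_option;
}

int main() {
  const FakeOption v4_option{"IP_TOS"};
  const FakeOption v6_option{"IPV6_TCLASS"};
  auto option = pickOption(IpVersion::v6, v4_option, v6_option);
  if (option.has_value()) {
    std::cout << option.value().get().name << "\n"; // Same accessor shape used in the diff above.
  }
}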
@@ -38,7 +39,7 @@ SocketOptionImplOptRef getOptionForSocket(const Socket& socket, SocketOptionImpl bool AddrFamilyAwareSocketOptionImpl::setOption( Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const { - return setIpSocketOption(socket, state, ipv4_option_, ipv6_option_); + return setIpSocketOption(socket, state, *ipv4_option_, *ipv6_option_); } absl::optional AddrFamilyAwareSocketOptionImpl::getOptionDetails( @@ -49,21 +50,20 @@ absl::optional AddrFamilyAwareSocketOptionImpl::getOpti return absl::nullopt; } - return option->get().getOptionDetails(socket, state); + return option.value().get().getOptionDetails(socket, state); } bool AddrFamilyAwareSocketOptionImpl::setIpSocketOption( Socket& socket, envoy::config::core::v3::SocketOption::SocketState state, - const std::unique_ptr& ipv4_option, - const std::unique_ptr& ipv6_option) { - auto option = getOptionForSocket(socket, *ipv4_option, *ipv6_option); + const Socket::Option& ipv4_option, const Socket::Option& ipv6_option) { + auto option = getOptionForSocket(socket, ipv4_option, ipv6_option); if (!option.has_value()) { ENVOY_LOG(warn, "Failed to set IP socket option on non-IP socket"); return false; } - return option->get().setOption(socket, state); + return option.value().get().setOption(socket, state); } } // namespace Network diff --git a/source/common/network/addr_family_aware_socket_option_impl.h b/source/common/network/addr_family_aware_socket_option_impl.h index ff7ada58a947..5c7c5975467e 100644 --- a/source/common/network/addr_family_aware_socket_option_impl.h +++ b/source/common/network/addr_family_aware_socket_option_impl.h @@ -29,6 +29,9 @@ class AddrFamilyAwareSocketOptionImpl : public Socket::Option, SocketOptionName ipv6_optname, absl::string_view ipv6_value) : ipv4_option_(std::make_unique(in_state, ipv4_optname, ipv4_value)), ipv6_option_(std::make_unique(in_state, ipv6_optname, ipv6_value)) {} + AddrFamilyAwareSocketOptionImpl(Socket::OptionConstPtr&& ipv4_option, + Socket::OptionConstPtr&& ipv6_option) + : ipv4_option_(std::move(ipv4_option)), ipv6_option_(std::move(ipv6_option)) {} // Socket::Option bool setOption(Socket& socket, @@ -41,6 +44,7 @@ class AddrFamilyAwareSocketOptionImpl : public Socket::Option, absl::optional
getOptionDetails(const Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const override; + bool isSupported() const override { return true; } /** * Set a socket option that applies at both IPv4 and IPv6 socket levels. When the underlying FD @@ -59,12 +63,12 @@ class AddrFamilyAwareSocketOptionImpl : public Socket::Option, */ static bool setIpSocketOption(Socket& socket, envoy::config::core::v3::SocketOption::SocketState state, - const std::unique_ptr& ipv4_option, - const std::unique_ptr& ipv6_option); + const Socket::Option& ipv4_option, + const Socket::Option& ipv6_option); private: - const std::unique_ptr ipv4_option_; - const std::unique_ptr ipv6_option_; + const Socket::OptionConstPtr ipv4_option_; + const Socket::OptionConstPtr ipv6_option_; }; } // namespace Network diff --git a/source/common/network/cidr_range.cc b/source/common/network/cidr_range.cc index cdbfe9fe1d60..39b2bbd1c22d 100644 --- a/source/common/network/cidr_range.cc +++ b/source/common/network/cidr_range.cc @@ -33,8 +33,6 @@ CidrRange::CidrRange(InstanceConstSharedPtr address, int length) } } -CidrRange::CidrRange(const CidrRange& other) = default; - CidrRange& CidrRange::operator=(const CidrRange& other) = default; bool CidrRange::operator==(const CidrRange& other) const { @@ -192,10 +190,11 @@ InstanceConstSharedPtr CidrRange::truncateIpAddressAndLength(InstanceConstShared } IpList::IpList(const Protobuf::RepeatedPtrField& cidrs) { + ip_list_.reserve(cidrs.size()); for (const envoy::config::core::v3::CidrRange& entry : cidrs) { CidrRange list_entry = CidrRange::create(entry); if (list_entry.isValid()) { - ip_list_.push_back(list_entry); + ip_list_.push_back(std::move(list_entry)); } else { throw EnvoyException( fmt::format("invalid ip/mask combo '{}/{}' (format is /<# mask bits>)", diff --git a/source/common/network/cidr_range.h b/source/common/network/cidr_range.h index 791f3de7b5d6..e8ee65b4b3d4 100644 --- a/source/common/network/cidr_range.h +++ b/source/common/network/cidr_range.h @@ -25,10 +25,8 @@ class CidrRange { */ CidrRange(); - /** - * Copies an existing CidrRange. - */ - CidrRange(const CidrRange& other); + CidrRange(const CidrRange& other) = default; + CidrRange(CidrRange&& other) = default; /** * Overwrites this with other. 
@@ -129,7 +127,6 @@ class IpList { IpList() = default; bool contains(const Instance& address) const; - bool empty() const { return ip_list_.empty(); } private: std::vector ip_list_; diff --git a/source/common/network/dns_resolver/BUILD b/source/common/network/dns_resolver/BUILD new file mode 100644 index 000000000000..1c1284e246d3 --- /dev/null +++ b/source/common/network/dns_resolver/BUILD @@ -0,0 +1,24 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "dns_factory_util_lib", + srcs = ["dns_factory_util.cc"], + hdrs = ["dns_factory_util.h"], + deps = [ + "//envoy/network:dns_resolver_interface", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/network/dns_resolver/apple/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/network/dns_resolver/cares/v3:pkg_cc_proto", + ], +) diff --git a/source/common/network/dns_resolver/dns_factory_util.cc b/source/common/network/dns_resolver/dns_factory_util.cc new file mode 100644 index 000000000000..d8d1712cab95 --- /dev/null +++ b/source/common/network/dns_resolver/dns_factory_util.cc @@ -0,0 +1,96 @@ +#include "source/common/network/dns_resolver/dns_factory_util.h" + +namespace Envoy { +namespace Network { + +// Create a default c-ares DNS resolver typed config. +void makeDefaultCaresDnsResolverConfig( + envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) { + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + typed_dns_resolver_config.set_name(std::string(CaresDnsResolver)); +} + +// Create a default apple DNS resolver typed config. +void makeDefaultAppleDnsResolverConfig( + envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) { + envoy::extensions::network::dns_resolver::apple::v3::AppleDnsResolverConfig apple; + typed_dns_resolver_config.mutable_typed_config()->PackFrom(apple); + typed_dns_resolver_config.set_name(std::string(AppleDnsResolver)); +} + +// Create a default DNS resolver typed config based on build system and configuration. +void makeDefaultDnsResolverConfig( + envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) { + // If use apple API for DNS lookups, create an AppleDnsResolverConfig typed config. + if (checkUseAppleApiForDnsLookups(typed_dns_resolver_config)) { + return; + } + // Otherwise, create a CaresDnsResolverConfig typed config. + makeDefaultCaresDnsResolverConfig(typed_dns_resolver_config); +} + +// If it is MacOS and the run time flag: envoy.restart_features.use_apple_api_for_dns_lookups +// is enabled, create an AppleDnsResolverConfig typed config. 
+bool checkUseAppleApiForDnsLookups(
+    envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) {
+  if (Runtime::runtimeFeatureEnabled("envoy.restart_features.use_apple_api_for_dns_lookups")) {
+    if (Config::Utility::getAndCheckFactoryByName<Network::DnsResolverFactory>(
+            std::string(AppleDnsResolver), true) != nullptr) {
+      makeDefaultAppleDnsResolverConfig(typed_dns_resolver_config);
+      ENVOY_LOG_MISC(debug, "create Apple DNS resolver type: {} in macOS.",
+                     typed_dns_resolver_config.name());
+      return true;
+    }
+#ifdef __APPLE__
+    RELEASE_ASSERT(false,
+                   "On macOS, the run-time flag 'use_apple_api_for_dns_lookups' is enabled, "
+                   "but the envoy.network.dns_resolver.apple extension is not included in the "
+                   "Envoy build file. This is a misconfiguration. Aborting Envoy.");
+#endif
+  }
+  return false;
+}
+
+// Overload of the template function for the DnsFilterConfig type, which doesn't need to copy
+// anything.
+void handleLegacyDnsResolverData(
+    const envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig::ClientContextConfig&,
+    envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) {
+  makeDefaultCaresDnsResolverConfig(typed_dns_resolver_config);
+}
+
+// Overload of the template function for the Cluster config type, which needs to copy
+// both use_tcp_for_dns_lookups and dns_resolvers.
+void handleLegacyDnsResolverData(
+    const envoy::config::cluster::v3::Cluster& config,
+    envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) {
+  envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares;
+  cares.mutable_dns_resolver_options()->set_use_tcp_for_dns_lookups(
+      config.use_tcp_for_dns_lookups());
+  if (!config.dns_resolvers().empty()) {
+    cares.mutable_resolvers()->MergeFrom(config.dns_resolvers());
+  }
+  typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares);
+  typed_dns_resolver_config.set_name(std::string(CaresDnsResolver));
+}
+
+// Create the DNS resolver factory from the typed config. This is the underlying
+// function which performs the registry lookup based on the typed config.
+Network::DnsResolverFactory& createDnsResolverFactoryFromTypedConfig(
+    const envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) {
+  ENVOY_LOG_MISC(debug, "create DNS resolver type: {}", typed_dns_resolver_config.name());
+  return Config::Utility::getAndCheckFactory<Network::DnsResolverFactory>(
+      typed_dns_resolver_config);
+}
+
+// Create the default DNS resolver factory: apple for macOS or c-ares for all others.
+// The default registry lookup will always succeed, so no exception is thrown.
+// This function can be called on main or worker threads.
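Taken together, the helpers in this file take a call site from "some config proto" to a concrete resolver in two steps: build a TypedExtensionConfig, then look up the factory. A hedged usage sketch of the default path via createDefaultDnsResolverFactory(), defined just below; it assumes DnsResolverFactory exposes createDnsResolver(dispatcher, api, typed_dns_resolver_config), which is not shown in this diff:

// Illustrative only: assumes the createDnsResolver(dispatcher, api, typed_config) signature on
// Network::DnsResolverFactory; adjust to the actual interface in envoy/network/dns_resolver.h.
#include "envoy/api/api.h"
#include "envoy/event/dispatcher.h"

#include "source/common/network/dns_resolver/dns_factory_util.h"

namespace Envoy {

Network::DnsResolverSharedPtr makeDefaultResolver(Event::Dispatcher& dispatcher, Api::Api& api) {
  envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config;
  // Picks apple on macOS (when the runtime flag and extension are present), c-ares otherwise,
  // and fills in typed_dns_resolver_config as a side effect.
  Network::DnsResolverFactory& factory =
      Network::createDefaultDnsResolverFactory(typed_dns_resolver_config);
  return factory.createDnsResolver(dispatcher, api, typed_dns_resolver_config);
}

} // namespace Envoy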
+Network::DnsResolverFactory& createDefaultDnsResolverFactory( + envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) { + Network::makeDefaultDnsResolverConfig(typed_dns_resolver_config); + return createDnsResolverFactoryFromTypedConfig(typed_dns_resolver_config); +} + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/dns_resolver/dns_factory_util.h b/source/common/network/dns_resolver/dns_factory_util.h new file mode 100644 index 000000000000..6eb1c0de0ac1 --- /dev/null +++ b/source/common/network/dns_resolver/dns_factory_util.h @@ -0,0 +1,140 @@ +#pragma once + +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" +#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.h" +#include "envoy/extensions/network/dns_resolver/apple/v3/apple_dns_resolver.pb.h" +#include "envoy/extensions/network/dns_resolver/cares/v3/cares_dns_resolver.pb.h" +#include "envoy/network/dns_resolver.h" + +#include "source/common/runtime/runtime_features.h" + +namespace Envoy { +namespace Network { + +// Create a default c-ares DNS resolver typed config. +void makeDefaultCaresDnsResolverConfig( + envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config); + +// Create a default apple DNS resolver typed config. +void makeDefaultAppleDnsResolverConfig( + envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config); + +// Create a default DNS resolver typed config based on build system and configuration. +void makeDefaultDnsResolverConfig( + envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config); + +// If it is MacOS and the run time flag: envoy.restart_features.use_apple_api_for_dns_lookups +// is enabled, create an AppleDnsResolverConfig typed config. +bool checkUseAppleApiForDnsLookups( + envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config); + +// If the config has typed_dns_resolver_config, copy it over. +template +bool checkTypedDnsResolverConfigExist( + const ConfigType& config, + envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) { + if (config.has_typed_dns_resolver_config()) { + typed_dns_resolver_config.MergeFrom(config.typed_dns_resolver_config()); + return true; + } + return false; +} + +// If the config has dns_resolution_config, create a CaresDnsResolverConfig typed config based on +// it. +template +bool checkDnsResolutionConfigExist( + const ConfigType& config, + envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) { + if (config.has_dns_resolution_config()) { + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + if (!config.dns_resolution_config().resolvers().empty()) { + cares.mutable_resolvers()->MergeFrom(config.dns_resolution_config().resolvers()); + } + cares.mutable_dns_resolver_options()->MergeFrom( + config.dns_resolution_config().dns_resolver_options()); + typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + typed_dns_resolver_config.set_name(std::string(CaresDnsResolver)); + return true; + } + return false; +} + +// For backward compatibility, copy over use_tcp_for_dns_lookups from config, and create +// a CaresDnsResolverConfig typed config. This logic fit for bootstrap, and dns_cache config types. 
+template <class ConfigType>
+void handleLegacyDnsResolverData(
+    const ConfigType& config,
+    envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) {
+  envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares;
+  cares.mutable_dns_resolver_options()->set_use_tcp_for_dns_lookups(
+      config.use_tcp_for_dns_lookups());
+  typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares);
+  typed_dns_resolver_config.set_name(std::string(CaresDnsResolver));
+}
+
+// Overload of the template function for the DnsFilterConfig type, which doesn't need to copy
+// anything.
+void handleLegacyDnsResolverData(
+    const envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig::ClientContextConfig&,
+    envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config);
+
+// Overload of the template function for the Cluster config type, which needs to copy
+// both use_tcp_for_dns_lookups and dns_resolvers.
+void handleLegacyDnsResolverData(
+    const envoy::config::cluster::v3::Cluster& config,
+    envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config);
+
+// Make typed_dns_resolver_config from the passed @param config.
+template <class ConfigType>
+envoy::config::core::v3::TypedExtensionConfig makeDnsResolverConfig(const ConfigType& config) {
+  envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config;
+
+  // typed_dns_resolver_config takes precedence.
+  if (checkTypedDnsResolverConfigExist(config, typed_dns_resolver_config)) {
+    return typed_dns_resolver_config;
+  }
+
+  // If using the Apple API for DNS lookups, create an AppleDnsResolverConfig typed config.
+  if (checkUseAppleApiForDnsLookups(typed_dns_resolver_config)) {
+    return typed_dns_resolver_config;
+  }
+
+  // If dns_resolution_config exists, create a CaresDnsResolverConfig typed config based on it.
+  if (checkDnsResolutionConfigExist(config, typed_dns_resolver_config)) {
+    return typed_dns_resolver_config;
+  }
+
+  // Handle legacy DNS resolver fields for backward compatibility.
+  // Different config types have different fields to copy.
+  handleLegacyDnsResolverData(config, typed_dns_resolver_config);
+  return typed_dns_resolver_config;
+}
+
+// Create the DNS resolver factory from the typed config. This is the underlying
+// function which performs the registry lookup based on the typed config.
+Network::DnsResolverFactory& createDnsResolverFactoryFromTypedConfig(
+    const envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config);
+
+// Create the default DNS resolver factory: apple for macOS or c-ares for all others.
+// The default registry lookup will always succeed, so no exception is thrown.
+// This function can be called on main or worker threads.
+Network::DnsResolverFactory& createDefaultDnsResolverFactory(
+    envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config);
+
+// Create the DNS resolver factory from the proto config.
+// The passed-in config parameter may contain an invalid typed_dns_resolver_config.
+// In that case, the underlying registry lookup will throw an exception.
+// This function has to be called on the main thread.
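The precedence in makeDnsResolverConfig() is: explicit typed_dns_resolver_config, then the Apple runtime path, then dns_resolution_config, then the legacy fields. A small sketch of what the legacy fallback produces for a Cluster proto when only use_tcp_for_dns_lookups is set; createDnsResolverFactoryFromProto(), declared just below, wraps this plus the factory lookup (illustrative, no error handling):

#include <iostream>

#include "envoy/config/cluster/v3/cluster.pb.h"

#include "source/common/network/dns_resolver/dns_factory_util.h"

int main() {
  envoy::config::cluster::v3::Cluster cluster;
  // Only the legacy field is set, so makeDnsResolverConfig() falls through to
  // handleLegacyDnsResolverData() and packs a CaresDnsResolverConfig.
  cluster.set_use_tcp_for_dns_lookups(true);

  const envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config =
      Envoy::Network::makeDnsResolverConfig(cluster);

  std::cout << typed_dns_resolver_config.name() << "\n";                    // c-ares extension name.
  std::cout << typed_dns_resolver_config.typed_config().type_url() << "\n"; // CaresDnsResolverConfig.
}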
+template +Network::DnsResolverFactory& createDnsResolverFactoryFromProto( + const ConfigType& config, + envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) { + ASSERT(Thread::MainThread::isMainOrTestThread()); + typed_dns_resolver_config = makeDnsResolverConfig(config); + return createDnsResolverFactoryFromTypedConfig(typed_dns_resolver_config); +} + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/happy_eyeballs_connection_impl.cc b/source/common/network/happy_eyeballs_connection_impl.cc index f64f1252142b..40802f0919fb 100644 --- a/source/common/network/happy_eyeballs_connection_impl.cc +++ b/source/common/network/happy_eyeballs_connection_impl.cc @@ -14,6 +14,7 @@ HappyEyeballsConnectionImpl::HappyEyeballsConnectionImpl( connection_construction_state_( {source_address, socket_factory, transport_socket_options, options}), next_attempt_timer_(dispatcher_.createTimer([this]() -> void { tryAnotherConnection(); })) { + ENVOY_LOG(trace, "New connection."); connections_.push_back(createNextConnection()); } @@ -314,6 +315,7 @@ void HappyEyeballsConnectionImpl::close(ConnectionCloseType type) { } connect_finished_ = true; + ENVOY_LOG(trace, "Disabling next attempt timer."); next_attempt_timer_->disableTimer(); for (size_t i = 0; i < connections_.size(); ++i) { connections_[i]->removeConnectionCallbacks(*callbacks_wrappers_[i]); @@ -351,7 +353,7 @@ void HappyEyeballsConnectionImpl::hashKey(std::vector& hash_key) const void HappyEyeballsConnectionImpl::setConnectionStats(const ConnectionStats& stats) { if (!connect_finished_) { - per_connection_state_.connection_stats_ = stats; + per_connection_state_.connection_stats_ = std::make_unique(stats); } for (auto& connection : connections_) { connection->setConnectionStats(stats); @@ -394,7 +396,7 @@ ClientConnectionPtr HappyEyeballsConnectionImpl::createNextConnection() { if (per_connection_state_.no_delay_.has_value()) { connection->noDelay(per_connection_state_.no_delay_.value()); } - if (per_connection_state_.connection_stats_.has_value()) { + if (per_connection_state_.connection_stats_) { connection->setConnectionStats(*per_connection_state_.connection_stats_); } if (per_connection_state_.buffer_limits_.has_value()) { @@ -415,6 +417,7 @@ ClientConnectionPtr HappyEyeballsConnectionImpl::createNextConnection() { } void HappyEyeballsConnectionImpl::tryAnotherConnection() { + ENVOY_LOG(trace, "Trying another connection."); connections_.push_back(createNextConnection()); connections_.back()->connect(); maybeScheduleNextAttempt(); @@ -424,15 +427,18 @@ void HappyEyeballsConnectionImpl::maybeScheduleNextAttempt() { if (next_address_ >= address_list_.size()) { return; } + ENVOY_LOG(trace, "Scheduling next attempt."); next_attempt_timer_->enableTimer(std::chrono::milliseconds(300)); } void HappyEyeballsConnectionImpl::onEvent(ConnectionEvent event, ConnectionCallbacksWrapper* wrapper) { if (event != ConnectionEvent::Connected) { + ENVOY_LOG(trace, "Connection failed to connect"); // This connection attempt has failed. If possible, start another connection attempt // immediately, instead of waiting for the timer. 
if (next_address_ < address_list_.size()) { + ENVOY_LOG(trace, "Disabling next attempt timer."); next_attempt_timer_->disableTimer(); tryAnotherConnection(); } @@ -454,8 +460,8 @@ void HappyEyeballsConnectionImpl::onEvent(ConnectionEvent event, void HappyEyeballsConnectionImpl::setUpFinalConnection(ConnectionEvent event, ConnectionCallbacksWrapper* wrapper) { connect_finished_ = true; + ENVOY_LOG(trace, "Disabling next attempt timer due to final connection."); next_attempt_timer_->disableTimer(); - // Remove the proxied connection callbacks from all connections. for (auto& w : callbacks_wrappers_) { w->connection().removeConnectionCallbacks(*w); @@ -466,6 +472,7 @@ void HappyEyeballsConnectionImpl::setUpFinalConnection(ConnectionEvent event, while (it != connections_.end()) { if (it->get() != &(wrapper->connection())) { (*it)->close(ConnectionCloseType::NoFlush); + dispatcher_.deferredDelete(std::move(*it)); it = connections_.erase(it); } else { ++it; @@ -533,6 +540,7 @@ void HappyEyeballsConnectionImpl::cleanupWrapperAndConnection(ConnectionCallback for (auto it = connections_.begin(); it != connections_.end();) { if (it->get() == &(wrapper->connection())) { (*it)->close(ConnectionCloseType::NoFlush); + dispatcher_.deferredDelete(std::move(*it)); it = connections_.erase(it); } else { ++it; diff --git a/source/common/network/happy_eyeballs_connection_impl.h b/source/common/network/happy_eyeballs_connection_impl.h index 5f24c659700e..4a64bc420d67 100644 --- a/source/common/network/happy_eyeballs_connection_impl.h +++ b/source/common/network/happy_eyeballs_connection_impl.h @@ -32,7 +32,8 @@ namespace Network { * TODO(RyanTheOptimist): Implement the Happy Eyeballs address sorting algorithm * either in the class or in the resolution code. */ -class HappyEyeballsConnectionImpl : public ClientConnection { +class HappyEyeballsConnectionImpl : public ClientConnection, + Logger::Loggable { public: HappyEyeballsConnectionImpl(Event::Dispatcher& dispatcher, const std::vector& address_list, @@ -161,7 +162,7 @@ class HappyEyeballsConnectionImpl : public ClientConnection { absl::optional detect_early_close_when_read_disabled_; absl::optional no_delay_; absl::optional enable_half_close_; - OptRef connection_stats_; + std::unique_ptr connection_stats_; absl::optional buffer_limits_; absl::optional start_secure_transport_; absl::optional delayed_close_timeout_; diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index 4d4c84ac24a2..ca6af3ee9052 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -33,6 +33,13 @@ class ListenSocketImpl : public SocketImpl { void setupSocket(const Network::Socket::OptionsSharedPtr& options); void setListenSocketOptions(const Network::Socket::OptionsSharedPtr& options); Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr address) override; + + void close() override { + if (io_handle_ != nullptr && io_handle_->isOpen()) { + io_handle_->close(); + } + } + bool isOpen() const override { return io_handle_ != nullptr && io_handle_->isOpen(); } }; /** @@ -79,6 +86,16 @@ template class NetworkListenSocket : public ListenSocketImpl { Socket::Type socketType() const override { return T::type; } + SocketPtr duplicate() override { + if (io_handle_ == nullptr) { + // This is a listen socket that does not bind to port. Pass nullptr socket options. 
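The dispatcher_.deferredDelete() calls added above matter because the losing connections are torn down from inside their own event callbacks; destroying them synchronously could free an object still on the call stack. A standalone sketch of the close/defer/erase idiom with a hypothetical minimal dispatcher (not Envoy's Event::Dispatcher):

#include <iostream>
#include <memory>
#include <vector>

// Hypothetical minimal dispatcher: parks objects until it is safe to destroy them.
struct DeferredDeletable {
  virtual ~DeferredDeletable() = default;
};

class MiniDispatcher {
public:
  void deferredDelete(std::unique_ptr<DeferredDeletable>&& object) {
    graveyard_.push_back(std::move(object));
  }
  // Called later, outside any connection callback, e.g. at the end of the event loop turn.
  void clearDeferredDeleteList() { graveyard_.clear(); }

private:
  std::vector<std::unique_ptr<DeferredDeletable>> graveyard_;
};

struct FakeConnection : public DeferredDeletable {
  explicit FakeConnection(int id) : id(id) {}
  ~FakeConnection() override { std::cout << "destroyed " << id << "\n"; }
  void close() { std::cout << "closed " << id << "\n"; }
  int id;
};

int main() {
  MiniDispatcher dispatcher;
  std::vector<std::unique_ptr<FakeConnection>> connections;
  connections.push_back(std::make_unique<FakeConnection>(1));
  connections.push_back(std::make_unique<FakeConnection>(2));

  FakeConnection* winner = connections[1].get();

  // Same shape as setUpFinalConnection(): close and defer-delete every losing connection.
  for (auto it = connections.begin(); it != connections.end();) {
    if (it->get() != winner) {
      (*it)->close();
      dispatcher.deferredDelete(std::move(*it)); // Destruction happens later, not here.
      it = connections.erase(it);
    } else {
      ++it;
    }
  }
  dispatcher.clearDeferredDeleteList(); // "destroyed 1" prints only now.
}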
+ return std::make_unique>(connection_info_provider_->localAddress(), + /*options=*/nullptr, /*bind_to_port*/ false); + } else { + return ListenSocketImpl::duplicate(); + } + } + // These four overrides are introduced to perform check. A null io handle is possible only if the // the owner socket is a listen socket that does not bind to port. IoHandle& ioHandle() override { @@ -97,8 +114,9 @@ template class NetworkListenSocket : public ListenSocketImpl { } } bool isOpen() const override { - ASSERT(io_handle_ != nullptr); - return io_handle_->isOpen(); + return io_handle_ == nullptr ? false // Consider listen socket as closed if it does not bind to + // port. No fd will leak. + : io_handle_->isOpen(); } protected: diff --git a/source/common/network/socket_option_impl.h b/source/common/network/socket_option_impl.h index fd42517c7bd9..8fe7574c729c 100644 --- a/source/common/network/socket_option_impl.h +++ b/source/common/network/socket_option_impl.h @@ -138,8 +138,7 @@ class SocketOptionImpl : public Socket::Option, Logger::Loggable getOptionDetails(const Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const override; - - bool isSupported() const; + bool isSupported() const override; /** * Set the option on the given socket. @@ -162,7 +161,5 @@ class SocketOptionImpl : public Socket::Option, Logger::Loggable value_; }; -using SocketOptionImplOptRef = absl::optional>; - } // namespace Network } // namespace Envoy diff --git a/source/common/network/win32_redirect_records_option_impl.h b/source/common/network/win32_redirect_records_option_impl.h index efa88048d970..6ec1707aab77 100644 --- a/source/common/network/win32_redirect_records_option_impl.h +++ b/source/common/network/win32_redirect_records_option_impl.h @@ -25,8 +25,8 @@ class Win32RedirectRecordsOptionImpl : public Socket::Option, absl::optional
getOptionDetails(const Socket& socket, envoy::config::core::v3::SocketOption::SocketState) const override; + bool isSupported() const override; - bool isSupported() const; static const Network::SocketOptionName& optionName(); private: diff --git a/source/common/protobuf/message_validator_impl.cc b/source/common/protobuf/message_validator_impl.cc index 2d9d3dbb334c..7908f688d3ac 100644 --- a/source/common/protobuf/message_validator_impl.cc +++ b/source/common/protobuf/message_validator_impl.cc @@ -25,10 +25,27 @@ void onDeprecatedFieldCommon(absl::string_view description, bool soft_deprecatio } } // namespace -void WarningValidationVisitorImpl::setUnknownCounter(Stats::Counter& counter) { +void WipCounterBase::setWipCounter(Stats::Counter& wip_counter) { + ASSERT(wip_counter_ == nullptr); + wip_counter_ = &wip_counter; + wip_counter.add(prestats_wip_count_); +} + +void WipCounterBase::onWorkInProgressCommon(absl::string_view description) { + ENVOY_LOG_MISC(warn, "{}", description); + if (wip_counter_ != nullptr) { + wip_counter_->inc(); + } else { + prestats_wip_count_++; + } +} + +void WarningValidationVisitorImpl::setCounters(Stats::Counter& unknown_counter, + Stats::Counter& wip_counter) { + setWipCounter(wip_counter); ASSERT(unknown_counter_ == nullptr); - unknown_counter_ = &counter; - counter.add(prestats_unknown_count_); + unknown_counter_ = &unknown_counter; + unknown_counter.add(prestats_unknown_count_); } void WarningValidationVisitorImpl::onUnknownField(absl::string_view description) { @@ -53,6 +70,10 @@ void WarningValidationVisitorImpl::onDeprecatedField(absl::string_view descripti onDeprecatedFieldCommon(description, soft_deprecation); } +void WarningValidationVisitorImpl::onWorkInProgress(absl::string_view description) { + onWorkInProgressCommon(description); +} + void StrictValidationVisitorImpl::onUnknownField(absl::string_view description) { throw UnknownProtoFieldException( absl::StrCat("Protobuf message (", description, ") has unknown fields")); @@ -63,6 +84,10 @@ void StrictValidationVisitorImpl::onDeprecatedField(absl::string_view descriptio onDeprecatedFieldCommon(description, soft_deprecation); } +void StrictValidationVisitorImpl::onWorkInProgress(absl::string_view description) { + onWorkInProgressCommon(description); +} + ValidationVisitor& getNullValidationVisitor() { MUTABLE_CONSTRUCT_ON_FIRST_USE(NullValidationVisitorImpl); } diff --git a/source/common/protobuf/message_validator_impl.h b/source/common/protobuf/message_validator_impl.h index 865fc7e8ee55..5ddcdf5e59cd 100644 --- a/source/common/protobuf/message_validator_impl.h +++ b/source/common/protobuf/message_validator_impl.h @@ -16,24 +16,34 @@ class NullValidationVisitorImpl : public ValidationVisitor { // Envoy::ProtobufMessage::ValidationVisitor void onUnknownField(absl::string_view) override {} void onDeprecatedField(absl::string_view, bool) override {} - - // Envoy::ProtobufMessage::ValidationVisitor bool skipValidation() override { return true; } + void onWorkInProgress(absl::string_view) override {} }; ValidationVisitor& getNullValidationVisitor(); +// Base class for both warning and strict validators. 
+class WipCounterBase { +protected: + void setWipCounter(Stats::Counter& wip_counter); + void onWorkInProgressCommon(absl::string_view description); + +private: + Stats::Counter* wip_counter_{}; + uint64_t prestats_wip_count_{}; +}; + class WarningValidationVisitorImpl : public ValidationVisitor, + public WipCounterBase, public Logger::Loggable { public: - void setUnknownCounter(Stats::Counter& counter); + void setCounters(Stats::Counter& unknown_counter, Stats::Counter& wip_counter); // Envoy::ProtobufMessage::ValidationVisitor void onUnknownField(absl::string_view description) override; void onDeprecatedField(absl::string_view description, bool soft_deprecation) override; - - // Envoy::ProtobufMessage::ValidationVisitor bool skipValidation() override { return false; } + void onWorkInProgress(absl::string_view description) override; private: // Track hashes of descriptions we've seen, to avoid log spam. A hash is used here to avoid @@ -45,16 +55,21 @@ class WarningValidationVisitorImpl : public ValidationVisitor, uint64_t prestats_unknown_count_{}; }; -class StrictValidationVisitorImpl : public ValidationVisitor { +class StrictValidationVisitorImpl : public ValidationVisitor, public WipCounterBase { public: - // Envoy::ProtobufMessage::ValidationVisitor - void onUnknownField(absl::string_view description) override; + void setCounters(Stats::Counter& wip_counter) { setWipCounter(wip_counter); } // Envoy::ProtobufMessage::ValidationVisitor + void onUnknownField(absl::string_view description) override; bool skipValidation() override { return false; } void onDeprecatedField(absl::string_view description, bool soft_deprecation) override; + void onWorkInProgress(absl::string_view description) override; }; +// TODO(mattklein123): There are various places where the default strict validator is being used. +// This does not increment the WIP stat because nothing calls setCounters() on the stock/static +// version. We should remove this as a public function as well as the stock/static version and +// make sure that all code is either using the server validation context or the null validator. ValidationVisitor& getStrictValidationVisitor(); class ValidationContextImpl : public ValidationContext { @@ -77,23 +92,24 @@ class ProdValidationContextImpl : public ValidationContextImpl { public: ProdValidationContextImpl(bool allow_unknown_static_fields, bool allow_unknown_dynamic_fields, bool ignore_unknown_dynamic_fields) - : ValidationContextImpl(allow_unknown_static_fields ? static_warning_validation_visitor_ - : getStrictValidationVisitor(), - allow_unknown_dynamic_fields - ? (ignore_unknown_dynamic_fields - ? ProtobufMessage::getNullValidationVisitor() - : dynamic_warning_validation_visitor_) - : ProtobufMessage::getStrictValidationVisitor()) {} - - ProtobufMessage::WarningValidationVisitorImpl& staticWarningValidationVisitor() { - return static_warning_validation_visitor_; - } - - ProtobufMessage::WarningValidationVisitorImpl& dynamicWarningValidationVisitor() { - return dynamic_warning_validation_visitor_; + : ValidationContextImpl( + allow_unknown_static_fields + ? static_cast(static_warning_validation_visitor_) + : strict_validation_visitor_, + allow_unknown_dynamic_fields + ? (ignore_unknown_dynamic_fields ? 
ProtobufMessage::getNullValidationVisitor() + : dynamic_warning_validation_visitor_) + : strict_validation_visitor_) {} + + void setCounters(Stats::Counter& static_unknown_counter, Stats::Counter& dynamic_unknown_counter, + Stats::Counter& wip_counter) { + strict_validation_visitor_.setCounters(wip_counter); + static_warning_validation_visitor_.setCounters(static_unknown_counter, wip_counter); + dynamic_warning_validation_visitor_.setCounters(dynamic_unknown_counter, wip_counter); } private: + StrictValidationVisitorImpl strict_validation_visitor_; ProtobufMessage::WarningValidationVisitorImpl static_warning_validation_visitor_; ProtobufMessage::WarningValidationVisitorImpl dynamic_warning_validation_visitor_; }; diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index 330970589053..7e5d60d0752b 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -17,6 +17,8 @@ #include "absl/strings/match.h" #include "udpa/annotations/sensitive.pb.h" +#include "udpa/annotations/status.pb.h" +#include "xds/annotations/v3/status.pb.h" #include "yaml-cpp/yaml.h" using namespace std::chrono_literals; @@ -345,6 +347,11 @@ void checkForDeprecatedNonRepeatedEnumValue( message, validation_visitor); } +constexpr absl::string_view WipWarning = + "API features marked as work-in-progress are not considered stable, are not covered by the " + "threat model, are not supported by the security team, and are subject to breaking changes. Do " + "not use this feature without understanding each of the previous points."; + class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { public: UnexpectedFieldProtoVisitor(ProtobufMessage::ValidationVisitor& validation_visitor, @@ -367,23 +374,14 @@ class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { return nullptr; } + const auto& field_status = field.options().GetExtension(xds::annotations::v3::field_status); + if (field_status.work_in_progress()) { + validation_visitor_.onWorkInProgress(fmt::format( + "field '{}' is marked as work-in-progress. {}", field.full_name(), WipWarning)); + } + // If this field is deprecated, warn or throw an error. if (field.options().deprecated()) { - if (absl::StartsWith(field.name(), "hidden_envoy_deprecated_")) { - // The field was marked as hidden_envoy_deprecated and an error must be thrown, - // unless it is part of an explicit test that needs access to the deprecated field - // when we enable runtime deprecation override to allow point field overrides for tests. - if (!runtime_ || - !runtime_->snapshot().deprecatedFeatureEnabled( - absl::StrCat("envoy.deprecated_features:", field.full_name()), false)) { - const std::string fatal_error = absl::StrCat( - "Illegal use of hidden_envoy_deprecated_ V2 field '", field.full_name(), - "' from file ", filename, - " while using the latest V3 configuration. This field has been removed from the " - "current Envoy API. Please see " ENVOY_DOC_URL_VERSION_HISTORY " for details."); - throw ProtoValidationException(fatal_error, message); - } - } const std::string warning = absl::StrCat("Using {}deprecated option '", field.full_name(), "' from file ", filename, ". 
This configuration will be removed from " @@ -398,6 +396,24 @@ class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { } void onMessage(const Protobuf::Message& message, const void*) override { + if (message.GetDescriptor() + ->options() + .GetExtension(xds::annotations::v3::message_status) + .work_in_progress()) { + validation_visitor_.onWorkInProgress(fmt::format( + "message '{}' is marked as work-in-progress. {}", message.GetTypeName(), WipWarning)); + } + + const auto& udpa_file_options = + message.GetDescriptor()->file()->options().GetExtension(udpa::annotations::file_status); + const auto& xds_file_options = + message.GetDescriptor()->file()->options().GetExtension(xds::annotations::v3::file_status); + if (udpa_file_options.work_in_progress() || xds_file_options.work_in_progress()) { + validation_visitor_.onWorkInProgress( + fmt::format("message '{}' is contained in proto file '{}' marked as work-in-progress. {}", + message.GetTypeName(), message.GetDescriptor()->file()->name(), WipWarning)); + } + // Reject unknown fields. const auto& unknown_fields = message.GetReflection()->GetUnknownFields(message); if (!unknown_fields.empty()) { @@ -405,9 +421,6 @@ class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { for (int n = 0; n < unknown_fields.field_count(); ++n) { error_msg += absl::StrCat(n > 0 ? ", " : "", unknown_fields.field(n).number()); } - // We use the validation visitor but have hard coded behavior below for deprecated fields. - // TODO(htuch): Unify the deprecated and unknown visitor handling behind the validation - // visitor pattern. https://github.com/envoyproxy/envoy/issues/8092. if (!error_msg.empty()) { validation_visitor_.onUnknownField("type " + message.GetTypeName() + " with unknown field set {" + error_msg + "}"); @@ -616,9 +629,10 @@ bool redactAny(Protobuf::Message* message, bool ancestor_is_sensitive) { } // To redact a `TypedStruct`, we have to reify it based on its `type_url` to redact it. -bool redactTypedStruct(Protobuf::Message* message, bool ancestor_is_sensitive) { +bool redactTypedStruct(Protobuf::Message* message, const char* typed_struct_type, + bool ancestor_is_sensitive) { return redactOpaque( - message, ancestor_is_sensitive, "udpa.type.v1.TypedStruct", + message, ancestor_is_sensitive, typed_struct_type, [message](Protobuf::Message* typed_message, const Protobuf::Reflection* reflection, const Protobuf::FieldDescriptor* field_descriptor) { // To unpack a `TypedStruct`, convert the struct from JSON. @@ -636,7 +650,8 @@ bool redactTypedStruct(Protobuf::Message* message, bool ancestor_is_sensitive) { // Recursive helper method for MessageUtil::redact() below. 
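The WipCounterBase plumbing added above exists because work-in-progress warnings can fire while the bootstrap is being parsed, before the stats store (and therefore the counter) is available; hits are buffered and flushed when setWipCounter() is finally called. A standalone sketch of that buffer-then-flush pattern, with a hypothetical counter type in place of Stats::Counter:

#include <cassert>
#include <cstdint>
#include <string>

// Hypothetical minimal counter standing in for Stats::Counter.
struct FakeCounter {
  void add(uint64_t n) { value += n; }
  void inc() { ++value; }
  uint64_t value{0};
};

// Same shape as WipCounterBase: count locally until a real counter is attached.
class WipCounterBase {
protected:
  void setWipCounter(FakeCounter& wip_counter) {
    assert(wip_counter_ == nullptr);
    wip_counter_ = &wip_counter;
    wip_counter.add(prestats_wip_count_); // Flush everything seen before stats existed.
  }
  void onWorkInProgressCommon(const std::string& /*description*/) {
    if (wip_counter_ != nullptr) {
      wip_counter_->inc();
    } else {
      prestats_wip_count_++; // Stats not wired up yet (e.g. during bootstrap parsing).
    }
  }

private:
  FakeCounter* wip_counter_{nullptr};
  uint64_t prestats_wip_count_{0};
};

class Visitor : public WipCounterBase {
public:
  using WipCounterBase::onWorkInProgressCommon;
  using WipCounterBase::setWipCounter;
};

int main() {
  Visitor visitor;
  visitor.onWorkInProgressCommon("field 'foo' is marked as work-in-progress"); // Buffered.
  visitor.onWorkInProgressCommon("message 'Bar' is marked as work-in-progress");

  FakeCounter wip;
  visitor.setWipCounter(wip); // wip.value == 2: the buffered hits are flushed.
  visitor.onWorkInProgressCommon("another WIP use");
  assert(wip.value == 3);
}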
void redact(Protobuf::Message* message, bool ancestor_is_sensitive) { if (redactAny(message, ancestor_is_sensitive) || - redactTypedStruct(message, ancestor_is_sensitive)) { + redactTypedStruct(message, "xds.type.v3.TypedStruct", ancestor_is_sensitive) || + redactTypedStruct(message, "udpa.type.v1.TypedStruct", ancestor_is_sensitive)) { return; } diff --git a/source/common/quic/BUILD b/source/common/quic/BUILD index f1fd86dd5c2b..a651426c188b 100644 --- a/source/common/quic/BUILD +++ b/source/common/quic/BUILD @@ -199,6 +199,17 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_ssl_connection_info_lib", + hdrs = ["quic_ssl_connection_info.h"], + external_deps = ["ssl"], + tags = ["nofips"], + deps = [ + "//source/extensions/transport_sockets/tls:connection_info_impl_base_lib", + "@com_github_google_quiche//:quic_core_session_lib", + ], +) + envoy_cc_library( name = "quic_filter_manager_connection_lib", srcs = ["quic_filter_manager_connection_impl.cc"], @@ -207,6 +218,7 @@ envoy_cc_library( deps = [ ":envoy_quic_simulated_watermark_buffer_lib", ":quic_network_connection_lib", + ":quic_ssl_connection_info_lib", ":send_buffer_monitor_lib", "//envoy/event:dispatcher_interface", "//envoy/network:connection_interface", @@ -261,6 +273,7 @@ envoy_cc_library( deps = [ ":envoy_quic_client_connection_lib", ":envoy_quic_crypto_stream_factory_lib", + ":envoy_quic_proof_verifier_lib", ":envoy_quic_stream_lib", ":envoy_quic_utils_lib", ":quic_filter_manager_connection_lib", diff --git a/source/common/quic/active_quic_listener.cc b/source/common/quic/active_quic_listener.cc index 304ff8bc7d34..5aa56cada9b6 100644 --- a/source/common/quic/active_quic_listener.cc +++ b/source/common/quic/active_quic_listener.cc @@ -55,10 +55,7 @@ ActiveQuicListener::ActiveQuicListener( kernel_worker_routing_(kernel_worker_routing), packets_to_read_to_connection_count_ratio_(packets_to_read_to_connection_count_ratio), crypto_server_stream_factory_(crypto_server_stream_factory) { - // This flag fix a QUICHE issue which may crash Envoy during connection close. - SetQuicReloadableFlag(quic_single_ack_in_packet2, true); - // Do not include 32-byte per-entry overhead while counting header size. - quiche::FlagRegistry::getInstance(); + ASSERT(GetQuicReloadableFlag(quic_single_ack_in_packet2)); ASSERT(!GetQuicFlag(FLAGS_quic_header_size_limit_includes_overhead)); if (Runtime::LoaderSingleton::getExisting()) { @@ -257,11 +254,7 @@ ActiveQuicListenerFactory::ActiveQuicListenerFactory( : 20000; quic_config_.set_max_time_before_crypto_handshake( quic::QuicTime::Delta::FromMilliseconds(max_time_before_crypto_handshake_ms)); - int32_t max_streams = - PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.quic_protocol_options(), max_concurrent_streams, 100); - quic_config_.SetMaxBidirectionalStreamsToSend(max_streams); - quic_config_.SetMaxUnidirectionalStreamsToSend(max_streams); - configQuicInitialFlowControlWindow(config.quic_protocol_options(), quic_config_); + convertQuicConfig(config.quic_protocol_options(), quic_config_); // Initialize crypto stream factory. 
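Both the upstream HTTP/3 pool earlier in this diff and the QUIC listener now funnel QuicProtocolOptions through Quic::convertQuicConfig(), whose implementation is outside this diff. Based only on the code it replaces here, a plausible sketch looks like the following; the function name is deliberately different from the real helper, and the include carrying configQuicInitialFlowControlWindow() is assumed to be envoy_quic_utils.h:

// Not the real implementation: a consolidation of the two code paths removed in this diff.
#include "envoy/config/core/v3/protocol.pb.h"

#include "source/common/protobuf/utility.h"
#include "source/common/quic/envoy_quic_utils.h"

#include "quiche/quic/core/quic_config.h"

namespace Envoy {
namespace Quic {

void convertQuicConfigSketch(const envoy::config::core::v3::QuicProtocolOptions& options,
                             quic::QuicConfig& quic_config) {
  // Stream limits: both directions get max_concurrent_streams (default 100), as before.
  const int32_t max_streams =
      PROTOBUF_GET_WRAPPED_OR_DEFAULT(options, max_concurrent_streams, 100);
  quic_config.SetMaxBidirectionalStreamsToSend(max_streams);
  quic_config.SetMaxUnidirectionalStreamsToSend(max_streams);
  // Flow control windows are still delegated to the existing helper called by the removed code.
  configQuicInitialFlowControlWindow(options, quic_config);
}

} // namespace Quic
} // namespace Envoy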
envoy::config::core::v3::TypedExtensionConfig crypto_stream_config; diff --git a/source/common/quic/client_connection_factory_impl.cc b/source/common/quic/client_connection_factory_impl.cc index 00d86543436f..42a57d4c66f8 100644 --- a/source/common/quic/client_connection_factory_impl.cc +++ b/source/common/quic/client_connection_factory_impl.cc @@ -57,8 +57,7 @@ createQuicNetworkConnection(Http::PersistentQuicInfo& info, Event::Dispatcher& d Network::Address::InstanceConstSharedPtr server_addr, Network::Address::InstanceConstSharedPtr local_addr, QuicStatNames& quic_stat_names, Stats::Scope& scope) { - // This flag fix a QUICHE issue which may crash Envoy during connection close. - SetQuicReloadableFlag(quic_single_ack_in_packet2, true); + ASSERT(GetQuicReloadableFlag(quic_single_ack_in_packet2)); PersistentQuicInfoImpl* info_impl = reinterpret_cast(&info); auto config = info_impl->cryptoConfig(); if (config == nullptr) { diff --git a/source/common/quic/codec_impl.cc b/source/common/quic/codec_impl.cc index 21100bb78882..5ac2bab02dc0 100644 --- a/source/common/quic/codec_impl.cc +++ b/source/common/quic/codec_impl.cc @@ -79,9 +79,6 @@ Http::RequestEncoder& QuicHttpClientConnectionImpl::newStream(Http::ResponseDecoder& response_decoder) { EnvoyQuicClientStream* stream = quicStreamToEnvoyClientStream(quic_client_session_.CreateOutgoingBidirectionalStream()); - // TODO(danzh) handle stream creation failure gracefully. This can happen when - // there are already 100 open streams. In such case, caller should hold back - // the stream creation till an existing stream is closed. ASSERT(stream != nullptr, "Fail to create QUIC stream."); stream->setResponseDecoder(response_decoder); if (quic_client_session_.aboveHighWatermark()) { diff --git a/source/common/quic/envoy_quic_alarm_factory.h b/source/common/quic/envoy_quic_alarm_factory.h index ca491b2005ec..2350c80e8e81 100644 --- a/source/common/quic/envoy_quic_alarm_factory.h +++ b/source/common/quic/envoy_quic_alarm_factory.h @@ -3,19 +3,10 @@ #include "source/common/common/non_copyable.h" #include "source/common/quic/envoy_quic_alarm.h" -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#endif - #include "quiche/quic/core/quic_alarm_factory.h" #include "quiche/quic/core/quic_arena_scoped_ptr.h" #include "quiche/quic/core/quic_one_block_arena.h" -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - namespace Envoy { namespace Quic { diff --git a/source/common/quic/envoy_quic_client_connection.cc b/source/common/quic/envoy_quic_client_connection.cc index 003851a8bb8c..0a50d81b11bf 100644 --- a/source/common/quic/envoy_quic_client_connection.cc +++ b/source/common/quic/envoy_quic_client_connection.cc @@ -7,7 +7,6 @@ #include "source/common/network/listen_socket_impl.h" #include "source/common/network/socket_option_factory.h" #include "source/common/network/udp_packet_writer_handler_impl.h" -#include "source/common/quic/envoy_quic_packet_writer.h" #include "source/common/quic/envoy_quic_utils.h" namespace Envoy { @@ -33,7 +32,7 @@ EnvoyQuicClientConnection::EnvoyQuicClientConnection( server_connection_id, helper, alarm_factory, new EnvoyQuicPacketWriter( std::make_unique(connection_socket->ioHandle())), - true, supported_versions, dispatcher, std::move(connection_socket)) {} + /*owns_writer=*/true, supported_versions, dispatcher, std::move(connection_socket)) {} EnvoyQuicClientConnection::EnvoyQuicClientConnection( const quic::QuicConnectionId& server_connection_id, 
quic::QuicConnectionHelperInterface& helper, @@ -71,21 +70,25 @@ uint64_t EnvoyQuicClientConnection::maxDatagramSize() const { return Network::DEFAULT_UDP_MAX_DATAGRAM_SIZE; } -void EnvoyQuicClientConnection::setUpConnectionSocket(OptRef delegate) { +void EnvoyQuicClientConnection::setUpConnectionSocket(Network::ConnectionSocket& connection_socket, + OptRef delegate) { delegate_ = delegate; - if (connectionSocket()->ioHandle().isOpen()) { - connectionSocket()->ioHandle().initializeFileEvent( - dispatcher_, [this](uint32_t events) -> void { onFileEvent(events); }, + if (connection_socket.ioHandle().isOpen()) { + connection_socket.ioHandle().initializeFileEvent( + dispatcher_, + [this, &connection_socket](uint32_t events) -> void { + onFileEvent(events, connection_socket); + }, Event::PlatformDefaultTriggerType, Event::FileReadyType::Read | Event::FileReadyType::Write); - if (!Network::Socket::applyOptions(connectionSocket()->options(), *connectionSocket(), + if (!Network::Socket::applyOptions(connection_socket.options(), connection_socket, envoy::config::core::v3::SocketOption::STATE_LISTENING)) { ENVOY_CONN_LOG(error, "Fail to apply listening options", *this); - connectionSocket()->close(); + connection_socket.close(); } } - if (!connectionSocket()->ioHandle().isOpen()) { + if (!connection_socket.ioHandle().isOpen()) { CloseConnection(quic::QUIC_CONNECTION_CANCELLED, "Fail to set up connection socket.", quic::ConnectionCloseBehavior::SILENT_CLOSE); } @@ -100,9 +103,9 @@ void EnvoyQuicClientConnection::switchConnectionSocket( quic::QuicSocketAddress peer_address = envoyIpAddressToQuicSocketAddress( connection_socket->connectionInfoProvider().remoteAddress()->ip()); - // The old socket is closed in this call. + // The old socket is not closed in this call, because it could still receive useful packets. setConnectionSocket(std::move(connection_socket)); - setUpConnectionSocket(delegate_); + setUpConnectionSocket(*connectionSocket(), delegate_); if (connection_migration_use_new_cid()) { MigratePath(self_address, peer_address, writer.release(), true); } else { @@ -110,7 +113,72 @@ void EnvoyQuicClientConnection::switchConnectionSocket( } } -void EnvoyQuicClientConnection::onFileEvent(uint32_t events) { +void EnvoyQuicClientConnection::OnPathDegradingDetected() { + QuicConnection::OnPathDegradingDetected(); + maybeMigratePort(); +} + +void EnvoyQuicClientConnection::maybeMigratePort() { + if (!IsHandshakeConfirmed() || !connection_migration_use_new_cid() || + HasPendingPathValidation() || !migrate_port_on_path_degrading_) { + return; + } + + const Network::Address::InstanceConstSharedPtr& current_local_address = + connectionSocket()->connectionInfoProvider().localAddress(); + // Creates an IP address with unset port. The port will be set when the new socket is created. + Network::Address::InstanceConstSharedPtr new_local_address; + if (current_local_address->ip()->version() == Network::Address::IpVersion::v4) { + new_local_address = std::make_shared( + current_local_address->ip()->addressAsString()); + } else { + new_local_address = std::make_shared( + current_local_address->ip()->addressAsString()); + } + + // The probing socket will have the same host but a different port. 
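One detail worth spelling out in maybeMigratePort(): the probing address is rebuilt from addressAsString(), which carries only the IP, so the new Ipv4Instance/Ipv6Instance has port 0 and the kernel picks a fresh ephemeral port when the probing socket binds. A small illustration using the address classes directly:

#include <iostream>
#include <memory>

#include "source/common/network/address_impl.h"

int main() {
  // An established connection's local address: IP plus the currently-bound port.
  Envoy::Network::Address::Ipv4Instance current("192.0.2.10", 54321);

  // Rebuilding from addressAsString() drops the port, so the new instance has port 0;
  // binding a socket to it lets the OS pick a different ephemeral port for probing.
  auto probing_address =
      std::make_shared<Envoy::Network::Address::Ipv4Instance>(current.ip()->addressAsString());

  std::cout << current.asString() << "\n";          // "192.0.2.10:54321"
  std::cout << probing_address->asString() << "\n"; // "192.0.2.10:0"
}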
+ auto probing_socket = createConnectionSocket( + connectionSocket()->connectionInfoProvider().remoteAddress(), new_local_address, nullptr); + setUpConnectionSocket(*probing_socket, delegate_); + auto writer = std::make_unique( + std::make_unique(probing_socket->ioHandle())); + quic::QuicSocketAddress self_address = envoyIpAddressToQuicSocketAddress( + probing_socket->connectionInfoProvider().localAddress()->ip()); + quic::QuicSocketAddress peer_address = envoyIpAddressToQuicSocketAddress( + probing_socket->connectionInfoProvider().remoteAddress()->ip()); + + auto context = std::make_unique( + self_address, peer_address, std::move(writer), std::move(probing_socket)); + ValidatePath(std::move(context), std::make_unique(*this)); +} + +void EnvoyQuicClientConnection::onPathValidationSuccess( + std::unique_ptr context) { + auto envoy_context = + static_cast(context.get()); + + auto probing_socket = envoy_context->releaseSocket(); + if (MigratePath(envoy_context->self_address(), envoy_context->peer_address(), + envoy_context->releaseWriter(), true)) { + // probing_socket will be set as the new default socket. But old sockets are still able to + // receive packets. + setConnectionSocket(std::move(probing_socket)); + return; + } + // MigratePath should always succeed since the migration happens after path + // validation. + ENVOY_CONN_LOG(error, "connection fails to migrate path after validation", *this); +} + +void EnvoyQuicClientConnection::onPathValidationFailure( + std::unique_ptr /*context*/) { + // Note that the probing socket and probing writer will be deleted once context goes out of + // scope. + OnPathValidationFailureAtClient(); +} + +void EnvoyQuicClientConnection::onFileEvent(uint32_t events, + Network::ConnectionSocket& connection_socket) { ENVOY_CONN_LOG(trace, "socket event: {}", *this, events); ASSERT(events & (Event::FileReadyType::Read | Event::FileReadyType::Write)); @@ -118,25 +186,78 @@ void EnvoyQuicClientConnection::onFileEvent(uint32_t events) { OnCanWrite(); } + bool is_probing_socket = + HasPendingPathValidation() && + (&connection_socket == + &static_cast( + GetPathValidationContext()) + ->probingSocket()); + // It's possible for a write event callback to close the connection, in such case ignore read // event processing. // TODO(mattklein123): Right now QUIC client is hard coded to use GRO because it is probably the // right default for QUIC. Determine whether this should be configurable or not. if (connected() && (events & Event::FileReadyType::Read)) { Api::IoErrorPtr err = Network::Utility::readPacketsFromSocket( - connectionSocket()->ioHandle(), - *connectionSocket()->connectionInfoProvider().localAddress(), *this, - dispatcher_.timeSource(), true, packets_dropped_); + connection_socket.ioHandle(), *connection_socket.connectionInfoProvider().localAddress(), + *this, dispatcher_.timeSource(), true, packets_dropped_); if (err == nullptr) { - connectionSocket()->ioHandle().activateFileEvents(Event::FileReadyType::Read); - return; - } - if (err->getErrorCode() != Api::IoError::IoErrorCode::Again) { + // In the case where the path validation fails, the probing socket will be closed and its IO + // events are no longer interesting. 
+ if (!is_probing_socket || HasPendingPathValidation() || + connectionSocket().get() == &connection_socket) { + connection_socket.ioHandle().activateFileEvents(Event::FileReadyType::Read); + return; + } + + } else if (err->getErrorCode() != Api::IoError::IoErrorCode::Again) { ENVOY_CONN_LOG(error, "recvmsg result {}: {}", *this, static_cast(err->getErrorCode()), err->getErrorDetails()); } } } +EnvoyQuicClientConnection::EnvoyQuicPathValidationContext::EnvoyQuicPathValidationContext( + quic::QuicSocketAddress& self_address, quic::QuicSocketAddress& peer_address, + std::unique_ptr writer, + std::unique_ptr probing_socket) + : QuicPathValidationContext(self_address, peer_address), writer_(std::move(writer)), + socket_(std::move(probing_socket)) {} + +EnvoyQuicClientConnection::EnvoyQuicPathValidationContext::~EnvoyQuicPathValidationContext() = + default; + +quic::QuicPacketWriter* EnvoyQuicClientConnection::EnvoyQuicPathValidationContext::WriterToUse() { + return writer_.get(); +} + +EnvoyQuicPacketWriter* EnvoyQuicClientConnection::EnvoyQuicPathValidationContext::releaseWriter() { + return writer_.release(); +} + +std::unique_ptr +EnvoyQuicClientConnection::EnvoyQuicPathValidationContext::releaseSocket() { + return std::move(socket_); +} + +Network::ConnectionSocket& +EnvoyQuicClientConnection::EnvoyQuicPathValidationContext::probingSocket() { + return *socket_; +} + +EnvoyQuicClientConnection::EnvoyPathValidationResultDelegate::EnvoyPathValidationResultDelegate( + EnvoyQuicClientConnection& connection) + : connection_(connection) {} + +void EnvoyQuicClientConnection::EnvoyPathValidationResultDelegate::OnPathValidationSuccess( + std::unique_ptr context) { + connection_.onPathValidationSuccess(std::move(context)); +} + +void EnvoyQuicClientConnection::EnvoyPathValidationResultDelegate::OnPathValidationFailure( + std::unique_ptr context) { + connection_.onPathValidationFailure(std::move(context)); +} + } // namespace Quic } // namespace Envoy diff --git a/source/common/quic/envoy_quic_client_connection.h b/source/common/quic/envoy_quic_client_connection.h index f0cc8db28aba..f24a67ce067c 100644 --- a/source/common/quic/envoy_quic_client_connection.h +++ b/source/common/quic/envoy_quic_client_connection.h @@ -3,21 +3,12 @@ #include "envoy/event/dispatcher.h" #include "source/common/network/utility.h" +#include "source/common/quic/envoy_quic_packet_writer.h" #include "source/common/quic/envoy_quic_utils.h" #include "source/common/quic/quic_network_connection.h" -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#endif - #include "quiche/quic/core/quic_connection.h" -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - namespace Envoy { namespace Quic { @@ -68,12 +59,63 @@ class EnvoyQuicClientConnection : public quic::QuicConnection, } // Register file event and apply socket options. - void setUpConnectionSocket(OptRef delegate); + void setUpConnectionSocket(Network::ConnectionSocket& connection_socket, + OptRef delegate); // Switch underlying socket with the given one. This is used in connection migration. void switchConnectionSocket(Network::ConnectionSocketPtr&& connection_socket); + // Potentially trigger migration. + void OnPathDegradingDetected() override; + + // Called when port migration probing succeeds. Attempts to migrate this connection onto the new + // socket extracted from context. 
+ void onPathValidationSuccess(std::unique_ptr context); + + // Called when port migration probing fails. The probing socket from context will go out of scope + // and be destructed. + void onPathValidationFailure(std::unique_ptr context); + + void setMigratePortOnPathDegrading(bool migrate_port_on_path_degrading) { + migrate_port_on_path_degrading_ = migrate_port_on_path_degrading; + } + private: + // Holds all components needed for a QUIC connection probing/migration. + class EnvoyQuicPathValidationContext : public quic::QuicPathValidationContext { + public: + EnvoyQuicPathValidationContext(quic::QuicSocketAddress& self_address, + quic::QuicSocketAddress& peer_address, + std::unique_ptr writer, + std::unique_ptr probing_socket); + + ~EnvoyQuicPathValidationContext() override; + + quic::QuicPacketWriter* WriterToUse() override; + + EnvoyQuicPacketWriter* releaseWriter(); + + Network::ConnectionSocket& probingSocket(); + + std::unique_ptr releaseSocket(); + + private: + std::unique_ptr writer_; + Network::ConnectionSocketPtr socket_; + }; + + // Receives notifications from the Quiche layer on path validation results. + class EnvoyPathValidationResultDelegate : public quic::QuicPathValidator::ResultDelegate { + public: + explicit EnvoyPathValidationResultDelegate(EnvoyQuicClientConnection& connection); + + void OnPathValidationSuccess(std::unique_ptr context) override; + + void OnPathValidationFailure(std::unique_ptr context) override; + + private: + EnvoyQuicClientConnection& connection_; + }; EnvoyQuicClientConnection(const quic::QuicConnectionId& server_connection_id, quic::QuicConnectionHelperInterface& helper, quic::QuicAlarmFactory& alarm_factory, @@ -81,11 +123,14 @@ class EnvoyQuicClientConnection : public quic::QuicConnection, Event::Dispatcher& dispatcher, Network::ConnectionSocketPtr&& connection_socket); - void onFileEvent(uint32_t events); + void onFileEvent(uint32_t events, Network::ConnectionSocket& connection_socket); + + void maybeMigratePort(); OptRef delegate_; uint32_t packets_dropped_{0}; Event::Dispatcher& dispatcher_; + bool migrate_port_on_path_degrading_{false}; }; } // namespace Quic diff --git a/source/common/quic/envoy_quic_client_session.cc b/source/common/quic/envoy_quic_client_session.cc index 8820ab1c0032..4cf14c31db3b 100644 --- a/source/common/quic/envoy_quic_client_session.cc +++ b/source/common/quic/envoy_quic_client_session.cc @@ -1,7 +1,25 @@ #include "source/common/quic/envoy_quic_client_session.h" +#include "source/common/quic/envoy_quic_proof_verifier.h" #include "source/common/quic/envoy_quic_utils.h" +#include "quic_filter_manager_connection_impl.h" + +namespace quic { +namespace test { + +// TODO(alyssawilk) add the necessary accessors to quiche and remove this. 
+class QuicSessionPeer { +public: + static quic::QuicStreamIdManager& + getStreamIdManager(Envoy::Quic::EnvoyQuicClientSession* session) { + return session->ietf_streamid_manager_.bidirectional_stream_id_manager_; + } +}; + +} // namespace test +} // namespace quic + namespace Envoy { namespace Quic { @@ -16,19 +34,22 @@ EnvoyQuicClientSession::EnvoyQuicClientSession( send_buffer_limit), quic::QuicSpdyClientSession(config, supported_versions, connection.release(), server_id, crypto_config.get(), push_promise_index), - host_name_(server_id.host()), crypto_config_(crypto_config), - crypto_stream_factory_(crypto_stream_factory), quic_stat_names_(quic_stat_names), - scope_(scope) {} + crypto_config_(crypto_config), crypto_stream_factory_(crypto_stream_factory), + quic_stat_names_(quic_stat_names), scope_(scope) { + quic_ssl_info_ = std::make_shared(*this); +} EnvoyQuicClientSession::~EnvoyQuicClientSession() { ASSERT(!connection()->connected()); network_connection_ = nullptr; } -absl::string_view EnvoyQuicClientSession::requestedServerName() const { return host_name_; } +absl::string_view EnvoyQuicClientSession::requestedServerName() const { return server_id().host(); } void EnvoyQuicClientSession::connect() { - dynamic_cast(network_connection_)->setUpConnectionSocket(*this); + dynamic_cast(network_connection_) + ->setUpConnectionSocket( + *static_cast(connection())->connectionSocket(), *this); // Start version negotiation and crypto handshake during which the connection may fail if server // doesn't support the one and only supported version. CryptoConnect(); @@ -75,11 +96,13 @@ void EnvoyQuicClientSession::OnRstStream(const quic::QuicRstStreamFrame& frame) /*from_self*/ false, /*is_upstream*/ true); } -void EnvoyQuicClientSession::SetDefaultEncryptionLevel(quic::EncryptionLevel level) { - quic::QuicSpdyClientSession::SetDefaultEncryptionLevel(level); - if (level == quic::ENCRYPTION_FORWARD_SECURE) { - // This is only reached once, when handshake is done. - raiseConnectionEvent(Network::ConnectionEvent::Connected); +void EnvoyQuicClientSession::OnCanCreateNewOutgoingStream(bool unidirectional) { + if (!http_connection_callbacks_ || unidirectional) { + return; + } + uint32_t streams_available = streamsAvailable(); + if (streams_available > 0) { + http_connection_callbacks_->onMaxStreamsChanged(streams_available); } } @@ -111,9 +134,22 @@ quic::QuicConnection* EnvoyQuicClientSession::quicConnection() { return initialized_ ? connection() : nullptr; } +uint64_t EnvoyQuicClientSession::streamsAvailable() { + quic::QuicStreamIdManager& manager = quic::test::QuicSessionPeer::getStreamIdManager(this); + ASSERT(manager.outgoing_max_streams() >= manager.outgoing_stream_count()); + uint32_t streams_available = manager.outgoing_max_streams() - manager.outgoing_stream_count(); + return streams_available; +} + void EnvoyQuicClientSession::OnTlsHandshakeComplete() { quic::QuicSpdyClientSession::OnTlsHandshakeComplete(); - raiseConnectionEvent(Network::ConnectionEvent::Connected); + + // TODO(alyssawilk) support the case where a connection starts with 0 max streams. 
+ ASSERT(streamsAvailable()); + if (streamsAvailable() > 0) { + OnCanCreateNewOutgoingStream(false); + raiseConnectionEvent(Network::ConnectionEvent::Connected); + } } std::unique_ptr EnvoyQuicClientSession::CreateQuicCryptoStream() { @@ -122,5 +158,46 @@ std::unique_ptr EnvoyQuicClientSession::Create this, /*has_application_state = */ version().UsesHttp3()); } +void EnvoyQuicClientSession::setHttp3Options( + const envoy::config::core::v3::Http3ProtocolOptions& http3_options) { + QuicFilterManagerConnectionImpl::setHttp3Options(http3_options); + if (!http3_options_->has_quic_protocol_options()) { + return; + } + static_cast(connection()) + ->setMigratePortOnPathDegrading(PROTOBUF_GET_WRAPPED_OR_DEFAULT( + http3_options.quic_protocol_options(), num_timeouts_to_trigger_port_migration, 1)); + + if (http3_options_->quic_protocol_options().has_connection_keepalive()) { + const uint64_t initial_interval = PROTOBUF_GET_MS_OR_DEFAULT( + http3_options_->quic_protocol_options().connection_keepalive(), initial_interval, 0); + const uint64_t max_interval = + PROTOBUF_GET_MS_OR_DEFAULT(http3_options_->quic_protocol_options().connection_keepalive(), + max_interval, quic::kPingTimeoutSecs); + // If the keepalive max_interval is configured to zero, disable the probe completely. + if (max_interval == 0u) { + disable_keepalive_ = true; + return; + } + connection()->set_ping_timeout(quic::QuicTime::Delta::FromMilliseconds(max_interval)); + if (max_interval > initial_interval && initial_interval > 0u) { + connection()->set_initial_retransmittable_on_wire_timeout( + quic::QuicTime::Delta::FromMilliseconds(initial_interval)); + } + } +} + +bool EnvoyQuicClientSession::ShouldKeepConnectionAlive() const { + // Do not probe at all if keepalive is disabled via config. + return !disable_keepalive_ && quic::QuicSpdyClientSession::ShouldKeepConnectionAlive(); +} + +void EnvoyQuicClientSession::OnProofVerifyDetailsAvailable( + const quic::ProofVerifyDetails& verify_details) { + if (static_cast(verify_details).isValid()) { + quic_ssl_info_->onCertValidated(); + } +} + } // namespace Quic } // namespace Envoy diff --git a/source/common/quic/envoy_quic_client_session.h b/source/common/quic/envoy_quic_client_session.h index bdf7c96c49dd..59beb7083532 100644 --- a/source/common/quic/envoy_quic_client_session.h +++ b/source/common/quic/envoy_quic_client_session.h @@ -1,27 +1,18 @@ #pragma once -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#pragma GCC diagnostic ignored "-Wtype-limits" -#endif - -#include "quiche/quic/core/http/quic_spdy_client_session.h" - -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - -#include "source/common/quic/envoy_quic_client_stream.h" #include "source/common/quic/envoy_quic_client_connection.h" -#include "source/common/quic/quic_filter_manager_connection_impl.h" +#include "source/common/quic/envoy_quic_client_stream.h" #include "source/common/quic/envoy_quic_crypto_stream_factory.h" +#include "source/common/quic/quic_filter_manager_connection_impl.h" #include "source/common/quic/quic_stat_names.h" +#include "quiche/quic/core/http/quic_spdy_client_session.h" + namespace Envoy { namespace Quic { +class EnvoyQuicClientSession; + // Act as a Network::ClientConnection to ClientCodec. 
// TODO(danzh) This class doesn't need to inherit Network::FilterManager // interface but need all other Network::Connection implementation in @@ -69,8 +60,11 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, void MaybeSendRstStreamFrame(quic::QuicStreamId id, quic::QuicResetStreamError error, quic::QuicStreamOffset bytes_written) override; void OnRstStream(const quic::QuicRstStreamFrame& frame) override; + // quic::QuicSpdyClientSessionBase - void SetDefaultEncryptionLevel(quic::EncryptionLevel level) override; + bool ShouldKeepConnectionAlive() const override; + // quic::ProofHandler + void OnProofVerifyDetailsAvailable(const quic::ProofVerifyDetails& verify_details) override; // PacketsToReadDelegate size_t numPacketsExpectedPerEventLoop() override { @@ -79,6 +73,12 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, return std::max(1, GetNumActiveStreams()) * Network::NUM_DATAGRAMS_PER_RECEIVE; } + // QuicFilterManagerConnectionImpl + void setHttp3Options(const envoy::config::core::v3::Http3ProtocolOptions& http3_options) override; + + // Notify any registered connection pool when new streams are available. + void OnCanCreateNewOutgoingStream(bool) override; + using quic::QuicSpdyClientSession::PerformActionOnActiveStreams; protected: @@ -88,7 +88,12 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, quic::QuicSpdyStream* CreateIncomingStream(quic::QuicStreamId id) override; quic::QuicSpdyStream* CreateIncomingStream(quic::PendingStream* pending) override; std::unique_ptr CreateQuicCryptoStream() override; - + bool ShouldCreateOutgoingBidirectionalStream() override { + ASSERT(quic::QuicSpdyClientSession::ShouldCreateOutgoingBidirectionalStream()); + // Prefer creating an "invalid" stream outside of current stream bounds to + // crashing when dereferencing a nullptr in QuicHttpClientConnectionImpl::newStream + return true; + } // QuicFilterManagerConnectionImpl bool hasDataToWrite() override; // Used by base class to access quic connection after initialization. @@ -96,15 +101,16 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, quic::QuicConnection* quicConnection() override; private: + uint64_t streamsAvailable(); + // These callbacks are owned by network filters and quic session should outlive // them. Http::ConnectionCallbacks* http_connection_callbacks_{nullptr}; - // TODO(danzh) deprecate this field once server_id() is made const. 
- const std::string host_name_; std::shared_ptr crypto_config_; EnvoyQuicCryptoClientStreamFactoryInterface& crypto_stream_factory_; QuicStatNames& quic_stat_names_; Stats::Scope& scope_; + bool disable_keepalive_{false}; }; } // namespace Quic diff --git a/source/common/quic/envoy_quic_client_stream.cc b/source/common/quic/envoy_quic_client_stream.cc index 1d413afc6097..222f35f56e78 100644 --- a/source/common/quic/envoy_quic_client_stream.cc +++ b/source/common/quic/envoy_quic_client_stream.cc @@ -1,29 +1,18 @@ #include "source/common/quic/envoy_quic_client_stream.h" -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#endif - -#include "quiche/quic/core/quic_session.h" -#include "quiche/quic/core/http/quic_header_list.h" -#include "quiche/spdy/core/spdy_header_block.h" - -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - -#include "source/common/quic/envoy_quic_utils.h" -#include "source/common/quic/envoy_quic_client_session.h" - #include "source/common/buffer/buffer_impl.h" +#include "source/common/common/assert.h" +#include "source/common/common/enum_to_int.h" #include "source/common/http/codes.h" #include "source/common/http/header_map_impl.h" #include "source/common/http/header_utility.h" #include "source/common/http/utility.h" -#include "source/common/common/enum_to_int.h" -#include "source/common/common/assert.h" +#include "source/common/quic/envoy_quic_client_session.h" +#include "source/common/quic/envoy_quic_utils.h" + +#include "quiche/quic/core/http/quic_header_list.h" +#include "quiche/quic/core/quic_session.h" +#include "quiche/spdy/core/spdy_header_block.h" namespace Envoy { namespace Quic { @@ -53,15 +42,19 @@ Http::Status EnvoyQuicClientStream::encodeHeaders(const Http::RequestHeaderMap& local_end_stream_ = end_stream; SendBufferMonitor::ScopedWatermarkBufferUpdater updater(this, this); auto spdy_headers = envoyHeadersToSpdyHeaderBlock(headers); - if (headers.Method() && headers.Method()->value() == "CONNECT") { - // It is a bytestream connect and should have :path and :protocol set accordingly - // As HTTP/1.1 does not require a path for CONNECT, we may have to add one - // if shifting codecs. For now, default to "/" - this can be made - // configurable if necessary. - // https://tools.ietf.org/html/draft-kinnear-httpbis-http2-transport-02 - spdy_headers[":protocol"] = Http::Headers::get().ProtocolValues.Bytestream; - if (!headers.Path()) { - spdy_headers[":path"] = "/"; + if (headers.Method()) { + if (headers.Method()->value() == "CONNECT") { + // It is a bytestream connect and should have :path and :protocol set accordingly + // As HTTP/1.1 does not require a path for CONNECT, we may have to add one + // if shifting codecs. For now, default to "/" - this can be made + // configurable if necessary. 
+ // https://tools.ietf.org/html/draft-kinnear-httpbis-http2-transport-02 + spdy_headers[":protocol"] = Http::Headers::get().ProtocolValues.Bytestream; + if (!headers.Path()) { + spdy_headers[":path"] = "/"; + } + } else if (headers.Method()->value() == "HEAD") { + sent_head_request_ = true; } } WriteHeaders(std::move(spdy_headers), end_stream, nullptr); @@ -190,6 +183,9 @@ void EnvoyQuicClientStream::OnInitialHeadersComplete(bool fin, size_t frame_len, } else if (status != enumToInt(Http::Code::Continue)) { response_decoder_->decodeHeaders(std::move(headers), /*end_stream=*/fin); + if (status == enumToInt(Http::Code::NotModified)) { + got_304_response_ = true; + } } ConsumeHeaderList(); @@ -224,6 +220,11 @@ void EnvoyQuicClientStream::OnBodyAvailable() { if (fin_read_and_no_trailers) { end_stream_decoded_ = true; } + updateReceivedContentBytes(buffer->length(), fin_read_and_no_trailers); + if (stream_error() != quic::QUIC_STREAM_NO_ERROR) { + // A stream error has occurred, stop processing. + return; + } response_decoder_->decodeData(*buffer, fin_read_and_no_trailers); } @@ -255,6 +256,11 @@ void EnvoyQuicClientStream::maybeDecodeTrailers() { if (sequencer()->IsClosed() && !FinishedReadingTrailers()) { // Only decode trailers after finishing decoding body. end_stream_decoded_ = true; + updateReceivedContentBytes(0, true); + if (stream_error() != quic::QUIC_STREAM_NO_ERROR) { + // A stream error has occurred, stop processing. + return; + } quic::QuicRstStreamErrorCode transform_rst = quic::QUIC_STREAM_NO_ERROR; auto trailers = spdyHeaderBlockToEnvoyTrailers( received_trailers(), filterManagerConnection()->maxIncomingHeadersCount(), *this, details_, @@ -280,7 +286,9 @@ void EnvoyQuicClientStream::ResetWithError(quic::QuicResetStreamError error) { stats_.tx_reset_.inc(); // Upper layers expect calling resetStream() to immediately raise reset callbacks. runResetCallbacks(quicRstErrorToEnvoyLocalResetReason(error.internal_code())); - quic::QuicSpdyClientStream::ResetWithError(error); + if (session()->connection()->connected()) { + quic::QuicSpdyClientStream::ResetWithError(error); + } } void EnvoyQuicClientStream::OnConnectionClosed(quic::QuicErrorCode error, diff --git a/source/common/quic/envoy_quic_client_stream.h b/source/common/quic/envoy_quic_client_stream.h index f763121944a1..165d751680f9 100644 --- a/source/common/quic/envoy_quic_client_stream.h +++ b/source/common/quic/envoy_quic_client_stream.h @@ -2,20 +2,10 @@ #include "envoy/buffer/buffer.h" -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#endif +#include "source/common/quic/envoy_quic_stream.h" #include "quiche/quic/core/http/quic_spdy_client_stream.h" -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - -#include "source/common/quic/envoy_quic_stream.h" - namespace Envoy { namespace Quic { @@ -73,17 +63,15 @@ class EnvoyQuicClientStream : public quic::QuicSpdyClientStream, // Http::MultiplexedStreamImplBase bool hasPendingData() override; + void onStreamError(absl::optional should_close_connection, + quic::QuicRstStreamErrorCode rst_code) override; + private: QuicFilterManagerConnectionImpl* filterManagerConnection(); // Deliver awaiting trailers if body has been delivered. void maybeDecodeTrailers(); - // Either reset the stream or close the connection according to - // should_close_connection and configured http3 options. 
- void onStreamError(absl::optional should_close_connection, - quic::QuicRstStreamErrorCode rst_code); - Http::ResponseDecoder* response_decoder_{nullptr}; bool decoded_100_continue_{false}; diff --git a/source/common/quic/envoy_quic_connection_helper.h b/source/common/quic/envoy_quic_connection_helper.h index a8d8ba2bbc94..c699263467d6 100644 --- a/source/common/quic/envoy_quic_connection_helper.h +++ b/source/common/quic/envoy_quic_connection_helper.h @@ -1,22 +1,11 @@ #pragma once -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#pragma GCC diagnostic ignored "-Wtype-limits" -#endif +#include "source/common/quic/platform/envoy_quic_clock.h" #include "quiche/quic/core/crypto/quic_random.h" #include "quiche/quic/core/quic_connection.h" #include "quiche/quic/core/quic_simple_buffer_allocator.h" -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - -#include "source/common/quic/platform/envoy_quic_clock.h" - namespace Envoy { namespace Quic { diff --git a/source/common/quic/envoy_quic_crypto_stream_factory.h b/source/common/quic/envoy_quic_crypto_stream_factory.h index fd8151984ee2..cbfcbfebe678 100644 --- a/source/common/quic/envoy_quic_crypto_stream_factory.h +++ b/source/common/quic/envoy_quic_crypto_stream_factory.h @@ -4,21 +4,11 @@ #include "envoy/config/typed_config.h" #include "envoy/network/transport_socket.h" -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#endif - -#include "quiche/quic/core/quic_crypto_server_stream_base.h" #include "quiche/quic/core/crypto/quic_crypto_server_config.h" -#include "quiche/quic/core/tls_server_handshaker.h" -#include "quiche/quic/core/quic_session.h" #include "quiche/quic/core/quic_crypto_client_stream.h" - -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif +#include "quiche/quic/core/quic_crypto_server_stream_base.h" +#include "quiche/quic/core/quic_session.h" +#include "quiche/quic/core/tls_server_handshaker.h" namespace Envoy { namespace Quic { diff --git a/source/common/quic/envoy_quic_dispatcher.h b/source/common/quic/envoy_quic_dispatcher.h index 77ed2ffcb361..97d04adb048d 100644 --- a/source/common/quic/envoy_quic_dispatcher.h +++ b/source/common/quic/envoy_quic_dispatcher.h @@ -1,27 +1,17 @@ #pragma once -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#pragma GCC diagnostic ignored "-Wtype-limits" -#endif - -#include "quiche/quic/core/quic_dispatcher.h" -#include "quiche/quic/core/quic_utils.h" - -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - #include #include "envoy/network/listener.h" -#include "source/server/connection_handler_impl.h" -#include "source/server/active_listener_base.h" + #include "source/common/quic/envoy_quic_crypto_stream_factory.h" #include "source/common/quic/envoy_quic_server_session.h" #include "source/common/quic/quic_stat_names.h" +#include "source/server/active_listener_base.h" +#include "source/server/connection_handler_impl.h" + +#include "quiche/quic/core/quic_dispatcher.h" +#include "quiche/quic/core/quic_utils.h" namespace Envoy { namespace Quic { diff --git a/source/common/quic/envoy_quic_packet_writer.h b/source/common/quic/envoy_quic_packet_writer.h index 560742e44e57..08cee2d7e190 100644 --- a/source/common/quic/envoy_quic_packet_writer.h +++ 
b/source/common/quic/envoy_quic_packet_writer.h @@ -1,19 +1,9 @@ #pragma once -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#endif +#include "envoy/network/udp_packet_writer_handler.h" #include "quiche/quic/core/quic_packet_writer.h" -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - -#include "envoy/network/udp_packet_writer_handler.h" - namespace Envoy { namespace Quic { diff --git a/source/common/quic/envoy_quic_proof_source_base.cc b/source/common/quic/envoy_quic_proof_source_base.cc index a59adcb246de..958bdcc0c644 100644 --- a/source/common/quic/envoy_quic_proof_source_base.cc +++ b/source/common/quic/envoy_quic_proof_source_base.cc @@ -1,18 +1,9 @@ #include "source/common/quic/envoy_quic_proof_source_base.h" -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#endif +#include "source/common/quic/envoy_quic_utils.h" #include "quiche/quic/core/quic_data_writer.h" -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - -#include "source/common/quic/envoy_quic_utils.h" - namespace Envoy { namespace Quic { diff --git a/source/common/quic/envoy_quic_proof_source_base.h b/source/common/quic/envoy_quic_proof_source_base.h index 9988924e71d1..036fe44a8699 100644 --- a/source/common/quic/envoy_quic_proof_source_base.h +++ b/source/common/quic/envoy_quic_proof_source_base.h @@ -2,29 +2,19 @@ #include +#include "envoy/network/filter.h" + #include "source/common/common/assert.h" +#include "source/common/common/logger.h" #include "absl/strings/str_cat.h" - -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#endif - +#include "openssl/ssl.h" +#include "quiche/quic/core/crypto/crypto_protocol.h" #include "quiche/quic/core/crypto/proof_source.h" #include "quiche/quic/core/quic_versions.h" -#include "quiche/quic/core/crypto/crypto_protocol.h" #include "quiche/quic/platform/api/quic_reference_counted.h" #include "quiche/quic/platform/api/quic_socket_address.h" -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - -#include "openssl/ssl.h" -#include "envoy/network/filter.h" -#include "source/common/common/logger.h" - namespace Envoy { namespace Quic { diff --git a/source/common/quic/envoy_quic_proof_source_factory_interface.h b/source/common/quic/envoy_quic_proof_source_factory_interface.h index a6f69a176158..92df7df9fd0d 100644 --- a/source/common/quic/envoy_quic_proof_source_factory_interface.h +++ b/source/common/quic/envoy_quic_proof_source_factory_interface.h @@ -1,23 +1,13 @@ #pragma once #include "envoy/config/typed_config.h" +#include "envoy/network/filter.h" +#include "envoy/network/socket.h" -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#endif +#include "source/server/active_listener_base.h" #include "quiche/quic/core/crypto/proof_source.h" -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - -#include "envoy/network/socket.h" -#include "envoy/network/filter.h" -#include "source/server/active_listener_base.h" - namespace Envoy { namespace Quic { diff --git a/source/common/quic/envoy_quic_proof_verifier.cc b/source/common/quic/envoy_quic_proof_verifier.cc index 4a2c3245180d..99336a3b73a4 100644 --- a/source/common/quic/envoy_quic_proof_verifier.cc +++ b/source/common/quic/envoy_quic_proof_verifier.cc @@ -1,7 +1,7 @@ #include 
"source/common/quic/envoy_quic_proof_verifier.h" #include "source/common/quic/envoy_quic_utils.h" -#include "source/extensions/transport_sockets/tls/cert_validator/default_validator.h" +#include "source/extensions/transport_sockets/tls/utility.h" #include "quiche/quic/core/crypto/certificate_view.h" @@ -9,10 +9,25 @@ namespace Envoy { namespace Quic { quic::QuicAsyncStatus EnvoyQuicProofVerifier::VerifyCertChain( + const std::string& hostname, const uint16_t port, const std::vector& certs, + const std::string& ocsp_response, const std::string& cert_sct, + const quic::ProofVerifyContext* context, std::string* error_details, + std::unique_ptr* details, uint8_t* out_alert, + std::unique_ptr callback) { + ASSERT(details != nullptr); + if (doVerifyCertChain(hostname, port, certs, ocsp_response, cert_sct, context, error_details, + out_alert, std::move(callback))) { + *details = std::make_unique(true); + return quic::QUIC_SUCCESS; + } + *details = std::make_unique(false); + return quic::QUIC_FAILURE; +} + +bool EnvoyQuicProofVerifier::doVerifyCertChain( const std::string& hostname, const uint16_t /*port*/, const std::vector& certs, const std::string& /*ocsp_response*/, const std::string& /*cert_sct*/, - const quic::ProofVerifyContext* /*context*/, std::string* error_details, - std::unique_ptr* /*details*/, uint8_t* /*out_alert*/, + const quic::ProofVerifyContext* /*context*/, std::string* error_details, uint8_t* /*out_alert*/, std::unique_ptr /*callback*/) { ASSERT(!certs.empty()); bssl::UniquePtr intermediates(sk_X509_new_null()); @@ -20,7 +35,7 @@ quic::QuicAsyncStatus EnvoyQuicProofVerifier::VerifyCertChain( for (size_t i = 0; i < certs.size(); i++) { bssl::UniquePtr cert = parseDERCertificate(certs[i], error_details); if (!cert) { - return quic::QUIC_FAILURE; + return false; } if (i == 0) { leaf = std::move(cert); @@ -33,7 +48,7 @@ quic::QuicAsyncStatus EnvoyQuicProofVerifier::VerifyCertChain( ASSERT(cert_view != nullptr); int sign_alg = deduceSignatureAlgorithmFromPublicKey(cert_view->public_key(), error_details); if (sign_alg == 0) { - return quic::QUIC_FAILURE; + return false; } // We down cast rather than add verifyCertChain to Envoy::Ssl::Context because // verifyCertChain uses a bunch of SSL-specific structs which we want to keep @@ -41,17 +56,16 @@ quic::QuicAsyncStatus EnvoyQuicProofVerifier::VerifyCertChain( bool success = static_cast(context_.get()) ->verifyCertChain(*leaf, *intermediates, *error_details); if (!success) { - return quic::QUIC_FAILURE; + return false; } for (const absl::string_view& config_san : cert_view->subject_alt_name_domains()) { - if (Extensions::TransportSockets::Tls::DefaultCertValidator::dnsNameMatch(hostname, - config_san)) { - return quic::QUIC_SUCCESS; + if (Extensions::TransportSockets::Tls::Utility::dnsNameMatch(hostname, config_san)) { + return true; } } *error_details = absl::StrCat("Leaf certificate doesn't match hostname: ", hostname); - return quic::QUIC_FAILURE; + return false; } } // namespace Quic diff --git a/source/common/quic/envoy_quic_proof_verifier.h b/source/common/quic/envoy_quic_proof_verifier.h index 6574ab434bd0..95b8ffa9726e 100644 --- a/source/common/quic/envoy_quic_proof_verifier.h +++ b/source/common/quic/envoy_quic_proof_verifier.h @@ -6,6 +6,18 @@ namespace Envoy { namespace Quic { +class CertVerifyResult : public quic::ProofVerifyDetails { +public: + explicit CertVerifyResult(bool is_valid) : is_valid_(is_valid) {} + + ProofVerifyDetails* Clone() const override { return new CertVerifyResult(is_valid_); } + + bool isValid() 
const { return is_valid_; } + +private: + bool is_valid_{false}; +}; + // A quic::ProofVerifier implementation which verifies cert chain using SSL // client context config. class EnvoyQuicProofVerifier : public EnvoyQuicProofVerifierBase { @@ -25,6 +37,12 @@ class EnvoyQuicProofVerifier : public EnvoyQuicProofVerifierBase { std::unique_ptr callback) override; private: + bool doVerifyCertChain(const std::string& hostname, const uint16_t port, + const std::vector& certs, const std::string& ocsp_response, + const std::string& cert_sct, const quic::ProofVerifyContext* context, + std::string* error_details, uint8_t* out_alert, + std::unique_ptr callback); + Envoy::Ssl::ClientContextSharedPtr context_; }; diff --git a/source/common/quic/envoy_quic_proof_verifier_base.h b/source/common/quic/envoy_quic_proof_verifier_base.h index 45bdd2a4161b..b754f5487d4c 100644 --- a/source/common/quic/envoy_quic_proof_verifier_base.h +++ b/source/common/quic/envoy_quic_proof_verifier_base.h @@ -1,21 +1,11 @@ #pragma once -#include "absl/strings/str_cat.h" - -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#endif +#include "source/common/common/logger.h" +#include "absl/strings/str_cat.h" #include "quiche/quic/core/crypto/proof_verifier.h" #include "quiche/quic/core/quic_versions.h" -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - -#include "source/common/common/logger.h" - namespace Envoy { namespace Quic { diff --git a/source/common/quic/envoy_quic_server_connection.h b/source/common/quic/envoy_quic_server_connection.h index 707deb412ec9..de763fcb4c7d 100644 --- a/source/common/quic/envoy_quic_server_connection.h +++ b/source/common/quic/envoy_quic_server_connection.h @@ -6,18 +6,8 @@ #include "source/common/quic/quic_network_connection.h" #include "source/server/connection_handler_impl.h" -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#endif - #include "quiche/quic/core/quic_connection.h" -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - namespace Envoy { namespace Quic { diff --git a/source/common/quic/envoy_quic_server_session.cc b/source/common/quic/envoy_quic_server_session.cc index 67d224deeeb0..869a92a82e4b 100644 --- a/source/common/quic/envoy_quic_server_session.cc +++ b/source/common/quic/envoy_quic_server_session.cc @@ -7,6 +7,8 @@ #include "source/common/quic/envoy_quic_proof_source.h" #include "source/common/quic/envoy_quic_server_stream.h" +#include "quic_filter_manager_connection_impl.h" + namespace Envoy { namespace Quic { @@ -23,6 +25,7 @@ EnvoyQuicServerSession::EnvoyQuicServerSession( send_buffer_limit), quic_connection_(std::move(connection)), quic_stat_names_(quic_stat_names), listener_scope_(listener_scope), crypto_server_stream_factory_(crypto_server_stream_factory) { + quic_ssl_info_ = std::make_shared(*this); } EnvoyQuicServerSession::~EnvoyQuicServerSession() { @@ -146,6 +149,27 @@ void EnvoyQuicServerSession::OnRstStream(const quic::QuicRstStreamFrame& frame) /*from_self*/ false, /*is_upstream*/ false); } +void EnvoyQuicServerSession::setHttp3Options( + const envoy::config::core::v3::Http3ProtocolOptions& http3_options) { + QuicFilterManagerConnectionImpl::setHttp3Options(http3_options); + if (http3_options_->has_quic_protocol_options() && + http3_options_->quic_protocol_options().has_connection_keepalive()) { + const uint64_t initial_interval = PROTOBUF_GET_MS_OR_DEFAULT( + 
http3_options_->quic_protocol_options().connection_keepalive(), initial_interval, 0); + const uint64_t max_interval = + PROTOBUF_GET_MS_OR_DEFAULT(http3_options_->quic_protocol_options().connection_keepalive(), + max_interval, quic::kPingTimeoutSecs); + if (max_interval == 0) { + return; + } + if (initial_interval > 0) { + connection()->set_ping_timeout(quic::QuicTime::Delta::FromMilliseconds(max_interval)); + connection()->set_initial_retransmittable_on_wire_timeout( + quic::QuicTime::Delta::FromMilliseconds(initial_interval)); + } + } +} + void EnvoyQuicServerSession::storeConnectionMapPosition(FilterChainToConnectionMap& connection_map, const Network::FilterChain& filter_chain, ConnectionMapIter position) { diff --git a/source/common/quic/envoy_quic_server_session.h b/source/common/quic/envoy_quic_server_session.h index e63ba55473a5..59bb2384e0f3 100644 --- a/source/common/quic/envoy_quic_server_session.h +++ b/source/common/quic/envoy_quic_server_session.h @@ -1,31 +1,19 @@ #pragma once +#include #include -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#pragma GCC diagnostic ignored "-Wtype-limits" -#endif +#include "source/common/quic/envoy_quic_crypto_stream_factory.h" +#include "source/common/quic/envoy_quic_server_connection.h" +#include "source/common/quic/envoy_quic_server_stream.h" +#include "source/common/quic/quic_filter_manager_connection_impl.h" +#include "source/common/quic/quic_stat_names.h" +#include "source/common/quic/send_buffer_monitor.h" #include "quiche/quic/core/http/quic_server_session_base.h" #include "quiche/quic/core/quic_crypto_server_stream.h" #include "quiche/quic/core/tls_server_handshaker.h" -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - -#include - -#include "source/common/quic/send_buffer_monitor.h" -#include "source/common/quic/quic_filter_manager_connection_impl.h" -#include "source/common/quic/envoy_quic_server_connection.h" -#include "source/common/quic/envoy_quic_server_stream.h" -#include "source/common/quic/envoy_quic_crypto_stream_factory.h" -#include "source/common/quic/quic_stat_names.h" - namespace Envoy { namespace Quic { @@ -99,6 +87,8 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, const Network::FilterChain& filter_chain, ConnectionMapIter position); + void setHttp3Options(const envoy::config::core::v3::Http3ProtocolOptions& http3_options) override; + using quic::QuicSession::PerformActionOnActiveStreams; protected: diff --git a/source/common/quic/envoy_quic_server_stream.cc b/source/common/quic/envoy_quic_server_stream.cc index 1e62a2203055..b93e3a216acb 100644 --- a/source/common/quic/envoy_quic_server_stream.cc +++ b/source/common/quic/envoy_quic_server_stream.cc @@ -5,28 +5,17 @@ #include -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#endif +#include "source/common/buffer/buffer_impl.h" +#include "source/common/common/assert.h" +#include "source/common/http/header_map_impl.h" +#include "source/common/http/header_utility.h" +#include "source/common/quic/envoy_quic_server_session.h" +#include "source/common/quic/envoy_quic_utils.h" #include "quiche/quic/core/http/quic_header_list.h" #include "quiche/quic/core/quic_session.h" #include "quiche/spdy/core/spdy_header_block.h" -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - -#include "source/common/quic/envoy_quic_utils.h" 
-#include "source/common/quic/envoy_quic_server_session.h" - -#include "source/common/buffer/buffer_impl.h" -#include "source/common/http/header_map_impl.h" -#include "source/common/common/assert.h" -#include "source/common/http/header_utility.h" - namespace Envoy { namespace Quic { @@ -211,6 +200,11 @@ void EnvoyQuicServerStream::OnBodyAvailable() { if (fin_read_and_no_trailers) { end_stream_decoded_ = true; } + updateReceivedContentBytes(buffer->length(), fin_read_and_no_trailers); + if (stream_error() != quic::QUIC_STREAM_NO_ERROR) { + // A stream error has occurred, stop processing. + return; + } request_decoder_->decodeData(*buffer, fin_read_and_no_trailers); } @@ -248,6 +242,11 @@ void EnvoyQuicServerStream::maybeDecodeTrailers() { if (sequencer()->IsClosed() && !FinishedReadingTrailers()) { // Only decode trailers after finishing decoding body. end_stream_decoded_ = true; + updateReceivedContentBytes(0, true); + if (stream_error() != quic::QUIC_STREAM_NO_ERROR) { + // A stream error has occurred, stop processing. + return; + } quic::QuicRstStreamErrorCode rst = quic::QUIC_STREAM_NO_ERROR; auto trailers = spdyHeaderBlockToEnvoyTrailers( received_trailers(), filterManagerConnection()->maxIncomingHeadersCount(), *this, details_, diff --git a/source/common/quic/envoy_quic_server_stream.h b/source/common/quic/envoy_quic_server_stream.h index 8693cd197b2b..5662b08fab7d 100644 --- a/source/common/quic/envoy_quic_server_stream.h +++ b/source/common/quic/envoy_quic_server_stream.h @@ -1,19 +1,9 @@ #pragma once -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#endif +#include "source/common/quic/envoy_quic_stream.h" #include "quiche/quic/core/http/quic_spdy_server_stream_base.h" -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - -#include "source/common/quic/envoy_quic_stream.h" - namespace Envoy { namespace Quic { @@ -80,17 +70,16 @@ class EnvoyQuicServerStream : public quic::QuicSpdyServerStreamBase, void onPendingFlushTimer() override; bool hasPendingData() override; + void + onStreamError(absl::optional should_close_connection, + quic::QuicRstStreamErrorCode rst = quic::QUIC_BAD_APPLICATION_PAYLOAD) override; + private: QuicFilterManagerConnectionImpl* filterManagerConnection(); // Deliver awaiting trailers if body has been delivered. void maybeDecodeTrailers(); - // Either reset the stream or close the connection according to - // should_close_connection and configured http3 options. 
- void onStreamError(absl::optional should_close_connection, - quic::QuicRstStreamErrorCode rst = quic::QUIC_BAD_APPLICATION_PAYLOAD); - Http::RequestDecoder* request_decoder_{nullptr}; envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action_; diff --git a/source/common/quic/envoy_quic_stream.h b/source/common/quic/envoy_quic_stream.h index 4f1669f493ef..305fbbebe42a 100644 --- a/source/common/quic/envoy_quic_stream.h +++ b/source/common/quic/envoy_quic_stream.h @@ -107,21 +107,47 @@ class EnvoyQuicStream : public virtual Http::StreamEncoder, return Http::HeaderUtility::HeaderValidationResult::REJECT; } if (header_name == "content-length") { - return Http::HeaderUtility::validateContentLength( - header_value, override_stream_error_on_invalid_http_message, - close_connection_upon_invalid_header_); + size_t content_length = 0; + Http::HeaderUtility::HeaderValidationResult result = + Http::HeaderUtility::validateContentLength( + header_value, override_stream_error_on_invalid_http_message, + close_connection_upon_invalid_header_, content_length); + content_length_ = content_length; + return result; } return Http::HeaderUtility::HeaderValidationResult::ACCEPT; } absl::string_view responseDetails() override { return details_; } + const StreamInfo::BytesMeterSharedPtr& bytesMeter() override { return bytes_meter_; } + protected: virtual void switchStreamBlockState() PURE; // Needed for ENVOY_STREAM_LOG. virtual uint32_t streamId() PURE; virtual Network::Connection* connection() PURE; + // Either reset the stream or close the connection according to + // should_close_connection and configured http3 options. + virtual void + onStreamError(absl::optional should_close_connection, + quic::QuicRstStreamErrorCode rst = quic::QUIC_BAD_APPLICATION_PAYLOAD) PURE; + + // TODO(danzh) remove this once QUICHE enforces content-length consistency. + void updateReceivedContentBytes(size_t payload_length, bool end_stream) { + received_content_bytes_ += payload_length; + if (!content_length_.has_value()) { + return; + } + if (received_content_bytes_ > content_length_.value() || + (end_stream && received_content_bytes_ != content_length_.value() && + !(got_304_response_ && received_content_bytes_ == 0) && !(sent_head_request_))) { + details_ = Http3ResponseCodeDetailValues::inconsistent_content_length; + // Reset instead of closing the connection to align with nghttp2. + onStreamError(false); + } + } // True once end of stream is propagated to Envoy. Envoy doesn't expect to be // notified more than once about end of stream. So once this is true, no need @@ -141,6 +167,8 @@ class EnvoyQuicStream : public virtual Http::StreamEncoder, // TODO(kbaichoo): bind the account to the QUIC buffers to enable tracking of // memory allocated within QUIC buffers. Buffer::BufferMemoryAccountSharedPtr buffer_memory_account_ = nullptr; + bool got_304_response_{false}; + bool sent_head_request_{false}; private: // Keeps track of bytes buffered in the stream send buffer in QUICHE and reacts @@ -157,6 +185,10 @@ class EnvoyQuicStream : public virtual Http::StreamEncoder, // state change in its own call stack. And Envoy upstream doesn't like quic stream to be unblocked // in its callstack either because the stream will push data right away. 
Event::SchedulableCallbackPtr async_stream_blockage_change_; + + StreamInfo::BytesMeterSharedPtr bytes_meter_{std::make_shared()}; + absl::optional content_length_; + size_t received_content_bytes_{0}; }; } // namespace Quic diff --git a/source/common/quic/envoy_quic_utils.cc b/source/common/quic/envoy_quic_utils.cc index 9557a77a2eb8..433d379580da 100644 --- a/source/common/quic/envoy_quic_utils.cc +++ b/source/common/quic/envoy_quic_utils.cc @@ -127,7 +127,7 @@ Http::StreamResetReason quicErrorCodeToEnvoyRemoteResetReason(quic::QuicErrorCod } Network::ConnectionSocketPtr -createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, +createConnectionSocket(const Network::Address::InstanceConstSharedPtr& peer_addr, Network::Address::InstanceConstSharedPtr& local_addr, const Network::ConnectionSocket::OptionsSharedPtr& options) { if (local_addr == nullptr) { @@ -176,6 +176,10 @@ bssl::UniquePtr parseDERCertificate(const std::string& der_bytes, int deduceSignatureAlgorithmFromPublicKey(const EVP_PKEY* public_key, std::string* error_details) { int sign_alg = 0; + if (public_key == nullptr) { + *error_details = "Invalid leaf cert, bad public key"; + return sign_alg; + } const int pkey_id = EVP_PKEY_id(public_key); switch (pkey_id) { case EVP_PKEY_EC: { @@ -233,6 +237,14 @@ createServerConnectionSocket(Network::IoHandle& io_handle, return connection_socket; } +void convertQuicConfig(const envoy::config::core::v3::QuicProtocolOptions& config, + quic::QuicConfig& quic_config) { + int32_t max_streams = PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, max_concurrent_streams, 100); + quic_config.SetMaxBidirectionalStreamsToSend(max_streams); + quic_config.SetMaxUnidirectionalStreamsToSend(max_streams); + configQuicInitialFlowControlWindow(config, quic_config); +} + void configQuicInitialFlowControlWindow(const envoy::config::core::v3::QuicProtocolOptions& config, quic::QuicConfig& quic_config) { size_t stream_flow_control_window_to_send = PROTOBUF_GET_WRAPPED_OR_DEFAULT( diff --git a/source/common/quic/envoy_quic_utils.h b/source/common/quic/envoy_quic_utils.h index b07324b049bc..ccef576914f9 100644 --- a/source/common/quic/envoy_quic_utils.h +++ b/source/common/quic/envoy_quic_utils.h @@ -6,30 +6,18 @@ #include "source/common/common/assert.h" #include "source/common/http/header_map_impl.h" +#include "source/common/http/header_utility.h" #include "source/common/network/address_impl.h" #include "source/common/network/listen_socket_impl.h" #include "source/common/quic/quic_io_handle_wrapper.h" -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#endif - -#include "quiche/quic/core/quic_types.h" -#include "quiche/quic/core/quic_config.h" - -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - +#include "openssl/ssl.h" #include "quiche/quic/core/http/quic_header_list.h" +#include "quiche/quic/core/quic_config.h" #include "quiche/quic/core/quic_error_codes.h" +#include "quiche/quic/core/quic_types.h" #include "quiche/quic/platform/api/quic_ip_address.h" #include "quiche/quic/platform/api/quic_socket_address.h" -#include "source/common/http/header_utility.h" - -#include "openssl/ssl.h" namespace Envoy { namespace Quic { @@ -53,6 +41,9 @@ class Http3ResponseCodeDetailValues { static constexpr absl::string_view too_many_trailers = "http3.too_many_trailers"; // Too many headers were sent. 
static constexpr absl::string_view too_many_headers = "http3.too_many_headers"; + // The payload size is different from what the content-length header indicated. + static constexpr absl::string_view inconsistent_content_length = + "http3.inconsistent_content_length"; }; // TODO(danzh): this is called on each write. Consider to return an address instance on the stack if @@ -166,7 +157,7 @@ Http::StreamResetReason quicErrorCodeToEnvoyRemoteResetReason(quic::QuicErrorCod // Create a connection socket instance and apply given socket options to the // socket. IP_PKTINFO and SO_RXQ_OVFL is always set if supported. Network::ConnectionSocketPtr -createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, +createConnectionSocket(const Network::Address::InstanceConstSharedPtr& peer_addr, Network::Address::InstanceConstSharedPtr& local_addr, const Network::ConnectionSocket::OptionsSharedPtr& options); @@ -187,6 +178,10 @@ createServerConnectionSocket(Network::IoHandle& io_handle, const quic::QuicSocketAddress& peer_address, const std::string& hostname, absl::string_view alpn); +// Alter QuicConfig based on all the options in the supplied config. +void convertQuicConfig(const envoy::config::core::v3::QuicProtocolOptions& config, + quic::QuicConfig& quic_config); + // Set initial flow control windows in quic_config according to the given Envoy config. void configQuicInitialFlowControlWindow(const envoy::config::core::v3::QuicProtocolOptions& config, quic::QuicConfig& quic_config); diff --git a/source/common/quic/platform/quic_logging_impl.h b/source/common/quic/platform/quic_logging_impl.h index f5dc65dd7c43..10fdb01e91e2 100644 --- a/source/common/quic/platform/quic_logging_impl.h +++ b/source/common/quic/platform/quic_logging_impl.h @@ -128,6 +128,7 @@ #endif #define QUICHE_PREDICT_FALSE_IMPL(x) ABSL_PREDICT_FALSE(x) +#define QUICHE_PREDICT_TRUE_IMPL(x) ABSL_PREDICT_TRUE(x) namespace quic { diff --git a/source/common/quic/platform/quiche_flags_impl.cc b/source/common/quic/platform/quiche_flags_impl.cc index af607a83adc4..30da7e75db9e 100644 --- a/source/common/quic/platform/quiche_flags_impl.cc +++ b/source/common/quic/platform/quiche_flags_impl.cc @@ -32,8 +32,11 @@ absl::flat_hash_map makeFlagMap() { QUIC_FLAG(FLAGS_quic_restart_flag_http2_testonly_default_false, false) QUIC_FLAG(FLAGS_quic_restart_flag_http2_testonly_default_true, true) #undef QUIC_FLAG - // Disable IETF draft 29 implementation. Envoy only supports RFC-v1. + // Envoy only supports RFC-v1 in the long term, so disable IETF draft 29 implementation by + // default. FLAGS_quic_reloadable_flag_quic_disable_version_draft_29->setValue(true); + // This flag fixes a QUICHE issue which may crash Envoy during connection close. + FLAGS_quic_reloadable_flag_quic_single_ack_in_packet2->setValue(true); #define QUIC_PROTOCOL_FLAG(type, flag, ...) 
flags.emplace(FLAGS_##flag->name(), FLAGS_##flag); #include "quiche/quic/core/quic_protocol_flags_list.h" diff --git a/source/common/quic/quic_filter_manager_connection_impl.cc b/source/common/quic/quic_filter_manager_connection_impl.cc index b69a63f2681d..168ddcd6d584 100644 --- a/source/common/quic/quic_filter_manager_connection_impl.cc +++ b/source/common/quic/quic_filter_manager_connection_impl.cc @@ -3,6 +3,8 @@ #include #include +#include "quic_ssl_connection_info.h" + namespace Envoy { namespace Quic { @@ -117,8 +119,7 @@ QuicFilterManagerConnectionImpl::socketOptions() const { } Ssl::ConnectionInfoConstSharedPtr QuicFilterManagerConnectionImpl::ssl() const { - // TODO(danzh): construct Ssl::ConnectionInfo from crypto stream - return nullptr; + return Ssl::ConnectionInfoConstSharedPtr(quic_ssl_info_); } void QuicFilterManagerConnectionImpl::rawWrite(Buffer::Instance& /*data*/, bool /*end_stream*/) { @@ -178,9 +179,14 @@ void QuicFilterManagerConnectionImpl::onConnectionCloseEvent( // The connection was closed before it could be used. Stats are not recorded. return; } - if (version.transport_version == quic::QUIC_VERSION_IETF_RFC_V1) { + switch (version.transport_version) { + case quic::QUIC_VERSION_IETF_DRAFT_29: + codec_stats_->quic_version_h3_29_.inc(); + return; + case quic::QUIC_VERSION_IETF_RFC_V1: codec_stats_->quic_version_rfc_v1_.inc(); - } else { + return; + default: ENVOY_BUG(false, fmt::format("Unexpected QUIC version {}", quic::QuicVersionToString(version.transport_version))); } diff --git a/source/common/quic/quic_filter_manager_connection_impl.h b/source/common/quic/quic_filter_manager_connection_impl.h index 7003860c8011..7b2138d5e1ee 100644 --- a/source/common/quic/quic_filter_manager_connection_impl.h +++ b/source/common/quic/quic_filter_manager_connection_impl.h @@ -5,27 +5,18 @@ #include "envoy/event/dispatcher.h" #include "envoy/network/connection.h" -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#endif - -#include "quiche/quic/core/quic_connection.h" - -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - #include "source/common/common/empty_string.h" #include "source/common/common/logger.h" #include "source/common/http/http3/codec_stats.h" #include "source/common/network/connection_impl_base.h" -#include "source/common/quic/quic_network_connection.h" #include "source/common/quic/envoy_quic_simulated_watermark_buffer.h" +#include "source/common/quic/quic_network_connection.h" +#include "source/common/quic/quic_ssl_connection_info.h" #include "source/common/quic/send_buffer_monitor.h" #include "source/common/stream_info/stream_info_impl.h" +#include "quiche/quic/core/quic_connection.h" + namespace Envoy { class TestPauseFilterForQuic; @@ -138,7 +129,7 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase, uint32_t bytesToSend() { return bytes_to_send_; } - void setHttp3Options(const envoy::config::core::v3::Http3ProtocolOptions& http3_options) { + virtual void setHttp3Options(const envoy::config::core::v3::Http3ProtocolOptions& http3_options) { http3_options_ = http3_options; } @@ -161,14 +152,15 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase, virtual bool hasDataToWrite() PURE; // Returns a QuicConnection interface if initialized_ is true, otherwise nullptr. 
- virtual const quic::QuicConnection* quicConnection() const = 0; - virtual quic::QuicConnection* quicConnection() = 0; + virtual const quic::QuicConnection* quicConnection() const PURE; + virtual quic::QuicConnection* quicConnection() PURE; QuicNetworkConnection* network_connection_{nullptr}; OptRef codec_stats_; OptRef http3_options_; bool initialized_{false}; + std::shared_ptr quic_ssl_info_; private: friend class Envoy::TestPauseFilterForQuic; diff --git a/source/common/quic/quic_network_connection.cc b/source/common/quic/quic_network_connection.cc index bc22c6efaa67..22906c0d37d1 100644 --- a/source/common/quic/quic_network_connection.cc +++ b/source/common/quic/quic_network_connection.cc @@ -3,10 +3,15 @@ namespace Envoy { namespace Quic { -QuicNetworkConnection::QuicNetworkConnection(Network::ConnectionSocketPtr&& connection_socket) - : connection_socket_(std::move(connection_socket)) {} +QuicNetworkConnection::QuicNetworkConnection(Network::ConnectionSocketPtr&& connection_socket) { + connection_sockets_.push_back(std::move(connection_socket)); +} -QuicNetworkConnection::~QuicNetworkConnection() { connection_socket_->close(); } +QuicNetworkConnection::~QuicNetworkConnection() { + for (auto& socket : connection_sockets_) { + socket->close(); + } +} uint64_t QuicNetworkConnection::id() const { return envoy_connection_->id(); } diff --git a/source/common/quic/quic_network_connection.h b/source/common/quic/quic_network_connection.h index ae122304c353..62264eaa69e1 100644 --- a/source/common/quic/quic_network_connection.h +++ b/source/common/quic/quic_network_connection.h @@ -28,7 +28,9 @@ class QuicNetworkConnection : protected Logger::Loggable // Called in session Initialize(). void setEnvoyConnection(Network::Connection& connection) { envoy_connection_ = &connection; } - const Network::ConnectionSocketPtr& connectionSocket() const { return connection_socket_; } + const Network::ConnectionSocketPtr& connectionSocket() const { + return connection_sockets_.back(); + } // Needed for ENVOY_CONN_LOG. uint64_t id() const; @@ -36,16 +38,20 @@ class QuicNetworkConnection : protected Logger::Loggable protected: Network::Connection::ConnectionStats& connectionStats() const { return *connection_stats_; } + std::vector& connectionSockets() { return connection_sockets_; } + void setConnectionSocket(Network::ConnectionSocketPtr&& connection_socket) { - connection_socket_ = std::move(connection_socket); + connection_sockets_.push_back(std::move(connection_socket)); } private: // TODO(danzh): populate stats. std::unique_ptr connection_stats_; - // Assigned upon construction. Constructed with empty local address if unknown - // by then. - Network::ConnectionSocketPtr connection_socket_; + // Hosts a list of active sockets, while only the last one is used for writing data. + // Hosts a single default socket upon construction. New sockets can be pushed in later as a result + // of QUIC connection migration. + // TODO(renjietang): Impose an upper limit. + std::vector connection_sockets_; // Points to an instance of EnvoyQuicServerSession or EnvoyQuicClientSession. 
Network::Connection* envoy_connection_{nullptr}; }; diff --git a/source/common/quic/quic_ssl_connection_info.h b/source/common/quic/quic_ssl_connection_info.h new file mode 100644 index 000000000000..c79cf9a2952b --- /dev/null +++ b/source/common/quic/quic_ssl_connection_info.h @@ -0,0 +1,33 @@ +#pragma once + +#include "source/extensions/transport_sockets/tls/connection_info_impl_base.h" + +#include "quiche/quic/core/quic_session.h" + +namespace Envoy { +namespace Quic { + +// A wrapper of a QUIC session to be passed around as an indicator of ssl support and to provide +// access to the SSL object in QUIC crypto stream. +class QuicSslConnectionInfo : public Extensions::TransportSockets::Tls::ConnectionInfoImplBase { +public: + QuicSslConnectionInfo(quic::QuicSession& session) : session_(session) {} + + // Ssl::ConnectionInfo + bool peerCertificateValidated() const override { return cert_validated_; }; + // Extensions::TransportSockets::Tls::ConnectionInfoImplBase + SSL* ssl() const override { + ASSERT(session_.GetCryptoStream() != nullptr); + ASSERT(session_.GetCryptoStream()->GetSsl() != nullptr); + return session_.GetCryptoStream()->GetSsl(); + } + + void onCertValidated() { cert_validated_ = true; }; + +private: + quic::QuicSession& session_; + bool cert_validated_{false}; +}; + +} // namespace Quic +} // namespace Envoy diff --git a/source/common/quic/send_buffer_monitor.h b/source/common/quic/send_buffer_monitor.h index db2211bee039..1a7101464009 100644 --- a/source/common/quic/send_buffer_monitor.h +++ b/source/common/quic/send_buffer_monitor.h @@ -1,17 +1,7 @@ #pragma once -#if defined(__GNUC__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#endif - #include "quiche/quic/core/quic_stream.h" -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#endif - namespace Envoy { namespace Quic { diff --git a/source/common/quic/udp_gso_batch_writer.h b/source/common/quic/udp_gso_batch_writer.h index 06dfc06cf462..2d3d8c12c9bd 100644 --- a/source/common/quic/udp_gso_batch_writer.h +++ b/source/common/quic/udp_gso_batch_writer.h @@ -5,32 +5,13 @@ #else #define UDP_GSO_BATCH_WRITER_COMPILETIME_SUPPORT 1 -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Winvalid-offsetof" -#pragma GCC diagnostic ignored "-Wignored-qualifiers" - -// QUICHE doesn't mark override at QuicBatchWriterBase::SupportsReleaseTime() -#ifdef __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Winconsistent-missing-override" -#elif defined(__GNUC__) && __GNUC__ >= 5 -#pragma GCC diagnostic ignored "-Wsuggest-override" -#endif - -#include "quiche/quic/core/batch_writer/quic_gso_batch_writer.h" - -#ifdef __clang__ -#pragma clang diagnostic pop -#endif - -#pragma GCC diagnostic pop - #include "envoy/network/udp_packet_writer_handler.h" #include "source/common/protobuf/utility.h" #include "source/common/runtime/runtime_protos.h" +#include "quiche/quic/core/batch_writer/quic_gso_batch_writer.h" + namespace Envoy { namespace Quic { diff --git a/source/common/router/BUILD b/source/common/router/BUILD index 4e0bbe914570..cc7b7b979edc 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -61,10 +61,13 @@ envoy_cc_library( "//source/common/http:headers_lib", "//source/common/http:path_utility_lib", "//source/common/http:utility_lib", + "//source/common/http/matching:data_impl_lib", + "//source/common/matcher:matcher_lib", 
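Editor's sketch of the new QuicSslConnectionInfo header above: a thin adapter that owns no session state of its own, forwards handle lookups to the wrapped session's crypto stream, and records whether the peer certificate has been validated. All types below are hypothetical simplifications.

#include <cassert>

struct CryptoStream {
  void* ssl_handle{nullptr}; // stands in for the SSL* held by the crypto stream
};

struct Session {
  const CryptoStream* cryptoStream() const { return &crypto_stream_; }
  CryptoStream crypto_stream_;
};

class SslInfoAdapter {
public:
  explicit SslInfoAdapter(Session& session) : session_(session) {}

  bool peerCertificateValidated() const { return cert_validated_; }
  void onCertValidated() { cert_validated_ = true; }

  // The handle is looked up on demand from the wrapped session, never cached here.
  void* sslHandle() const {
    assert(session_.cryptoStream() != nullptr);
    return session_.cryptoStream()->ssl_handle;
  }

private:
  Session& session_;
  bool cert_validated_{false};
};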
"//source/common/protobuf:utility_lib", "//source/common/tracing:http_tracer_lib", "//source/common/upstream:retry_factory_lib", "//source/extensions/filters/http/common:utility_lib", + "@envoy_api//envoy/config/common/matcher/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", @@ -233,6 +236,7 @@ envoy_cc_library( "//source/common/config:xds_resource_lib", "//source/common/init:manager_lib", "//source/common/init:watcher_lib", + "//source/common/protobuf:utility_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index d756c607966a..705911b97a3e 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -8,11 +8,15 @@ #include #include +#include "envoy/config/common/matcher/v3/matcher.pb.h" +#include "envoy/config/common/matcher/v3/matcher.pb.validate.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/route/v3/route.pb.h" #include "envoy/config/route/v3/route_components.pb.h" #include "envoy/http/header_map.h" #include "envoy/runtime/runtime.h" +#include "envoy/type/matcher/v3/http_inputs.pb.h" +#include "envoy/type/matcher/v3/http_inputs.pb.validate.h" #include "envoy/type/matcher/v3/string.pb.h" #include "envoy/type/v3/percent.pb.h" #include "envoy/upstream/cluster_manager.h" @@ -29,8 +33,10 @@ #include "source/common/config/utility.h" #include "source/common/config/well_known_names.h" #include "source/common/http/headers.h" +#include "source/common/http/matching/data_impl.h" #include "source/common/http/path_utility.h" #include "source/common/http/utility.h" +#include "source/common/matcher/matcher.h" #include "source/common/protobuf/protobuf.h" #include "source/common/protobuf/utility.h" #include "source/common/router/reset_header_parser.h" @@ -60,6 +66,69 @@ void mergeTransforms(Http::HeaderTransforms& dest, const Http::HeaderTransforms& src.headers_to_remove.end()); } +RouteEntryImplBaseConstSharedPtr createAndValidateRoute( + const envoy::config::route::v3::Route& route_config, const VirtualHostImpl& vhost, + const OptionalHttpFilters& optional_http_filters, + Server::Configuration::ServerFactoryContext& factory_context, + ProtobufMessage::ValidationVisitor& validator, + const absl::optional& validation_clusters) { + + RouteEntryImplBaseConstSharedPtr route; + switch (route_config.match().path_specifier_case()) { + case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kPrefix: { + route = std::make_shared(vhost, route_config, optional_http_filters, + factory_context, validator); + break; + } + case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kPath: { + route = std::make_shared(vhost, route_config, optional_http_filters, + factory_context, validator); + break; + } + case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kSafeRegex: { + route = std::make_shared(vhost, route_config, optional_http_filters, + factory_context, validator); + break; + } + case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kConnectMatcher: { + route = std::make_shared(vhost, route_config, optional_http_filters, + factory_context, validator); + break; + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + if (validation_clusters.has_value()) { + route->validateClusters(*validation_clusters); + for (const auto& shadow_policy : 
route->shadowPolicies()) { + ASSERT(!shadow_policy->cluster().empty()); + if (!validation_clusters->hasCluster(shadow_policy->cluster())) { + throw EnvoyException( + fmt::format("route: unknown shadow cluster '{}'", shadow_policy->cluster())); + } + } + } + + return route; +} + +class RouteActionValidationVisitor + : public Matcher::MatchTreeValidationVisitor { +public: + absl::Status performDataInputValidation(const Matcher::DataInputFactory&, + absl::string_view type_url) override { + static std::string request_header_input_name = TypeUtil::descriptorFullNameToTypeUrl( + envoy::type::matcher::v3::HttpRequestHeaderMatchInput::descriptor()->full_name()); + if (type_url == request_header_input_name) { + return absl::OkStatus(); + } + + return absl::InvalidArgumentError( + fmt::format("Route table can only match on request headers, saw {}", type_url)); + } +}; + const envoy::config::route::v3::WeightedCluster::ClusterWeight& validateWeightedClusterSpecifier( const envoy::config::route::v3::WeightedCluster::ClusterWeight& cluster) { if (!cluster.name().empty() && !cluster.cluster_header().empty()) { @@ -1319,41 +1388,27 @@ VirtualHostImpl::VirtualHostImpl( hedge_policy_ = virtual_host.hedge_policy(); } - for (const auto& route : virtual_host.routes()) { - switch (route.match().path_specifier_case()) { - case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kPrefix: { - routes_.emplace_back(new PrefixRouteEntryImpl(*this, route, optional_http_filters, - factory_context, validator)); - break; - } - case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kPath: { - routes_.emplace_back( - new PathRouteEntryImpl(*this, route, optional_http_filters, factory_context, validator)); - break; - } - case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kSafeRegex: { - routes_.emplace_back( - new RegexRouteEntryImpl(*this, route, optional_http_filters, factory_context, validator)); - break; - } - case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kConnectMatcher: { - routes_.emplace_back(new ConnectRouteEntryImpl(*this, route, optional_http_filters, - factory_context, validator)); - break; - } - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } + if (virtual_host.has_matcher() && !virtual_host.routes().empty()) { + throw EnvoyException("cannot set both matcher and routes on virtual host"); + } - if (validation_clusters.has_value()) { - routes_.back()->validateClusters(*validation_clusters); - for (const auto& shadow_policy : routes_.back()->shadowPolicies()) { - ASSERT(!shadow_policy->cluster().empty()); - if (!validation_clusters->hasCluster(shadow_policy->cluster())) { - throw EnvoyException( - fmt::format("route: unknown shadow cluster '{}'", shadow_policy->cluster())); - } - } + if (virtual_host.has_matcher()) { + RouteActionContext context{*this, optional_http_filters, factory_context}; + RouteActionValidationVisitor validation_visitor; + Matcher::MatchTreeFactory factory( + context, factory_context, validation_visitor); + + matcher_ = factory.create(virtual_host.matcher())(); + + if (!validation_visitor.errors().empty()) { + // TODO(snowp): Output all violations. 
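Editor's sketch of RouteActionValidationVisitor above: the match tree attached to a route table may only read request headers, so any other data-input type URL is rejected while the tree is built rather than at request time. Standalone stand-in below; the real code returns absl::Status, and the exact type URL shown is this sketch's assumption.

#include <string>

struct Result {
  bool ok;
  std::string error;
};

constexpr const char* kRequestHeaderInput =
    "type.googleapis.com/envoy.type.matcher.v3.HttpRequestHeaderMatchInput";

Result validateDataInput(const std::string& type_url) {
  if (type_url == kRequestHeaderInput) {
    return {true, ""};
  }
  return {false, "Route table can only match on request headers, saw " + type_url};
}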
+ throw EnvoyException(fmt::format("requirement violation while creating route match tree: {}", + validation_visitor.errors()[0])); + } + } else { + for (const auto& route : virtual_host.routes()) { + routes_.emplace_back(createAndValidateRoute(route, *this, optional_http_filters, + factory_context, validator, validation_clusters)); } } @@ -1477,33 +1532,59 @@ RouteConstSharedPtr VirtualHostImpl::getRouteFromEntries(const RouteCallback& cb return SSL_REDIRECT_ROUTE; } - // Check for a route that matches the request. - for (auto route = routes_.begin(); route != routes_.end(); ++route) { - if (!headers.Path() && !(*route)->supportsPathlessHeaders()) { - continue; - } + if (matcher_) { + Http::Matching::HttpMatchingDataImpl data; + data.onRequestHeaders(headers); - RouteConstSharedPtr route_entry = (*route)->matches(headers, stream_info, random_value); - if (nullptr == route_entry) { - continue; + auto match = Matcher::evaluateMatch(*matcher_, data); + + if (match.result_) { + // The only possible action that can be used within the route matching context + // is the RouteMatchAction, so this must be true. + ASSERT(match.result_->typeUrl() == RouteMatchAction::staticTypeUrl()); + ASSERT(dynamic_cast(match.result_.get())); + const RouteMatchAction& route_action = static_cast(*match.result_); + + if (route_action.route()->matches(headers, stream_info, random_value)) { + return route_action.route(); + } + + ENVOY_LOG(debug, "route was resolved but final route did not match incoming request"); + return nullptr; } - if (cb) { - RouteEvalStatus eval_status = (std::next(route) == routes_.end()) - ? RouteEvalStatus::NoMoreRoutes - : RouteEvalStatus::HasMoreRoutes; - RouteMatchStatus match_status = cb(route_entry, eval_status); - if (match_status == RouteMatchStatus::Accept) { - return route_entry; + ENVOY_LOG(debug, "failed to match incoming request: {}", match.match_state_); + + return nullptr; + } else { + // Check for a route that matches the request. + for (auto route = routes_.begin(); route != routes_.end(); ++route) { + if (!headers.Path() && !(*route)->supportsPathlessHeaders()) { + continue; } - if (match_status == RouteMatchStatus::Continue && - eval_status == RouteEvalStatus::NoMoreRoutes) { - return nullptr; + + RouteConstSharedPtr route_entry = (*route)->matches(headers, stream_info, random_value); + if (nullptr == route_entry) { + continue; + } + + if (cb) { + RouteEvalStatus eval_status = (std::next(route) == routes_.end()) + ? 
RouteEvalStatus::NoMoreRoutes + : RouteEvalStatus::HasMoreRoutes; + RouteMatchStatus match_status = cb(route_entry, eval_status); + if (match_status == RouteMatchStatus::Accept) { + return route_entry; + } + if (match_status == RouteMatchStatus::Continue && + eval_status == RouteEvalStatus::NoMoreRoutes) { + return nullptr; + } + continue; } - continue; - } - return route_entry; + return route_entry; + } } return nullptr; @@ -1632,17 +1713,14 @@ RouteSpecificFilterConfigConstSharedPtr PerFilterConfigs::createRouteSpecificFil Envoy::Config::Utility::translateOpaqueConfig(typed_config, validator, *proto_config); auto object = factory->createRouteSpecificFilterConfig(*proto_config, factory_context, validator); if (object == nullptr) { - if (Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.check_unsupported_typed_per_filter_config") && - !is_optional) { + if (is_optional) { + ENVOY_LOG(debug, + "The filter {} doesn't support virtual host-specific configurations, and it is " + "optional, so ignore it.", + name); + } else { throw EnvoyException( fmt::format("The filter {} doesn't support virtual host-specific configurations", name)); - } else { - ENVOY_LOG(warn, - "The filter {} doesn't support virtual host-specific configurations. Set runtime " - "config `envoy.reloadable_features.check_unsupported_typed_per_filter_config` as " - "true to reject any invalid virtual-host specific configuration.", - name); } } return object; @@ -1671,5 +1749,18 @@ const RouteSpecificFilterConfig* PerFilterConfigs::get(const std::string& name) return it == configs_.end() ? nullptr : it->second.get(); } +Matcher::ActionFactoryCb RouteMatchActionFactory::createActionFactoryCb( + const Protobuf::Message& config, RouteActionContext& context, + ProtobufMessage::ValidationVisitor& validation_visitor) { + const auto& route_config = + MessageUtil::downcastAndValidate(config, + validation_visitor); + auto route = createAndValidateRoute(route_config, context.vhost, context.optional_http_filters, + context.factory_context, validation_visitor, absl::nullopt); + + return [route]() { return std::make_unique(route); }; +} +REGISTER_FACTORY(RouteMatchActionFactory, Matcher::ActionFactory); + } // namespace Router } // namespace Envoy diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index 659861f3b11c..4b6126587d24 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -13,6 +13,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/route/v3/route.pb.h" #include "envoy/config/route/v3/route_components.pb.h" +#include "envoy/config/route/v3/route_components.pb.validate.h" #include "envoy/router/router.h" #include "envoy/runtime/runtime.h" #include "envoy/server/filter_config.h" @@ -23,6 +24,7 @@ #include "source/common/config/metadata.h" #include "source/common/http/hash_policy.h" #include "source/common/http/header_utility.h" +#include "source/common/matcher/matcher.h" #include "source/common/router/config_utility.h" #include "source/common/router/header_formatter.h" #include "source/common/router/header_parser.h" @@ -189,7 +191,7 @@ class ConfigImpl; /** * Holds all routing configuration for an entire virtual host. 
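Editor's sketch of the per-filter-config change above: the runtime guard is removed, so a filter that does not support virtual-host-specific configuration is now always an error unless the entry was marked optional, in which case it is logged and skipped. The helper below is a hypothetical standalone condensation of that control flow.

#include <cstdio>
#include <stdexcept>
#include <string>

void checkRouteSpecificConfig(bool factory_returned_config, bool is_optional,
                              const std::string& filter_name) {
  if (factory_returned_config) {
    return;
  }
  if (is_optional) {
    // Optional entries are tolerated; the real code emits a debug log here.
    std::printf("filter %s does not support per-virtual-host config; optional, ignoring\n",
                filter_name.c_str());
    return;
  }
  throw std::runtime_error("The filter " + filter_name +
                           " doesn't support virtual host-specific configurations");
}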
*/ -class VirtualHostImpl : public VirtualHost { +class VirtualHostImpl : public VirtualHost, Logger::Loggable { public: VirtualHostImpl( const envoy::config::route::v3::VirtualHost& virtual_host, @@ -281,6 +283,7 @@ class VirtualHostImpl : public VirtualHost { absl::optional retry_policy_; absl::optional hedge_policy_; const CatchAllVirtualCluster virtual_cluster_catch_all_; + Matcher::MatchTreeSharedPtr matcher_; }; using VirtualHostSharedPtr = std::shared_ptr; @@ -1059,6 +1062,37 @@ class ConnectRouteEntryImpl : public RouteEntryImplBase { bool supportsPathlessHeaders() const override { return true; } }; + +// Contextual information used to construct the route actions for a match tree. +struct RouteActionContext { + const VirtualHostImpl& vhost; + const OptionalHttpFilters& optional_http_filters; + Server::Configuration::ServerFactoryContext& factory_context; +}; + +// Action used with the matching tree to specify route to use for an incoming stream. +class RouteMatchAction : public Matcher::ActionBase { +public: + explicit RouteMatchAction(RouteEntryImplBaseConstSharedPtr route) : route_(std::move(route)) {} + + RouteEntryImplBaseConstSharedPtr route() const { return route_; } + +private: + const RouteEntryImplBaseConstSharedPtr route_; +}; + +// Registered factory for RouteMatchAction. +class RouteMatchActionFactory : public Matcher::ActionFactory { +public: + Matcher::ActionFactoryCb + createActionFactoryCb(const Protobuf::Message& config, RouteActionContext& context, + ProtobufMessage::ValidationVisitor& validation_visitor) override; + std::string name() const override { return "route"; } + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } +}; + /** * Wraps the route configuration which matches an incoming request headers to a backend cluster. * This is split out mainly to help with unit testing. diff --git a/source/common/router/scoped_config_impl.cc b/source/common/router/scoped_config_impl.cc index 6aafffaf5ce2..3cc128477f07 100644 --- a/source/common/router/scoped_config_impl.cc +++ b/source/common/router/scoped_config_impl.cc @@ -75,9 +75,9 @@ HeaderValueExtractorImpl::computeFragment(const Http::HeaderMap& headers) const return nullptr; } -ScopedRouteInfo::ScopedRouteInfo(envoy::config::route::v3::ScopedRouteConfiguration&& config_proto, - ConfigConstSharedPtr&& route_config) - : config_proto_(std::move(config_proto)), route_config_(std::move(route_config)) { +ScopedRouteInfo::ScopedRouteInfo(envoy::config::route::v3::ScopedRouteConfiguration config_proto, + ConfigConstSharedPtr route_config) + : config_proto_(config_proto), route_config_(route_config) { // TODO(stevenzzzz): Maybe worth a KeyBuilder abstraction when there are more than one type of // Fragment. for (const auto& fragment : config_proto_.key().fragments()) { diff --git a/source/common/router/scoped_config_impl.h b/source/common/router/scoped_config_impl.h index e1783da59255..cd66fe152366 100644 --- a/source/common/router/scoped_config_impl.h +++ b/source/common/router/scoped_config_impl.h @@ -81,8 +81,8 @@ class ScopeKeyBuilderImpl : public ScopeKeyBuilderBase { // ScopedRouteConfiguration and corresponding RouteConfigProvider. 
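Editor's sketch of RouteMatchActionFactory above: the factory validates the proto and builds the route once, then returns a callback that stamps out a fresh action object per match, all sharing the same immutable route. Hypothetical standalone types below.

#include <functional>
#include <memory>

struct Route {};

struct RouteMatchAction {
  explicit RouteMatchAction(std::shared_ptr<const Route> route) : route_(std::move(route)) {}
  std::shared_ptr<const Route> route_;
};

using ActionFactoryCb = std::function<std::unique_ptr<RouteMatchAction>()>;

ActionFactoryCb makeActionFactoryCb(std::shared_ptr<const Route> route) {
  // The shared_ptr is captured by value, so every produced action shares one route.
  return [route]() { return std::make_unique<RouteMatchAction>(route); };
}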
class ScopedRouteInfo { public: - ScopedRouteInfo(envoy::config::route::v3::ScopedRouteConfiguration&& config_proto, - ConfigConstSharedPtr&& route_config); + ScopedRouteInfo(envoy::config::route::v3::ScopedRouteConfiguration config_proto, + ConfigConstSharedPtr route_config); const ConfigConstSharedPtr& routeConfig() const { return route_config_; } const ScopeKey& scopeKey() const { return scope_key_; } @@ -109,9 +109,15 @@ using ScopedRouteMap = std::map; */ class ScopedConfigImpl : public ScopedConfig { public: - ScopedConfigImpl(ScopedRoutes::ScopeKeyBuilder&& scope_key_builder) + explicit ScopedConfigImpl(ScopedRoutes::ScopeKeyBuilder&& scope_key_builder) : scope_key_builder_(std::move(scope_key_builder)) {} + ScopedConfigImpl(ScopedRoutes::ScopeKeyBuilder&& scope_key_builder, + const std::vector& scoped_route_infos) + : scope_key_builder_(std::move(scope_key_builder)) { + addOrUpdateRoutingScopes(scoped_route_infos); + } + void addOrUpdateRoutingScopes(const std::vector& scoped_route_infos); diff --git a/source/common/router/scoped_rds.cc b/source/common/router/scoped_rds.cc index 133e91e313dd..de3e5832130a 100644 --- a/source/common/router/scoped_rds.cc +++ b/source/common/router/scoped_rds.cc @@ -17,6 +17,7 @@ #include "source/common/config/xds_resource.h" #include "source/common/init/manager_impl.h" #include "source/common/init/watcher_impl.h" +#include "source/common/protobuf/utility.h" #include "source/common/router/rds_impl.h" #include "source/common/router/scoped_config_impl.h" @@ -78,20 +79,53 @@ ConfigProviderPtr create( } // namespace ScopedRoutesConfigProviderUtil +namespace { + +std::vector +makeScopedRouteInfos(ProtobufTypes::ConstMessagePtrVector&& config_protos, + Server::Configuration::ServerFactoryContext& factory_context, + ScopedRoutesConfigProviderManager& config_provider_manager, + const OptionalHttpFilters& optional_http_filters) { + std::vector scopes; + for (std::unique_ptr& config_proto : config_protos) { + auto scoped_route_config = + MessageUtil::downcastAndValidate( + *config_proto, factory_context.messageValidationContext().staticValidationVisitor()); + if (!scoped_route_config.route_configuration_name().empty()) { + throw EnvoyException("Fetching routes via RDS (route_configuration_name) is not supported " + "with inline scoped routes."); + } + if (!scoped_route_config.has_route_configuration()) { + throw EnvoyException("You must specify a route_configuration with inline scoped routes."); + } + RouteConfigProviderPtr route_config_provider = + config_provider_manager.routeConfigProviderManager().createStaticRouteConfigProvider( + scoped_route_config.route_configuration(), optional_http_filters, factory_context, + factory_context.messageValidationContext().staticValidationVisitor()); + scopes.push_back(std::make_shared(scoped_route_config, + route_config_provider->config())); + } + + return scopes; +} + +} // namespace + InlineScopedRoutesConfigProvider::InlineScopedRoutesConfigProvider( ProtobufTypes::ConstMessagePtrVector&& config_protos, std::string name, Server::Configuration::ServerFactoryContext& factory_context, ScopedRoutesConfigProviderManager& config_provider_manager, envoy::config::core::v3::ConfigSource rds_config_source, envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes::ScopeKeyBuilder - scope_key_builder) + scope_key_builder, + const OptionalHttpFilters& optional_http_filters) : Envoy::Config::ImmutableConfigProviderBase(factory_context, config_provider_manager, ConfigProviderInstanceType::Inline, 
ConfigProvider::ApiType::Delta), name_(std::move(name)), - config_(std::make_shared(std::move(scope_key_builder))), - config_protos_(std::make_move_iterator(config_protos.begin()), - std::make_move_iterator(config_protos.end())), + scopes_(makeScopedRouteInfos(std::move(config_protos), factory_context, + config_provider_manager, optional_http_filters)), + config_(std::make_shared(std::move(scope_key_builder), scopes_)), rds_config_source_(std::move(rds_config_source)) {} ScopedRdsConfigSubscription::ScopedRdsConfigSubscription( @@ -244,6 +278,9 @@ bool ScopedRdsConfigSubscription::addOrUpdateScopes( envoy::config::route::v3::ScopedRouteConfiguration scoped_route_config = dynamic_cast( resource.get().resource()); + if (scoped_route_config.route_configuration_name().empty()) { + throw EnvoyException("route_configuration_name is empty."); + } const std::string scope_name = scoped_route_config.name(); rds.set_route_config_name(scoped_route_config.route_configuration_name()); std::unique_ptr rds_config_provider_helper; @@ -576,7 +613,7 @@ ConfigProviderPtr ScopedRoutesConfigProviderManager::createXdsConfigProvider( typed_optarg.scoped_routes_name_, typed_optarg.scope_key_builder_, factory_context, stat_prefix, typed_optarg.rds_config_source_, static_cast(config_provider_manager) - .routeConfigProviderPanager(), + .routeConfigProviderManager(), static_cast(config_provider_manager)); }); @@ -590,7 +627,8 @@ ConfigProviderPtr ScopedRoutesConfigProviderManager::createStaticConfigProvider( const auto& typed_optarg = static_cast(optarg); return std::make_unique( std::move(config_protos), typed_optarg.scoped_routes_name_, factory_context, *this, - typed_optarg.rds_config_source_, typed_optarg.scope_key_builder_); + typed_optarg.rds_config_source_, typed_optarg.scope_key_builder_, + typed_optarg.optional_http_filters_); } } // namespace Router diff --git a/source/common/router/scoped_rds.h b/source/common/router/scoped_rds.h index d21d812741e3..eb53c8b57fe8 100644 --- a/source/common/router/scoped_rds.h +++ b/source/common/router/scoped_rds.h @@ -52,7 +52,8 @@ class InlineScopedRoutesConfigProvider : public Envoy::Config::ImmutableConfigPr ScopedRoutesConfigProviderManager& config_provider_manager, envoy::config::core::v3::ConfigSource rds_config_source, envoy::extensions::filters::network::http_connection_manager:: - v3::ScopedRoutes::ScopeKeyBuilder scope_key_builder); + v3::ScopedRoutes::ScopeKeyBuilder scope_key_builder, + const OptionalHttpFilters& optional_http_filters); ~InlineScopedRoutesConfigProvider() override = default; @@ -61,9 +62,9 @@ class InlineScopedRoutesConfigProvider : public Envoy::Config::ImmutableConfigPr // Envoy::Config::ConfigProvider Envoy::Config::ConfigProvider::ConfigProtoVector getConfigProtos() const override { Envoy::Config::ConfigProvider::ConfigProtoVector out_protos; - std::for_each(config_protos_.begin(), config_protos_.end(), - [&out_protos](const std::unique_ptr& message) { - out_protos.push_back(message.get()); + std::for_each(scopes_.begin(), scopes_.end(), + [&out_protos](const ScopedRouteInfoConstSharedPtr& scope) { + out_protos.push_back(&scope->configProto()); }); return out_protos; } @@ -73,8 +74,8 @@ class InlineScopedRoutesConfigProvider : public Envoy::Config::ImmutableConfigPr private: const std::string name_; + const std::vector scopes_; ConfigConstSharedPtr config_; - const std::vector> config_protos_; const envoy::config::core::v3::ConfigSource rds_config_source_; }; @@ -295,7 +296,7 @@ class ScopedRoutesConfigProviderManager : public 
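Editor's sketch of the inline scoped-routes provider change above: the header now declares scopes_ before config_ so the member-initializer list can build the scope list first and then hand it to the config object; C++ initializes members in declaration order, not in the order they appear in the initializer list. Hypothetical standalone types.

#include <cstddef>
#include <memory>
#include <vector>

struct ScopeInfo {};

struct ScopedConfig {
  explicit ScopedConfig(const std::vector<std::shared_ptr<const ScopeInfo>>& scopes)
      : size_(scopes.size()) {}
  std::size_t size_;
};

class InlineProvider {
public:
  explicit InlineProvider(std::vector<std::shared_ptr<const ScopeInfo>> scopes)
      : scopes_(std::move(scopes)), config_(std::make_shared<ScopedConfig>(scopes_)) {}

private:
  // Declaration order matters: scopes_ must be fully constructed before config_ is.
  const std::vector<std::shared_ptr<const ScopeInfo>> scopes_;
  std::shared_ptr<ScopedConfig> config_;
};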
Envoy::Config::ConfigProviderMa Server::Configuration::ServerFactoryContext& factory_context, const Envoy::Config::ConfigProviderManager::OptionalArg& optarg) override; - RouteConfigProviderManager& routeConfigProviderPanager() { + RouteConfigProviderManager& routeConfigProviderManager() { return route_config_provider_manager_; } diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 147f7af49fd0..34ccc3f38c5f 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -303,7 +303,6 @@ void UpstreamRequest::onResetStream(Http::StreamResetReason reason, span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); span_->setTag(Tracing::Tags::get().ErrorReason, Http::Utility::resetReasonToString(reason)); } - clearRequestEncoder(); awaiting_headers_ = false; if (!calling_encode_headers_) { @@ -394,12 +393,7 @@ void UpstreamRequest::onPoolFailure(ConnectionPool::PoolFailureReason reason, reset_reason = Http::StreamResetReason::ConnectionFailure; break; case ConnectionPool::PoolFailureReason::Timeout: - if (Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.treat_upstream_connect_timeout_as_connect_failure")) { - reset_reason = Http::StreamResetReason::ConnectionFailure; - } else { - reset_reason = Http::StreamResetReason::LocalReset; - } + reset_reason = Http::StreamResetReason::ConnectionFailure; } // Mimic an upstream reset. @@ -415,7 +409,6 @@ void UpstreamRequest::onPoolReady( ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher()); ENVOY_STREAM_LOG(debug, "pool ready", *parent_.callbacks()); upstream_ = std::move(upstream); - // Have the upstream use the account of the downstream. upstream_->setAccount(parent_.callbacks()->account()); @@ -452,6 +445,10 @@ void UpstreamRequest::onPoolReady( info.downstreamAddressProvider().connectionID().value()); } + stream_info_.setUpstreamBytesMeter(upstream_->bytesMeter()); + StreamInfo::StreamInfo::syncUpstreamAndDownstreamBytesMeter(parent_.callbacks()->streamInfo(), + stream_info_); + if (parent_.downstreamEndStream()) { setupPerTryTimeout(); } else { diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index 98f214c0d277..112c2a5216e6 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -119,7 +119,7 @@ class UpstreamRequest : public Logger::Loggable, bool encodeComplete() const { return encode_complete_; } RouterFilterInterface& parent() { return parent_; } // Exposes streamInfo for the upstream stream. - const StreamInfo::StreamInfo& streamInfo() const { return stream_info_; } + StreamInfo::StreamInfo& streamInfo() { return stream_info_; } private: bool shouldSendEndStream() { diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 945a5ff1225c..707299a14799 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -51,59 +51,51 @@ uint64_t getInteger(absl::string_view feature, uint64_t default_value) { // If issues are found that require a runtime feature to be disabled, it should be reported // ASAP by filing a bug on github. Overriding non-buggy code is strongly discouraged to avoid the // problem of the bugs being found after the old code path has been removed. 
+// clang-format off constexpr const char* runtime_features[] = { // Enabled "envoy.reloadable_features.test_feature_true", // Begin alphabetically sorted section. - "envoy.reloadable_features.add_and_validate_scheme_header", "envoy.reloadable_features.allow_response_for_timeout", - "envoy.reloadable_features.check_unsupported_typed_per_filter_config", "envoy.reloadable_features.conn_pool_delete_when_idle", "envoy.reloadable_features.correct_scheme_and_xfp", "envoy.reloadable_features.disable_tls_inspector_injection", - "envoy.reloadable_features.dont_add_content_length_for_bodiless_requests", - "envoy.reloadable_features.enable_compression_without_content_length_header", "envoy.reloadable_features.fix_added_trailers", "envoy.reloadable_features.grpc_bridge_stats_disabled", "envoy.reloadable_features.grpc_web_fix_non_proto_encoded_response_handling", - "envoy.reloadable_features.grpc_json_transcoder_adhere_to_buffer_limits", "envoy.reloadable_features.hash_multiple_header_values", "envoy.reloadable_features.health_check.graceful_goaway_handling", - "envoy.reloadable_features.health_check.immediate_failure_exclude_from_cluster", "envoy.reloadable_features.http2_consume_stream_refused_errors", - "envoy.reloadable_features.http2_skip_encoding_empty_trailers", "envoy.reloadable_features.http_ext_authz_do_not_skip_direct_response_and_redirect", "envoy.reloadable_features.http_reject_path_with_fragment", "envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled", "envoy.reloadable_features.http_transport_failure_reason_in_body", - "envoy.reloadable_features.improved_stream_limit_handling", "envoy.reloadable_features.internal_redirects_with_body", "envoy.reloadable_features.listener_reuse_port_default_enabled", "envoy.reloadable_features.listener_wildcard_match_ip_family", "envoy.reloadable_features.new_tcp_connection_pool", "envoy.reloadable_features.no_chunked_encoding_header_for_304", "envoy.reloadable_features.preserve_downstream_scheme", - "envoy.reloadable_features.remove_forked_chromium_url", "envoy.reloadable_features.require_strict_1xx_and_204_response_headers", - "envoy.reloadable_features.return_502_for_upstream_protocol_errors", "envoy.reloadable_features.send_strict_1xx_and_204_response_headers", "envoy.reloadable_features.strip_port_from_connect", - "envoy.reloadable_features.treat_host_like_authority", - "envoy.reloadable_features.treat_upstream_connect_timeout_as_connect_failure", "envoy.reloadable_features.udp_listener_updates_filter_chain_in_place", "envoy.reloadable_features.udp_per_event_loop_read_limit", "envoy.reloadable_features.unquote_log_string_values", - "envoy.reloadable_features.upstream_host_weight_change_causes_rebuild", + "envoy.reloadable_features.use_dns_ttl", "envoy.reloadable_features.use_observable_cluster_name", "envoy.reloadable_features.validate_connect", "envoy.reloadable_features.vhds_heartbeats", - "envoy.reloadable_features.wasm_cluster_name_envoy_grpc", "envoy.reloadable_features.upstream_http2_flood_checks", + "envoy.restart_features.explicit_wildcard_resource", "envoy.restart_features.use_apple_api_for_dns_lookups", + // Misplaced flags: please do not add flags to this section. "envoy.reloadable_features.header_map_correctly_coalesce_cookies", "envoy.reloadable_features.sanitize_http_header_referer", "envoy.reloadable_features.skip_dispatching_frames_for_closed_connection", + // End misplaced flags: please do not add flags in this section. 
}; +// clang-format on // This is a section for officially sanctioned runtime features which are too // high risk to be enabled by default. Examples where we have opted to land @@ -114,8 +106,8 @@ constexpr const char* runtime_features[] = { // When features are added here, there should be a tracking bug assigned to the // code owner to flip the default after sufficient testing. constexpr const char* disabled_runtime_features[] = { - // v2 is fatal-by-default. - "envoy.test_only.broken_in_production.enable_deprecated_v2_api", + // TODO(alyssawilk, junr03) flip (and add release notes + docs) these after Lyft tests + "envoy.reloadable_features.allow_multiple_dns_addresses", // TODO(asraa) flip to true in a separate PR to enable the new JSON by default. "envoy.reloadable_features.remove_legacy_json", // Sentinel and test flag. @@ -126,6 +118,8 @@ constexpr const char* disabled_runtime_features[] = { // CacheOption is CacheWhenRuntimeEnabled. // Caller that use AlwaysCache option will always cache, unaffected by this runtime. "envoy.reloadable_features.enable_grpc_async_client_cache", + // TODO(dmitri-d) reset to true to enable unified mux by default + "envoy.reloadable_features.unified_mux", }; RuntimeFeatures::RuntimeFeatures() { diff --git a/source/common/stats/custom_stat_namespaces_impl.cc b/source/common/stats/custom_stat_namespaces_impl.cc index 24c2f6d246f6..89c95344fc2b 100644 --- a/source/common/stats/custom_stat_namespaces_impl.cc +++ b/source/common/stats/custom_stat_namespaces_impl.cc @@ -7,18 +7,18 @@ namespace Envoy { namespace Stats { bool CustomStatNamespacesImpl::registered(const absl::string_view name) const { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); return namespaces_.find(name) != namespaces_.end(); } void CustomStatNamespacesImpl::registerStatNamespace(const absl::string_view name) { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); namespaces_.insert(std::string(name)); }; absl::optional CustomStatNamespacesImpl::stripRegisteredPrefix(const absl::string_view stat_name) const { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); if (!namespaces_.empty()) { const auto pos = stat_name.find_first_of('.'); if (pos != std::string::npos && registered(stat_name.substr(0, pos))) { diff --git a/source/common/stats/histogram_impl.cc b/source/common/stats/histogram_impl.cc index cac590339318..c2921ea7180d 100644 --- a/source/common/stats/histogram_impl.cc +++ b/source/common/stats/histogram_impl.cc @@ -15,23 +15,15 @@ const ConstSupportedBuckets default_buckets{}; } HistogramStatisticsImpl::HistogramStatisticsImpl() - : supported_buckets_(default_buckets), computed_quantiles_(supportedQuantiles().size(), 0.0) {} + : supported_buckets_(default_buckets), computed_quantiles_(supportedQuantiles().size(), 0.0), + unit_(Histogram::Unit::Unspecified) {} HistogramStatisticsImpl::HistogramStatisticsImpl(const histogram_t* histogram_ptr, + Histogram::Unit unit, ConstSupportedBuckets& supported_buckets) : supported_buckets_(supported_buckets), - computed_quantiles_(HistogramStatisticsImpl::supportedQuantiles().size(), 0.0) { - hist_approx_quantile(histogram_ptr, supportedQuantiles().data(), - HistogramStatisticsImpl::supportedQuantiles().size(), - computed_quantiles_.data()); - - sample_count_ = hist_sample_count(histogram_ptr); - sample_sum_ = hist_approx_sum(histogram_ptr); - - computed_buckets_.reserve(supported_buckets_.size()); - for (const 
auto bucket : supported_buckets_) { - computed_buckets_.emplace_back(hist_approx_count_below(histogram_ptr, bucket)); - } + computed_quantiles_(HistogramStatisticsImpl::supportedQuantiles().size(), 0.0), unit_(unit) { + refresh(histogram_ptr); } const std::vector& HistogramStatisticsImpl::supportedQuantiles() const { @@ -64,19 +56,33 @@ std::string HistogramStatisticsImpl::bucketSummary() const { * Clears the old computed values and refreshes it with values computed from passed histogram. */ void HistogramStatisticsImpl::refresh(const histogram_t* new_histogram_ptr) { + // Convert to double once to avoid needing to cast it on every use. Use a double + // to ensure the compiler doesn't try to convert the expression to integer math. + constexpr double percent_scale = Histogram::PercentScale; + std::fill(computed_quantiles_.begin(), computed_quantiles_.end(), 0.0); ASSERT(supportedQuantiles().size() == computed_quantiles_.size()); hist_approx_quantile(new_histogram_ptr, supportedQuantiles().data(), supportedQuantiles().size(), computed_quantiles_.data()); + if (unit_ == Histogram::Unit::Percent) { + for (double& val : computed_quantiles_) { + val /= percent_scale; + } + } sample_count_ = hist_sample_count(new_histogram_ptr); sample_sum_ = hist_approx_sum(new_histogram_ptr); + if (unit_ == Histogram::Unit::Percent) { + sample_sum_ /= percent_scale; + } - ASSERT(supportedBuckets().size() == computed_buckets_.size()); computed_buckets_.clear(); ConstSupportedBuckets& supported_buckets = supportedBuckets(); computed_buckets_.reserve(supported_buckets.size()); - for (const auto bucket : supported_buckets) { + for (auto bucket : supported_buckets) { + if (unit_ == Histogram::Unit::Percent) { + bucket *= percent_scale; + } computed_buckets_.emplace_back(hist_approx_count_below(new_histogram_ptr, bucket)); } } diff --git a/source/common/stats/histogram_impl.h b/source/common/stats/histogram_impl.h index 03992a06cd7b..b9b083db5210 100644 --- a/source/common/stats/histogram_impl.h +++ b/source/common/stats/histogram_impl.h @@ -36,7 +36,7 @@ class HistogramSettingsImpl : public HistogramSettings { /** * Implementation of HistogramStatistics for circllhist. */ -class HistogramStatisticsImpl : public HistogramStatistics, NonCopyable { +class HistogramStatisticsImpl final : public HistogramStatistics, NonCopyable { public: HistogramStatisticsImpl(); @@ -46,7 +46,7 @@ class HistogramStatisticsImpl : public HistogramStatistics, NonCopyable { * will not be retained. 
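Editor's sketch of the percent-unit handling in refresh() above: quantiles and sums read out of the histogram are divided by the percent scale, while configured bucket bounds are multiplied by it before the lookup, so callers see values in percent even though samples are stored pre-scaled. Standalone sketch; the scale constant below is this sketch's assumption, not necessarily Envoy's value.

#include <vector>

constexpr double kPercentScale = 1000000.0; // hypothetical scale factor

void scaleForPercentUnit(std::vector<double>& quantiles, double& sample_sum,
                         std::vector<double>& bucket_bounds) {
  for (double& q : quantiles) {
    q /= kPercentScale; // histogram stores scaled samples; report in percent
  }
  sample_sum /= kPercentScale;
  for (double& b : bucket_bounds) {
    b *= kPercentScale; // compare bounds in the histogram's native (pre-scaled) units
  }
}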
*/ HistogramStatisticsImpl( - const histogram_t* histogram_ptr, + const histogram_t* histogram_ptr, Histogram::Unit unit = Histogram::Unit::Unspecified, ConstSupportedBuckets& supported_buckets = HistogramSettingsImpl::defaultBuckets()); static ConstSupportedBuckets& defaultSupportedBuckets(); @@ -69,6 +69,7 @@ class HistogramStatisticsImpl : public HistogramStatistics, NonCopyable { std::vector computed_buckets_; uint64_t sample_count_; double sample_sum_; + const Histogram::Unit unit_; }; class HistogramImplHelper : public MetricImpl { diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index 2b79daa7ee86..97960182096c 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -816,8 +816,9 @@ ParentHistogramImpl::ParentHistogramImpl(StatName name, Histogram::Unit unit, : MetricImpl(name, tag_extracted_name, stat_name_tags, thread_local_store.symbolTable()), unit_(unit), thread_local_store_(thread_local_store), interval_histogram_(hist_alloc()), cumulative_histogram_(hist_alloc()), - interval_statistics_(interval_histogram_, supported_buckets), - cumulative_statistics_(cumulative_histogram_, supported_buckets), merged_(false), id_(id) {} + interval_statistics_(interval_histogram_, unit, supported_buckets), + cumulative_statistics_(cumulative_histogram_, unit, supported_buckets), merged_(false), + id_(id) {} ParentHistogramImpl::~ParentHistogramImpl() { thread_local_store_.releaseHistogramCrossThread(id_); diff --git a/source/common/stats/timespan_impl.cc b/source/common/stats/timespan_impl.cc index 33875c0656a4..8af91437b76a 100644 --- a/source/common/stats/timespan_impl.cc +++ b/source/common/stats/timespan_impl.cc @@ -26,6 +26,7 @@ void HistogramCompletableTimespanImpl::ensureTimeHistogram(const Histogram& hist return; case Histogram::Unit::Unspecified: case Histogram::Unit::Bytes: + case Histogram::Unit::Percent: RELEASE_ASSERT( false, fmt::format("Cannot create a timespan flushing the duration to histogram '{}' because " @@ -47,6 +48,7 @@ uint64_t HistogramCompletableTimespanImpl::tickCount() const { return HistogramCompletableTimespanImpl::elapsedDuration().count(); case Histogram::Unit::Unspecified: case Histogram::Unit::Bytes: + case Histogram::Unit::Percent: NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/common/stream_info/stream_info_impl.h b/source/common/stream_info/stream_info_impl.h index 5abd0c7261ec..403ab403154b 100644 --- a/source/common/stream_info/stream_info_impl.h +++ b/source/common/stream_info/stream_info_impl.h @@ -283,6 +283,32 @@ struct StreamInfoImpl : public StreamInfo { absl::optional attemptCount() const override { return attempt_count_; } + const BytesMeterSharedPtr& getUpstreamBytesMeter() const override { + return upstream_bytes_meter_; + } + + const BytesMeterSharedPtr& getDownstreamBytesMeter() const override { + return downstream_bytes_meter_; + } + + void setUpstreamBytesMeter(const BytesMeterSharedPtr& upstream_bytes_meter) override { + // Accumulate the byte measurement from previous upstream request during a retry. 
+ upstream_bytes_meter->addWireBytesSent(upstream_bytes_meter_->wireBytesSent()); + upstream_bytes_meter->addWireBytesReceived(upstream_bytes_meter_->wireBytesReceived()); + upstream_bytes_meter->addHeaderBytesSent(upstream_bytes_meter_->headerBytesSent()); + upstream_bytes_meter->addHeaderBytesReceived(upstream_bytes_meter_->headerBytesReceived()); + + upstream_bytes_meter_ = upstream_bytes_meter; + } + + void setDownstreamBytesMeter(const BytesMeterSharedPtr& downstream_bytes_meter) override { + // Downstream bytes counter don't reset during a retry. + if (downstream_bytes_meter_ == nullptr) { + downstream_bytes_meter_ = downstream_bytes_meter; + } + ASSERT(downstream_bytes_meter_.get() == downstream_bytes_meter.get()); + } + TimeSource& time_source_; const SystemTime start_time_; const MonotonicTime start_time_monotonic_; @@ -339,6 +365,9 @@ struct StreamInfoImpl : public StreamInfo { absl::optional upstream_cluster_info_; std::string filter_chain_name_; Tracing::Reason trace_reason_; + // Default construct the object because upstream stream is not constructed in some cases. + BytesMeterSharedPtr upstream_bytes_meter_{std::make_shared()}; + BytesMeterSharedPtr downstream_bytes_meter_; }; } // namespace StreamInfo diff --git a/source/common/tcp/conn_pool.cc b/source/common/tcp/conn_pool.cc index 9cfa340eff0d..5a65a91d6a18 100644 --- a/source/common/tcp/conn_pool.cc +++ b/source/common/tcp/conn_pool.cc @@ -65,7 +65,7 @@ void ActiveTcpClient::onEvent(Network::ConnectionEvent event) { if (event == Network::ConnectionEvent::Connected) { connection_->readDisable(true); } - Envoy::ConnectionPool::ActiveClient::onEvent(event); + parent_.onConnectionEvent(*this, connection_->transportFailureReason(), event); if (callbacks_) { // Do not pass the Connected event to any session which registered during onEvent above. 
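Editor's sketch of setUpstreamBytesMeter above: when a retry installs a new upstream meter, the new meter first absorbs everything counted against the previous attempt, so stream-level totals keep accumulating across attempts. Hypothetical standalone BytesMeter below.

#include <cstdint>
#include <memory>

struct BytesMeter {
  uint64_t wire_sent{0}, wire_received{0};
  void addWireBytesSent(uint64_t n) { wire_sent += n; }
  void addWireBytesReceived(uint64_t n) { wire_received += n; }
};
using BytesMeterSharedPtr = std::shared_ptr<BytesMeter>;

struct StreamInfoSketch {
  void setUpstreamBytesMeter(const BytesMeterSharedPtr& new_meter) {
    // Carry the previous attempt's counts forward before switching meters.
    new_meter->addWireBytesSent(upstream_meter_->wire_sent);
    new_meter->addWireBytesReceived(upstream_meter_->wire_received);
    upstream_meter_ = new_meter;
  }
  // Default-constructed so the getter is always safe, mirroring the change above.
  BytesMeterSharedPtr upstream_meter_{std::make_shared<BytesMeter>()};
};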
// Consumers of connection pool connections assume they are receiving already connected diff --git a/source/common/tcp/original_conn_pool.cc b/source/common/tcp/original_conn_pool.cc index 7321d3723a5c..bafb0266a20a 100644 --- a/source/common/tcp/original_conn_pool.cc +++ b/source/common/tcp/original_conn_pool.cc @@ -198,7 +198,8 @@ void OriginalConnPoolImpl::onConnectionEvent(ActiveConn& conn, Network::Connecti PendingRequestPtr request = pending_requests_to_purge.front()->removeFromList(pending_requests_to_purge); host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); - request->callbacks_.onPoolFailure(reason, "", conn.real_host_description_); + request->callbacks_.onPoolFailure(reason, conn.conn_->transportFailureReason(), + conn.real_host_description_); } } diff --git a/source/common/thread_local/thread_local_impl.cc b/source/common/thread_local/thread_local_impl.cc index baeade40f0c2..99f473a7cc07 100644 --- a/source/common/thread_local/thread_local_impl.cc +++ b/source/common/thread_local/thread_local_impl.cc @@ -16,14 +16,13 @@ namespace ThreadLocal { thread_local InstanceImpl::ThreadLocalData InstanceImpl::thread_local_data_; InstanceImpl::~InstanceImpl() { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); ASSERT(shutdown_); thread_local_data_.data_.clear(); - Thread::MainThread::clear(); } SlotPtr InstanceImpl::allocateSlot() { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); ASSERT(!shutdown_); if (free_slot_indexes_.empty()) { @@ -92,7 +91,7 @@ void InstanceImpl::SlotImpl::runOnAllThreads(const UpdateCb& cb) { } void InstanceImpl::SlotImpl::set(InitializeCb cb) { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); ASSERT(!parent_.shutdown_); for (Event::Dispatcher& dispatcher : parent_.registered_threads_) { @@ -106,7 +105,7 @@ void InstanceImpl::SlotImpl::set(InitializeCb cb) { } void InstanceImpl::registerThread(Event::Dispatcher& dispatcher, bool main_thread) { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); ASSERT(!shutdown_); if (main_thread) { @@ -120,7 +119,7 @@ void InstanceImpl::registerThread(Event::Dispatcher& dispatcher, bool main_threa } void InstanceImpl::removeSlot(uint32_t slot) { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); // When shutting down, we do not post slot removals to other threads. This is because the other // threads have already shut down and the dispatcher is no longer alive. There is also no reason @@ -147,7 +146,7 @@ void InstanceImpl::removeSlot(uint32_t slot) { } void InstanceImpl::runOnAllThreads(Event::PostCb cb) { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); ASSERT(!shutdown_); for (Event::Dispatcher& dispatcher : registered_threads_) { @@ -159,7 +158,7 @@ void InstanceImpl::runOnAllThreads(Event::PostCb cb) { } void InstanceImpl::runOnAllThreads(Event::PostCb cb, Event::PostCb all_threads_complete_cb) { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); ASSERT(!shutdown_); // Handle main thread first so that when the last worker thread wins, we could just call the // all_threads_complete_cb method. 
Parallelism of main thread execution is being traded off @@ -186,7 +185,7 @@ void InstanceImpl::setThreadLocal(uint32_t index, ThreadLocalObjectSharedPtr obj } void InstanceImpl::shutdownGlobalThreading() { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); ASSERT(!shutdown_); shutdown_ = true; } diff --git a/source/common/thread_local/thread_local_impl.h b/source/common/thread_local/thread_local_impl.h index f7825b66e151..26c08dfce332 100644 --- a/source/common/thread_local/thread_local_impl.h +++ b/source/common/thread_local/thread_local_impl.h @@ -19,7 +19,6 @@ namespace ThreadLocal { */ class InstanceImpl : Logger::Loggable, public NonCopyable, public Instance { public: - InstanceImpl() { Thread::MainThread::initMainThread(); } ~InstanceImpl() override; // ThreadLocal::Instance @@ -78,6 +77,7 @@ class InstanceImpl : Logger::Loggable, public NonCopyable, pub static thread_local ThreadLocalData thread_local_data_; + Thread::MainThread main_thread_; std::vector slots_; // A list of index of freed slots. std::list free_slot_indexes_; diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index aca75b4cf2b6..eb64222b561a 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -68,6 +68,7 @@ envoy_cc_library( "//source/common/common:enum_to_int", "//source/common/common:utility_lib", "//source/common/config:grpc_mux_lib", + "//source/common/config/xds_mux:grpc_mux_lib", "//source/common/config:subscription_factory_lib", "//source/common/config:utility_lib", "//source/common/config:xds_resource_lib", @@ -310,6 +311,7 @@ envoy_cc_library( ":logical_host_lib", ":upstream_includes", "//envoy/upstream:cluster_factory_interface", + "//source/common/common:dns_utils_lib", "//source/common/common:empty_string", "//source/common/config:utility_lib", "//source/common/network:address_lib", @@ -417,6 +419,7 @@ envoy_cc_library( hdrs = ["eds.h"], deps = [ ":cluster_factory_lib", + ":leds_lib", ":upstream_includes", "//envoy/config:grpc_mux_interface", "//envoy/config:subscription_factory_interface", @@ -508,6 +511,7 @@ envoy_cc_library( "//envoy/network:listen_socket_interface", "//envoy/ssl:context_interface", "//envoy/upstream:health_checker_interface", + "//source/common/common:dns_utils_lib", "//source/common/common:enum_to_int", "//source/common/common:thread_lib", "//source/common/common:utility_lib", @@ -630,6 +634,7 @@ envoy_cc_library( "//source/common/network:resolver_lib", "//source/common/network:socket_option_factory_lib", "//source/common/network:utility_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", "//source/common/protobuf", "//source/common/protobuf:utility_lib", "//source/server:transport_socket_config_lib", diff --git a/source/common/upstream/cds_api_helper.cc b/source/common/upstream/cds_api_helper.cc index ddfb2f6a78d0..4511391f645a 100644 --- a/source/common/upstream/cds_api_helper.cc +++ b/source/common/upstream/cds_api_helper.cc @@ -17,10 +17,13 @@ std::vector CdsApiHelper::onConfigUpdate(const std::vector& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& system_version_info) { - Config::ScopedResume maybe_resume_eds; + Config::ScopedResume maybe_resume_eds_leds; if (cm_.adsMux()) { - const auto type_url = Config::getTypeUrl(); - maybe_resume_eds = cm_.adsMux()->pause(type_url); + // A cluster update pauses sending EDS and LEDS requests. 
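Editor's sketch of the thread_local_impl change above: instead of an explicit init call in the constructor and a clear call in the destructor, the instance now owns an RAII member whose constructor and destructor register and deregister the main thread, so the registration can neither be leaked nor cleared twice. Hypothetical standalone types.

struct MainThreadRegistration {
  MainThreadRegistration() { registered = true; }
  ~MainThreadRegistration() { registered = false; }
  static inline bool registered = false; // stand-in for the real thread-local bookkeeping
};

class ThreadLocalInstance {
  // Declared as a member: the registration lives exactly as long as the instance.
  MainThreadRegistration main_thread_;
};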
+ const auto eds_type_url = + Config::getTypeUrl(); + const auto leds_type_url = Config::getTypeUrl(); + maybe_resume_eds_leds = cm_.adsMux()->pause({eds_type_url, leds_type_url}); } ENVOY_LOG(info, "{}: add {} cluster(s), remove {} cluster(s)", name_, added_resources.size(), diff --git a/source/common/upstream/cluster_factory_impl.cc b/source/common/upstream/cluster_factory_impl.cc index f3f2f2f06e77..fdebb5066bc1 100644 --- a/source/common/upstream/cluster_factory_impl.cc +++ b/source/common/upstream/cluster_factory_impl.cc @@ -5,6 +5,7 @@ #include "source/common/http/utility.h" #include "source/common/network/address_impl.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/network/resolver_impl.h" #include "source/common/network/socket_option_factory.h" #include "source/common/upstream/health_checker_impl.h" @@ -88,32 +89,17 @@ ClusterFactoryImplBase::selectDnsResolver(const envoy::config::cluster::v3::Clus // where 'dns_resolvers' is specified, we have per-cluster DNS // resolvers that are created here but ownership resides with // StrictDnsClusterImpl/LogicalDnsCluster. - if ((cluster.has_dns_resolution_config() && + if ((cluster.has_typed_dns_resolver_config() && + !(cluster.typed_dns_resolver_config().typed_config().type_url().empty())) || + (cluster.has_dns_resolution_config() && !cluster.dns_resolution_config().resolvers().empty()) || !cluster.dns_resolvers().empty()) { - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - std::vector resolvers; - Protobuf::RepeatedPtrField resolver_addrs; - if (cluster.has_dns_resolution_config()) { - dns_resolver_options.CopyFrom(cluster.dns_resolution_config().dns_resolver_options()); - resolver_addrs.CopyFrom(cluster.dns_resolution_config().resolvers()); - } else { - /* if `cluster.dns_resolution_config` is not set. */ - // Field bool `use_tcp_for_dns_lookups` will be deprecated in future. To be backward - // compatible utilize cluster.use_tcp_for_dns_lookups(). - dns_resolver_options.set_use_tcp_for_dns_lookups(cluster.use_tcp_for_dns_lookups()); - // Field repeated Address `dns_resolvers` will be deprecated in future. To be backward - // compatible utilize cluster.dns_resolvers(). 
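Editor's sketch of the CDS pause change above: a cluster update now pauses both the endpoint (EDS) and locality-endpoint (LEDS) subscriptions, and the pause is held by a scope guard so the subscriptions resume automatically when the update finishes, even if it throws. Hypothetical standalone mux and guard below.

#include <functional>
#include <set>
#include <string>
#include <vector>

// RAII guard: runs the resume callback when it goes out of scope.
class ScopedResume {
public:
  explicit ScopedResume(std::function<void()> resume) : resume_(std::move(resume)) {}
  ~ScopedResume() { resume_(); }
  ScopedResume(const ScopedResume&) = delete;
  ScopedResume& operator=(const ScopedResume&) = delete;

private:
  std::function<void()> resume_;
};

class Mux {
public:
  ScopedResume pause(std::vector<std::string> type_urls) {
    for (const auto& url : type_urls) {
      paused_.insert(url);
    }
    return ScopedResume([this, type_urls]() {
      for (const auto& url : type_urls) {
        paused_.erase(url);
      }
    });
  }

private:
  std::set<std::string> paused_;
};

void onClusterUpdate(Mux& mux) {
  auto maybe_resume = mux.pause({"eds-type-url", "leds-type-url"}); // placeholder URLs
  // ... process added/removed clusters; EDS/LEDS requests resume when this returns.
}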
- resolver_addrs.CopyFrom(cluster.dns_resolvers()); - } - if (!resolver_addrs.empty()) { - resolvers.reserve(resolver_addrs.size()); - for (const auto& resolver_addr : resolver_addrs) { - resolvers.push_back(Network::Address::resolveProtoAddress(resolver_addr)); - } - } - return context.mainThreadDispatcher().createDnsResolver(resolvers, dns_resolver_options); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + Network::DnsResolverFactory& dns_resolver_factory = + Network::createDnsResolverFactoryFromProto(cluster, typed_dns_resolver_config); + return dns_resolver_factory.createDnsResolver(context.mainThreadDispatcher(), context.api(), + typed_dns_resolver_config); } return context.dnsResolver(); diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index f7a11ba8b27c..d2505431d218 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -23,6 +23,7 @@ #include "source/common/common/utility.h" #include "source/common/config/new_grpc_mux_impl.h" #include "source/common/config/utility.h" +#include "source/common/config/xds_mux/grpc_mux_impl.h" #include "source/common/config/xds_resource.h" #include "source/common/grpc/async_client_manager_impl.h" #include "source/common/http/async_client_impl.h" @@ -182,11 +183,12 @@ void ClusterManagerInitHelper::maybeFinishInitialize() { // If the first CDS response doesn't have any primary cluster, ClusterLoadAssignment // should be already paused by CdsApiImpl::onConfigUpdate(). Need to check that to // avoid double pause ClusterLoadAssignment. - Config::ScopedResume maybe_resume_eds; + Config::ScopedResume maybe_resume_eds_leds; if (cm_.adsMux()) { - const auto type_url = + const auto eds_type_url = Config::getTypeUrl(); - maybe_resume_eds = cm_.adsMux()->pause(type_url); + const auto leds_type_url = Config::getTypeUrl(); + maybe_resume_eds_leds = cm_.adsMux()->pause({eds_type_url, leds_type_url}); } initializeSecondaryClusters(); } @@ -338,28 +340,56 @@ ClusterManagerImpl::ClusterManagerImpl( if (dyn_resources.ads_config().api_type() == envoy::config::core::v3::ApiConfigSource::DELTA_GRPC) { Config::Utility::checkTransportVersion(dyn_resources.ads_config()); - ads_mux_ = std::make_shared( - Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, - dyn_resources.ads_config(), stats, false) - ->createUncachedRawAsyncClient(), - main_thread_dispatcher, - *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.discovery.v3.AggregatedDiscoveryService.DeltaAggregatedResources"), - random_, stats_, - Envoy::Config::Utility::parseRateLimitSettings(dyn_resources.ads_config()), local_info); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.unified_mux")) { + ads_mux_ = std::make_shared( + Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, + dyn_resources.ads_config(), stats, false) + ->createUncachedRawAsyncClient(), + main_thread_dispatcher, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.discovery.v3.AggregatedDiscoveryService." 
+ "DeltaAggregatedResources"), + random_, stats_, + Envoy::Config::Utility::parseRateLimitSettings(dyn_resources.ads_config()), local_info, + dyn_resources.ads_config().set_node_on_first_message_only()); + } else { + ads_mux_ = std::make_shared( + Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, + dyn_resources.ads_config(), stats, false) + ->createUncachedRawAsyncClient(), + main_thread_dispatcher, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.discovery.v3.AggregatedDiscoveryService.DeltaAggregatedResources"), + random_, stats_, + Envoy::Config::Utility::parseRateLimitSettings(dyn_resources.ads_config()), local_info); + } } else { Config::Utility::checkTransportVersion(dyn_resources.ads_config()); - ads_mux_ = std::make_shared( - local_info, - Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, - dyn_resources.ads_config(), stats, false) - ->createUncachedRawAsyncClient(), - main_thread_dispatcher, - *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.discovery.v3.AggregatedDiscoveryService.StreamAggregatedResources"), - random_, stats_, - Envoy::Config::Utility::parseRateLimitSettings(dyn_resources.ads_config()), - bootstrap.dynamic_resources().ads_config().set_node_on_first_message_only()); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.unified_mux")) { + ads_mux_ = std::make_shared( + Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, + dyn_resources.ads_config(), stats, false) + ->createUncachedRawAsyncClient(), + main_thread_dispatcher, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.discovery.v3.AggregatedDiscoveryService." + "StreamAggregatedResources"), + random_, stats_, + Envoy::Config::Utility::parseRateLimitSettings(dyn_resources.ads_config()), local_info, + bootstrap.dynamic_resources().ads_config().set_node_on_first_message_only()); + } else { + ads_mux_ = std::make_shared( + local_info, + Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, + dyn_resources.ads_config(), stats, false) + ->createUncachedRawAsyncClient(), + main_thread_dispatcher, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.discovery.v3.AggregatedDiscoveryService.StreamAggregatedResources"), + random_, stats_, + Envoy::Config::Utility::parseRateLimitSettings(dyn_resources.ads_config()), + bootstrap.dynamic_resources().ads_config().set_node_on_first_message_only()); + } } } else { ads_mux_ = std::make_unique(); @@ -1339,14 +1369,16 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::ClusterEntry( cluster->lbType(), priority_set_, parent_.local_priority_set_, cluster->stats(), cluster->statsScope(), parent.parent_.runtime_, parent.parent_.random_, cluster->lbSubsetInfo(), cluster->lbRingHashConfig(), cluster->lbMaglevConfig(), - cluster->lbLeastRequestConfig(), cluster->lbConfig()); + cluster->lbRoundRobinConfig(), cluster->lbLeastRequestConfig(), cluster->lbConfig(), + parent_.thread_local_dispatcher_.timeSource()); } else { switch (cluster->lbType()) { case LoadBalancerType::LeastRequest: { ASSERT(lb_factory_ == nullptr); lb_ = std::make_unique( priority_set_, parent_.local_priority_set_, cluster->stats(), parent.parent_.runtime_, - parent.parent_.random_, cluster->lbConfig(), cluster->lbLeastRequestConfig()); + parent.parent_.random_, cluster->lbConfig(), cluster->lbLeastRequestConfig(), + parent.thread_local_dispatcher_.timeSource()); break; } case 
LoadBalancerType::Random: { @@ -1358,9 +1390,10 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::ClusterEntry( } case LoadBalancerType::RoundRobin: { ASSERT(lb_factory_ == nullptr); - lb_ = std::make_unique(priority_set_, parent_.local_priority_set_, - cluster->stats(), parent.parent_.runtime_, - parent.parent_.random_, cluster->lbConfig()); + lb_ = std::make_unique( + priority_set_, parent_.local_priority_set_, cluster->stats(), parent.parent_.runtime_, + parent.parent_.random_, cluster->lbConfig(), cluster->lbRoundRobinConfig(), + parent.thread_local_dispatcher_.timeSource()); break; } case LoadBalancerType::ClusterProvided: @@ -1663,7 +1696,8 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( ASSERT(alternate_protocol_options.has_value()); #ifdef ENVOY_ENABLE_QUIC Http::AlternateProtocolsCacheSharedPtr alternate_protocols_cache = - alternate_protocols_cache_manager_->getCache(alternate_protocol_options.value()); + alternate_protocols_cache_manager_->getCache(alternate_protocol_options.value(), + dispatcher); Envoy::Http::ConnectivityGrid::ConnectivityOptions coptions{protocols}; return std::make_unique( dispatcher, context_.api().randomGenerator(), host, priority, options, diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index b95df7924a19..8460a0ef8b82 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -21,7 +21,7 @@ EdsClusterImpl::EdsClusterImpl( added_via_api, factory_context.mainThreadDispatcher().timeSource()), Envoy::Config::SubscriptionBase( factory_context.messageValidationVisitor(), "cluster_name"), - local_info_(factory_context.localInfo()), + factory_context_(factory_context), local_info_(factory_context.localInfo()), cluster_name_(cluster.eds_cluster_config().service_name().empty() ? cluster.name() : cluster.eds_cluster_config().service_name()) { @@ -51,17 +51,25 @@ void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& h priority_state_manager.initializePriorityFor(locality_lb_endpoint); - for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { - auto address = parent_.resolveProtoAddress(lb_endpoint.endpoint().address()); - // When the configuration contains duplicate hosts, only the first one will be retained. - if (all_new_hosts.count(address->asString()) > 0) { - continue; + if (locality_lb_endpoint.has_leds_cluster_locality_config()) { + // The locality uses LEDS, fetch its dynamic data, which must be ready, or otherwise + // the batchUpdate method should not have been called. + const auto& leds_config = locality_lb_endpoint.leds_cluster_locality_config(); + + // The batchUpdate call must be performed after all the endpoints of all localities + // were received. 
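[Editorial sketch, not part of the diff] A minimal standalone illustration of the gate described in the comment above: the batched host rebuild only runs once every per-locality LEDS subscription has reported data. LocalitySub and allUpdated are simplified stand-ins, not the Envoy types.

#include <iostream>
#include <map>
#include <memory>
#include <string>

struct LocalitySub {              // stand-in for a per-locality LEDS subscription
  bool updated = false;
  bool isUpdated() const { return updated; }
};

// True only when every locality has received its endpoints at least once.
bool allUpdated(const std::map<std::string, std::unique_ptr<LocalitySub>>& subs) {
  for (const auto& [name, sub] : subs) {
    (void)name;
    if (!sub->isUpdated()) {
      return false;
    }
  }
  return true;
}

int main() {
  std::map<std::string, std::unique_ptr<LocalitySub>> subs;
  subs["zone-a"] = std::make_unique<LocalitySub>();
  std::cout << (allUpdated(subs) ? "rebuild hosts" : "defer rebuild") << "\n"; // defer rebuild
  subs["zone-a"]->updated = true;
  std::cout << (allUpdated(subs) ? "rebuild hosts" : "defer rebuild") << "\n"; // rebuild hosts
}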
+ ASSERT(parent_.leds_localities_.find(leds_config) != parent_.leds_localities_.end() && + parent_.leds_localities_[leds_config]->isUpdated()); + for (const auto& [_, lb_endpoint] : + parent_.leds_localities_[leds_config]->getEndpointsMap()) { + updateLocalityEndpoints(lb_endpoint, locality_lb_endpoint, priority_state_manager, + all_new_hosts); + } + } else { + for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { + updateLocalityEndpoints(lb_endpoint, locality_lb_endpoint, priority_state_manager, + all_new_hosts); } - - priority_state_manager.registerHostForPriority(lb_endpoint.endpoint().hostname(), address, - locality_lb_endpoint, lb_endpoint, - parent_.time_source_); - all_new_hosts.emplace(address->asString()); } } @@ -118,6 +126,23 @@ void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& h parent_.onPreInitComplete(); } +void EdsClusterImpl::BatchUpdateHelper::updateLocalityEndpoints( + const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint, + const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint, + PriorityStateManager& priority_state_manager, absl::flat_hash_set& all_new_hosts) { + const auto address = parent_.resolveProtoAddress(lb_endpoint.endpoint().address()); + // When the configuration contains duplicate hosts, only the first one will be retained. + const auto address_as_string = address->asString(); + if (all_new_hosts.count(address_as_string) > 0) { + return; + } + + priority_state_manager.registerHostForPriority(lb_endpoint.endpoint().hostname(), address, + locality_lb_endpoint, lb_endpoint, + parent_.time_source_); + all_new_hosts.emplace(address_as_string); +} + void EdsClusterImpl::onConfigUpdate(const std::vector& resources, const std::string&) { if (!validateUpdateSize(resources.size())) { @@ -130,6 +155,17 @@ void EdsClusterImpl::onConfigUpdate(const std::vector 0) { + throw EnvoyException(fmt::format( + "A ClusterLoadAssignment for cluster {} cannot include both LEDS (resource: {}) and a " + "list of endpoints.", + cluster_name_, locality.leds_cluster_locality_config().leds_collection_name())); + } + } // Disable timer (if enabled) as we have received new assignment. if (assignment_timeout_->enabled()) { @@ -144,7 +180,69 @@ void EdsClusterImpl::onConfigUpdate(const std::vectorenableTimer(std::chrono::milliseconds(stale_after_ms)); } - BatchUpdateHelper helper(*this, cluster_load_assignment); + // Pause LEDS messages until the EDS config is finished processing. + Config::ScopedResume maybe_resume_leds; + if (factory_context_.clusterManager().adsMux()) { + const auto type_url = Config::getTypeUrl(); + maybe_resume_leds = factory_context_.clusterManager().adsMux()->pause(type_url); + } + + // Compare the current set of LEDS localities (localities using LEDS) to the one received in the + // update. A LEDS locality can either be added, removed, or kept. If it is added we add a + // subscription to it, and if it is removed we delete the subscription. + LedsConfigSet cla_leds_configs; + + for (const auto& locality : cluster_load_assignment.endpoints()) { + if (locality.has_leds_cluster_locality_config()) { + cla_leds_configs.emplace(locality.leds_cluster_locality_config()); + } + } + + // Remove the LEDS localities that are not needed anymore. 
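[Editorial sketch, not part of the diff] The following hunk first prunes LEDS localities that the new assignment no longer references and then, further below, creates subscriptions for newly referenced ones; together they form a simple reconcile step. A standalone sketch with simplified stand-in types (Subscription and reconcile are illustrative, not the Envoy classes):

#include <iostream>
#include <map>
#include <memory>
#include <set>
#include <string>

struct Subscription {}; // stand-in for a LEDS locality subscription

void reconcile(std::map<std::string, std::unique_ptr<Subscription>>& active,
               const std::set<std::string>& wanted) {
  // Drop subscriptions that the new assignment no longer references.
  for (auto it = active.begin(); it != active.end();) {
    if (wanted.count(it->first) == 0) {
      it = active.erase(it);
    } else {
      ++it;
    }
  }
  // Add subscriptions for localities referenced for the first time.
  for (const auto& name : wanted) {
    if (active.count(name) == 0) {
      active[name] = std::make_unique<Subscription>();
    }
  }
}

int main() {
  std::map<std::string, std::unique_ptr<Subscription>> active;
  active["zone-a"] = std::make_unique<Subscription>();
  reconcile(active, {"zone-b"});
  std::cout << active.count("zone-a") << active.count("zone-b") << "\n"; // 01
}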
+ absl::erase_if(leds_localities_, [&cla_leds_configs](const auto& item) { + auto const& [leds_config, _] = item; + // Returns true if the leds_config isn't in the cla_leds_configs + return cla_leds_configs.find(leds_config) == cla_leds_configs.end(); + }); + + // In case LEDS is used, store the cluster load assignment as a field + // (optimize for no-copy). + envoy::config::endpoint::v3::ClusterLoadAssignment* used_load_assignment; + if (cla_leds_configs.empty()) { + cluster_load_assignment_ = absl::nullopt; + used_load_assignment = &cluster_load_assignment; + } else { + cluster_load_assignment_ = std::move(cluster_load_assignment); + used_load_assignment = &cluster_load_assignment_.value(); + } + + // Add all the LEDS localities that are new. + for (const auto& leds_config : cla_leds_configs) { + if (leds_localities_.find(leds_config) == leds_localities_.end()) { + ENVOY_LOG(trace, "Found new LEDS config in EDS onConfigUpdate() for cluster {}: {}", + cluster_name_, leds_config.DebugString()); + + // Create a new LEDS subscription and add it to the subscriptions map. + LedsSubscriptionPtr leds_locality_subscription = std::make_unique( + leds_config, cluster_name_, factory_context_, info_->statsScope(), + [&, used_load_assignment]() { + // Called upon an update to the locality. + if (validateAllLedsUpdated()) { + BatchUpdateHelper helper(*this, *used_load_assignment); + priority_set_.batchHostUpdate(helper); + } + }); + leds_localities_.emplace(leds_config, std::move(leds_locality_subscription)); + } + } + + // If all the LEDS localities are updated, the EDS update can occur. If not, then when the last + // LEDS locality will be updated, it will trigger the EDS update helper. + if (!validateAllLedsUpdated()) { + return; + } + + BatchUpdateHelper helper(*this, *used_load_assignment); priority_set_.batchHostUpdate(helper); } @@ -291,6 +389,16 @@ EdsClusterFactory::createClusterImpl( nullptr); } +bool EdsClusterImpl::validateAllLedsUpdated() const { + // Iterate through all LEDS based localities, and if they are all updated return true. + for (const auto& [_, leds_subscription] : leds_localities_) { + if (!leds_subscription->isUpdated()) { + return false; + } + } + return true; +} + /** * Static registration for the Eds cluster factory. @see RegisterFactory. */ diff --git a/source/common/upstream/eds.h b/source/common/upstream/eds.h index f396b5e1785f..476e783a32a5 100644 --- a/source/common/upstream/eds.h +++ b/source/common/upstream/eds.h @@ -17,6 +17,7 @@ #include "source/common/config/subscription_base.h" #include "source/common/upstream/cluster_factory_impl.h" +#include "source/common/upstream/leds.h" #include "source/common/upstream/upstream_impl.h" namespace Envoy { @@ -60,6 +61,9 @@ class EdsClusterImpl void startPreInit() override; void onAssignmentTimeout(); + // Returns true iff all the LEDS based localities were updated. 
+ bool validateAllLedsUpdated() const; + class BatchUpdateHelper : public PrioritySet::BatchUpdateCb { public: BatchUpdateHelper( @@ -71,16 +75,34 @@ class EdsClusterImpl void batchUpdate(PrioritySet::HostUpdateCb& host_update_cb) override; private: + void updateLocalityEndpoints( + const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint, + const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint, + PriorityStateManager& priority_state_manager, + absl::flat_hash_set& all_new_hosts); + EdsClusterImpl& parent_; const envoy::config::endpoint::v3::ClusterLoadAssignment& cluster_load_assignment_; }; Config::SubscriptionPtr subscription_; + Server::Configuration::TransportSocketFactoryContextImpl factory_context_; const LocalInfo::LocalInfo& local_info_; const std::string cluster_name_; std::vector locality_weights_map_; Event::TimerPtr assignment_timeout_; InitializePhase initialize_phase_; + using LedsConfigSet = absl::flat_hash_set; + using LedsConfigMap = absl::flat_hash_map; + // Maps between a LEDS configuration (ConfigSource + collection name) to the locality endpoints + // data. + LedsConfigMap leds_localities_; + // TODO(adisuissa): Avoid saving the entire cluster load assignment, only the + // relevant parts of the config for each locality. Note that this field must + // be set when LEDS is used. + absl::optional cluster_load_assignment_; }; using EdsClusterImplSharedPtr = std::shared_ptr; diff --git a/source/common/upstream/health_checker_base_impl.cc b/source/common/upstream/health_checker_base_impl.cc index e93505b4d619..da5963e1945d 100644 --- a/source/common/upstream/health_checker_base_impl.cc +++ b/source/common/upstream/health_checker_base_impl.cc @@ -219,7 +219,7 @@ void HealthCheckerImplBase::setUnhealthyCrossThread(const HostSharedPtr& host, return; } - session->second->setUnhealthy(envoy::data::core::v3::PASSIVE); + session->second->setUnhealthy(envoy::data::core::v3::PASSIVE, /*retriable=*/false); }); } @@ -338,13 +338,14 @@ bool networkHealthCheckFailureType(envoy::data::core::v3::HealthCheckFailureType } // namespace HealthTransition HealthCheckerImplBase::ActiveHealthCheckSession::setUnhealthy( - envoy::data::core::v3::HealthCheckFailureType type) { + envoy::data::core::v3::HealthCheckFailureType type, bool retriable) { // If we are unhealthy, reset the # of healthy to zero. num_healthy_ = 0; HealthTransition changed_state = HealthTransition::Unchanged; if (!host_->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC)) { - if (!networkHealthCheckFailureType(type) || ++num_unhealthy_ == parent_.unhealthy_threshold_) { + if ((!networkHealthCheckFailureType(type) && !retriable) || + ++num_unhealthy_ == parent_.unhealthy_threshold_) { host_->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); parent_.decHealthy(); changed_state = HealthTransition::Changed; @@ -385,8 +386,8 @@ HealthTransition HealthCheckerImplBase::ActiveHealthCheckSession::setUnhealthy( } void HealthCheckerImplBase::ActiveHealthCheckSession::handleFailure( - envoy::data::core::v3::HealthCheckFailureType type) { - HealthTransition changed_state = setUnhealthy(type); + envoy::data::core::v3::HealthCheckFailureType type, bool retriable) { + HealthTransition changed_state = setUnhealthy(type, retriable); // It's possible that the previous call caused this session to be deferred deleted. 
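[Editorial sketch, not part of the diff] A compact illustration of how the new retriable flag above interacts with the unhealthy threshold: a retriable failure is counted toward the threshold like a network failure instead of failing the host immediately. Session is a simplified stand-in for ActiveHealthCheckSession, with a hard-coded threshold of 3.

#include <cstdint>
#include <iostream>

struct Session {
  uint32_t num_unhealthy = 0;
  uint32_t unhealthy_threshold = 3;

  // Returns true when this failure transitions the host to unhealthy.
  bool onFailure(bool network_failure, bool retriable) {
    // Non-network, non-retriable failures fail the host immediately; otherwise the
    // counter must reach the threshold (note the short-circuit before the increment).
    if ((!network_failure && !retriable) || ++num_unhealthy == unhealthy_threshold) {
      return true;
    }
    return false;
  }
};

int main() {
  Session retriable_only;
  std::cout << retriable_only.onFailure(false, true) << "\n"; // 0: 1/3
  std::cout << retriable_only.onFailure(false, true) << "\n"; // 0: 2/3
  std::cout << retriable_only.onFailure(false, true) << "\n"; // 1: threshold reached
  Session hard_failure;
  std::cout << hard_failure.onFailure(false, false) << "\n";  // 1: fails immediately
}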
if (timeout_timer_ != nullptr) { timeout_timer_->disableTimer(); @@ -401,7 +402,7 @@ HealthTransition HealthCheckerImplBase::ActiveHealthCheckSession::clearPendingFlag(HealthTransition changed_state) { if (host_->healthFlagGet(Host::HealthFlag::PENDING_ACTIVE_HC)) { host_->healthFlagClear(Host::HealthFlag::PENDING_ACTIVE_HC); - // Even though the health value of the host might have not changed, we set this to Changed to + // Even though the health value of the host might have not changed, we set this to Changed so // that the cluster can update its list of excluded hosts. return HealthTransition::Changed; } diff --git a/source/common/upstream/health_checker_base_impl.h b/source/common/upstream/health_checker_base_impl.h index 5081aac7351d..780e8af0777d 100644 --- a/source/common/upstream/health_checker_base_impl.h +++ b/source/common/upstream/health_checker_base_impl.h @@ -76,7 +76,8 @@ class HealthCheckerImplBase : public HealthChecker, class ActiveHealthCheckSession : public Event::DeferredDeletable { public: ~ActiveHealthCheckSession() override; - HealthTransition setUnhealthy(envoy::data::core::v3::HealthCheckFailureType type); + HealthTransition setUnhealthy(envoy::data::core::v3::HealthCheckFailureType type, + bool retriable); void onDeferredDeleteBase(); void start() { onInitialInterval(); } @@ -85,7 +86,7 @@ class HealthCheckerImplBase : public HealthChecker, void handleSuccess(bool degraded = false); void handleDegraded(); - void handleFailure(envoy::data::core::v3::HealthCheckFailureType type); + void handleFailure(envoy::data::core::v3::HealthCheckFailureType type, bool retriable = false); HostSharedPtr host_; diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 09d8ebeca47e..6bb52b6b90ca 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -138,6 +138,7 @@ HttpHealthCheckerImpl::HttpHealthCheckerImpl(const Cluster& cluster, Router::HeaderParser::configure(config.http_health_check().request_headers_to_add(), config.http_health_check().request_headers_to_remove())), http_status_checker_(config.http_health_check().expected_statuses(), + config.http_health_check().retriable_statuses(), static_cast(Http::Code::OK)), codec_client_type_(codecClientType(config.http_health_check().codec_client_type())), random_generator_(random) { @@ -148,37 +149,63 @@ HttpHealthCheckerImpl::HttpHealthCheckerImpl(const Cluster& cluster, HttpHealthCheckerImpl::HttpStatusChecker::HttpStatusChecker( const Protobuf::RepeatedPtrField& expected_statuses, + const Protobuf::RepeatedPtrField& retriable_statuses, uint64_t default_expected_status) { for (const auto& status_range : expected_statuses) { - const auto start = status_range.start(); - const auto end = status_range.end(); + const auto start = static_cast(status_range.start()); + const auto end = static_cast(status_range.end()); - if (start >= end) { - throw EnvoyException(fmt::format( - "Invalid http status range: expecting start < end, but found start={} and end={}", start, - end)); - } + validateRange(start, end, "expected"); - if (start < 100) { - throw EnvoyException(fmt::format( - "Invalid http status range: expecting start >= 100, but found start={}", start)); - } + expected_ranges_.emplace_back(std::make_pair(start, end)); + } - if (end > 600) { - throw EnvoyException( - fmt::format("Invalid http status range: expecting end <= 600, but found end={}", end)); - } + if (expected_ranges_.empty()) { + expected_ranges_.emplace_back( + 
std::make_pair(default_expected_status, default_expected_status + 1)); + } + + for (const auto& status_range : retriable_statuses) { + const auto start = static_cast(status_range.start()); + const auto end = static_cast(status_range.end()); + + validateRange(start, end, "retriable"); + + retriable_ranges_.emplace_back(std::make_pair(start, end)); + } +} + +void HttpHealthCheckerImpl::HttpStatusChecker::validateRange(uint64_t start, uint64_t end, + absl::string_view range_type) { + if (start >= end) { + throw EnvoyException(fmt::format("Invalid http {} status range: expecting start < " + "end, but found start={} and end={}", + range_type, start, end)); + } - ranges_.emplace_back(std::make_pair(static_cast(start), static_cast(end))); + if (start < 100) { + throw EnvoyException( + fmt::format("Invalid http {} status range: expecting start >= 100, but found start={}", + range_type, start)); } - if (ranges_.empty()) { - ranges_.emplace_back(std::make_pair(default_expected_status, default_expected_status + 1)); + if (end > 600) { + throw EnvoyException(fmt::format( + "Invalid http {} status range: expecting end <= 600, but found end={}", range_type, end)); } } -bool HttpHealthCheckerImpl::HttpStatusChecker::inRange(uint64_t http_status) const { - for (const auto& range : ranges_) { +bool HttpHealthCheckerImpl::HttpStatusChecker::inRetriableRanges(uint64_t http_status) const { + return inRanges(http_status, retriable_ranges_); +} + +bool HttpHealthCheckerImpl::HttpStatusChecker::inExpectedRanges(uint64_t http_status) const { + return inRanges(http_status, expected_ranges_); +} + +bool HttpHealthCheckerImpl::HttpStatusChecker::inRanges( + uint64_t http_status, const std::vector>& ranges) { + for (const auto& range : ranges) { if (http_status >= range.first && http_status < range.second) { return true; } @@ -331,7 +358,7 @@ HttpHealthCheckerImpl::HttpActiveHealthCheckSession::healthCheckResult() { ENVOY_CONN_LOG(debug, "hc response={} health_flags={}", *client_, response_code, HostUtility::healthFlagsToString(*host_)); - if (!parent_.http_status_checker_.inRange(response_code)) { + if (!parent_.http_status_checker_.inExpectedRanges(response_code)) { // If the HTTP response code would indicate failure AND the immediate health check // failure header is set, exclude the host from LB. 
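[Editorial sketch, not part of the diff] A standalone illustration of the half-open [start, end) status ranges used above: validation mirrors the 100 <= start < end <= 600 checks, and membership is a linear scan. StatusRanges, validateRange and inRanges are simplified stand-ins for the HttpStatusChecker members.

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <utility>
#include <vector>

using StatusRanges = std::vector<std::pair<uint64_t, uint64_t>>;

void validateRange(uint64_t start, uint64_t end) {
  if (start >= end || start < 100 || end > 600) {
    throw std::runtime_error("invalid http status range");
  }
}

bool inRanges(uint64_t status, const StatusRanges& ranges) {
  for (const auto& [start, end] : ranges) {
    if (status >= start && status < end) {
      return true; // half-open: start inclusive, end exclusive
    }
  }
  return false;
}

int main() {
  validateRange(200, 300);
  const StatusRanges expected{{200, 300}};  // 2xx counts as healthy
  const StatusRanges retriable{{503, 504}}; // only 503 is treated as retriable
  std::cout << inRanges(200, expected) << inRanges(503, retriable) << inRanges(500, retriable)
            << "\n"; // 110
}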
// TODO(mattklein123): We could consider doing this check for any HTTP response code, but this @@ -341,7 +368,12 @@ HttpHealthCheckerImpl::HttpActiveHealthCheckSession::healthCheckResult() { if (response_headers_->EnvoyImmediateHealthCheckFail() != nullptr) { host_->healthFlagSet(Host::HealthFlag::EXCLUDED_VIA_IMMEDIATE_HC_FAIL); } - return HealthCheckResult::Failed; + + if (parent_.http_status_checker_.inRetriableRanges(response_code)) { + return HealthCheckResult::Retriable; + } else { + return HealthCheckResult::Failed; + } } const auto degraded = response_headers_->EnvoyDegraded() != nullptr; @@ -374,7 +406,10 @@ void HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onResponseComplete() { handleSuccess(true); break; case HealthCheckResult::Failed: - handleFailure(envoy::data::core::v3::ACTIVE); + handleFailure(envoy::data::core::v3::ACTIVE, /*retriable=*/false); + break; + case HealthCheckResult::Retriable: + handleFailure(envoy::data::core::v3::ACTIVE, /*retriable=*/true); break; } diff --git a/source/common/upstream/health_checker_impl.h b/source/common/upstream/health_checker_impl.h index 35a564f6118b..cb8d62a3d4a6 100644 --- a/source/common/upstream/health_checker_impl.h +++ b/source/common/upstream/health_checker_impl.h @@ -62,12 +62,19 @@ class HttpHealthCheckerImpl : public HealthCheckerImplBase { public: HttpStatusChecker( const Protobuf::RepeatedPtrField& expected_statuses, + const Protobuf::RepeatedPtrField& retriable_statuses, uint64_t default_expected_status); - bool inRange(uint64_t http_status) const; + bool inRetriableRanges(uint64_t http_status) const; + bool inExpectedRanges(uint64_t http_status) const; private: - std::vector> ranges_; + static bool inRanges(uint64_t http_status, + const std::vector>& ranges); + static void validateRange(uint64_t start, uint64_t end, absl::string_view range_type); + + std::vector> expected_ranges_; + std::vector> retriable_ranges_; }; private: @@ -78,7 +85,7 @@ class HttpHealthCheckerImpl : public HealthCheckerImplBase { ~HttpActiveHealthCheckSession() override; void onResponseComplete(); - enum class HealthCheckResult { Succeeded, Degraded, Failed }; + enum class HealthCheckResult { Succeeded, Degraded, Failed, Retriable }; HealthCheckResult healthCheckResult(); bool shouldClose() const; diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index 52619e57f039..5cd361b1da58 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -11,6 +11,7 @@ #include "envoy/upstream/upstream.h" #include "source/common/common/assert.h" +#include "source/common/common/logger.h" #include "source/common/protobuf/utility.h" #include "absl/container/fixed_array.h" @@ -754,10 +755,21 @@ const HostVector& ZoneAwareLoadBalancerBase::hostSourceToHosts(HostsSource hosts EdfLoadBalancerBase::EdfLoadBalancerBase( const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, Runtime::Loader& runtime, Random::RandomGenerator& random, - const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) + const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, + const absl::optional slow_start_config, + TimeSource& time_source) : ZoneAwareLoadBalancerBase(priority_set, local_priority_set, stats, runtime, random, common_config), - seed_(random_.random()) { + seed_(random_.random()), + slow_start_window_(slow_start_config.has_value() + ? 
std::chrono::milliseconds(DurationUtil::durationToMilliseconds( + slow_start_config.value().slow_start_window())) + : std::chrono::milliseconds(0)), + aggression_runtime_( + slow_start_config.has_value() && slow_start_config.value().has_aggression() + ? absl::optional({slow_start_config.value().aggression(), runtime}) + : absl::nullopt), + time_source_(time_source), latest_host_added_time_(time_source_.monotonicTime()) { // We fully recompute the schedulers for a given host set here on membership change, which is // consistent with what other LB implementations do (e.g. thread aware). // The downside of a full recompute is that time complexity is O(n * log n), @@ -765,6 +777,12 @@ EdfLoadBalancerBase::EdfLoadBalancerBase( // https://github.com/envoyproxy/envoy/issues/2874). priority_update_cb_ = priority_set.addPriorityUpdateCb( [this](uint32_t priority, const HostVector&, const HostVector&) { refresh(priority); }); + member_update_cb_ = priority_set.addMemberUpdateCb( + [this](const HostVector& hosts_added, const HostVector&) -> void { + if (isSlowStartEnabled()) { + recalculateHostsInSlowStart(hosts_added); + } + }); } void EdfLoadBalancerBase::initialize() { @@ -773,20 +791,38 @@ void EdfLoadBalancerBase::initialize() { } } +void EdfLoadBalancerBase::recalculateHostsInSlowStart(const HostVector& hosts) { + auto current_time = time_source_.monotonicTime(); + // TODO(nezdolik): linear scan can be improved with using flat hash set for hosts in slow start. + for (const auto& host : hosts) { + auto host_create_duration = + std::chrono::duration_cast(current_time - host->creationTime()); + // Check if host existence time is within slow start window. + if (host->creationTime() > latest_host_added_time_ && + host_create_duration <= slow_start_window_ && + host->health() == Upstream::Host::Health::Healthy) { + latest_host_added_time_ = host->creationTime(); + } + } +} + void EdfLoadBalancerBase::refresh(uint32_t priority) { const auto add_hosts_source = [this](HostsSource source, const HostVector& hosts) { // Nuke existing scheduler if it exists. auto& scheduler = scheduler_[source] = Scheduler{}; refreshHostSource(source); + if (isSlowStartEnabled()) { + recalculateHostsInSlowStart(hosts); + } - // Check if the original host weights are equal and skip EDF creation if they are. When all - // original weights are equal we can rely on unweighted host pick to do optimal round robin and - // least-loaded host selection with lower memory and CPU overhead. - if (hostWeightsAreEqual(hosts)) { + // Check if the original host weights are equal and no hosts are in slow start mode, in that + // case EDF creation is skipped. When all original weights are equal and no hosts are in slow + // start mode we can rely on unweighted host pick to do optimal round robin and least-loaded + // host selection with lower memory and CPU overhead. + if (hostWeightsAreEqual(hosts) && noHostsAreInSlowStart()) { // Skip edf creation. return; } - scheduler.edf_ = std::make_unique>(); // Populate scheduler with host list. @@ -812,7 +848,6 @@ void EdfLoadBalancerBase::refresh(uint32_t priority) { } } }; - // Populate EdfSchedulers for each valid HostsSource value for the host set at this priority. 
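[Editorial sketch, not part of the diff] A small illustration of the EDF-skip decision above: the scheduler is only skipped when all configured weights are equal and nothing is in slow start, since slow start makes effective weights unequal even for identically weighted hosts. Names here are illustrative.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

bool needsEdfScheduler(const std::vector<uint32_t>& weights, bool any_host_in_slow_start) {
  if (any_host_in_slow_start) {
    return true; // effective weights differ while new hosts ramp up
  }
  for (std::size_t i = 1; i < weights.size(); ++i) {
    if (weights[i] != weights[0]) {
      return true; // genuinely weighted, EDF is required
    }
  }
  return false; // equal weights: the cheaper unweighted pick is enough
}

int main() {
  std::cout << needsEdfScheduler({1, 1, 1}, false)  // 0
            << needsEdfScheduler({1, 2, 1}, false)  // 1
            << needsEdfScheduler({1, 1, 1}, true)   // 1
            << "\n";
}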
const auto& host_set = priority_set_.hostSetsPerPriority()[priority]; add_hosts_source(HostsSource(priority, HostsSource::SourceType::AllHosts), host_set->hosts()); @@ -834,6 +869,22 @@ void EdfLoadBalancerBase::refresh(uint32_t priority) { } } +bool EdfLoadBalancerBase::isSlowStartEnabled() { + return slow_start_window_ > std::chrono::milliseconds(0); +} + +bool EdfLoadBalancerBase::noHostsAreInSlowStart() { + if (!isSlowStartEnabled()) { + return true; + } + auto current_time = time_source_.monotonicTime(); + if (std::chrono::duration_cast( + current_time - latest_host_added_time_) <= slow_start_window_) { + return false; + } + return true; +} + HostConstSharedPtr EdfLoadBalancerBase::peekAnotherHost(LoadBalancerContext* context) { if (tooManyPreconnects(stashed_random_.size(), total_healthy_hosts_)) { return nullptr; @@ -892,6 +943,36 @@ HostConstSharedPtr EdfLoadBalancerBase::chooseHostOnce(LoadBalancerContext* cont } } +double EdfLoadBalancerBase::applyAggressionFactor(double time_factor) { + if (aggression_ == 1.0 || time_factor == 1.0) { + return time_factor; + } else { + return std::pow(time_factor, 1.0 / aggression_); + } +} + +double EdfLoadBalancerBase::applySlowStartFactor(double host_weight, const Host& host) { + auto host_create_duration = std::chrono::duration_cast( + time_source_.monotonicTime() - host.creationTime()); + if (host_create_duration < slow_start_window_ && + host.health() == Upstream::Host::Health::Healthy) { + aggression_ = aggression_runtime_ != absl::nullopt ? aggression_runtime_.value().value() : 1.0; + if (aggression_ < 0.0) { + ENVOY_LOG_EVERY_POW_2(error, "Invalid runtime value provided for aggression parameter, " + "agression cannot be less than 0.0"); + } + aggression_ = std::max(0.0, aggression_); + + ASSERT(aggression_ > 0.0); + auto time_factor = static_cast(std::max(std::chrono::milliseconds(1).count(), + host_create_duration.count())) / + slow_start_window_.count(); + return host_weight * applyAggressionFactor(time_factor); + } else { + return host_weight; + } +} + HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPeek(const HostVector&, const HostsSource&) { // LeastRequestLoadBalancer can not do deterministic preconnecting, because @@ -903,11 +984,13 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPeek(const HostVector HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector& hosts_to_use, const HostsSource&) { HostSharedPtr candidate_host = nullptr; + for (uint32_t choice_idx = 0; choice_idx < choice_count_; ++choice_idx) { const int rand_idx = random_.random() % hosts_to_use.size(); HostSharedPtr sampled_host = hosts_to_use[rand_idx]; if (candidate_host == nullptr) { + // Make a first choice to start the comparisons. candidate_host = sampled_host; continue; diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index f38e3f576516..33bfbf758029 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -40,6 +40,18 @@ class LoadBalancerBase : public LoadBalancer { choosePriority(uint64_t hash, const HealthyLoad& healthy_per_priority_load, const DegradedLoad& degraded_per_priority_load); + // Pool selection not implemented. + absl::optional + selectExistingConnection(Upstream::LoadBalancerContext* /*context*/, + const Upstream::Host& /*host*/, + std::vector& /*hash_key*/) override { + return absl::nullopt; + } + // Lifetime tracking not implemented. 
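[Editorial sketch, not part of the diff] A self-contained illustration of the scaling implemented in applySlowStartFactor above: the time factor ramps from near zero to one across the slow start window, and aggression reshapes the curve via time_factor^(1/aggression). The window, age and aggression values in main are made-up examples.

#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <iostream>

double slowStartWeight(double weight, std::chrono::milliseconds host_age,
                       std::chrono::milliseconds window, double aggression) {
  if (host_age >= window) {
    return weight; // host has left the slow start window, full weight
  }
  const double time_factor =
      static_cast<double>(std::max<int64_t>(1, host_age.count())) / window.count();
  return weight * std::pow(time_factor, 1.0 / std::max(aggression, 1e-9));
}

int main() {
  using std::chrono::milliseconds;
  // 15s into a 60s window: ~25% of the weight at aggression 1.0, ~50% at aggression 2.0.
  std::cout << slowStartWeight(100.0, milliseconds(15000), milliseconds(60000), 1.0) << "\n"; // 25
  std::cout << slowStartWeight(100.0, milliseconds(15000), milliseconds(60000), 2.0) << "\n"; // 50
}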
+ OptRef lifetimeCallbacks() override { + return {}; + } + protected: /** * For the given host_set @return if we should be in a panic mode or not. For example, if the @@ -387,12 +399,15 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase { * This base class also supports unweighted selection which derived classes can use to customize * behavior. Derived classes can also override how host weight is determined when in weighted mode. */ -class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { +class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase, + Logger::Loggable { public: - EdfLoadBalancerBase(const PrioritySet& priority_set, const PrioritySet* local_priority_set, - ClusterStats& stats, Runtime::Loader& runtime, - Random::RandomGenerator& random, - const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config); + EdfLoadBalancerBase( + const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, + Runtime::Loader& runtime, Random::RandomGenerator& random, + const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, + const absl::optional slow_start_cofig, + TimeSource& time_source); // Upstream::ZoneAwareLoadBalancerBase HostConstSharedPtr peekAnotherHost(LoadBalancerContext* context) override; @@ -410,6 +425,11 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { virtual void refresh(uint32_t priority); + bool isSlowStartEnabled(); + bool noHostsAreInSlowStart(); + + virtual void recalculateHostsInSlowStart(const HostVector& hosts_added); + // Seed to allow us to desynchronize load balancers across a fleet. If we don't // do this, multiple Envoys that receive an update at the same time (or even // multiple load balancers on the same host) will send requests to @@ -417,7 +437,11 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { // overload. const uint64_t seed_; + double applyAggressionFactor(double time_factor); + double applySlowStartFactor(double host_weight, const Host& host); + private: + friend class EdfLoadBalancerBasePeer; virtual void refreshHostSource(const HostsSource& source) PURE; virtual double hostWeight(const Host& host) PURE; virtual HostConstSharedPtr unweightedHostPeek(const HostVector& hosts_to_use, @@ -428,6 +452,15 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { // Scheduler for each valid HostsSource. 
absl::node_hash_map scheduler_; Common::CallbackHandlePtr priority_update_cb_; + Common::CallbackHandlePtr member_update_cb_; + +protected: + // Slow start related config + const std::chrono::milliseconds slow_start_window_; + double aggression_{1.0}; + const absl::optional aggression_runtime_; + TimeSource& time_source_; + MonotonicTime latest_host_added_time_; }; /** @@ -436,12 +469,20 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { */ class RoundRobinLoadBalancer : public EdfLoadBalancerBase { public: - RoundRobinLoadBalancer(const PrioritySet& priority_set, const PrioritySet* local_priority_set, - ClusterStats& stats, Runtime::Loader& runtime, - Random::RandomGenerator& random, - const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) - : EdfLoadBalancerBase(priority_set, local_priority_set, stats, runtime, random, - common_config) { + RoundRobinLoadBalancer( + const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, + Runtime::Loader& runtime, Random::RandomGenerator& random, + const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, + const absl::optional + round_robin_config, + TimeSource& time_source) + : EdfLoadBalancerBase( + priority_set, local_priority_set, stats, runtime, random, common_config, + (round_robin_config.has_value() && round_robin_config.value().has_slow_start_config()) + ? absl::optional( + round_robin_config.value().slow_start_config()) + : absl::nullopt, + time_source) { initialize(); } @@ -455,7 +496,13 @@ class RoundRobinLoadBalancer : public EdfLoadBalancerBase { // index. peekahead_index_ = 0; } - double hostWeight(const Host& host) override { return host.weight(); } + double hostWeight(const Host& host) override { + if (!noHostsAreInSlowStart()) { + return applySlowStartFactor(host.weight(), host); + } + return host.weight(); + } + HostConstSharedPtr unweightedHostPeek(const HostVector& hosts_to_use, const HostsSource& source) override { auto i = rr_indexes_.find(source); @@ -498,37 +545,45 @@ class RoundRobinLoadBalancer : public EdfLoadBalancerBase { * The benefit of the Maglev table is at the expense of resolution, memory usage is capped. * Additionally, the Maglev table can be shared amongst all threads. */ -class LeastRequestLoadBalancer : public EdfLoadBalancerBase, - Logger::Loggable { +class LeastRequestLoadBalancer : public EdfLoadBalancerBase { public: LeastRequestLoadBalancer( const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, const absl::optional - least_request_config) - : EdfLoadBalancerBase(priority_set, local_priority_set, stats, runtime, random, - common_config), + least_request_config, + TimeSource& time_source) + : EdfLoadBalancerBase( + priority_set, local_priority_set, stats, runtime, random, common_config, + (least_request_config.has_value() && + least_request_config.value().has_slow_start_config()) + ? absl::optional( + least_request_config.value().slow_start_config()) + : absl::nullopt, + time_source), choice_count_( least_request_config.has_value() ? PROTOBUF_GET_WRAPPED_OR_DEFAULT(least_request_config.value(), choice_count, 2) : 2), active_request_bias_runtime_( least_request_config.has_value() && least_request_config->has_active_request_bias() - ? std::make_unique(least_request_config->active_request_bias(), - runtime) - : nullptr) { + ? 
absl::optional( + {least_request_config->active_request_bias(), runtime}) + : absl::nullopt) { initialize(); } protected: void refresh(uint32_t priority) override { - active_request_bias_ = - active_request_bias_runtime_ != nullptr ? active_request_bias_runtime_->value() : 1.0; + active_request_bias_ = active_request_bias_runtime_ != absl::nullopt + ? active_request_bias_runtime_.value().value() + : 1.0; if (active_request_bias_ < 0.0) { - ENVOY_LOG(warn, "upstream: invalid active request bias supplied (runtime key {}), using 1.0", - active_request_bias_runtime_->runtimeKey()); + ENVOY_LOG_MISC(warn, + "upstream: invalid active request bias supplied (runtime key {}), using 1.0", + active_request_bias_runtime_->runtimeKey()); active_request_bias_ = 1.0; } @@ -555,16 +610,21 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase, // // It might be possible to do better by picking two hosts off of the schedule, and selecting the // one with fewer active requests at the time of selection. - if (active_request_bias_ == 0.0) { - return host.weight(); - } + + double host_weight = static_cast(host.weight()); if (active_request_bias_ == 1.0) { - return static_cast(host.weight()) / (host.stats().rq_active_.value() + 1); + host_weight = static_cast(host.weight()) / (host.stats().rq_active_.value() + 1); + } else if (active_request_bias_ != 0.0) { + host_weight = static_cast(host.weight()) / + std::pow(host.stats().rq_active_.value() + 1, active_request_bias_); } - return static_cast(host.weight()) / - std::pow(host.stats().rq_active_.value() + 1, active_request_bias_); + if (!noHostsAreInSlowStart()) { + return applySlowStartFactor(host_weight, host); + } else { + return host_weight; + } } HostConstSharedPtr unweightedHostPeek(const HostVector& hosts_to_use, const HostsSource& source) override; @@ -578,13 +638,14 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase, // whenever a `HostSet` is updated. double active_request_bias_{}; - const std::unique_ptr active_request_bias_runtime_; + const absl::optional active_request_bias_runtime_; }; /** * Random load balancer that picks a random host out of all hosts. */ -class RandomLoadBalancer : public ZoneAwareLoadBalancerBase { +class RandomLoadBalancer : public ZoneAwareLoadBalancerBase, + Logger::Loggable { public: RandomLoadBalancer(const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, Runtime::Loader& runtime, Random::RandomGenerator& random, diff --git a/source/common/upstream/logical_dns_cluster.cc b/source/common/upstream/logical_dns_cluster.cc index c51bc3c94386..c572f11d307a 100644 --- a/source/common/upstream/logical_dns_cluster.cc +++ b/source/common/upstream/logical_dns_cluster.cc @@ -12,6 +12,7 @@ #include "envoy/config/endpoint/v3/endpoint.pb.h" #include "envoy/stats/scope.h" +#include "source/common/common/dns_utils.h" #include "source/common/common/fmt.h" #include "source/common/config/utility.h" #include "source/common/network/address_impl.h" @@ -122,15 +123,16 @@ void LogicalDnsCluster::startResolve() { if (status == Network::DnsResolver::ResolutionStatus::Success && !response.empty()) { info_->stats().update_success_.inc(); // TODO(mattklein123): Move port handling into the DNS interface. 
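[Editorial sketch, not part of the diff] For the least-request hostWeight change in the load_balancer_impl.h hunk above, an illustration of the effective weight weight / (active_requests + 1)^bias, keeping the bias == 0 and bias == 1 fast paths separate as the real code does; the values in main are arbitrary examples.

#include <cmath>
#include <cstdint>
#include <iostream>

double leastRequestWeight(double weight, uint64_t active_requests, double bias) {
  if (bias == 0.0) {
    return weight;                         // behaves like plain weighted round robin
  }
  if (bias == 1.0) {
    return weight / (active_requests + 1); // common case, avoids pow()
  }
  return weight / std::pow(active_requests + 1, bias);
}

int main() {
  std::cout << leastRequestWeight(1.0, 0, 1.0) << "\n"; // 1: idle host keeps full weight
  std::cout << leastRequestWeight(1.0, 3, 1.0) << "\n"; // 0.25: busy host is deprioritized
  std::cout << leastRequestWeight(1.0, 3, 2.0) << "\n"; // 0.0625: larger bias penalizes harder
}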
+ uint32_t port = Network::Utility::portFromTcpUrl(dns_url_); ASSERT(response.front().address_ != nullptr); Network::Address::InstanceConstSharedPtr new_address = - Network::Utility::getAddressWithPort(*(response.front().address_), - Network::Utility::portFromTcpUrl(dns_url_)); + Network::Utility::getAddressWithPort(*(response.front().address_), port); + auto address_list = DnsUtils::generateAddressList(response, port); if (!logical_host_) { - logical_host_ = - std::make_shared(info_, hostname_, new_address, localityLbEndpoint(), - lbEndpoint(), nullptr, time_source_); + logical_host_ = std::make_shared(info_, hostname_, new_address, + address_list, localityLbEndpoint(), + lbEndpoint(), nullptr, time_source_); const auto& locality_lb_endpoint = localityLbEndpoint(); PriorityStateManager priority_state_manager(*this, local_info_, nullptr); @@ -143,12 +145,15 @@ void LogicalDnsCluster::startResolve() { absl::nullopt, absl::nullopt, absl::nullopt); } - if (!current_resolved_address_ || !(*new_address == *current_resolved_address_)) { + if (!current_resolved_address_ || + (*new_address != *current_resolved_address_ || + DnsUtils::listChanged(address_list, current_resolved_address_list_))) { current_resolved_address_ = new_address; + current_resolved_address_list_ = address_list; // Make sure that we have an updated address for admin display, health // checking, and creating real host connections. - logical_host_->setNewAddress(new_address, lbEndpoint()); + logical_host_->setNewAddresses(new_address, address_list, lbEndpoint()); } // reset failure backoff strategy because there was a success. diff --git a/source/common/upstream/logical_dns_cluster.h b/source/common/upstream/logical_dns_cluster.h index b2d13cbc9b51..233244d03bbd 100644 --- a/source/common/upstream/logical_dns_cluster.h +++ b/source/common/upstream/logical_dns_cluster.h @@ -70,6 +70,7 @@ class LogicalDnsCluster : public ClusterImplBase { std::string dns_url_; std::string hostname_; Network::Address::InstanceConstSharedPtr current_resolved_address_; + std::vector current_resolved_address_list_; LogicalHostSharedPtr logical_host_; Network::ActiveDnsQuery* active_dns_query_{}; const LocalInfo::LocalInfo& local_info_; diff --git a/source/common/upstream/logical_host.h b/source/common/upstream/logical_host.h index bb9c820ec367..e2ca34b13701 100644 --- a/source/common/upstream/logical_host.h +++ b/source/common/upstream/logical_host.h @@ -18,6 +18,7 @@ class LogicalHost : public HostImpl { LogicalHost( const ClusterInfoConstSharedPtr& cluster, const std::string& hostname, const Network::Address::InstanceConstSharedPtr& address, + const std::vector& address_list, const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint, const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint, const Network::TransportSocketOptionsConstSharedPtr& override_transport_socket_options, @@ -28,20 +29,27 @@ class LogicalHost : public HostImpl { lb_endpoint.load_balancing_weight().value(), locality_lb_endpoint.locality(), lb_endpoint.endpoint().health_check_config(), locality_lb_endpoint.priority(), lb_endpoint.health_status(), time_source), - override_transport_socket_options_(override_transport_socket_options) {} + override_transport_socket_options_(override_transport_socket_options) { + setAddressList(address_list); + } // Set the new address. Updates are typically rare so a R/W lock is used for address updates. 
// Note that the health check address update requires no lock to be held since it is only // used on the main thread, but we do so anyway since it shouldn't be perf critical and will // future proof the code. - void setNewAddress(const Network::Address::InstanceConstSharedPtr& address, - const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint) { + void setNewAddresses(const Network::Address::InstanceConstSharedPtr& address, + const std::vector& address_list, + const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint) { const auto& port_value = lb_endpoint.endpoint().health_check_config().port_value(); auto health_check_address = port_value == 0 ? address : Network::Utility::getAddressWithPort(*address, port_value); absl::WriterMutexLock lock(&address_lock_); setAddress(address); + setAddressList(address_list); + // TODO: the health checker only gets the first address in the list and + // will not walk the full happy eyeballs list. We should eventually fix + // this. setHealthCheckAddress(health_check_address); } diff --git a/source/common/upstream/original_dst_cluster.h b/source/common/upstream/original_dst_cluster.h index 2ee8eceac501..589991eb612f 100644 --- a/source/common/upstream/original_dst_cluster.h +++ b/source/common/upstream/original_dst_cluster.h @@ -56,6 +56,17 @@ class OriginalDstCluster : public ClusterImplBase { HostConstSharedPtr chooseHost(LoadBalancerContext* context) override; // Preconnecting is not implemented for OriginalDstCluster HostConstSharedPtr peekAnotherHost(LoadBalancerContext*) override { return nullptr; } + // Pool selection not implemented for OriginalDstCluster + absl::optional + selectExistingConnection(Upstream::LoadBalancerContext* /*context*/, + const Upstream::Host& /*host*/, + std::vector& /*hash_key*/) override { + return absl::nullopt; + } + // Lifetime tracking not implemented for OriginalDstCluster + OptRef lifetimeCallbacks() override { + return {}; + } private: Network::Address::InstanceConstSharedPtr requestOverrideHost(LoadBalancerContext* context); diff --git a/source/common/upstream/static_cluster.cc b/source/common/upstream/static_cluster.cc index d8ce7b1a6ea0..b1656fe2ee69 100644 --- a/source/common/upstream/static_cluster.cc +++ b/source/common/upstream/static_cluster.cc @@ -25,6 +25,12 @@ StaticClusterImpl::StaticClusterImpl( for (const auto& locality_lb_endpoint : cluster_load_assignment.endpoints()) { validateEndpointsForZoneAwareRouting(locality_lb_endpoint); priority_state_manager_->initializePriorityFor(locality_lb_endpoint); + // TODO(adisuissa): Implement LEDS support for STATIC clusters. + if (locality_lb_endpoint.has_leds_cluster_locality_config()) { + throw EnvoyException( + fmt::format("LEDS is only supported when EDS is used. 
Static cluster {} cannot use LEDS.", + cluster.name())); + } for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { priority_state_manager_->registerHostForPriority( lb_endpoint.endpoint().hostname(), resolveProtoAddress(lb_endpoint.endpoint().address()), diff --git a/source/common/upstream/subset_lb.cc b/source/common/upstream/subset_lb.cc index 4c5a420a9450..b5bf551cc667 100644 --- a/source/common/upstream/subset_lb.cc +++ b/source/common/upstream/subset_lb.cc @@ -26,19 +26,23 @@ SubsetLoadBalancer::SubsetLoadBalancer( const absl::optional& lb_ring_hash_config, const absl::optional& lb_maglev_config, + const absl::optional& + round_robin_config, const absl::optional& least_request_config, - const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) + const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, + TimeSource& time_source) : lb_type_(lb_type), lb_ring_hash_config_(lb_ring_hash_config), - lb_maglev_config_(lb_maglev_config), least_request_config_(least_request_config), - common_config_(common_config), stats_(stats), scope_(scope), runtime_(runtime), - random_(random), fallback_policy_(subsets.fallbackPolicy()), + lb_maglev_config_(lb_maglev_config), round_robin_config_(round_robin_config), + least_request_config_(least_request_config), common_config_(common_config), stats_(stats), + scope_(scope), runtime_(runtime), random_(random), fallback_policy_(subsets.fallbackPolicy()), default_subset_metadata_(subsets.defaultSubset().fields().begin(), subsets.defaultSubset().fields().end()), subset_selectors_(subsets.subsetSelectors()), original_priority_set_(priority_set), original_local_priority_set_(local_priority_set), locality_weight_aware_(subsets.localityWeightAware()), - scale_locality_weight_(subsets.scaleLocalityWeight()), list_as_any_(subsets.listAsAny()) { + scale_locality_weight_(subsets.scaleLocalityWeight()), list_as_any_(subsets.listAsAny()), + time_source_(time_source) { ASSERT(subsets.isEnabled()); if (fallback_policy_ != envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK) { @@ -751,7 +755,8 @@ SubsetLoadBalancer::PrioritySubsetImpl::PrioritySubsetImpl(const SubsetLoadBalan case LoadBalancerType::LeastRequest: lb_ = std::make_unique( *this, subset_lb.original_local_priority_set_, subset_lb.stats_, subset_lb.runtime_, - subset_lb.random_, subset_lb.common_config_, subset_lb.least_request_config_); + subset_lb.random_, subset_lb.common_config_, subset_lb.least_request_config_, + subset_lb.time_source_); break; case LoadBalancerType::Random: @@ -761,9 +766,10 @@ SubsetLoadBalancer::PrioritySubsetImpl::PrioritySubsetImpl(const SubsetLoadBalan break; case LoadBalancerType::RoundRobin: - lb_ = std::make_unique(*this, subset_lb.original_local_priority_set_, - subset_lb.stats_, subset_lb.runtime_, - subset_lb.random_, subset_lb.common_config_); + lb_ = std::make_unique( + *this, subset_lb.original_local_priority_set_, subset_lb.stats_, subset_lb.runtime_, + subset_lb.random_, subset_lb.common_config_, subset_lb.round_robin_config_, + subset_lb.time_source_); break; case LoadBalancerType::RingHash: diff --git a/source/common/upstream/subset_lb.h b/source/common/upstream/subset_lb.h index 354341ff060c..977285287948 100644 --- a/source/common/upstream/subset_lb.h +++ b/source/common/upstream/subset_lb.h @@ -30,15 +30,29 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable& lb_ring_hash_config, const absl::optional& lb_maglev_config, + const absl::optional& + round_robin_config, const absl::optional& 
least_request_config, - const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config); + const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, + TimeSource& time_source); ~SubsetLoadBalancer() override; // Upstream::LoadBalancer HostConstSharedPtr chooseHost(LoadBalancerContext* context) override; // TODO(alyssawilk) implement for non-metadata match. HostConstSharedPtr peekAnotherHost(LoadBalancerContext*) override { return nullptr; } + // Pool selection not implemented. + absl::optional + selectExistingConnection(Upstream::LoadBalancerContext* /*context*/, + const Upstream::Host& /*host*/, + std::vector& /*hash_key*/) override { + return absl::nullopt; + } + // Lifetime tracking not implemented. + OptRef lifetimeCallbacks() override { + return {}; + } private: using HostPredicate = std::function; @@ -239,6 +253,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable lb_ring_hash_config_; const absl::optional lb_maglev_config_; + const absl::optional round_robin_config_; const absl::optional least_request_config_; const envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; @@ -280,6 +295,8 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable + selectExistingConnection(Upstream::LoadBalancerContext* /*context*/, + const Upstream::Host& /*host*/, + std::vector& /*hash_key*/) override { + return absl::nullopt; + } + // Lifetime tracking not implemented. + OptRef lifetimeCallbacks() override { + return {}; + } protected: ThreadAwareLoadBalancerBase( @@ -115,6 +126,15 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL HostConstSharedPtr chooseHost(LoadBalancerContext* context) override; // Preconnect not implemented for hash based load balancing HostConstSharedPtr peekAnotherHost(LoadBalancerContext*) override { return nullptr; } + absl::optional + selectExistingConnection(Upstream::LoadBalancerContext* /*context*/, + const Upstream::Host& /*host*/, + std::vector& /*hash_key*/) override { + return absl::nullopt; + } + OptRef lifetimeCallbacks() override { + return {}; + } ClusterStats& stats_; Random::RandomGenerator& random_; diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index eb9deded0403..7f38f7bdc665 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -676,8 +676,9 @@ class FactoryContextImpl : public Server::Configuration::CommonFactoryContext { // other contexts taken from TransportSocketFactoryContext. 
FactoryContextImpl(Stats::Scope& stats_scope, Envoy::Runtime::Loader& runtime, Server::Configuration::TransportSocketFactoryContext& c) - : admin_(c.admin()), stats_scope_(stats_scope), cluster_manager_(c.clusterManager()), - local_info_(c.localInfo()), dispatcher_(c.mainThreadDispatcher()), runtime_(runtime), + : admin_(c.admin()), server_scope_(c.stats()), stats_scope_(stats_scope), + cluster_manager_(c.clusterManager()), local_info_(c.localInfo()), + dispatcher_(c.mainThreadDispatcher()), runtime_(runtime), singleton_manager_(c.singletonManager()), tls_(c.threadLocal()), api_(c.api()), options_(c.options()), message_validation_visitor_(c.messageValidationVisitor()) {} @@ -687,6 +688,7 @@ class FactoryContextImpl : public Server::Configuration::CommonFactoryContext { const LocalInfo::LocalInfo& localInfo() const override { return local_info_; } Envoy::Runtime::Loader& runtime() override { return runtime_; } Stats::Scope& scope() override { return stats_scope_; } + Stats::Scope& serverScope() override { return server_scope_; } Singleton::Manager& singletonManager() override { return singleton_manager_; } ThreadLocal::SlotAllocator& threadLocal() override { return tls_; } Server::Admin& admin() override { return admin_; } @@ -719,6 +721,7 @@ class FactoryContextImpl : public Server::Configuration::CommonFactoryContext { private: Server::Admin& admin_; + Stats::Scope& server_scope_; Stats::Scope& stats_scope_; Upstream::ClusterManager& cluster_manager_; const LocalInfo::LocalInfo& local_info_; @@ -833,72 +836,42 @@ ClusterInfoImpl::ClusterInfoImpl( "HttpProtocolOptions can be specified"); } - switch (config.lb_policy()) { - case envoy::config::cluster::v3::Cluster::ROUND_ROBIN: - lb_type_ = LoadBalancerType::RoundRobin; - break; - case envoy::config::cluster::v3::Cluster::LEAST_REQUEST: - lb_type_ = LoadBalancerType::LeastRequest; - break; - case envoy::config::cluster::v3::Cluster::RANDOM: - lb_type_ = LoadBalancerType::Random; - break; - case envoy::config::cluster::v3::Cluster::RING_HASH: - lb_type_ = LoadBalancerType::RingHash; - break; - case envoy::config::cluster::v3::Cluster::MAGLEV: - lb_type_ = LoadBalancerType::Maglev; - break; - case envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED: - if (config.has_lb_subset_config()) { - throw EnvoyException( - fmt::format("cluster: LB policy {} cannot be combined with lb_subset_config", - envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); - } - - lb_type_ = LoadBalancerType::ClusterProvided; - break; - case envoy::config::cluster::v3::Cluster::LOAD_BALANCING_POLICY_CONFIG: { - if (config.has_lb_subset_config()) { - throw EnvoyException( - fmt::format("cluster: LB policy {} cannot be combined with lb_subset_config", - envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); - } - - if (config.has_common_lb_config()) { - throw EnvoyException( - fmt::format("cluster: LB policy {} cannot be combined with common_lb_config", - envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); - } - - if (!config.has_load_balancing_policy()) { - throw EnvoyException( - fmt::format("cluster: LB policy {} requires load_balancing_policy to be set", - envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); - } - - for (const auto& policy : config.load_balancing_policy().policies()) { - TypedLoadBalancerFactory* factory = - Config::Utility::getAndCheckFactory( - policy.typed_extension_config(), /*is_optional=*/true); - if (factory != nullptr) { - load_balancing_policy_ = policy; - 
load_balancer_factory_ = factory; - break; + // If load_balancing_policy is set we will use it directly, ignoring lb_policy. + if (config.has_load_balancing_policy()) { + configureLbPolicies(config); + } else { + switch (config.lb_policy()) { + case envoy::config::cluster::v3::Cluster::ROUND_ROBIN: + lb_type_ = LoadBalancerType::RoundRobin; + break; + case envoy::config::cluster::v3::Cluster::LEAST_REQUEST: + lb_type_ = LoadBalancerType::LeastRequest; + break; + case envoy::config::cluster::v3::Cluster::RANDOM: + lb_type_ = LoadBalancerType::Random; + break; + case envoy::config::cluster::v3::Cluster::RING_HASH: + lb_type_ = LoadBalancerType::RingHash; + break; + case envoy::config::cluster::v3::Cluster::MAGLEV: + lb_type_ = LoadBalancerType::Maglev; + break; + case envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED: + if (config.has_lb_subset_config()) { + throw EnvoyException( + fmt::format("cluster: LB policy {} cannot be combined with lb_subset_config", + envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); } - } - if (load_balancer_factory_ == nullptr) { - throw EnvoyException(fmt::format( - "Didn't find a registered load balancer factory implementation for cluster: '{}'", - name_)); + lb_type_ = LoadBalancerType::ClusterProvided; + break; + case envoy::config::cluster::v3::Cluster::LOAD_BALANCING_POLICY_CONFIG: { + configureLbPolicies(config); + break; + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; } - - lb_type_ = LoadBalancerType::LoadBalancingPolicyConfig; - break; - } - default: - NOT_REACHED_GCOVR_EXCL_LINE; } if (config.lb_subset_config().locality_weight_aware() && @@ -918,6 +891,16 @@ ClusterInfoImpl::ClusterInfoImpl( idle_timeout_ = std::chrono::hours(1); } + if (http_protocol_options_->common_http_protocol_options_.has_max_connection_duration()) { + max_connection_duration_ = std::chrono::milliseconds(DurationUtil::durationToMilliseconds( + http_protocol_options_->common_http_protocol_options_.max_connection_duration())); + if (max_connection_duration_.value().count() == 0) { + max_connection_duration_ = absl::nullopt; + } + } else { + max_connection_duration_ = absl::nullopt; + } + if (config.has_eds_cluster_config()) { if (config.type() != envoy::config::cluster::v3::Cluster::EDS) { throw EnvoyException("eds_cluster_config set in a non-EDS cluster"); @@ -947,6 +930,45 @@ ClusterInfoImpl::ClusterInfoImpl( } } +// Configures the load balancer based on config.load_balancing_policy +void ClusterInfoImpl::configureLbPolicies(const envoy::config::cluster::v3::Cluster& config) { + if (config.has_lb_subset_config()) { + throw EnvoyException( + fmt::format("cluster: LB policy {} cannot be combined with lb_subset_config", + envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); + } + + if (config.has_common_lb_config()) { + throw EnvoyException( + fmt::format("cluster: LB policy {} cannot be combined with common_lb_config", + envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); + } + + if (!config.has_load_balancing_policy()) { + throw EnvoyException( + fmt::format("cluster: LB policy {} requires load_balancing_policy to be set", + envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); + } + + for (const auto& policy : config.load_balancing_policy().policies()) { + TypedLoadBalancerFactory* factory = + Config::Utility::getAndCheckFactory( + policy.typed_extension_config(), /*is_optional=*/true); + if (factory != nullptr) { + load_balancing_policy_ = policy; + load_balancer_factory_ = factory; + break; 
+ } + } + + if (load_balancer_factory_ == nullptr) { + throw EnvoyException(fmt::format( + "Didn't find a registered load balancer factory implementation for cluster: '{}'", name_)); + } + + lb_type_ = LoadBalancerType::LoadBalancingPolicyConfig; +} + ProtocolOptionsConfigConstSharedPtr ClusterInfoImpl::extensionProtocolOptions(const std::string& name) const { auto i = extension_protocol_options_.find(name); @@ -1084,9 +1106,7 @@ namespace { bool excludeBasedOnHealthFlag(const Host& host) { return host.healthFlagGet(Host::HealthFlag::PENDING_ACTIVE_HC) || - (host.healthFlagGet(Host::HealthFlag::EXCLUDED_VIA_IMMEDIATE_HC_FAIL) && - Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.health_check.immediate_failure_exclude_from_cluster")); + host.healthFlagGet(Host::HealthFlag::EXCLUDED_VIA_IMMEDIATE_HC_FAIL); } } // namespace @@ -1606,14 +1626,11 @@ bool BaseDynamicClusterImpl::updateDynamicHostList( } if (existing_host->second->weight() != host->weight()) { existing_host->second->weight(host->weight()); - if (Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.upstream_host_weight_change_causes_rebuild")) { - // We do full host set rebuilds so that load balancers can do pre-computation of data - // structures based on host weight. This may become a performance problem in certain - // deployments so it is runtime feature guarded and may also need to be configurable - // and/or dynamic in the future. - hosts_changed = true; - } + // We do full host set rebuilds so that load balancers can do pre-computation of data + // structures based on host weight. This may become a performance problem in certain + // deployments so it is runtime feature guarded and may also need to be configurable + // and/or dynamic in the future. + hosts_changed = true; } hosts_changed |= @@ -1771,6 +1788,8 @@ getDnsLookupFamilyFromEnum(envoy::config::cluster::v3::Cluster::DnsLookupFamily return Network::DnsLookupFamily::Auto; case envoy::config::cluster::v3::Cluster::V4_PREFERRED: return Network::DnsLookupFamily::V4Preferred; + case envoy::config::cluster::v3::Cluster::ALL: + return Network::DnsLookupFamily::All; default: NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 91b9c2133a88..56486741bcb1 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -613,6 +613,9 @@ class ClusterInfoImpl : public ClusterInfo, const absl::optional idleTimeout() const override { return idle_timeout_; } + const absl::optional maxConnectionDuration() const override { + return max_connection_duration_; + } float perUpstreamPreconnectRatio() const override { return per_upstream_preconnect_ratio_; } float peekaheadRatio() const override { return peekahead_ratio_; } uint32_t perConnectionBufferLimitBytes() const override { @@ -631,6 +634,7 @@ class ClusterInfoImpl : public ClusterInfo, const envoy::config::core::v3::HttpProtocolOptions& commonHttpProtocolOptions() const override { return http_protocol_options_->common_http_protocol_options_; } + void configureLbPolicies(const envoy::config::cluster::v3::Cluster& config); ProtocolOptionsConfigConstSharedPtr extensionProtocolOptions(const std::string& name) const override; LoadBalancerType lbType() const override { return lb_type_; } @@ -639,6 +643,10 @@ class ClusterInfoImpl : public ClusterInfo, clusterType() const override { return cluster_type_; } + const absl::optional& + lbRoundRobinConfig() const override { + return lb_round_robin_config_; + } 
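// Editorial aside on the maxConnectionDuration() accessor added earlier in this header diff:
// the ClusterInfoImpl constructor hunk above normalizes a configured max_connection_duration
// of 0 to absl::nullopt, so callers can treat "unset" and "0" the same way. Hedged sketch
// (drain_timer is a hypothetical Event::Timer used only for illustration):
//
//   if (cluster_info.maxConnectionDuration().has_value()) {
//     drain_timer->enableTimer(*cluster_info.maxConnectionDuration());
//   }
//   // else: upstream connections are not bounded by duration.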
const absl::optional& lbLeastRequestConfig() const override { return lb_least_request_config_; @@ -765,6 +773,7 @@ class ClusterInfoImpl : public ClusterInfo, const uint32_t max_response_headers_count_; const std::chrono::milliseconds connect_timeout_; absl::optional idle_timeout_; + absl::optional max_connection_duration_; const float per_upstream_preconnect_ratio_; const float peekahead_ratio_; const uint32_t per_connection_buffer_limit_bytes_; @@ -779,6 +788,7 @@ class ClusterInfoImpl : public ClusterInfo, const std::string maintenance_mode_runtime_key_; const Network::Address::InstanceConstSharedPtr source_address_; LoadBalancerType lb_type_; + absl::optional lb_round_robin_config_; absl::optional lb_least_request_config_; absl::optional lb_ring_hash_config_; diff --git a/source/common/watchdog/BUILD b/source/common/watchdog/BUILD index 21fbfd3302ce..ce5210c0c245 100644 --- a/source/common/watchdog/BUILD +++ b/source/common/watchdog/BUILD @@ -19,7 +19,7 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/protobuf:utility_lib", "//source/common/thread:terminate_thread_lib", - "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3:pkg_cc_proto", ], ) @@ -33,6 +33,6 @@ envoy_cc_library( "//source/common/config:utility_lib", "//source/common/protobuf", "//source/common/protobuf:message_validator_lib", - "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3:pkg_cc_proto", ], ) diff --git a/source/common/watchdog/abort_action.cc b/source/common/watchdog/abort_action.cc index daa405fdda24..d1461af7bced 100644 --- a/source/common/watchdog/abort_action.cc +++ b/source/common/watchdog/abort_action.cc @@ -14,7 +14,7 @@ namespace { constexpr uint64_t DefaultWaitDurationMs = 5000; } // end namespace -AbortAction::AbortAction(envoy::watchdog::v3alpha::AbortActionConfig& config, +AbortAction::AbortAction(envoy::watchdog::v3::AbortActionConfig& config, Server::Configuration::GuardDogActionFactoryContext& /*context*/) : wait_duration_(absl::Milliseconds( PROTOBUF_GET_MS_OR_DEFAULT(config, wait_duration, DefaultWaitDurationMs))) {} diff --git a/source/common/watchdog/abort_action.h b/source/common/watchdog/abort_action.h index 5170c8bbea00..e6291657db1d 100644 --- a/source/common/watchdog/abort_action.h +++ b/source/common/watchdog/abort_action.h @@ -2,7 +2,7 @@ #include "envoy/server/guarddog_config.h" #include "envoy/thread/thread.h" -#include "envoy/watchdog/v3alpha/abort_action.pb.h" +#include "envoy/watchdog/v3/abort_action.pb.h" namespace Envoy { namespace Watchdog { @@ -12,7 +12,7 @@ namespace Watchdog { */ class AbortAction : public Server::Configuration::GuardDogAction { public: - AbortAction(envoy::watchdog::v3alpha::AbortActionConfig& config, + AbortAction(envoy::watchdog::v3::AbortActionConfig& config, Server::Configuration::GuardDogActionFactoryContext& context); void run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent event, diff --git a/source/common/watchdog/abort_action_config.h b/source/common/watchdog/abort_action_config.h index 54f2169bf15f..65541a4bfdd4 100644 --- a/source/common/watchdog/abort_action_config.h +++ b/source/common/watchdog/abort_action_config.h @@ -1,7 +1,7 @@ #pragma once #include "envoy/server/guarddog_config.h" -#include "envoy/watchdog/v3alpha/abort_action.pb.h" +#include "envoy/watchdog/v3/abort_action.pb.h" #include "source/common/protobuf/protobuf.h" @@ -22,7 +22,7 @@ class AbortActionFactory : public Server::Configuration::GuardDogActionFactory { 
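// Editorial note on the v3alpha -> v3 move in these watchdog files: the factory below still
// builds an AbortAction from envoy::watchdog::v3::AbortActionConfig, and abort_action.cc
// keeps its 5000 ms default when wait_duration is unset. Hedged sketch:
//
//   envoy::watchdog::v3::AbortActionConfig config;  // wait_duration left unset
//   // AbortAction(config, context) waits PROTOBUF_GET_MS_OR_DEFAULT(
//   //     config, wait_duration, DefaultWaitDurationMs) == 5000 ms before aborting.
//
// This illustrates the renamed proto package only; the behavior is unchanged by this patch.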
std::string name() const override { return "envoy.watchdog.abort_action"; } - using AbortActionConfig = envoy::watchdog::v3alpha::AbortActionConfig; + using AbortActionConfig = envoy::watchdog::v3::AbortActionConfig; }; } // namespace Watchdog diff --git a/source/exe/BUILD b/source/exe/BUILD index f3fb551eb83a..0eb4078c164c 100644 --- a/source/exe/BUILD +++ b/source/exe/BUILD @@ -22,6 +22,10 @@ alias( envoy_cc_binary( name = "envoy-static", + features = select({ + "//bazel:windows_opt_build": ["generate_pdb_file"], + "//conditions:default": [], + }), stamped = True, deps = [":envoy_main_entry_lib"], ) diff --git a/source/exe/main_common.cc b/source/exe/main_common.cc index b13edd3fc792..2615d76b9eb7 100644 --- a/source/exe/main_common.cc +++ b/source/exe/main_common.cc @@ -225,6 +225,7 @@ int MainCommon::main(int argc, char** argv, PostServerHook hook) { // handling, such as running in a chroot jail. absl::InitializeSymbolizer(argv[0]); #endif + Thread::MainThread main_thread; std::unique_ptr main_common; // Initialize the server's main context under a try/catch loop and simply return EXIT_FAILURE diff --git a/source/exe/main_common.h b/source/exe/main_common.h index d61ecdac4ee0..a393c841e8b3 100644 --- a/source/exe/main_common.h +++ b/source/exe/main_common.h @@ -138,6 +138,8 @@ class MainCommon { static int main(int argc, char** argv, PostServerHook hook = nullptr); private: + Thread::MainThread main_thread_; + #ifdef ENVOY_HANDLE_SIGNALS Envoy::SignalAction handle_sigs_; Envoy::TerminateHandler log_on_terminate_; diff --git a/source/extensions/access_loggers/common/grpc_access_logger.h b/source/extensions/access_loggers/common/grpc_access_logger.h index 3cd627fcf7ae..921186875fbe 100644 --- a/source/extensions/access_loggers/common/grpc_access_logger.h +++ b/source/extensions/access_loggers/common/grpc_access_logger.h @@ -68,9 +68,8 @@ template class GrpcAccessLogge * @param config supplies the configuration for the logger. * @return GrpcAccessLoggerSharedPtr ready for logging requests. */ - virtual typename GrpcAccessLogger::SharedPtr getOrCreateLogger(const ConfigProto& config, - GrpcAccessLoggerType logger_type, - Stats::Scope& scope) PURE; + virtual typename GrpcAccessLogger::SharedPtr + getOrCreateLogger(const ConfigProto& config, GrpcAccessLoggerType logger_type) PURE; }; template class GrpcAccessLogClient { @@ -252,15 +251,14 @@ class GrpcAccessLoggerCache : public Singleton::Instance, GrpcAccessLoggerCache(Grpc::AsyncClientManager& async_client_manager, Stats::Scope& scope, ThreadLocal::SlotAllocator& tls) - : async_client_manager_(async_client_manager), scope_(scope), tls_slot_(tls.allocateSlot()) { + : scope_(scope), async_client_manager_(async_client_manager), tls_slot_(tls.allocateSlot()) { tls_slot_->set([](Event::Dispatcher& dispatcher) { return std::make_shared(dispatcher); }); } - typename GrpcAccessLogger::SharedPtr getOrCreateLogger(const ConfigProto& config, - GrpcAccessLoggerType logger_type, - Stats::Scope& scope) override { + typename GrpcAccessLogger::SharedPtr + getOrCreateLogger(const ConfigProto& config, GrpcAccessLoggerType logger_type) override { // TODO(euroelessar): Consider cleaning up loggers. 
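// Editorial sketch of the lookup below (the names a/b are illustrative): loggers are keyed on
// (MessageUtil::hash(config), logger_type), so after this change two call sites passing
// byte-identical configs of the same type share one logger, and its stats are rooted in the
// cache's own scope_ rather than a per-caller scope:
//
//   auto a = cache.getOrCreateLogger(config, GrpcAccessLoggerType::HTTP);
//   auto b = cache.getOrCreateLogger(config, GrpcAccessLoggerType::HTTP);
//   // a == b: the same shared logger instance is returned from the per-thread cache.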
auto& cache = tls_slot_->getTyped(); const auto cache_key = std::make_pair(MessageUtil::hash(config), logger_type); @@ -277,12 +275,14 @@ class GrpcAccessLoggerCache : public Singleton::Instance, const auto logger = createLogger( config, std::move(client), std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, buffer_flush_interval, 1000)), - PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, buffer_size_bytes, 16384), cache.dispatcher_, - scope); + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, buffer_size_bytes, 16384), cache.dispatcher_); cache.access_loggers_.emplace(cache_key, logger); return logger; } +protected: + Stats::Scope& scope_; + private: /** * Per-thread cache. @@ -301,10 +301,9 @@ class GrpcAccessLoggerCache : public Singleton::Instance, virtual typename GrpcAccessLogger::SharedPtr createLogger(const ConfigProto& config, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, - Event::Dispatcher& dispatcher, Stats::Scope& scope) PURE; + Event::Dispatcher& dispatcher) PURE; Grpc::AsyncClientManager& async_client_manager_; - Stats::Scope& scope_; ThreadLocal::SlotPtr tls_slot_; }; diff --git a/source/extensions/access_loggers/grpc/config_utils.cc b/source/extensions/access_loggers/grpc/config_utils.cc index 0010109617cd..e74a2892a826 100644 --- a/source/extensions/access_loggers/grpc/config_utils.cc +++ b/source/extensions/access_loggers/grpc/config_utils.cc @@ -15,7 +15,7 @@ getGrpcAccessLoggerCacheSingleton(Server::Configuration::CommonFactoryContext& c return context.singletonManager().getTyped( SINGLETON_MANAGER_REGISTERED_NAME(grpc_access_logger_cache), [&context] { return std::make_shared( - context.clusterManager().grpcAsyncClientManager(), context.scope(), + context.clusterManager().grpcAsyncClientManager(), context.serverScope(), context.threadLocal(), context.localInfo()); }); } diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc index e544a52af191..ca45d2c5acaf 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc @@ -53,10 +53,10 @@ GrpcAccessLoggerImpl::SharedPtr GrpcAccessLoggerCacheImpl::createLogger( const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, - Event::Dispatcher& dispatcher, Stats::Scope& scope) { + Event::Dispatcher& dispatcher) { return std::make_shared(client, config.log_name(), buffer_flush_interval_msec, max_buffer_size_bytes, - dispatcher, local_info_, scope); + dispatcher, local_info_, scope_); } } // namespace GrpcCommon diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h index 43b542327476..c502f4365d89 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h @@ -54,7 +54,7 @@ class GrpcAccessLoggerCacheImpl createLogger(const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, - Event::Dispatcher& dispatcher, Stats::Scope& scope) override; + Event::Dispatcher& dispatcher) override; const LocalInfo::LocalInfo& local_info_; 
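// Editorial aside: createLogger() no longer receives a Stats::Scope because the base
// GrpcAccessLoggerCache now keeps scope_ as a protected member (see the
// common/grpc_access_logger.h hunk above), so the override declared here is implemented in
// grpc_access_log_impl.cc roughly as:
//
//   return std::make_shared<GrpcAccessLoggerImpl>(client, config.log_name(),
//                                                 buffer_flush_interval_msec,
//                                                 max_buffer_size_bytes, dispatcher,
//                                                 local_info_, scope_);
//
// Shown here only as a reading aid for the signature change; see the .cc hunk above.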
}; diff --git a/source/extensions/access_loggers/grpc/http_config.cc b/source/extensions/access_loggers/grpc/http_config.cc index 6692a73f1d6d..5d3b79510067 100644 --- a/source/extensions/access_loggers/grpc/http_config.cc +++ b/source/extensions/access_loggers/grpc/http_config.cc @@ -31,9 +31,9 @@ AccessLog::InstanceSharedPtr HttpGrpcAccessLogFactory::createAccessLogInstance( if (service_config.has_envoy_grpc()) { context.clusterManager().checkActiveStaticCluster(service_config.envoy_grpc().cluster_name()); } - return std::make_shared(std::move(filter), proto_config, context.threadLocal(), - GrpcCommon::getGrpcAccessLoggerCacheSingleton(context), - context.scope()); + return std::make_shared( + std::move(filter), proto_config, context.threadLocal(), + GrpcCommon::getGrpcAccessLoggerCacheSingleton(context)); } ProtobufTypes::MessagePtr HttpGrpcAccessLogFactory::createEmptyConfigProto() { diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc index c5bc8c1f2c49..e3de3291a40c 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc @@ -26,9 +26,8 @@ HttpGrpcAccessLog::ThreadLocalLogger::ThreadLocalLogger( HttpGrpcAccessLog::HttpGrpcAccessLog(AccessLog::FilterPtr&& filter, const HttpGrpcAccessLogConfig config, ThreadLocal::SlotAllocator& tls, - GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, - Stats::Scope& scope) - : Common::ImplBase(std::move(filter)), scope_(scope), + GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache) + : Common::ImplBase(std::move(filter)), config_(std::make_shared(std::move(config))), tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) { for (const auto& header : config_->additional_request_headers_to_log()) { @@ -43,13 +42,11 @@ HttpGrpcAccessLog::HttpGrpcAccessLog(AccessLog::FilterPtr&& filter, response_trailers_to_log_.emplace_back(header); } Envoy::Config::Utility::checkTransportVersion(config_->common_config()); - // Note that &scope might have died by the time when this callback is called on each thread. - // This is supposed to be fixed by https://github.com/envoyproxy/envoy/issues/18066. 
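// Editorial note on the removal below: the old lambda captured &scope by reference, and that
// reference could be dangling by the time worker threads ran the callback (the problem the
// deleted comment points at). The replacement later in this hunk captures only shared_ptr
// state, roughly:
//
//   tls_slot_->set(
//       [config = config_, access_logger_cache = access_logger_cache_](Event::Dispatcher&) {
//         return std::make_shared<ThreadLocalLogger>(access_logger_cache->getOrCreateLogger(
//             config->common_config(), Common::GrpcAccessLoggerType::HTTP));
//       });
//
// ThreadLocalLogger is the existing per-thread wrapper in this file, not a new type.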
- tls_slot_->set([config = config_, access_logger_cache = access_logger_cache_, - &scope = scope_](Event::Dispatcher&) { - return std::make_shared(access_logger_cache->getOrCreateLogger( - config->common_config(), Common::GrpcAccessLoggerType::HTTP, scope)); - }); + tls_slot_->set( + [config = config_, access_logger_cache = access_logger_cache_](Event::Dispatcher&) { + return std::make_shared(access_logger_cache->getOrCreateLogger( + config->common_config(), Common::GrpcAccessLoggerType::HTTP)); + }); } void HttpGrpcAccessLog::emitLog(const Http::RequestHeaderMap& request_headers, diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h index 25da566a1035..6cfaf97d5617 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h @@ -31,8 +31,7 @@ class HttpGrpcAccessLog : public Common::ImplBase { public: HttpGrpcAccessLog(AccessLog::FilterPtr&& filter, const HttpGrpcAccessLogConfig config, ThreadLocal::SlotAllocator& tls, - GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, - Stats::Scope& scope); + GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache); private: /** @@ -50,7 +49,6 @@ class HttpGrpcAccessLog : public Common::ImplBase { const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& stream_info) override; - Stats::Scope& scope_; const HttpGrpcAccessLogConfigConstSharedPtr config_; const ThreadLocal::SlotPtr tls_slot_; const GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache_; diff --git a/source/extensions/access_loggers/grpc/tcp_config.cc b/source/extensions/access_loggers/grpc/tcp_config.cc index e259a2c5f779..495cdfa3738c 100644 --- a/source/extensions/access_loggers/grpc/tcp_config.cc +++ b/source/extensions/access_loggers/grpc/tcp_config.cc @@ -32,8 +32,7 @@ AccessLog::InstanceSharedPtr TcpGrpcAccessLogFactory::createAccessLogInstance( context.clusterManager().checkActiveStaticCluster(service_config.envoy_grpc().cluster_name()); } return std::make_shared(std::move(filter), proto_config, context.threadLocal(), - GrpcCommon::getGrpcAccessLoggerCacheSingleton(context), - context.scope()); + GrpcCommon::getGrpcAccessLoggerCacheSingleton(context)); } ProtobufTypes::MessagePtr TcpGrpcAccessLogFactory::createEmptyConfigProto() { diff --git a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc index 63eb08d4b277..fb1a2a4d0bd2 100644 --- a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc @@ -20,19 +20,16 @@ TcpGrpcAccessLog::ThreadLocalLogger::ThreadLocalLogger(GrpcCommon::GrpcAccessLog TcpGrpcAccessLog::TcpGrpcAccessLog(AccessLog::FilterPtr&& filter, const TcpGrpcAccessLogConfig config, ThreadLocal::SlotAllocator& tls, - GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, - Stats::Scope& scope) - : Common::ImplBase(std::move(filter)), scope_(scope), + GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache) + : Common::ImplBase(std::move(filter)), config_(std::make_shared(std::move(config))), tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) { Config::Utility::checkTransportVersion(config_->common_config()); - // Note that &scope might have died by the time when this callback is called on each thread. 
- // This is supposed to be fixed by https://github.com/envoyproxy/envoy/issues/18066. - tls_slot_->set([config = config_, access_logger_cache = access_logger_cache_, - &scope = scope_](Event::Dispatcher&) { - return std::make_shared(access_logger_cache->getOrCreateLogger( - config->common_config(), Common::GrpcAccessLoggerType::TCP, scope)); - }); + tls_slot_->set( + [config = config_, access_logger_cache = access_logger_cache_](Event::Dispatcher&) { + return std::make_shared(access_logger_cache->getOrCreateLogger( + config->common_config(), Common::GrpcAccessLoggerType::TCP)); + }); } void TcpGrpcAccessLog::emitLog(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, diff --git a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h index a0b3842a49df..897091d0367a 100644 --- a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h @@ -30,8 +30,7 @@ class TcpGrpcAccessLog : public Common::ImplBase { public: TcpGrpcAccessLog(AccessLog::FilterPtr&& filter, const TcpGrpcAccessLogConfig config, ThreadLocal::SlotAllocator& tls, - GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, - Stats::Scope& scope); + GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache); private: /** @@ -49,7 +48,6 @@ class TcpGrpcAccessLog : public Common::ImplBase { const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& stream_info) override; - Stats::Scope& scope_; const TcpGrpcAccessLogConfigConstSharedPtr config_; const ThreadLocal::SlotPtr tls_slot_; const GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache_; diff --git a/source/extensions/access_loggers/open_telemetry/BUILD b/source/extensions/access_loggers/open_telemetry/BUILD index 8ddd831a9fdd..1599f25a4c9c 100644 --- a/source/extensions/access_loggers/open_telemetry/BUILD +++ b/source/extensions/access_loggers/open_telemetry/BUILD @@ -23,7 +23,7 @@ envoy_cc_library( "//source/common/protobuf", "//source/extensions/access_loggers/common:grpc_access_logger", "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3:pkg_cc_proto", "@opentelemetry_proto//:logs_cc_proto", ], ) @@ -42,7 +42,7 @@ envoy_cc_library( "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/data/accesslog/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3:pkg_cc_proto", "@opentelemetry_proto//:logs_cc_proto", ], ) @@ -69,6 +69,6 @@ envoy_cc_extension( "//source/extensions/access_loggers/open_telemetry:access_log_lib", "//source/extensions/access_loggers/open_telemetry:access_log_proto_descriptors_lib", "//source/extensions/access_loggers/open_telemetry:grpc_access_log_lib", - "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/access_loggers/open_telemetry/access_log_impl.cc b/source/extensions/access_loggers/open_telemetry/access_log_impl.cc index 48c4166395da..38b6a5d644b9 100644 --- a/source/extensions/access_loggers/open_telemetry/access_log_impl.cc +++ 
b/source/extensions/access_loggers/open_telemetry/access_log_impl.cc @@ -5,7 +5,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/data/accesslog/v3/accesslog.pb.h" #include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" -#include "envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.pb.h" +#include "envoy/extensions/access_loggers/open_telemetry/v3/logs_service.pb.h" #include "source/common/common/assert.h" #include "source/common/config/utility.h" @@ -34,16 +34,15 @@ AccessLog::ThreadLocalLogger::ThreadLocalLogger(GrpcAccessLoggerSharedPtr logger AccessLog::AccessLog( ::Envoy::AccessLog::FilterPtr&& filter, - envoy::extensions::access_loggers::open_telemetry::v3alpha::OpenTelemetryAccessLogConfig config, - ThreadLocal::SlotAllocator& tls, GrpcAccessLoggerCacheSharedPtr access_logger_cache, - Stats::Scope& scope) - : Common::ImplBase(std::move(filter)), scope_(scope), tls_slot_(tls.allocateSlot()), + envoy::extensions::access_loggers::open_telemetry::v3::OpenTelemetryAccessLogConfig config, + ThreadLocal::SlotAllocator& tls, GrpcAccessLoggerCacheSharedPtr access_logger_cache) + : Common::ImplBase(std::move(filter)), tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) { Envoy::Config::Utility::checkTransportVersion(config.common_config()); tls_slot_->set([this, config](Event::Dispatcher&) { return std::make_shared(access_logger_cache_->getOrCreateLogger( - config.common_config(), Common::GrpcAccessLoggerType::HTTP, scope_)); + config.common_config(), Common::GrpcAccessLoggerType::HTTP)); }); ProtobufWkt::Struct body_format; diff --git a/source/extensions/access_loggers/open_telemetry/access_log_impl.h b/source/extensions/access_loggers/open_telemetry/access_log_impl.h index 1bd6b34804c8..7a36bad2639e 100644 --- a/source/extensions/access_loggers/open_telemetry/access_log_impl.h +++ b/source/extensions/access_loggers/open_telemetry/access_log_impl.h @@ -4,7 +4,7 @@ #include #include "envoy/access_log/access_log.h" -#include "envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.pb.h" +#include "envoy/extensions/access_loggers/open_telemetry/v3/logs_service.pb.h" #include "envoy/grpc/async_client.h" #include "envoy/grpc/async_client_manager.h" #include "envoy/local_info/local_info.h" @@ -33,11 +33,10 @@ namespace OpenTelemetry { */ class AccessLog : public Common::ImplBase { public: - AccessLog(::Envoy::AccessLog::FilterPtr&& filter, - envoy::extensions::access_loggers::open_telemetry::v3alpha::OpenTelemetryAccessLogConfig - config, - ThreadLocal::SlotAllocator& tls, GrpcAccessLoggerCacheSharedPtr access_logger_cache, - Stats::Scope& scope); + AccessLog( + ::Envoy::AccessLog::FilterPtr&& filter, + envoy::extensions::access_loggers::open_telemetry::v3::OpenTelemetryAccessLogConfig config, + ThreadLocal::SlotAllocator& tls, GrpcAccessLoggerCacheSharedPtr access_logger_cache); private: /** @@ -55,7 +54,6 @@ class AccessLog : public Common::ImplBase { const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& stream_info) override; - Stats::Scope& scope_; const ThreadLocal::SlotPtr tls_slot_; const GrpcAccessLoggerCacheSharedPtr access_logger_cache_; std::unique_ptr body_formatter_; diff --git a/source/extensions/access_loggers/open_telemetry/config.cc b/source/extensions/access_loggers/open_telemetry/config.cc index 2b0f02f3acd2..f23640456c3b 100644 --- a/source/extensions/access_loggers/open_telemetry/config.cc +++ b/source/extensions/access_loggers/open_telemetry/config.cc @@ -1,7 +1,7 @@ 
#include "source/extensions/access_loggers/open_telemetry/config.h" -#include "envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.pb.h" -#include "envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.pb.validate.h" +#include "envoy/extensions/access_loggers/open_telemetry/v3/logs_service.pb.h" +#include "envoy/extensions/access_loggers/open_telemetry/v3/logs_service.pb.validate.h" #include "envoy/registry/registry.h" #include "envoy/server/access_log_config.h" #include "envoy/server/filter_config.h" @@ -37,18 +37,17 @@ AccessLogFactory::createAccessLogInstance(const Protobuf::Message& config, Server::Configuration::CommonFactoryContext& context) { validateProtoDescriptors(); - const auto& proto_config = - MessageUtil::downcastAndValidate( - config, context.messageValidationVisitor()); + const auto& proto_config = MessageUtil::downcastAndValidate< + const envoy::extensions::access_loggers::open_telemetry::v3::OpenTelemetryAccessLogConfig&>( + config, context.messageValidationVisitor()); return std::make_shared(std::move(filter), proto_config, context.threadLocal(), - getAccessLoggerCacheSingleton(context), context.scope()); + getAccessLoggerCacheSingleton(context)); } ProtobufTypes::MessagePtr AccessLogFactory::createEmptyConfigProto() { return std::make_unique< - envoy::extensions::access_loggers::open_telemetry::v3alpha::OpenTelemetryAccessLogConfig>(); + envoy::extensions::access_loggers::open_telemetry::v3::OpenTelemetryAccessLogConfig>(); } std::string AccessLogFactory::name() const { return "envoy.access_loggers.open_telemetry"; } diff --git a/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.cc b/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.cc index 215f7cfba9e4..38d9616922a9 100644 --- a/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.cc @@ -1,7 +1,7 @@ #include "source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.h" #include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" -#include "envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.pb.h" +#include "envoy/extensions/access_loggers/open_telemetry/v3/logs_service.pb.h" #include "envoy/grpc/async_client_manager.h" #include "envoy/local_info/local_info.h" @@ -76,10 +76,10 @@ GrpcAccessLoggerImpl::SharedPtr GrpcAccessLoggerCacheImpl::createLogger( const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, - Event::Dispatcher& dispatcher, Stats::Scope& scope) { + Event::Dispatcher& dispatcher) { return std::make_shared(client, config.log_name(), buffer_flush_interval_msec, max_buffer_size_bytes, - dispatcher, local_info_, scope); + dispatcher, local_info_, scope_); } } // namespace OpenTelemetry diff --git a/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.h b/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.h index 7af83f529de4..85aa0ad8d694 100644 --- a/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.h +++ b/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.h @@ -68,7 +68,7 @@ class GrpcAccessLoggerCacheImpl createLogger(const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds 
buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, - Event::Dispatcher& dispatcher, Stats::Scope& scope) override; + Event::Dispatcher& dispatcher) override; const LocalInfo::LocalInfo& local_info_; }; diff --git a/source/extensions/all_extensions.bzl b/source/extensions/all_extensions.bzl index e02b98e25030..7e246d847877 100644 --- a/source/extensions/all_extensions.bzl +++ b/source/extensions/all_extensions.bzl @@ -7,6 +7,8 @@ _required_extensions = { "envoy.http.original_ip_detection.xff": "//source/extensions/http/original_ip_detection/xff:config", "envoy.request_id.uuid": "//source/extensions/request_id/uuid:config", "envoy.transport_sockets.tls": "//source/extensions/transport_sockets/tls:config", + "envoy.network.dns_resolver.cares": "//source/extensions/network/dns_resolver/cares:config", + "envoy.network.dns_resolver.apple": "//source/extensions/network/dns_resolver/apple:config", } # Return the extension cc_library target after select @@ -29,6 +31,8 @@ _core_extensions = [ "envoy.filters.network.http_connection_manager", "envoy.stat_sinks.statsd", "envoy.transport_sockets.raw_buffer", + "envoy.network.dns_resolver.cares", + "envoy.network.dns_resolver.apple", ] # Return all core extensions to be compiled into Envoy. diff --git a/source/extensions/clusters/aggregate/cluster.cc b/source/extensions/clusters/aggregate/cluster.cc index 6e6b7ab77cab..c76a42ca95f8 100644 --- a/source/extensions/clusters/aggregate/cluster.cc +++ b/source/extensions/clusters/aggregate/cluster.cc @@ -180,6 +180,32 @@ AggregateClusterLoadBalancer::chooseHost(Upstream::LoadBalancerContext* context) return nullptr; } +Upstream::HostConstSharedPtr +AggregateClusterLoadBalancer::peekAnotherHost(Upstream::LoadBalancerContext* context) { + if (load_balancer_) { + return load_balancer_->peekAnotherHost(context); + } + return nullptr; +} + +absl::optional +AggregateClusterLoadBalancer::selectExistingConnection(Upstream::LoadBalancerContext* context, + const Upstream::Host& host, + std::vector& hash_key) { + if (load_balancer_) { + return load_balancer_->selectExistingConnection(context, host, hash_key); + } + return absl::nullopt; +} + +OptRef +AggregateClusterLoadBalancer::lifetimeCallbacks() { + if (load_balancer_) { + return load_balancer_->lifetimeCallbacks(); + } + return {}; +} + std::pair ClusterFactory::createClusterWithConfig( const envoy::config::cluster::v3::Cluster& cluster, diff --git a/source/extensions/clusters/aggregate/cluster.h b/source/extensions/clusters/aggregate/cluster.h index 5c23d632d3e8..6d961fd2e0f3 100644 --- a/source/extensions/clusters/aggregate/cluster.h +++ b/source/extensions/clusters/aggregate/cluster.h @@ -78,10 +78,12 @@ class AggregateClusterLoadBalancer : public Upstream::LoadBalancer, // Upstream::LoadBalancer Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext* context) override; - // Preconnecting not yet implemented for extensions. - Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override { - return nullptr; - } + Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override; + absl::optional + selectExistingConnection(Upstream::LoadBalancerContext* /*context*/, + const Upstream::Host& /*host*/, + std::vector& /*hash_key*/) override; + OptRef lifetimeCallbacks() override; private: // Use inner class to extend LoadBalancerBase. 
When initializing AggregateClusterLoadBalancer, the @@ -101,6 +103,15 @@ class AggregateClusterLoadBalancer : public Upstream::LoadBalancer, Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override { return nullptr; } + absl::optional + selectExistingConnection(Upstream::LoadBalancerContext* /*context*/, + const Upstream::Host& /*host*/, + std::vector& /*hash_key*/) override { + return {}; + } + OptRef lifetimeCallbacks() override { + return {}; + } absl::optional hostToLinearizedPriority(const Upstream::HostDescription& host) const; diff --git a/source/extensions/clusters/dynamic_forward_proxy/BUILD b/source/extensions/clusters/dynamic_forward_proxy/BUILD index 44da3c9be55d..8e50111a9f6a 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/BUILD +++ b/source/extensions/clusters/dynamic_forward_proxy/BUILD @@ -18,6 +18,8 @@ envoy_cc_extension( "//source/common/upstream:logical_host_lib", "//source/extensions/common/dynamic_forward_proxy:dns_cache_interface", "//source/extensions/common/dynamic_forward_proxy:dns_cache_manager_impl", + "//source/extensions/filters/network/common:utility_lib", + "//source/extensions/transport_sockets/tls/cert_validator:cert_validator_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg_cc_proto", diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc index 62209fbd8b98..915e0a4f2d8e 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc @@ -1,12 +1,17 @@ #include "source/extensions/clusters/dynamic_forward_proxy/cluster.h" +#include + #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.pb.h" #include "envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.pb.validate.h" +#include "source/common/http/utility.h" #include "source/common/network/transport_socket_options_impl.h" #include "source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h" #include "source/extensions/filters/network/common/utility.h" +#include "source/extensions/transport_sockets/tls/cert_validator/default_validator.h" +#include "source/extensions/transport_sockets/tls/utility.h" namespace Envoy { namespace Extensions { @@ -26,7 +31,8 @@ Cluster::Cluster( factory_context.mainThreadDispatcher().timeSource()), dns_cache_manager_(cache_manager_factory.get()), dns_cache_(dns_cache_manager_->getCache(config.dns_cache_config())), - update_callbacks_handle_(dns_cache_->addUpdateCallbacks(*this)), local_info_(local_info) {} + update_callbacks_handle_(dns_cache_->addUpdateCallbacks(*this)), local_info_(local_info), + allow_coalesced_connections_(config.allow_coalesced_connections()) {} void Cluster::startPreInit() { // If we are attaching to a pre-populated cache we need to initialize our hosts. 
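The hunks that follow add connection coalescing to the dynamic forward proxy load balancer: onConnectionOpen() records candidate pools and connections, and selectExistingConnection() later matches them against the requested host's certificate SANs. As a reading aid (a sketch mirroring the checks in onConnectionOpen() below, not code added by the patch), the gate for recording a connection reduces to:

// Sketch only: `connection` is an established upstream Network::Connection.
bool isCoalescingCandidate(const Network::Connection& connection) {
  if (!connection.ssl()) {
    return false; // Only TLS connections are coalesced.
  }
  const std::string alpn = connection.nextProtocol();
  // Only HTTP/2 and HTTP/3 can serve multiple origins over one connection.
  return alpn == Http::Utility::AlpnNames::get().Http2 ||
         alpn == Http::Utility::AlpnNames::get().Http3;
}

Coalescing is additionally gated on the new allow_coalesced_connections cluster option, since lifetimeCallbacks() returns an empty OptRef when that flag is false.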
@@ -78,7 +84,8 @@ void Cluster::addOrUpdateHost( ASSERT(host_map_it->second.shared_host_info_->address() != host_map_it->second.logical_host_->address()); ENVOY_LOG(debug, "updating dfproxy cluster host address '{}'", host); - host_map_it->second.logical_host_->setNewAddress(host_info->address(), dummy_lb_endpoint_); + host_map_it->second.logical_host_->setNewAddresses( + host_info->address(), host_info->addressList(), dummy_lb_endpoint_); return; } @@ -88,8 +95,8 @@ void Cluster::addOrUpdateHost( .try_emplace(host, host_info, std::make_shared( info(), std::string{host}, host_info->address(), - dummy_locality_lb_endpoint_, dummy_lb_endpoint_, nullptr, - time_source_)) + host_info->addressList(), dummy_locality_lb_endpoint_, + dummy_lb_endpoint_, nullptr, time_source_)) .first->second.logical_host_; } @@ -170,6 +177,72 @@ Cluster::LoadBalancer::chooseHost(Upstream::LoadBalancerContext* context) { } } +absl::optional +Cluster::LoadBalancer::selectExistingConnection(Upstream::LoadBalancerContext* /*context*/, + const Upstream::Host& host, + std::vector& hash_key) { + const std::string& hostname = host.hostname(); + if (hostname.empty()) { + return absl::nullopt; + } + + LookupKey key = {hash_key, *host.address()}; + auto it = connection_info_map_.find(key); + if (it == connection_info_map_.end()) { + return absl::nullopt; + } + + for (auto& info : it->second) { + Envoy::Ssl::ConnectionInfoConstSharedPtr ssl = info.connection_->ssl(); + ASSERT(ssl); + for (const std::string& san : ssl->dnsSansPeerCertificate()) { + if (Extensions::TransportSockets::Tls::Utility::dnsNameMatch(hostname, san)) { + return Upstream::SelectedPoolAndConnection{*info.pool_, *info.connection_}; + } + } + } + + return absl::nullopt; +} + +OptRef +Cluster::LoadBalancer::lifetimeCallbacks() { + if (!cluster_.allowCoalescedConnections()) { + return {}; + } + return makeOptRef(*this); +} + +void Cluster::LoadBalancer::onConnectionOpen(Envoy::Http::ConnectionPool::Instance& pool, + std::vector& hash_key, + const Network::Connection& connection) { + // Only coalesce connections that are over TLS. + if (!connection.ssl()) { + return; + } + const std::string alpn = connection.nextProtocol(); + if (alpn != Http::Utility::AlpnNames::get().Http2 && + alpn != Http::Utility::AlpnNames::get().Http3) { + // Only coalesce connections for HTTP/2 and HTTP/3. 
+ return; + } + const LookupKey key = {hash_key, *connection.connectionInfoProvider().remoteAddress()}; + ConnectionInfo info = {&pool, &connection}; + connection_info_map_[key].push_back(info); +} + +void Cluster::LoadBalancer::onConnectionDraining(Envoy::Http::ConnectionPool::Instance& pool, + std::vector& hash_key, + const Network::Connection& connection) { + const LookupKey key = {hash_key, *connection.connectionInfoProvider().remoteAddress()}; + connection_info_map_[key].erase( + std::remove_if(connection_info_map_[key].begin(), connection_info_map_[key].end(), + [&pool, &connection](const ConnectionInfo& info) { + return (info.pool_ == &pool && info.connection_ == &connection); + }), + connection_info_map_[key].end()); +} + std::pair ClusterFactory::createClusterWithConfig( const envoy::config::cluster::v3::Cluster& cluster, diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.h b/source/extensions/clusters/dynamic_forward_proxy/cluster.h index e7266b97692e..bea82bdd93ba 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.h +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.h @@ -4,6 +4,7 @@ #include "envoy/config/endpoint/v3/endpoint_components.pb.h" #include "envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.pb.h" #include "envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.pb.validate.h" +#include "envoy/http/conn_pool.h" #include "source/common/upstream/cluster_factory_impl.h" #include "source/common/upstream/logical_host.h" @@ -39,6 +40,8 @@ class Cluster : public Upstream::BaseDynamicClusterImpl, const Extensions::Common::DynamicForwardProxy::DnsHostInfoSharedPtr& host_info) override; void onDnsHostRemove(const std::string& host) override; + bool allowCoalescedConnections() const { return allow_coalesced_connections_; } + private: struct HostInfo { HostInfo(const Extensions::Common::DynamicForwardProxy::DnsHostInfoSharedPtr& shared_host_info, @@ -51,7 +54,8 @@ class Cluster : public Upstream::BaseDynamicClusterImpl, using HostInfoMap = absl::flat_hash_map; - class LoadBalancer : public Upstream::LoadBalancer { + class LoadBalancer : public Upstream::LoadBalancer, + public Envoy::Http::ConnectionPool::ConnectionLifetimeCallbacks { public: LoadBalancer(const Cluster& cluster) : cluster_(cluster) {} @@ -61,8 +65,40 @@ class Cluster : public Upstream::BaseDynamicClusterImpl, Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override { return nullptr; } + absl::optional + selectExistingConnection(Upstream::LoadBalancerContext* context, const Upstream::Host& host, + std::vector& hash_key) override; + OptRef lifetimeCallbacks() override; + + // Envoy::Http::ConnectionPool::ConnectionLifetimeCallbacks + void onConnectionOpen(Envoy::Http::ConnectionPool::Instance& pool, + std::vector& hash_key, + const Network::Connection& connection) override; + + void onConnectionDraining(Envoy::Http::ConnectionPool::Instance& pool, + std::vector& hash_key, + const Network::Connection& connection) override; private: + struct ConnectionInfo { + Envoy::Http::ConnectionPool::Instance* pool_; // Not a ref to allow assignment in remove(). + const Network::Connection* connection_; // Not a ref to allow assignment in remove(). 
+ }; + struct LookupKey { + const std::vector hash_key_; + const Network::Address::Instance& peer_address_; + bool operator==(const LookupKey& rhs) const { + return std::tie(hash_key_, peer_address_) == std::tie(rhs.hash_key_, rhs.peer_address_); + } + }; + struct LookupKeyHash { + size_t operator()(const LookupKey& lookup_key) const { + return std::hash{}(lookup_key.peer_address_.asString()); + } + }; + + absl::flat_hash_map, LookupKeyHash> connection_info_map_; + const Cluster& cluster_; }; @@ -109,6 +145,9 @@ class Cluster : public Upstream::BaseDynamicClusterImpl, const envoy::config::endpoint::v3::LbEndpoint dummy_lb_endpoint_; const LocalInfo::LocalInfo& local_info_; + // True if H2 and H3 connections may be reused across different origins. + const bool allow_coalesced_connections_; + mutable absl::Mutex host_map_lock_; HostInfoMap host_map_ ABSL_GUARDED_BY(host_map_lock_); diff --git a/source/extensions/clusters/redis/redis_cluster_lb.h b/source/extensions/clusters/redis/redis_cluster_lb.h index 0edbfdedc93b..a4eb2352df6c 100644 --- a/source/extensions/clusters/redis/redis_cluster_lb.h +++ b/source/extensions/clusters/redis/redis_cluster_lb.h @@ -188,6 +188,17 @@ class RedisClusterLoadBalancerFactory : public ClusterSlotUpdateCallBack, Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override { return nullptr; } + // Pool selection not implemented. + absl::optional + selectExistingConnection(Upstream::LoadBalancerContext* /*context*/, + const Upstream::Host& /*host*/, + std::vector& /*hash_key*/) override { + return absl::nullopt; + } + // Lifetime tracking not implemented. + OptRef lifetimeCallbacks() override { + return {}; + } private: const SlotArraySharedPtr slot_array_; diff --git a/source/extensions/common/dynamic_forward_proxy/BUILD b/source/extensions/common/dynamic_forward_proxy/BUILD index f7daecf1ab23..ceb356d83150 100644 --- a/source/extensions/common/dynamic_forward_proxy/BUILD +++ b/source/extensions/common/dynamic_forward_proxy/BUILD @@ -29,6 +29,7 @@ envoy_cc_library( deps = [ ":dns_cache_impl", "//source/common/protobuf", + "//source/server:factory_context_base_impl_lib", "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) @@ -43,11 +44,12 @@ envoy_cc_library( "//envoy/network:dns_interface", "//envoy/thread_local:thread_local_interface", "//source/common/common:cleanup_lib", + "//source/common/common:dns_utils_lib", "//source/common/common:key_value_store_lib", "//source/common/config:utility_lib", "//source/common/network:resolver_lib", "//source/common/network:utility_lib", - "//source/common/upstream:upstream_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache.h b/source/extensions/common/dynamic_forward_proxy/dns_cache.h index 4500341592e7..2694dd61d833 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache.h @@ -25,6 +25,12 @@ class DnsHostInfo { */ virtual Network::Address::InstanceConstSharedPtr address() const PURE; + /** + * Returns the host's currently resolved address. These addresses may change periodically due to + * async re-resolution. + */ + virtual std::vector addressList() const PURE; + /** * Returns the host that was actually resolved via DNS. If port was originally specified it will * be stripped from this return value. 
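The LookupKey/LookupKeyHash pair added to cluster.h above is a conventional hashed-key setup: equality compares both the hash-key bytes and the peer address, while the hash uses only the address string, which is sound because equal keys necessarily share an address. A minimal, self-contained sketch of the same pattern with standard types (the names here are illustrative, not Envoy APIs):

#include <cstdint>
#include <string>
#include <tuple>
#include <unordered_map>
#include <vector>

struct Key {
  std::vector<uint8_t> hash_key;
  std::string peer_address;
  bool operator==(const Key& rhs) const {
    return std::tie(hash_key, peer_address) == std::tie(rhs.hash_key, rhs.peer_address);
  }
};

struct KeyHash {
  // Hashing only the address is allowed: equal keys share an address, so they share a hash.
  size_t operator()(const Key& k) const { return std::hash<std::string>{}(k.peer_address); }
};

// The mapped vector<int> stands in for the ConnectionInfo vector kept by the load balancer.
using ConnectionMap = std::unordered_map<Key, std::vector<int>, KeyHash>;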
diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index 17e6f2b0a7cc..82ce6a295779 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -2,15 +2,14 @@ #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" +#include "source/common/common/dns_utils.h" #include "source/common/common/stl_helpers.h" #include "source/common/config/utility.h" #include "source/common/http/utility.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/network/resolver_impl.h" #include "source/common/network/utility.h" -// TODO(mattklein123): Move DNS family helpers to a smaller include. -#include "source/common/upstream/upstream_impl.h" - namespace Envoy { namespace Extensions { namespace Common { @@ -19,9 +18,10 @@ namespace DynamicForwardProxy { DnsCacheImpl::DnsCacheImpl( Server::Configuration::FactoryContextBase& context, const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) - : main_thread_dispatcher_(context.mainThreadDispatcher()), - dns_lookup_family_(Upstream::getDnsLookupFamilyFromEnum(config.dns_lookup_family())), - resolver_(selectDnsResolver(config, main_thread_dispatcher_)), + : main_thread_dispatcher_(context.mainThreadDispatcher()), config_(config), + random_generator_(context.api().randomGenerator()), + dns_lookup_family_(DnsUtils::getDnsLookupFamilyFromEnum(config.dns_lookup_family())), + resolver_(selectDnsResolver(config, main_thread_dispatcher_, context)), tls_slot_(context.threadLocal()), scope_(context.scope().createScope(fmt::format("dns_cache.{}.", config.name()))), stats_(generateDnsCacheStats(*scope_)), @@ -29,10 +29,6 @@ DnsCacheImpl::DnsCacheImpl( config.dns_cache_circuit_breaker()), refresh_interval_(PROTOBUF_GET_MS_OR_DEFAULT(config, dns_refresh_rate, 60000)), timeout_interval_(PROTOBUF_GET_MS_OR_DEFAULT(config, dns_query_timeout, 5000)), - failure_backoff_strategy_( - Config::Utility::prepareDnsRefreshStrategy< - envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig>( - config, refresh_interval_.count(), context.api().randomGenerator())), file_system_(context.api().fileSystem()), validation_visitor_(context.messageValidationVisitor()), host_ttl_(PROTOBUF_GET_MS_OR_DEFAULT(config, host_ttl, 300000)), @@ -73,25 +69,12 @@ DnsCacheImpl::~DnsCacheImpl() { Network::DnsResolverSharedPtr DnsCacheImpl::selectDnsResolver( const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config, - Event::Dispatcher& main_thread_dispatcher) { - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - std::vector resolvers; - if (config.has_dns_resolution_config()) { - dns_resolver_options.CopyFrom(config.dns_resolution_config().dns_resolver_options()); - if (!config.dns_resolution_config().resolvers().empty()) { - const auto& resolver_addrs = config.dns_resolution_config().resolvers(); - resolvers.reserve(resolver_addrs.size()); - for (const auto& resolver_addr : resolver_addrs) { - resolvers.push_back(Network::Address::resolveProtoAddress(resolver_addr)); - } - } - } else { - // Field bool `use_tcp_for_dns_lookups` will be deprecated in future. To be backward - // compatible utilize config.use_tcp_for_dns_lookups() if `config.dns_resolution_config` - // is not set. 
- dns_resolver_options.set_use_tcp_for_dns_lookups(config.use_tcp_for_dns_lookups()); - } - return main_thread_dispatcher.createDnsResolver(resolvers, dns_resolver_options); + Event::Dispatcher& main_thread_dispatcher, Server::Configuration::FactoryContextBase& context) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + Network::DnsResolverFactory& dns_resolver_factory = + Network::createDnsResolverFactoryFromProto(config, typed_dns_resolver_config); + return dns_resolver_factory.createDnsResolver(main_thread_dispatcher, context.api(), + typed_dns_resolver_config); } DnsCacheStats DnsCacheImpl::generateDnsCacheStats(Stats::Scope& scope) { @@ -230,7 +213,7 @@ void DnsCacheImpl::onResolveTimeout(const std::string& host) { ASSERT(main_thread_dispatcher_.isThreadSafe()); auto& primary_host = getPrimaryHost(host); - ENVOY_LOG(debug, "host='{}' resolution timeout", host); + ENVOY_LOG_EVENT(debug, "dns_cache_resolve_timeout", "host='{}' resolution timeout", host); stats_.dns_query_timeout_.inc(); primary_host.active_query_->cancel(Network::ActiveDnsQuery::CancelReason::Timeout); finishResolve(host, Network::DnsResolver::ResolutionStatus::Failure, {}); @@ -247,8 +230,8 @@ void DnsCacheImpl::onReResolve(const std::string& host) { const std::chrono::steady_clock::duration now_duration = main_thread_dispatcher_.timeSource().monotonicTime().time_since_epoch(); auto last_used_time = primary_host.host_info_->lastUsedTime(); - ENVOY_LOG(debug, "host='{}' TTL check: now={} last_used={}", host, now_duration.count(), - last_used_time.count()); + ENVOY_LOG(debug, "host='{}' TTL check: now={} last_used={} TTL {}", host, now_duration.count(), + last_used_time.count(), host_ttl_.count()); if ((now_duration - last_used_time) > host_ttl_) { ENVOY_LOG(debug, "host='{}' TTL expired, removing", host); // If the host has no address then that means that the DnsCacheImpl has never @@ -346,6 +329,7 @@ void DnsCacheImpl::finishResolve(const std::string& host, ? Network::Utility::getAddressWithPort(*(response.front().address_), primary_host_info->port_) : nullptr; + auto address_list = DnsUtils::generateAddressList(response, primary_host_info->port_); // Only the change the address if: // 1) The new address is valid && @@ -356,45 +340,51 @@ void DnsCacheImpl::finishResolve(const std::string& host, // resolution failure. bool address_changed = false; auto current_address = primary_host_info->host_info_->address(); - if (new_address != nullptr && (current_address == nullptr || *current_address != *new_address)) { - ENVOY_LOG(debug, "host '{}' address has changed", host); - primary_host_info->host_info_->setAddress(new_address); - runAddUpdateCallbacks(host, primary_host_info->host_info_); - address_changed = true; - stats_.host_address_changed_.inc(); - } if (!resolution_time.has_value()) { resolution_time = main_thread_dispatcher_.timeSource().monotonicTime(); } + std::chrono::seconds dns_ttl = + std::chrono::duration_cast(refresh_interval_); if (new_address) { // Update the cache entry and staleness any time the ttl changes. if (!from_cache) { - addCacheEntry(host, new_address, response.front().ttl_); + addCacheEntry(host, new_address, address_list, response.front().ttl_); + } + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.use_dns_ttl")) { + // Arbitrarily cap DNS re-resolution at 5s to avoid constant DNS queries. 
+      dns_ttl = std::max(std::chrono::seconds(5), response.front().ttl_);
     }
-    primary_host_info->host_info_->updateStale(resolution_time.value(), response.front().ttl_);
+    primary_host_info->host_info_->updateStale(resolution_time.value(), dns_ttl);
+  }
+
+  if (new_address != nullptr &&
+      (current_address == nullptr || *current_address != *new_address ||
+       DnsUtils::listChanged(address_list, primary_host_info->host_info_->addressList()))) {
+    ENVOY_LOG(debug, "host '{}' address has changed", host);
+    primary_host_info->host_info_->setAddresses(new_address, std::move(address_list));
+
+    runAddUpdateCallbacks(host, primary_host_info->host_info_);
+    address_changed = true;
+    stats_.host_address_changed_.inc();
   }

   if (first_resolve) {
     primary_host_info->host_info_->setFirstResolveComplete();
   }
-  if (first_resolve || address_changed) {
-    // TODO(alyssawilk) only notify threads of stale results after a resolution
-    // timeout.
+  if (first_resolve || (address_changed && !primary_host_info->host_info_->isStale())) {
     notifyThreads(host, primary_host_info->host_info_);
   }

   // Kick off the refresh timer.
-  // TODO(mattklein123): Consider jitter here. It may not be necessary since the initial host
-  // is populated dynamically.
-  // TODO(alyssawilk) also consider TTL here.
   if (status == Network::DnsResolver::ResolutionStatus::Success) {
-    failure_backoff_strategy_->reset();
-    primary_host_info->refresh_timer_->enableTimer(refresh_interval_);
+    primary_host_info->failure_backoff_strategy_->reset(
+        std::chrono::duration_cast<std::chrono::milliseconds>(dns_ttl).count());
+    primary_host_info->refresh_timer_->enableTimer(dns_ttl);
     ENVOY_LOG(debug, "DNS refresh rate reset for host '{}', refresh rate {} ms", host,
-              refresh_interval_.count());
+              dns_ttl.count() * 1000);
   } else {
-    const uint64_t refresh_interval = failure_backoff_strategy_->nextBackOffMs();
+    const uint64_t refresh_interval = primary_host_info->failure_backoff_strategy_->nextBackOffMs();
     primary_host_info->refresh_timer_->enableTimer(std::chrono::milliseconds(refresh_interval));
     ENVOY_LOG(debug, "DNS refresh rate reset for host '{}', (failure) refresh rate {} ms", host,
               refresh_interval);
@@ -453,7 +443,11 @@ DnsCacheImpl::PrimaryHostInfo::PrimaryHostInfo(DnsCacheImpl& parent,
       refresh_timer_(parent.main_thread_dispatcher_.createTimer(refresh_timer_cb)),
       timeout_timer_(parent.main_thread_dispatcher_.createTimer(timeout_timer_cb)),
       host_info_(std::make_shared<DnsHostInfoImpl>(parent.main_thread_dispatcher_.timeSource(),
-                                                   host_to_resolve, is_ip_address)) {
+                                                   host_to_resolve, is_ip_address)),
+      failure_backoff_strategy_(
+          Config::Utility::prepareDnsRefreshStrategy<
+              envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig>(
+              parent_.config_, parent_.refresh_interval_.count(), parent_.random_generator_)) {
   parent_.stats_.host_added_.inc();
   parent_.stats_.num_hosts_.inc();
 }
@@ -463,17 +457,25 @@ DnsCacheImpl::PrimaryHostInfo::~PrimaryHostInfo() {
   parent_.stats_.num_hosts_.dec();
 }

-void DnsCacheImpl::addCacheEntry(const std::string& host,
-                                 const Network::Address::InstanceConstSharedPtr& address,
-                                 const std::chrono::seconds ttl) {
+void DnsCacheImpl::addCacheEntry(
+    const std::string& host, const Network::Address::InstanceConstSharedPtr& address,
+    const std::vector<Network::Address::InstanceConstSharedPtr>& address_list,
+    const std::chrono::seconds ttl) {
   if (!key_value_store_) {
     return;
   }
   MonotonicTime now = main_thread_dispatcher_.timeSource().monotonicTime();
   uint64_t seconds_since_epoch =
       std::chrono::duration_cast<std::chrono::seconds>(now.time_since_epoch()).count();
-  const std::string value =
-      absl::StrCat(address->asString(), "|", ttl.count(), "|", seconds_since_epoch);
+  std::string value;
+  if (address_list.empty()) {
+    value = absl::StrCat(address->asString(), "|", ttl.count(), "|", seconds_since_epoch);
+  } else {
+    for (auto& addr : address_list) {
+      value += absl::StrCat((value.empty() ? "" : "\n"), addr->asString(), "|", ttl.count(), "|",
+                            seconds_since_epoch);
+    }
+  }
   key_value_store_->addOrUpdate(host, value);
 }

@@ -484,6 +486,40 @@ void DnsCacheImpl::removeCacheEntry(const std::string& host) {
   key_value_store_->remove(host);
 }

+absl::optional<Network::DnsResponse>
+DnsCacheImpl::parseValue(absl::string_view value, absl::optional<MonotonicTime>& resolution_time) {
+  Network::Address::InstanceConstSharedPtr address;
+  const auto parts = StringUtil::splitToken(value, "|");
+  std::chrono::seconds ttl(0);
+  if (parts.size() != 3) {
+    ENVOY_LOG(warn, "Incorrect number of tokens in the cache line");
+    return {};
+  }
+  address = Network::Utility::parseInternetAddressAndPortNoThrow(std::string(parts[0]));
+  if (address == nullptr) {
+    ENVOY_LOG(warn, "{} is not a valid address", parts[0]);
+  }
+  uint64_t ttl_int;
+  if (absl::SimpleAtoi(parts[1], &ttl_int) && ttl_int != 0) {
+    ttl = std::chrono::seconds(ttl_int);
+  } else {
+    ENVOY_LOG(warn, "{} is not a valid ttl", parts[1]);
+  }
+  uint64_t epoch_int;
+  if (absl::SimpleAtoi(parts[2], &epoch_int)) {
+    MonotonicTime now = main_thread_dispatcher_.timeSource().monotonicTime();
+    const std::chrono::seconds seconds_since_epoch =
+        std::chrono::duration_cast<std::chrono::seconds>(now.time_since_epoch());
+    resolution_time = main_thread_dispatcher_.timeSource().monotonicTime() -
+                      (seconds_since_epoch - std::chrono::seconds(epoch_int));
+  }
+  if (address == nullptr || ttl == std::chrono::seconds(0) || !resolution_time.has_value()) {
+    ENVOY_LOG(warn, "Unable to parse cache line '{}'", value);
+    return {};
+  }
+  return Network::DnsResponse(address, ttl);
+}
+
 void DnsCacheImpl::loadCacheEntries(
     const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) {
   if (!config.has_key_value_config()) {
@@ -494,42 +530,23 @@ void DnsCacheImpl::loadCacheEntries(
   key_value_store_ = factory.createStore(config.key_value_config(), validation_visitor_,
                                          main_thread_dispatcher_, file_system_);
   KeyValueStore::ConstIterateCb load = [this](const std::string& key, const std::string& value) {
-    Network::Address::InstanceConstSharedPtr address;
-    const auto parts = StringUtil::splitToken(value, "|");
-    std::chrono::seconds ttl(0);
     absl::optional<MonotonicTime> resolution_time;
-    if (parts.size() == 3) {
-      address = Network::Utility::parseInternetAddressAndPortNoThrow(std::string(parts[0]));
-      if (address == nullptr) {
-        ENVOY_LOG(warn, "{} is not a valid address", parts[0]);
+    std::list<Network::DnsResponse> responses;
+    const auto addresses = StringUtil::splitToken(value, "\n");
+    for (absl::string_view address_line : addresses) {
+      absl::optional<Network::DnsResponse> response = parseValue(address_line, resolution_time);
+      if (!response.has_value()) {
+        return KeyValueStore::Iterate::Break;
       }
-      uint64_t ttl_int;
-      if (absl::SimpleAtoi(parts[1], &ttl_int) && ttl_int != 0) {
-        ttl = std::chrono::seconds(ttl_int);
-      } else {
-        ENVOY_LOG(warn, "{} is not a valid ttl", parts[1]);
-      }
-      uint64_t epoch_int;
-      if (absl::SimpleAtoi(parts[2], &epoch_int)) {
-        MonotonicTime now = main_thread_dispatcher_.timeSource().monotonicTime();
-        const std::chrono::seconds seconds_since_epoch =
-            std::chrono::duration_cast<std::chrono::seconds>(now.time_since_epoch());
-        resolution_time = main_thread_dispatcher_.timeSource().monotonicTime() -
-                          (seconds_since_epoch - std::chrono::seconds(epoch_int));
-      }
-    } else {
-      ENVOY_LOG(warn, "Incorrect number of tokens in the cache line");
+      responses.emplace_back(response.value());
     }
-    if (address == nullptr || ttl == std::chrono::seconds(0) || !resolution_time.has_value()) {
-      ENVOY_LOG(warn, "Unable to parse cache line '{}'", value);
+    if (responses.empty()) {
      return KeyValueStore::Iterate::Break;
     }
-    stats_.cache_load_.inc();
-    std::list<Network::DnsResponse> response;
-    createHost(key, address->ip()->port());
-    response.emplace_back(Network::DnsResponse(address, ttl));
-    finishResolve(key, Network::DnsResolver::ResolutionStatus::Success, std::move(response),
+    createHost(key, responses.front().address_->ip()->port());
+    finishResolve(key, Network::DnsResolver::ResolutionStatus::Success, std::move(responses),
                   resolution_time);
+    stats_.cache_load_.inc();
     return KeyValueStore::Iterate::Continue;
   };
   key_value_store_->iterate(load);
diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h
index d10c88bd4feb..a0534ac18e78 100644
--- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h
+++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h
@@ -52,7 +52,8 @@ class DnsCacheImpl : public DnsCache, Logger::Loggable
+    std::vector<Network::Address::InstanceConstSharedPtr> addressList() const override {
+      std::vector<Network::Address::InstanceConstSharedPtr> ret;
+      absl::ReaderMutexLock lock{&resolve_lock_};
+      ret = address_list_;
+      return ret;
+    }
+
     const std::string& resolvedHost() const override { return resolved_host_; }
     bool isIpAddress() const override { return is_ip_address_; }
     void touch() final { last_used_time_ = time_source_.monotonicTime().time_since_epoch(); }
     void updateStale(MonotonicTime resolution_time, std::chrono::seconds ttl) {
       stale_at_time_ = resolution_time + ttl;
     }
+    bool isStale() {
+      return time_source_.monotonicTime() > static_cast<MonotonicTime>(stale_at_time_);
+    }

-    void setAddress(Network::Address::InstanceConstSharedPtr address) {
+    void setAddresses(Network::Address::InstanceConstSharedPtr address,
+                      std::vector<Network::Address::InstanceConstSharedPtr>&& list) {
       absl::WriterMutexLock lock{&resolve_lock_};
       first_resolve_complete_ = true;
       address_ = address;
+      address_list_ = std::move(list);
     }
+
     std::chrono::steady_clock::duration lastUsedTime() const { return last_used_time_.load(); }

     bool firstResolveComplete() const {
@@ -140,6 +155,8 @@ class DnsCacheImpl : public DnsCache, Logger::Loggable
+    std::vector<Network::Address::InstanceConstSharedPtr>
+        address_list_ ABSL_GUARDED_BY(resolve_lock_);
     // Using std::chrono::steady_clock::duration is required for compilation within an atomic vs.
     // using MonotonicTime.
@@ -160,6 +177,7 @@ class DnsCacheImpl : public DnsCache, Logger::Loggable&& response, absl::optional resolution_time = {}); @@ -192,13 +211,18 @@ class DnsCacheImpl : public DnsCache, Logger::Loggable& address_list, const std::chrono::seconds ttl); void removeCacheEntry(const std::string& host); void loadCacheEntries( const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config); PrimaryHostInfo* createHost(const std::string& host, uint16_t default_port); + absl::optional parseValue(absl::string_view value, + absl::optional& resolution_time); Event::Dispatcher& main_thread_dispatcher_; + const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config_; + Random::RandomGenerator& random_generator_; const Network::DnsLookupFamily dns_lookup_family_; const Network::DnsResolverSharedPtr resolver_; ThreadLocal::TypedSlot tls_slot_; @@ -212,7 +236,6 @@ class DnsCacheImpl : public DnsCache, Logger::Loggablesecond.cache_; diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h index 582c8ea4f701..9ec6c434e468 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h @@ -4,6 +4,7 @@ #include "envoy/server/factory_context.h" #include "source/extensions/common/dynamic_forward_proxy/dns_cache.h" +#include "source/server/factory_context_base_impl.h" #include "absl/container/flat_hash_map.h" @@ -31,8 +32,7 @@ class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { DnsCacheSharedPtr cache_; }; - Server::Configuration::FactoryContextBase& context_; - + Server::FactoryContextBaseImpl context_; absl::flat_hash_map caches_; }; @@ -44,7 +44,7 @@ class DnsCacheManagerFactoryImpl : public DnsCacheManagerFactory { DnsCacheManagerSharedPtr get() override; private: - Server::Configuration::FactoryContextBase& context_; + Server::FactoryContextBaseImpl context_; }; } // namespace DynamicForwardProxy diff --git a/source/extensions/common/wasm/BUILD b/source/extensions/common/wasm/BUILD index 49cf9725d091..93f33d4a6570 100644 --- a/source/extensions/common/wasm/BUILD +++ b/source/extensions/common/wasm/BUILD @@ -82,6 +82,7 @@ envoy_cc_library( "//source/common/http:message_lib", "//source/common/http:utility_lib", "//source/common/tracing:http_tracer_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", "//source/extensions/common/wasm/ext:declare_property_cc_proto", "//source/extensions/common/wasm/ext:envoy_null_vm_wasm_api", "//source/extensions/filters/common/expr:context_lib", diff --git a/source/extensions/common/wasm/context.cc b/source/extensions/common/wasm/context.cc index 07447110f210..8e703227657c 100644 --- a/source/extensions/common/wasm/context.cc +++ b/source/extensions/common/wasm/context.cc @@ -974,19 +974,15 @@ WasmResult Context::grpcCall(std::string_view grpc_service, std::string_view ser uint32_t* token_ptr) { GrpcService service_proto; if (!service_proto.ParseFromArray(grpc_service.data(), grpc_service.size())) { - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.wasm_cluster_name_envoy_grpc")) { - auto cluster_name = std::string(grpc_service.substr(0, grpc_service.size())); - const auto thread_local_cluster = clusterManager().getThreadLocalCluster(cluster_name); - if (thread_local_cluster == nullptr) { - // TODO(shikugawa): The reason to keep return status as `BadArgument` is not to force - // 
callers to change their own codebase with ABI 0.1.x. We should treat this failure as - // `BadArgument` after ABI 0.2.x will have released. - return WasmResult::ParseFailure; - } - service_proto.mutable_envoy_grpc()->set_cluster_name(cluster_name); - } else { + auto cluster_name = std::string(grpc_service.substr(0, grpc_service.size())); + const auto thread_local_cluster = clusterManager().getThreadLocalCluster(cluster_name); + if (thread_local_cluster == nullptr) { + // TODO(shikugawa): The reason to keep return status as `BadArgument` is not to force + // callers to change their own codebase with ABI 0.1.x. We should treat this failure as + // `BadArgument` after ABI 0.2.x will have released. return WasmResult::ParseFailure; } + service_proto.mutable_envoy_grpc()->set_cluster_name(cluster_name); } uint32_t token = wasm()->nextGrpcCallId(); auto& handler = grpc_call_request_[token]; @@ -1023,19 +1019,15 @@ WasmResult Context::grpcStream(std::string_view grpc_service, std::string_view s uint32_t* token_ptr) { GrpcService service_proto; if (!service_proto.ParseFromArray(grpc_service.data(), grpc_service.size())) { - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.wasm_cluster_name_envoy_grpc")) { - auto cluster_name = std::string(grpc_service.substr(0, grpc_service.size())); - const auto thread_local_cluster = clusterManager().getThreadLocalCluster(cluster_name); - if (thread_local_cluster == nullptr) { - // TODO(shikugawa): The reason to keep return status as `BadArgument` is not to force - // callers to change their own codebase with ABI 0.1.x. We should treat this failure as - // `BadArgument` after ABI 0.2.x will have released. - return WasmResult::ParseFailure; - } - service_proto.mutable_envoy_grpc()->set_cluster_name(cluster_name); - } else { + auto cluster_name = std::string(grpc_service.substr(0, grpc_service.size())); + const auto thread_local_cluster = clusterManager().getThreadLocalCluster(cluster_name); + if (thread_local_cluster == nullptr) { + // TODO(shikugawa): The reason to keep return status as `BadArgument` is not to force + // callers to change their own codebase with ABI 0.1.x. We should treat this failure as + // `BadArgument` after ABI 0.2.x will have released. 
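The grpcCall()/grpcStream() edits above make the previously runtime-gated fallback unconditional: when the bytes handed over by the Wasm module do not parse as a GrpcService proto, they are interpreted as a bare Envoy cluster name and accepted only if that cluster exists. A minimal standalone sketch of that parse-or-fallback shape follows; ServiceConfig, parseServiceConfig() and the "cfg:" prefix are stand-ins for illustration, not the proxy-wasm ABI or Envoy types.

// Sketch of "parse as config, else treat the bytes as a cluster name".
// Every type here is a stand-in, not an Envoy or proxy-wasm API.
#include <iostream>
#include <optional>
#include <set>
#include <string>
#include <string_view>

struct ServiceConfig {
  std::string cluster_name;
};

// Stand-in for GrpcService::ParseFromArray(): pretend that serialized configs
// are prefixed with "cfg:"; anything else fails to parse.
std::optional<ServiceConfig> parseServiceConfig(std::string_view blob) {
  constexpr std::string_view kPrefix = "cfg:";
  if (blob.substr(0, kPrefix.size()) != kPrefix) {
    return std::nullopt;
  }
  return ServiceConfig{std::string(blob.substr(kPrefix.size()))};
}

enum class Result { Ok, ParseFailure };

Result resolveTarget(std::string_view blob, const std::set<std::string>& known_clusters,
                     ServiceConfig& out) {
  if (auto parsed = parseServiceConfig(blob)) {
    out = *parsed;
    return Result::Ok;
  }
  // Fallback: interpret the raw bytes as a cluster name, but accept it only if
  // the cluster actually exists (mirrors the getThreadLocalCluster() check).
  std::string cluster_name(blob);
  if (known_clusters.find(cluster_name) == known_clusters.end()) {
    return Result::ParseFailure;
  }
  out = ServiceConfig{cluster_name};
  return Result::Ok;
}

int main() {
  const std::set<std::string> clusters = {"auth_service"};
  ServiceConfig cfg;
  std::cout << (resolveTarget("auth_service", clusters, cfg) == Result::Ok) << "\n"; // 1
  std::cout << (resolveTarget("missing", clusters, cfg) == Result::Ok) << "\n";      // 0
}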
return WasmResult::ParseFailure; } + service_proto.mutable_envoy_grpc()->set_cluster_name(cluster_name); } uint32_t token = wasm()->nextGrpcStreamId(); auto& handler = grpc_stream_[token]; diff --git a/source/extensions/common/wasm/wasm.cc b/source/extensions/common/wasm/wasm.cc index 5afbb8a89801..4fc6c6a36caf 100644 --- a/source/extensions/common/wasm/wasm.cc +++ b/source/extensions/common/wasm/wasm.cc @@ -6,6 +6,7 @@ #include "envoy/event/deferred_deletable.h" #include "source/common/common/logger.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/extensions/common/wasm/plugin.h" #include "source/extensions/common/wasm/stats_handler.h" @@ -73,12 +74,12 @@ void Wasm::initializeLifecycle(Server::ServerLifecycleNotifier& lifecycle_notifi } Wasm::Wasm(WasmConfig& config, absl::string_view vm_key, const Stats::ScopeSharedPtr& scope, - Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher) + Api::Api& api, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher) : WasmBase( createWasmVm(config.config().vm_config().runtime()), config.config().vm_config().vm_id(), MessageUtil::anyToBytes(config.config().vm_config().configuration()), toStdStringView(vm_key), config.environmentVariables(), config.allowedCapabilities()), - scope_(scope), stat_name_pool_(scope_->symbolTable()), + scope_(scope), api_(api), stat_name_pool_(scope_->symbolTable()), custom_stat_namespace_(stat_name_pool_.add(CustomStatNamespace)), cluster_manager_(cluster_manager), dispatcher_(dispatcher), time_source_(dispatcher.timeSource()), lifecycle_stats_handler_(LifecycleStatsHandler( @@ -94,7 +95,8 @@ Wasm::Wasm(WasmHandleSharedPtr base_wasm_handle, Event::Dispatcher& dispatcher) "envoy.wasm.runtime.", toAbslStringView(base_wasm_handle->wasm()->wasm_vm()->runtime()))); }), - scope_(getWasm(base_wasm_handle)->scope_), stat_name_pool_(scope_->symbolTable()), + scope_(getWasm(base_wasm_handle)->scope_), api_(getWasm(base_wasm_handle)->api_), + stat_name_pool_(scope_->symbolTable()), custom_stat_namespace_(stat_name_pool_.add(CustomStatNamespace)), cluster_manager_(getWasm(base_wasm_handle)->clusterManager()), dispatcher_(dispatcher), time_source_(dispatcher.timeSource()), @@ -174,8 +176,11 @@ Word resolve_dns(Word dns_address_ptr, Word dns_address_size, Word token_ptr) { root_context->onResolveDns(token, status, std::move(response)); }; if (!context->wasm()->dnsResolver()) { - context->wasm()->dnsResolver() = context->wasm()->dispatcher().createDnsResolver( - {}, envoy::config::core::v3::DnsResolverOptions()); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + Network::DnsResolverFactory& dns_resolver_factory = + Network::createDefaultDnsResolverFactory(typed_dns_resolver_config); + context->wasm()->dnsResolver() = dns_resolver_factory.createDnsResolver( + context->wasm()->dispatcher(), context->wasm()->api(), typed_dns_resolver_config); } context->wasm()->dnsResolver()->resolve(std::string(address.value()), Network::DnsLookupFamily::Auto, callback); @@ -249,12 +254,12 @@ void setTimeOffsetForCodeCacheForTesting(MonotonicTime::duration d) { } static proxy_wasm::WasmHandleFactory -getWasmHandleFactory(WasmConfig& wasm_config, const Stats::ScopeSharedPtr& scope, +getWasmHandleFactory(WasmConfig& wasm_config, const Stats::ScopeSharedPtr& scope, Api::Api& api, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher, Server::ServerLifecycleNotifier& lifecycle_notifier) { - return [&wasm_config, &scope, &cluster_manager, 
&dispatcher, + return [&wasm_config, &scope, &api, &cluster_manager, &dispatcher, &lifecycle_notifier](std::string_view vm_key) -> WasmHandleBaseSharedPtr { - auto wasm = std::make_shared(wasm_config, toAbslStringView(vm_key), scope, + auto wasm = std::make_shared(wasm_config, toAbslStringView(vm_key), scope, api, cluster_manager, dispatcher); wasm->initializeLifecycle(lifecycle_notifier); return std::static_pointer_cast(std::make_shared(std::move(wasm))); @@ -375,8 +380,9 @@ bool createWasm(const PluginSharedPtr& plugin, const Stats::ScopeSharedPtr& scop auto vm_key = proxy_wasm::makeVmKey(vm_config.vm_id(), MessageUtil::anyToBytes(vm_config.configuration()), code); - auto complete_cb = [cb, vm_key, plugin, scope, &cluster_manager, &dispatcher, &lifecycle_notifier, - create_root_context_for_testing, &stats_handler](std::string code) -> bool { + auto complete_cb = [cb, vm_key, plugin, scope, &api, &cluster_manager, &dispatcher, + &lifecycle_notifier, create_root_context_for_testing, + &stats_handler](std::string code) -> bool { if (code.empty()) { cb(nullptr); return false; @@ -385,7 +391,7 @@ bool createWasm(const PluginSharedPtr& plugin, const Stats::ScopeSharedPtr& scop auto config = plugin->wasmConfig(); auto wasm = proxy_wasm::createWasm( vm_key, code, plugin, - getWasmHandleFactory(config, scope, cluster_manager, dispatcher, lifecycle_notifier), + getWasmHandleFactory(config, scope, api, cluster_manager, dispatcher, lifecycle_notifier), getWasmHandleCloneFactory(dispatcher, create_root_context_for_testing), config.config().vm_config().allow_precompiled()); Stats::ScopeSharedPtr create_wasm_stats_scope = stats_handler.lockAndCreateStats(scope); diff --git a/source/extensions/common/wasm/wasm.h b/source/extensions/common/wasm/wasm.h index 3ef156ca77f7..cc0de5d90821 100644 --- a/source/extensions/common/wasm/wasm.h +++ b/source/extensions/common/wasm/wasm.h @@ -41,12 +41,13 @@ class WasmHandle; class Wasm : public WasmBase, Logger::Loggable { public: Wasm(WasmConfig& config, absl::string_view vm_key, const Stats::ScopeSharedPtr& scope, - Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher); + Api::Api& api, Upstream::ClusterManager& cluster_manager, Event::Dispatcher& dispatcher); Wasm(std::shared_ptr other, Event::Dispatcher& dispatcher); ~Wasm() override; Upstream::ClusterManager& clusterManager() const { return cluster_manager_; } Event::Dispatcher& dispatcher() { return dispatcher_; } + Api::Api& api() { return api_; } Context* getRootContext(const std::shared_ptr& plugin, bool allow_closed) { return static_cast(WasmBase::getRootContext(plugin, allow_closed)); } @@ -98,6 +99,7 @@ class Wasm : public WasmBase, Logger::Loggable { proxy_wasm::WasmCallVoid<2> on_stats_update_; Stats::ScopeSharedPtr scope_; + Api::Api& api_; Stats::StatNamePool stat_name_pool_; const Stats::StatName custom_stat_namespace_; Upstream::ClusterManager& cluster_manager_; diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 7e220b0a4a72..f9acb724d964 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -196,6 +196,7 @@ EXTENSIONS = { "envoy.transport_sockets.raw_buffer": "//source/extensions/transport_sockets/raw_buffer:config", "envoy.transport_sockets.tap": "//source/extensions/transport_sockets/tap:config", "envoy.transport_sockets.starttls": "//source/extensions/transport_sockets/starttls:config", + "envoy.transport_sockets.tcp_stats": 
"//source/extensions/transport_sockets/tcp_stats:config", # # Retry host predicates @@ -303,6 +304,16 @@ EXTENSIONS = { # "envoy.rbac.matchers.upstream_ip_port": "//source/extensions/filters/common/rbac/matchers:upstream_ip_port_lib", + + # + # DNS Resolver + # + + # c-ares DNS resolver extension is recommended to be enabled to maintain the legacy DNS resolving behavior. + "envoy.network.dns_resolver.cares": "//source/extensions/network/dns_resolver/cares:config", + + # apple DNS resolver extension is only needed in MacOS build plus one want to use apple library for DNS resolving. + "envoy.network.dns_resolver.apple": "//source/extensions/network/dns_resolver/apple:config", } # These can be changed to ["//visibility:public"], for downstream builds which diff --git a/source/extensions/extensions_metadata.yaml b/source/extensions/extensions_metadata.yaml index d8cec006bd6d..4df99902fad6 100644 --- a/source/extensions/extensions_metadata.yaml +++ b/source/extensions/extensions_metadata.yaml @@ -633,6 +633,12 @@ envoy.transport_sockets.tap: - envoy.transport_sockets.upstream security_posture: requires_trusted_downstream_and_upstream status: alpha +envoy.transport_sockets.tcp_stats: + categories: + - envoy.transport_sockets.downstream + - envoy.transport_sockets.upstream + security_posture: robust_to_untrusted_downstream_and_upstream + status: alpha envoy.transport_sockets.tls: categories: - envoy.transport_sockets.downstream @@ -704,6 +710,16 @@ envoy.key_value.file_based: - envoy.common.key_value security_posture: data_plane_agnostic status: alpha +envoy.network.dns_resolver.cares: + categories: + - envoy.network.dns_resolver + security_posture: robust_to_untrusted_downstream_and_upstream + status: stable +envoy.network.dns_resolver.apple: + categories: + - envoy.network.dns_resolver + security_posture: robust_to_untrusted_downstream_and_upstream + status: stable envoy.rbac.matchers.upstream_ip_port: categories: - envoy.rbac.matchers diff --git a/source/extensions/filters/common/expr/BUILD b/source/extensions/filters/common/expr/BUILD index dc146b13b9a6..cecb210da8a6 100644 --- a/source/extensions/filters/common/expr/BUILD +++ b/source/extensions/filters/common/expr/BUILD @@ -16,6 +16,7 @@ envoy_cc_library( ":context_lib", "//source/common/http:utility_lib", "//source/common/protobuf", + "@com_google_cel_cpp//eval/public:activation", "@com_google_cel_cpp//eval/public:builtin_func_registrar", "@com_google_cel_cpp//eval/public:cel_expr_builder_factory", "@com_google_cel_cpp//eval/public:cel_expression", diff --git a/source/extensions/filters/common/expr/evaluator.h b/source/extensions/filters/common/expr/evaluator.h index 2e00d620f9e8..36926c218132 100644 --- a/source/extensions/filters/common/expr/evaluator.h +++ b/source/extensions/filters/common/expr/evaluator.h @@ -6,6 +6,7 @@ #include "source/common/protobuf/protobuf.h" #include "source/extensions/filters/common/expr/context.h" +#include "eval/public/activation.h" #include "eval/public/cel_expression.h" #include "eval/public/cel_value.h" diff --git a/source/extensions/filters/common/ext_authz/ext_authz.h b/source/extensions/filters/common/ext_authz/ext_authz.h index 96545dd83a95..ee30b3b70fc9 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz.h +++ b/source/extensions/filters/common/ext_authz/ext_authz.h @@ -12,6 +12,7 @@ #include "envoy/tracing/http_tracer.h" #include "source/common/http/headers.h" +#include "source/common/http/utility.h" #include "source/common/singleton/const_singleton.h" namespace Envoy { @@ -32,6 +33,17 
@@ struct TracingConstantValues { using TracingConstants = ConstSingleton; +/** + * Possible constant response code details values for a check call. + */ +struct ResponseCodeDetailsValues { + // The ext_authz filter denied the downstream request/connection. + const std::string AuthzDenied = "ext_authz_denied"; + // The ext_authz filter encountered a failure, and was configured to fail-closed. + const std::string AuthzError = "ext_authz_error"; +}; +using ResponseCodeDetails = ConstSingleton; + /** * Constant auth related HTTP headers. All lower case. This group of headers can * contain prefix override headers. @@ -84,6 +96,11 @@ struct Response { // A set of HTTP headers consumed by the authorization server, will be removed // from the request to the upstream server. std::vector headers_to_remove; + // A set of query string parameters to be set (possibly overwritten) on the + // request to the upstream server. + Http::Utility::QueryParamsVector query_parameters_to_set; + // A set of query string parameters to remove from the request to the upstream server. + std::vector query_parameters_to_remove; // Optional http body used only on denied response. std::string body; // Optional http status used only on denied response. diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index dc730cdcb6dc..eda59d151500 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -57,7 +57,17 @@ void GrpcClientImpl::onSuccess(std::unique_ptrheaders_to_remove.push_back(Http::LowerCaseString(header)); } } - + if (response->ok_response().query_parameters_to_set_size() > 0) { + for (const auto& query_parameter : response->ok_response().query_parameters_to_set()) { + authz_response->query_parameters_to_set.push_back( + std::pair(query_parameter.key(), query_parameter.value())); + } + } + if (response->ok_response().query_parameters_to_remove_size() > 0) { + for (const auto& key : response->ok_response().query_parameters_to_remove()) { + authz_response->query_parameters_to_remove.push_back(key); + } + } // These two vectors hold header overrides of encoded response headers. 
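The query_parameters_to_set / query_parameters_to_remove fields introduced above are later applied to the request's :path header by the HTTP filter (see the ext_authz.cc hunks further down, which use Http::Utility::parseQueryString() and Http::Utility::replaceQueryString()). The standalone C++17 sketch below shows the same set/remove rewrite on a plain path string; parseQuery() and rebuildPath() are simplified stand-ins, not Envoy's helpers (they ignore percent-encoding and repeated keys).

// Sketch: apply "set" and "remove" query-parameter mutations to a path.
// Helper functions here are illustrative stand-ins, not Envoy utilities.
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

using QueryParams = std::map<std::string, std::string>;

QueryParams parseQuery(const std::string& path) {
  QueryParams params;
  const auto pos = path.find('?');
  if (pos == std::string::npos) {
    return params;
  }
  const std::string query = path.substr(pos + 1);
  size_t start = 0;
  while (start < query.size()) {
    size_t end = query.find('&', start);
    if (end == std::string::npos) {
      end = query.size();
    }
    const std::string pair = query.substr(start, end - start);
    const auto eq = pair.find('=');
    if (eq == std::string::npos) {
      params[pair] = "";
    } else {
      params[pair.substr(0, eq)] = pair.substr(eq + 1);
    }
    start = end + 1;
  }
  return params;
}

std::string rebuildPath(const std::string& path, const QueryParams& params) {
  const std::string base = path.substr(0, path.find('?'));
  if (params.empty()) {
    return base;
  }
  std::string out = base + "?";
  bool first = true;
  for (const auto& [key, value] : params) {
    if (!first) {
      out += "&";
    }
    first = false;
    out += value.empty() ? key : key + "=" + value;
  }
  return out;
}

int main() {
  const std::string path = "/search?q=old&debug=1";
  const std::vector<std::pair<std::string, std::string>> to_set = {{"q", "new"}, {"user", "42"}};
  const std::vector<std::string> to_remove = {"debug"};

  QueryParams params = parseQuery(path);
  for (const auto& [key, value] : to_set) {
    params[key] = value; // set, possibly overwriting an existing value
  }
  for (const auto& key : to_remove) {
    params.erase(key);
  }
  std::cout << rebuildPath(path, params) << "\n"; // /search?q=new&user=42
}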
if (response->ok_response().response_headers_to_add_size() > 0) { for (const auto& header : response->ok_response().response_headers_to_add()) { diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index 3cf233d66164..6ea634396894 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -38,6 +38,8 @@ const Response& errorResponse() { Http::HeaderVector{}, Http::HeaderVector{}, {{}}, + Http::Utility::QueryParamsVector{}, + {}, EMPTY_STRING, Http::Code::Forbidden, ProtobufWkt::Struct{}}); @@ -350,9 +352,17 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { config_->upstreamHeaderToAppendMatchers(), config_->clientHeaderOnSuccessMatchers(), config_->dynamicMetadataMatchers(), - Response{CheckStatus::OK, Http::HeaderVector{}, Http::HeaderVector{}, - Http::HeaderVector{}, Http::HeaderVector{}, Http::HeaderVector{}, - std::move(headers_to_remove), EMPTY_STRING, Http::Code::OK, + Response{CheckStatus::OK, + Http::HeaderVector{}, + Http::HeaderVector{}, + Http::HeaderVector{}, + Http::HeaderVector{}, + Http::HeaderVector{}, + std::move(headers_to_remove), + Http::Utility::QueryParamsVector{}, + {}, + EMPTY_STRING, + Http::Code::OK, ProtobufWkt::Struct{}}}; return std::move(ok.response_); } @@ -370,6 +380,8 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { Http::HeaderVector{}, Http::HeaderVector{}, {{}}, + Http::Utility::QueryParamsVector{}, + {}, message->bodyAsString(), static_cast(status_code), ProtobufWkt::Struct{}}}; diff --git a/source/extensions/filters/common/original_src/original_src_socket_option.h b/source/extensions/filters/common/original_src/original_src_socket_option.h index 8e86dc87d719..8e1ffc16b020 100644 --- a/source/extensions/filters/common/original_src/original_src_socket_option.h +++ b/source/extensions/filters/common/original_src/original_src_socket_option.h @@ -36,6 +36,7 @@ class OriginalSrcSocketOption : public Network::Socket::Option { absl::optional
getOptionDetails(const Network::Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const override; + bool isSupported() const override { return true; } private: Network::Address::InstanceConstSharedPtr src_address_; diff --git a/source/extensions/filters/http/admission_control/BUILD b/source/extensions/filters/http/admission_control/BUILD index 5192e11d74f9..a687266ec291 100644 --- a/source/extensions/filters/http/admission_control/BUILD +++ b/source/extensions/filters/http/admission_control/BUILD @@ -31,7 +31,7 @@ envoy_cc_library( "//source/common/runtime:runtime_lib", "//source/extensions/filters/http/admission_control/evaluators:response_evaluator_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", - "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3:pkg_cc_proto", ], ) @@ -45,6 +45,6 @@ envoy_cc_extension( "//source/extensions/filters/http/admission_control:admission_control_filter_lib", "//source/extensions/filters/http/admission_control/evaluators:response_evaluator_lib", "//source/extensions/filters/http/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/admission_control/admission_control.cc b/source/extensions/filters/http/admission_control/admission_control.cc index 3ceee04889ec..dd9a3579cd9a 100644 --- a/source/extensions/filters/http/admission_control/admission_control.cc +++ b/source/extensions/filters/http/admission_control/admission_control.cc @@ -6,7 +6,7 @@ #include #include "envoy/common/random_generator.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" #include "envoy/grpc/status.h" #include "envoy/http/codes.h" #include "envoy/runtime/runtime.h" diff --git a/source/extensions/filters/http/admission_control/admission_control.h b/source/extensions/filters/http/admission_control/admission_control.h index 4d921eddbc10..22f976768316 100644 --- a/source/extensions/filters/http/admission_control/admission_control.h +++ b/source/extensions/filters/http/admission_control/admission_control.h @@ -6,7 +6,7 @@ #include "envoy/common/random_generator.h" #include "envoy/common/time.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" #include "envoy/http/codes.h" #include "envoy/http/filter.h" #include "envoy/runtime/runtime.h" @@ -45,7 +45,7 @@ struct AdmissionControlStats { }; using AdmissionControlProto = - envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl; + envoy::extensions::filters::http::admission_control::v3::AdmissionControl; /** * Configuration for the admission control filter. 
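The admission_control moves above (and the bandwidth_limit, cache and cdn_loop moves later in this diff) promote the config protos from a v3alpha package to v3, which also changes the type URL users put in typed_config. The small sketch below illustrates only the generic `type.googleapis.com/<full message name>` convention involved; the registry map and factory name in it are hypothetical, and only the package names visible in this diff are assumed.

// Sketch: an Any-style type URL is "type.googleapis.com/" + the full proto
// message name, so moving a config proto from ...v3alpha to ...v3 changes the
// URL that selects the extension. The registry below is illustrative only.
#include <iostream>
#include <string>
#include <unordered_map>

std::string typeUrl(const std::string& full_message_name) {
  return "type.googleapis.com/" + full_message_name;
}

std::string typeUrlToDescriptorFullName(const std::string& type_url) {
  const auto pos = type_url.rfind('/');
  return pos == std::string::npos ? type_url : type_url.substr(pos + 1);
}

int main() {
  // Old and new package names for the admission control filter config.
  const std::string old_name =
      "envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl";
  const std::string new_name =
      "envoy.extensions.filters.http.admission_control.v3.AdmissionControl";

  // Hypothetical factory lookup keyed by descriptor full name.
  const std::unordered_map<std::string, std::string> factories = {
      {new_name, "envoy.filters.http.admission_control"}};

  for (const auto& name : {old_name, new_name}) {
    const std::string url = typeUrl(name);
    const auto it = factories.find(typeUrlToDescriptorFullName(url));
    std::cout << url << " -> "
              << (it == factories.end() ? "no factory registered" : it->second) << "\n";
  }
}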
diff --git a/source/extensions/filters/http/admission_control/config.cc b/source/extensions/filters/http/admission_control/config.cc index e28e0445b643..1f08de2f0039 100644 --- a/source/extensions/filters/http/admission_control/config.cc +++ b/source/extensions/filters/http/admission_control/config.cc @@ -1,8 +1,8 @@ #include "source/extensions/filters/http/admission_control/config.h" #include "envoy/common/exception.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.validate.h" #include "envoy/registry/registry.h" #include "source/common/common/enum_to_int.h" @@ -18,7 +18,7 @@ namespace AdmissionControl { static constexpr std::chrono::seconds defaultSamplingWindow{30}; Http::FilterFactoryCb AdmissionControlFilterFactory::createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl& config, + const envoy::extensions::filters::http::admission_control::v3::AdmissionControl& config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { if (config.has_sr_threshold() && config.sr_threshold().default_value().value() < 1.0) { diff --git a/source/extensions/filters/http/admission_control/config.h b/source/extensions/filters/http/admission_control/config.h index e25289f6e76b..ab8ecb79f5a1 100644 --- a/source/extensions/filters/http/admission_control/config.h +++ b/source/extensions/filters/http/admission_control/config.h @@ -1,7 +1,7 @@ #pragma once -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.validate.h" #include "source/extensions/filters/http/common/factory_base.h" @@ -15,13 +15,12 @@ namespace AdmissionControl { */ class AdmissionControlFilterFactory : public Common::FactoryBase< - envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl> { + envoy::extensions::filters::http::admission_control::v3::AdmissionControl> { public: AdmissionControlFilterFactory() : FactoryBase("envoy.filters.http.admission_control") {} Http::FilterFactoryCb createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl& - proto_config, + const envoy::extensions::filters::http::admission_control::v3::AdmissionControl& proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; }; diff --git a/source/extensions/filters/http/admission_control/evaluators/BUILD b/source/extensions/filters/http/admission_control/evaluators/BUILD index cddd0f2f0a43..450138656e27 100644 --- a/source/extensions/filters/http/admission_control/evaluators/BUILD +++ b/source/extensions/filters/http/admission_control/evaluators/BUILD @@ -21,6 +21,6 @@ envoy_cc_library( deps = [ "//envoy/grpc:status", "//source/common/common:enum_to_int", - "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3:pkg_cc_proto", ], ) diff --git 
a/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h b/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h index 551975ead881..f55c2fce2464 100644 --- a/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h +++ b/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h @@ -2,8 +2,8 @@ #include -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.validate.h" #include "source/extensions/filters/http/admission_control/evaluators/response_evaluator.h" @@ -14,8 +14,8 @@ namespace AdmissionControl { class SuccessCriteriaEvaluator : public ResponseEvaluator { public: - using SuccessCriteria = envoy::extensions::filters::http::admission_control::v3alpha:: - AdmissionControl::SuccessCriteria; + using SuccessCriteria = + envoy::extensions::filters::http::admission_control::v3::AdmissionControl::SuccessCriteria; SuccessCriteriaEvaluator(const SuccessCriteria& evaluation_criteria); // ResponseEvaluator bool isHttpSuccess(uint64_t code) const override; diff --git a/source/extensions/filters/http/alternate_protocols_cache/config.cc b/source/extensions/filters/http/alternate_protocols_cache/config.cc index 295ea6453029..147658c79a14 100644 --- a/source/extensions/filters/http/alternate_protocols_cache/config.cc +++ b/source/extensions/filters/http/alternate_protocols_cache/config.cc @@ -20,8 +20,10 @@ Http::FilterFactoryCb AlternateProtocolsCacheFilterFactory::createFilterFactoryF FilterConfigSharedPtr filter_config( std::make_shared(proto_config, alternate_protocol_cache_manager_factory, context.mainThreadDispatcher().timeSource())); + return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { - callbacks.addStreamEncoderFilter(std::make_shared(filter_config)); + callbacks.addStreamEncoderFilter( + std::make_shared(filter_config, callbacks.dispatcher())); }; } diff --git a/source/extensions/filters/http/alternate_protocols_cache/filter.cc b/source/extensions/filters/http/alternate_protocols_cache/filter.cc index c1f8e3c9eadd..2d87b6fbef7a 100644 --- a/source/extensions/filters/http/alternate_protocols_cache/filter.cc +++ b/source/extensions/filters/http/alternate_protocols_cache/filter.cc @@ -24,17 +24,18 @@ FilterConfig::FilterConfig( : alternate_protocol_cache_manager_(alternate_protocol_cache_manager_factory.get()), proto_config_(proto_config), time_source_(time_source) {} -Http::AlternateProtocolsCacheSharedPtr FilterConfig::getAlternateProtocolCache() { +Http::AlternateProtocolsCacheSharedPtr +FilterConfig::getAlternateProtocolCache(Event::Dispatcher& dispatcher) { return proto_config_.has_alternate_protocols_cache_options() ? 
alternate_protocol_cache_manager_->getCache( - proto_config_.alternate_protocols_cache_options()) + proto_config_.alternate_protocols_cache_options(), dispatcher) : nullptr; } void Filter::onDestroy() {} -Filter::Filter(const FilterConfigSharedPtr& config) - : cache_(config->getAlternateProtocolCache()), time_source_(config->timeSource()) {} +Filter::Filter(const FilterConfigSharedPtr& config, Event::Dispatcher& dispatcher) + : cache_(config->getAlternateProtocolCache(dispatcher)), time_source_(config->timeSource()) {} Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers, bool) { if (!cache_) { diff --git a/source/extensions/filters/http/alternate_protocols_cache/filter.h b/source/extensions/filters/http/alternate_protocols_cache/filter.h index f15bcee161d2..3135afe4bd9b 100644 --- a/source/extensions/filters/http/alternate_protocols_cache/filter.h +++ b/source/extensions/filters/http/alternate_protocols_cache/filter.h @@ -23,7 +23,7 @@ class FilterConfig { TimeSource& time_source); // Returns the alternate protocols cache for the current thread. - Http::AlternateProtocolsCacheSharedPtr getAlternateProtocolCache(); + Http::AlternateProtocolsCacheSharedPtr getAlternateProtocolCache(Event::Dispatcher& dispatcher); TimeSource& timeSource() { return time_source_; } @@ -42,7 +42,7 @@ using FilterConfigSharedPtr = std::shared_ptr; class Filter : public Http::PassThroughEncoderFilter, Logger::Loggable { public: - explicit Filter(const FilterConfigSharedPtr& config); + Filter(const FilterConfigSharedPtr& config, Event::Dispatcher& thread_local_dispatcher); // Http::PassThroughEncoderFilter Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& header, diff --git a/source/extensions/filters/http/bandwidth_limit/BUILD b/source/extensions/filters/http/bandwidth_limit/BUILD index 3f3b5e3f7273..b2c1f4ccc49a 100644 --- a/source/extensions/filters/http/bandwidth_limit/BUILD +++ b/source/extensions/filters/http/bandwidth_limit/BUILD @@ -28,7 +28,7 @@ envoy_cc_library( "//source/common/runtime:runtime_lib", "//source/common/stats:timespan_lib", "//source/extensions/filters/http/common:stream_rate_limiter_lib", - "@envoy_api//envoy/extensions/filters/http/bandwidth_limit/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/bandwidth_limit/v3:pkg_cc_proto", ], ) @@ -41,6 +41,6 @@ envoy_cc_extension( "//envoy/http:filter_interface", "//source/common/protobuf:utility_lib", "//source/extensions/filters/http/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/http/bandwidth_limit/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/bandwidth_limit/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc index 6d831d6de890..7da418578629 100644 --- a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc +++ b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc @@ -8,7 +8,7 @@ #include "source/common/http/utility.h" #include "source/common/stats/timespan_impl.h" -using envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit; +using envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit; using Envoy::Extensions::HttpFilters::Common::StreamRateLimiter; namespace Envoy { diff --git a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h index f5bac4642642..876ef673e39f 100644 --- 
a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h +++ b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h @@ -5,7 +5,7 @@ #include #include -#include "envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.pb.h" +#include "envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.pb.h" #include "envoy/http/filter.h" #include "envoy/runtime/runtime.h" #include "envoy/stats/scope.h" @@ -55,12 +55,11 @@ struct BandwidthLimitStats { class FilterConfig : public ::Envoy::Router::RouteSpecificFilterConfig { public: using EnableMode = - envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit_EnableMode; + envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit_EnableMode; - FilterConfig( - const envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit& config, - Stats::Scope& scope, Runtime::Loader& runtime, TimeSource& time_source, - bool per_route = false); + FilterConfig(const envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit& config, + Stats::Scope& scope, Runtime::Loader& runtime, TimeSource& time_source, + bool per_route = false); ~FilterConfig() override = default; Runtime::Loader& runtime() { return runtime_; } BandwidthLimitStats& stats() const { return stats_; } diff --git a/source/extensions/filters/http/bandwidth_limit/config.cc b/source/extensions/filters/http/bandwidth_limit/config.cc index 95b62c134175..40f59d1da225 100644 --- a/source/extensions/filters/http/bandwidth_limit/config.cc +++ b/source/extensions/filters/http/bandwidth_limit/config.cc @@ -13,7 +13,7 @@ namespace HttpFilters { namespace BandwidthLimitFilter { Http::FilterFactoryCb BandwidthLimitFilterConfig::createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit& proto_config, + const envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit& proto_config, const std::string&, Server::Configuration::FactoryContext& context) { FilterConfigSharedPtr filter_config = std::make_shared( proto_config, context.scope(), context.runtime(), context.timeSource()); @@ -24,7 +24,7 @@ Http::FilterFactoryCb BandwidthLimitFilterConfig::createFilterFactoryFromProtoTy Router::RouteSpecificFilterConfigConstSharedPtr BandwidthLimitFilterConfig::createRouteSpecificFilterConfigTyped( - const envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit& proto_config, + const envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit& proto_config, Server::Configuration::ServerFactoryContext& context, ProtobufMessage::ValidationVisitor&) { return std::make_shared(proto_config, context.scope(), context.runtime(), context.timeSource(), true); diff --git a/source/extensions/filters/http/bandwidth_limit/config.h b/source/extensions/filters/http/bandwidth_limit/config.h index b29a3ac2320b..e167d3136e2a 100644 --- a/source/extensions/filters/http/bandwidth_limit/config.h +++ b/source/extensions/filters/http/bandwidth_limit/config.h @@ -1,7 +1,7 @@ #pragma once -#include "envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.pb.h" -#include "envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.pb.validate.h" +#include "envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.pb.h" +#include "envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.pb.validate.h" #include "source/extensions/filters/http/common/factory_base.h" @@ -15,19 +15,17 @@ namespace BandwidthLimitFilter { */ class 
BandwidthLimitFilterConfig : public Common::FactoryBase< - envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit> { + envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit> { public: BandwidthLimitFilterConfig() : FactoryBase("envoy.filters.http.bandwidth_limit") {} private: Http::FilterFactoryCb createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit& - proto_config, + const envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit& proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped( - const envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit& - proto_config, + const envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit& proto_config, Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) override; }; diff --git a/source/extensions/filters/http/cache/BUILD b/source/extensions/filters/http/cache/BUILD index 53219e59f389..5c31e78ff7cf 100644 --- a/source/extensions/filters/http/cache/BUILD +++ b/source/extensions/filters/http/cache/BUILD @@ -28,7 +28,7 @@ envoy_cc_library( "//source/common/http:headers_lib", "//source/common/http:utility_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", - "@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/cache/v3:pkg_cc_proto", ], ) @@ -77,7 +77,7 @@ envoy_cc_library( "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/protobuf:utility_lib", - "@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/cache/v3:pkg_cc_proto", ], ) @@ -96,7 +96,7 @@ envoy_cc_library( "//source/common/http:headers_lib", "//source/common/protobuf", "@com_google_absl//absl/container:btree", - "@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/cache/v3:pkg_cc_proto", ], ) @@ -116,6 +116,6 @@ envoy_cc_extension( deps = [ ":cache_filter_lib", "//source/extensions/filters/http/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/cache/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/cache/cache_filter.cc b/source/extensions/filters/http/cache/cache_filter.cc index 768fd0ae5b99..da0b86eb05ee 100644 --- a/source/extensions/filters/http/cache/cache_filter.cc +++ b/source/extensions/filters/http/cache/cache_filter.cc @@ -29,9 +29,9 @@ struct CacheResponseCodeDetailValues { using CacheResponseCodeDetails = ConstSingleton; -CacheFilter::CacheFilter( - const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config, const std::string&, - Stats::Scope&, TimeSource& time_source, HttpCache& http_cache) +CacheFilter::CacheFilter(const envoy::extensions::filters::http::cache::v3::CacheConfig& config, + const std::string&, Stats::Scope&, TimeSource& time_source, + HttpCache& http_cache) : time_source_(time_source), cache_(http_cache), vary_allow_list_(config.allowed_vary_headers()) {} diff --git a/source/extensions/filters/http/cache/cache_filter.h b/source/extensions/filters/http/cache/cache_filter.h index 418a770b9e2c..77e098a99d13 100644 --- a/source/extensions/filters/http/cache/cache_filter.h +++ 
b/source/extensions/filters/http/cache/cache_filter.h @@ -5,7 +5,7 @@ #include #include -#include "envoy/extensions/filters/http/cache/v3alpha/cache.pb.h" +#include "envoy/extensions/filters/http/cache/v3/cache.pb.h" #include "source/common/common/logger.h" #include "source/extensions/filters/http/cache/cache_headers_utils.h" @@ -24,7 +24,7 @@ class CacheFilter : public Http::PassThroughFilter, public Logger::Loggable, public std::enable_shared_from_this { public: - CacheFilter(const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config, + CacheFilter(const envoy::extensions::filters::http::cache::v3::CacheConfig& config, const std::string& stats_prefix, Stats::Scope& scope, TimeSource& time_source, HttpCache& http_cache); // Http::StreamFilterBase diff --git a/source/extensions/filters/http/cache/cache_headers_utils.h b/source/extensions/filters/http/cache/cache_headers_utils.h index 06737d7a2b2c..c5a219f0fedd 100644 --- a/source/extensions/filters/http/cache/cache_headers_utils.h +++ b/source/extensions/filters/http/cache/cache_headers_utils.h @@ -1,7 +1,7 @@ #pragma once #include "envoy/common/time.h" -#include "envoy/extensions/filters/http/cache/v3alpha/cache.pb.h" +#include "envoy/extensions/filters/http/cache/v3/cache.pb.h" #include "envoy/http/header_map.h" #include "source/common/common/matchers.h" diff --git a/source/extensions/filters/http/cache/config.cc b/source/extensions/filters/http/cache/config.cc index f1d713e8d1c1..33f719e47778 100644 --- a/source/extensions/filters/http/cache/config.cc +++ b/source/extensions/filters/http/cache/config.cc @@ -8,7 +8,7 @@ namespace HttpFilters { namespace Cache { Http::FilterFactoryCb CacheFilterFactory::createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config, + const envoy::extensions::filters::http::cache::v3::CacheConfig& config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { const std::string type{TypeUtil::typeUrlToDescriptorFullName(config.typed_config().type_url())}; HttpCacheFactory* const http_cache_factory = diff --git a/source/extensions/filters/http/cache/config.h b/source/extensions/filters/http/cache/config.h index 341a054344d5..2f87062b86a4 100644 --- a/source/extensions/filters/http/cache/config.h +++ b/source/extensions/filters/http/cache/config.h @@ -1,7 +1,7 @@ #pragma once -#include "envoy/extensions/filters/http/cache/v3alpha/cache.pb.h" -#include "envoy/extensions/filters/http/cache/v3alpha/cache.pb.validate.h" +#include "envoy/extensions/filters/http/cache/v3/cache.pb.h" +#include "envoy/extensions/filters/http/cache/v3/cache.pb.validate.h" #include "source/extensions/filters/http/common/factory_base.h" @@ -11,13 +11,13 @@ namespace HttpFilters { namespace Cache { class CacheFilterFactory - : public Common::FactoryBase { + : public Common::FactoryBase { public: CacheFilterFactory() : FactoryBase("envoy.filters.http.cache") {} private: Http::FilterFactoryCb createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config, + const envoy::extensions::filters::http::cache::v3::CacheConfig& config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; }; diff --git a/source/extensions/filters/http/cache/http_cache.h b/source/extensions/filters/http/cache/http_cache.h index 7438646649dd..47ab926e02bd 100644 --- a/source/extensions/filters/http/cache/http_cache.h +++ b/source/extensions/filters/http/cache/http_cache.h @@ -7,7 +7,7 
@@ #include "envoy/buffer/buffer.h" #include "envoy/common/time.h" #include "envoy/config/typed_config.h" -#include "envoy/extensions/filters/http/cache/v3alpha/cache.pb.h" +#include "envoy/extensions/filters/http/cache/v3/cache.pb.h" #include "envoy/http/header_map.h" #include "source/common/common/assert.h" @@ -369,7 +369,7 @@ class HttpCacheFactory : public Config::TypedFactory { // Returns an HttpCache that will remain valid indefinitely (at least as long // as the calling CacheFilter). virtual HttpCache& - getCache(const envoy::extensions::filters::http::cache::v3alpha::CacheConfig& config) PURE; + getCache(const envoy::extensions::filters::http::cache::v3::CacheConfig& config) PURE; ~HttpCacheFactory() override = default; private: diff --git a/source/extensions/filters/http/cache/simple_http_cache/BUILD b/source/extensions/filters/http/cache/simple_http_cache/BUILD index 5b25659f64b0..f218cb4552d3 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/BUILD +++ b/source/extensions/filters/http/cache/simple_http_cache/BUILD @@ -24,6 +24,6 @@ envoy_cc_extension( "//source/common/protobuf", "//source/extensions/filters/http/cache:http_cache_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", - "@envoy_api//envoy/extensions/cache/simple_http_cache/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/cache/simple_http_cache/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc index aea9185249a8..0564e854754f 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc +++ b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc @@ -1,6 +1,6 @@ #include "source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h" -#include "envoy/extensions/cache/simple_http_cache/v3alpha/config.pb.h" +#include "envoy/extensions/cache/simple_http_cache/v3/config.pb.h" #include "envoy/registry/registry.h" #include "source/common/buffer/buffer_impl.h" @@ -291,11 +291,10 @@ class SimpleHttpCacheFactory : public HttpCacheFactory { // From TypedFactory ProtobufTypes::MessagePtr createEmptyConfigProto() override { return std::make_unique< - envoy::extensions::cache::simple_http_cache::v3alpha::SimpleHttpCacheConfig>(); + envoy::extensions::cache::simple_http_cache::v3::SimpleHttpCacheConfig>(); } // From HttpCacheFactory - HttpCache& - getCache(const envoy::extensions::filters::http::cache::v3alpha::CacheConfig&) override { + HttpCache& getCache(const envoy::extensions::filters::http::cache::v3::CacheConfig&) override { return cache_; } diff --git a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h index 742415947e36..883143436d7b 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h +++ b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h @@ -8,7 +8,7 @@ #include "absl/synchronization/mutex.h" // included to make code_format happy -#include "envoy/extensions/cache/simple_http_cache/v3alpha/config.pb.h" +#include "envoy/extensions/cache/simple_http_cache/v3/config.pb.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/http/cdn_loop/BUILD b/source/extensions/filters/http/cdn_loop/BUILD index 1d7d680bf80b..6ba442abac79 100644 --- a/source/extensions/filters/http/cdn_loop/BUILD +++ 
b/source/extensions/filters/http/cdn_loop/BUILD @@ -53,6 +53,6 @@ envoy_cc_extension( "//envoy/server:factory_context_interface", "//source/common/common:statusor_lib", "//source/extensions/filters/http/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/cdn_loop/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/cdn_loop/config.cc b/source/extensions/filters/http/cdn_loop/config.cc index 93c3d155a8ad..2b910a29a89f 100644 --- a/source/extensions/filters/http/cdn_loop/config.cc +++ b/source/extensions/filters/http/cdn_loop/config.cc @@ -3,7 +3,7 @@ #include #include "envoy/common/exception.h" -#include "envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h" +#include "envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.pb.h" #include "envoy/http/filter.h" #include "envoy/registry/registry.h" #include "envoy/server/factory_context.h" @@ -22,7 +22,7 @@ using ::Envoy::Extensions::HttpFilters::CdnLoop::Parser::ParseContext; using ::Envoy::Extensions::HttpFilters::CdnLoop::Parser::ParsedCdnId; Http::FilterFactoryCb CdnLoopFilterFactory::createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig& config, + const envoy::extensions::filters::http::cdn_loop::v3::CdnLoopConfig& config, const std::string& /*stats_prefix*/, Server::Configuration::FactoryContext& /*context*/) { StatusOr context = parseCdnId(ParseContext(config.cdn_id())); if (!context.ok() || !context->context().atEnd()) { diff --git a/source/extensions/filters/http/cdn_loop/config.h b/source/extensions/filters/http/cdn_loop/config.h index 5d9fea5bd1fe..15b6f7fa4a47 100644 --- a/source/extensions/filters/http/cdn_loop/config.h +++ b/source/extensions/filters/http/cdn_loop/config.h @@ -2,8 +2,8 @@ #include -#include "envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h" -#include "envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.validate.h" +#include "envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.pb.h" +#include "envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.pb.validate.h" #include "envoy/http/filter.h" #include "envoy/server/factory_context.h" @@ -15,14 +15,13 @@ namespace HttpFilters { namespace CdnLoop { class CdnLoopFilterFactory - : public Common::FactoryBase< - envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig> { + : public Common::FactoryBase { public: CdnLoopFilterFactory() : FactoryBase("envoy.filters.http.cdn_loop") {} private: Http::FilterFactoryCb createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig& config, + const envoy::extensions::filters::http::cdn_loop::v3::CdnLoopConfig& config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; }; diff --git a/source/extensions/filters/http/composite/config.cc b/source/extensions/filters/http/composite/config.cc index 4d399813a264..5702472fa016 100644 --- a/source/extensions/filters/http/composite/config.cc +++ b/source/extensions/filters/http/composite/config.cc @@ -19,7 +19,7 @@ Http::FilterFactoryCb CompositeFilterFactory::createFilterFactoryFromProtoTyped( ALL_COMPOSITE_FILTER_STATS(POOL_COUNTER_PREFIX(factory_context.scope(), prefix))}); return [stats](Http::FilterChainFactoryCallbacks& callbacks) -> void { - auto filter = std::make_shared(*stats); + auto filter = std::make_shared(*stats, callbacks.dispatcher()); callbacks.addStreamFilter(filter); 
callbacks.addAccessLogHandler(filter); }; diff --git a/source/extensions/filters/http/composite/factory_wrapper.h b/source/extensions/filters/http/composite/factory_wrapper.h index c436f52165e2..ec8d98e8a633 100644 --- a/source/extensions/filters/http/composite/factory_wrapper.h +++ b/source/extensions/filters/http/composite/factory_wrapper.h @@ -14,7 +14,8 @@ class Filter; // the lifetime of this wrapper by appending them to the errors_ field. This should be checked // afterwards to determine whether invalid callbacks were called. struct FactoryCallbacksWrapper : public Http::FilterChainFactoryCallbacks { - explicit FactoryCallbacksWrapper(Filter& filter) : filter_(filter) {} + FactoryCallbacksWrapper(Filter& filter, Event::Dispatcher& dispatcher) + : filter_(filter), dispatcher_(dispatcher) {} void addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr filter) override; void addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr, @@ -26,8 +27,10 @@ struct FactoryCallbacksWrapper : public Http::FilterChainFactoryCallbacks { void addStreamFilter(Http::StreamFilterSharedPtr, Matcher::MatchTreeSharedPtr) override; void addAccessLogHandler(AccessLog::InstanceSharedPtr) override; + Event::Dispatcher& dispatcher() override { return dispatcher_; } Filter& filter_; + Event::Dispatcher& dispatcher_; using FilterAlternative = absl::variant(); - FactoryCallbacksWrapper wrapper(*this); + FactoryCallbacksWrapper wrapper(*this, dispatcher_); composite_action.createFilters(wrapper); if (!wrapper.errors_.empty()) { diff --git a/source/extensions/filters/http/composite/filter.h b/source/extensions/filters/http/composite/filter.h index 2df7b4bc952a..a225653a0e6f 100644 --- a/source/extensions/filters/http/composite/filter.h +++ b/source/extensions/filters/http/composite/filter.h @@ -29,7 +29,8 @@ class Filter : public Http::StreamFilter, public AccessLog::Instance, Logger::Loggable { public: - explicit Filter(FilterStats& stats) : decoded_headers_(false), stats_(stats) {} + Filter(FilterStats& stats, Event::Dispatcher& dispatcher) + : dispatcher_(dispatcher), decoded_headers_(false), stats_(stats) {} // Http::StreamDecoderFilter Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, @@ -78,6 +79,7 @@ class Filter : public Http::StreamFilter, private: friend FactoryCallbacksWrapper; + Event::Dispatcher& dispatcher_; // Use these to track whether we are allowed to insert a specific kind of filter. These mainly // serve to surface an easier to understand error, as attempting to insert a filter at a later // time will result in various FM assertions firing. 
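Several hunks above follow one pattern: the worker thread's Event::Dispatcher is captured when the filter (or its FactoryCallbacksWrapper) is constructed and re-exposed through a dispatcher() accessor, so filters created later through the wrapper can reach the right event loop. The stripped-down sketch below shows that wrapper shape with stand-in Dispatcher/FilterCallbacks types; none of these are Envoy interfaces.

// Sketch: forward a per-thread dispatcher through a callbacks wrapper, as the
// composite filter change above does. All types are stand-ins.
#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

class Dispatcher {
public:
  explicit Dispatcher(std::string name) : name_(std::move(name)) {}
  void post(std::function<void()> cb) { cb(); } // run inline for the sketch
  const std::string& name() const { return name_; }

private:
  std::string name_;
};

// The interface delegated filter factories program against.
class FilterCallbacks {
public:
  virtual ~FilterCallbacks() = default;
  virtual void addFilter(std::string filter_name) = 0;
  virtual Dispatcher& dispatcher() = 0;
};

// Wrapper that records which filters were added and hands out the dispatcher
// of the worker thread it was created on.
class CallbacksWrapper : public FilterCallbacks {
public:
  explicit CallbacksWrapper(Dispatcher& dispatcher) : dispatcher_(dispatcher) {}
  void addFilter(std::string filter_name) override { added_.push_back(std::move(filter_name)); }
  Dispatcher& dispatcher() override { return dispatcher_; }
  const std::vector<std::string>& added() const { return added_; }

private:
  Dispatcher& dispatcher_;
  std::vector<std::string> added_;
};

int main() {
  Dispatcher worker("worker_0");
  CallbacksWrapper wrapper(worker);
  wrapper.addFilter("delegated_filter");
  wrapper.dispatcher().post([&] {
    std::cout << "added " << wrapper.added().size() << " filter(s) on "
              << wrapper.dispatcher().name() << "\n";
  });
}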
diff --git a/source/extensions/filters/http/compressor/compressor_filter.cc b/source/extensions/filters/http/compressor/compressor_filter.cc index 20462186cc1c..a5941c2e15d7 100644 --- a/source/extensions/filters/http/compressor/compressor_filter.cc +++ b/source/extensions/filters/http/compressor/compressor_filter.cc @@ -164,12 +164,8 @@ Http::FilterHeadersStatus CompressorFilter::decodeHeaders(Http::RequestHeaderMap } const auto& request_config = config_->requestDirectionConfig(); - const bool is_not_upgrade = - !Http::Utility::isUpgrade(headers) || - !Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.enable_compression_without_content_length_header"); - if (!end_stream && request_config.compressionEnabled() && is_not_upgrade && + if (!end_stream && request_config.compressionEnabled() && !Http::Utility::isUpgrade(headers) && request_config.isMinimumContentLength(headers) && request_config.isContentTypeAllowed(headers) && !headers.getInline(request_content_encoding_handle.handle()) && @@ -234,15 +230,11 @@ Http::FilterHeadersStatus CompressorFilter::encodeHeaders(Http::ResponseHeaderMa const auto& config = config_->responseDirectionConfig(); const bool isEnabledAndContentLengthBigEnough = config.compressionEnabled() && config.isMinimumContentLength(headers); - const bool is_not_upgrade = - !Http::Utility::isUpgrade(headers) || - !Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.enable_compression_without_content_length_header"); - - const bool isCompressible = isEnabledAndContentLengthBigEnough && is_not_upgrade && - config.isContentTypeAllowed(headers) && - !hasCacheControlNoTransform(headers) && isEtagAllowed(headers) && - !headers.getInline(response_content_encoding_handle.handle()); + + const bool isCompressible = + isEnabledAndContentLengthBigEnough && !Http::Utility::isUpgrade(headers) && + config.isContentTypeAllowed(headers) && !hasCacheControlNoTransform(headers) && + isEtagAllowed(headers) && !headers.getInline(response_content_encoding_handle.handle()); if (!end_stream && isEnabledAndContentLengthBigEnough && isAcceptEncodingAllowed(headers) && isCompressible && isTransferEncodingAllowed(headers)) { sanitizeEtagHeader(headers); @@ -520,13 +512,7 @@ bool CompressorFilterConfig::DirectionConfig::isMinimumContentLength( } return is_minimum_content_length; } - if (Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.enable_compression_without_content_length_header")) { - // return true to ignore the minimum length configuration if no content-length header is present - return true; - } - return StringUtil::caseFindToken(headers.getTransferEncodingValue(), ",", - Http::Headers::get().TransferEncodingValues.Chunked); + return true; } bool CompressorFilter::isTransferEncodingAllowed(Http::RequestOrResponseHeaderMap& headers) const { diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index 90df75ee279d..c6021c566bae 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -13,14 +13,6 @@ namespace Extensions { namespace HttpFilters { namespace ExtAuthz { -struct RcDetailsValues { - // The ext_authz filter denied the downstream request. - const std::string AuthzDenied = "ext_authz_denied"; - // The ext_authz filter encountered a failure, and was configured to fail-closed. 
- const std::string AuthzError = "ext_authz_error"; -}; -using RcDetails = ConstSingleton; - void FilterConfigPerRoute::merge(const FilterConfigPerRoute& other) { // We only merge context extensions here, and leave boolean flags untouched since those flags are // not used from the merged config. @@ -91,8 +83,9 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, *decoder_callbacks_); decoder_callbacks_->streamInfo().setResponseFlag( StreamInfo::ResponseFlag::UnauthorizedExternalService); - decoder_callbacks_->sendLocalReply(config_->statusOnError(), EMPTY_STRING, nullptr, - absl::nullopt, RcDetails::get().AuthzError); + decoder_callbacks_->sendLocalReply( + config_->statusOnError(), EMPTY_STRING, nullptr, absl::nullopt, + Filters::Common::ExtAuthz::ResponseCodeDetails::get().AuthzError); return Http::FilterHeadersStatus::StopIteration; } return Http::FilterHeadersStatus::Continue; @@ -162,7 +155,8 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers ENVOY_STREAM_LOG(trace, "ext_authz filter has {} response header(s) to add and {} response header(s) to " "set to the encoded response:", - *encoder_callbacks_, response_headers_to_add_.size()); + *encoder_callbacks_, response_headers_to_add_.size(), + response_headers_to_set_.size()); if (!response_headers_to_add_.empty()) { ENVOY_STREAM_LOG( trace, "ext_authz filter added header(s) to the encoded response:", *encoder_callbacks_); @@ -222,12 +216,13 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { switch (response->status) { case CheckStatus::OK: { - // Any changes to request headers can affect how the request is going to be + // Any changes to request headers or query parameters can affect how the request is going to be // routed. If we are changing the headers we also need to clear the route // cache. 
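The comment above refers to the hunk that follows, where the check response may also set or remove query parameters; when it does, the `path` header is rebuilt from the mutated parameter map and the route cache is cleared. Here is a standalone sketch of the rebuild step under simplifying assumptions (no percent-decoding, illustrative helper names; the real code uses `Http::Utility::parseQueryString` and `replaceQueryString`).

```cpp
#include <iostream>
#include <map>
#include <string>

// Parse "?key=value&..." from a path into a map. Simplified: no URL decoding.
std::map<std::string, std::string> parseQuery(const std::string& path) {
  std::map<std::string, std::string> params;
  const auto qpos = path.find('?');
  if (qpos == std::string::npos) {
    return params;
  }
  std::string::size_type start = qpos + 1;
  while (start < path.size()) {
    auto end = path.find('&', start);
    if (end == std::string::npos) {
      end = path.size();
    }
    const std::string pair = path.substr(start, end - start);
    const auto eq = pair.find('=');
    if (!pair.empty()) {
      params[pair.substr(0, eq)] = (eq == std::string::npos) ? "" : pair.substr(eq + 1);
    }
    start = end + 1;
  }
  return params;
}

// Rebuild the path from the (possibly mutated) parameter map.
std::string replaceQuery(const std::string& path, const std::map<std::string, std::string>& params) {
  std::string result = path.substr(0, path.find('?'));
  std::string sep = "?";
  for (const auto& [key, value] : params) {
    result += sep + key + "=" + value;
    sep = "&";
  }
  return result;
}

int main() {
  const std::string path = "/svc?user=alice&debug=1";
  auto params = parseQuery(path);
  params["tenant"] = "blue"; // a query_parameters_to_set entry
  params.erase("debug");     // a query_parameters_to_remove entry
  std::cout << replaceQuery(path, params) << "\n"; // prints /svc?tenant=blue&user=alice
}
```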
if (config_->clearRouteCache() && (!response->headers_to_set.empty() || !response->headers_to_append.empty() || - !response->headers_to_remove.empty())) { + !response->headers_to_remove.empty() || !response->query_parameters_to_set.empty() || + !response->query_parameters_to_remove.empty())) { ENVOY_STREAM_LOG(debug, "ext_authz is clearing route cache", *decoder_callbacks_); decoder_callbacks_->clearRouteCache(); } @@ -286,6 +281,42 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { response_headers_to_set_ = std::move(response->response_headers_to_set); } + absl::optional modified_query_parameters; + if (!response->query_parameters_to_set.empty()) { + modified_query_parameters = + Http::Utility::parseQueryString(request_headers_->Path()->value().getStringView()); + ENVOY_STREAM_LOG( + trace, "ext_authz filter set query parameter(s) on the request:", *decoder_callbacks_); + for (const auto& [key, value] : response->query_parameters_to_set) { + ENVOY_STREAM_LOG(trace, "'{}={}'", *decoder_callbacks_, key, value); + (*modified_query_parameters)[key] = value; + } + } + + if (!response->query_parameters_to_remove.empty()) { + if (!modified_query_parameters) { + modified_query_parameters = + Http::Utility::parseQueryString(request_headers_->Path()->value().getStringView()); + } + ENVOY_STREAM_LOG(trace, "ext_authz filter removed query parameter(s) from the request:", + *decoder_callbacks_); + for (const auto& key : response->query_parameters_to_remove) { + ENVOY_STREAM_LOG(trace, "'{}'", *decoder_callbacks_, key); + (*modified_query_parameters).erase(key); + } + } + + // We modified the query parameters in some way, so regenerate the `path` header and set it + // here. + if (modified_query_parameters) { + const auto new_path = Http::Utility::replaceQueryString(request_headers_->Path()->value(), + modified_query_parameters.value()); + ENVOY_STREAM_LOG( + trace, "ext_authz filter modified query parameter(s), using new path for request: {}", + *decoder_callbacks_, new_path); + request_headers_->setPath(new_path); + } + if (cluster_) { config_->incCounter(cluster_->statsScope(), config_->ext_authz_ok_); } @@ -333,7 +364,7 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { response_headers.addCopy(header.first, header.second); } }, - absl::nullopt, RcDetails::get().AuthzDenied); + absl::nullopt, Filters::Common::ExtAuthz::ResponseCodeDetails::get().AuthzDenied); decoder_callbacks_->streamInfo().setResponseFlag( StreamInfo::ResponseFlag::UnauthorizedExternalService); break; @@ -358,8 +389,9 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { *decoder_callbacks_, enumToInt(config_->statusOnError())); decoder_callbacks_->streamInfo().setResponseFlag( StreamInfo::ResponseFlag::UnauthorizedExternalService); - decoder_callbacks_->sendLocalReply(config_->statusOnError(), EMPTY_STRING, nullptr, - absl::nullopt, RcDetails::get().AuthzError); + decoder_callbacks_->sendLocalReply( + config_->statusOnError(), EMPTY_STRING, nullptr, absl::nullopt, + Filters::Common::ExtAuthz::ResponseCodeDetails::get().AuthzError); } break; } diff --git a/source/extensions/filters/http/ext_proc/BUILD b/source/extensions/filters/http/ext_proc/BUILD index 4fb765eb80fc..209cc344ded3 100644 --- a/source/extensions/filters/http/ext_proc/BUILD +++ b/source/extensions/filters/http/ext_proc/BUILD @@ -29,8 +29,8 @@ envoy_cc_library( "//source/common/buffer:buffer_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", 
"@com_google_absl//absl/strings:str_format", - "@envoy_api//envoy/extensions/filters/http/ext_proc/v3alpha:pkg_cc_proto", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ext_proc/v3:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) @@ -42,7 +42,7 @@ envoy_cc_extension( ":client_lib", ":ext_proc", "//source/extensions/filters/http/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/http/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ext_proc/v3:pkg_cc_proto", ], ) @@ -52,7 +52,7 @@ envoy_cc_library( deps = [ "//envoy/grpc:status", "//envoy/stream_info:stream_info_interface", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) @@ -65,7 +65,7 @@ envoy_cc_library( "//envoy/http:header_map_interface", "//source/common/http:header_utility_lib", "//source/common/protobuf:utility_lib", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) @@ -80,6 +80,6 @@ envoy_cc_library( "//envoy/upstream:cluster_manager_interface", "//source/common/grpc:typed_async_client_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/ext_proc/client.h b/source/extensions/filters/http/ext_proc/client.h index 7dcc434f2a9a..9d6654908654 100644 --- a/source/extensions/filters/http/ext_proc/client.h +++ b/source/extensions/filters/http/ext_proc/client.h @@ -4,7 +4,7 @@ #include "envoy/common/pure.h" #include "envoy/grpc/status.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "envoy/stream_info/stream_info.h" namespace Envoy { @@ -15,7 +15,7 @@ namespace ExternalProcessing { class ExternalProcessorStream { public: virtual ~ExternalProcessorStream() = default; - virtual void send(envoy::service::ext_proc::v3alpha::ProcessingRequest&& request, + virtual void send(envoy::service::ext_proc::v3::ProcessingRequest&& request, bool end_stream) PURE; // Idempotent close. Return true if it actually closed. 
virtual bool close() PURE; @@ -27,7 +27,7 @@ class ExternalProcessorCallbacks { public: virtual ~ExternalProcessorCallbacks() = default; virtual void onReceiveMessage( - std::unique_ptr&& response) PURE; + std::unique_ptr&& response) PURE; virtual void onGrpcError(Grpc::Status::GrpcStatus error) PURE; virtual void onGrpcClose() PURE; }; diff --git a/source/extensions/filters/http/ext_proc/client_impl.cc b/source/extensions/filters/http/ext_proc/client_impl.cc index 92e811b81ddd..79b1ebf94411 100644 --- a/source/extensions/filters/http/ext_proc/client_impl.cc +++ b/source/extensions/filters/http/ext_proc/client_impl.cc @@ -5,8 +5,7 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { -static constexpr char kExternalMethod[] = - "envoy.service.ext_proc.v3alpha.ExternalProcessor.Process"; +static constexpr char kExternalMethod[] = "envoy.service.ext_proc.v3.ExternalProcessor.Process"; ExternalProcessorClientImpl::ExternalProcessorClientImpl( Grpc::AsyncClientManager& client_manager, @@ -35,8 +34,8 @@ ExternalProcessorStreamImpl::ExternalProcessorStreamImpl( stream_ = client_.start(*descriptor, *this, options); } -void ExternalProcessorStreamImpl::send( - envoy::service::ext_proc::v3alpha::ProcessingRequest&& request, bool end_stream) { +void ExternalProcessorStreamImpl::send(envoy::service::ext_proc::v3::ProcessingRequest&& request, + bool end_stream) { stream_.sendMessage(std::move(request), end_stream); } diff --git a/source/extensions/filters/http/ext_proc/client_impl.h b/source/extensions/filters/http/ext_proc/client_impl.h index 8516ce99f8bc..d1381e5093a3 100644 --- a/source/extensions/filters/http/ext_proc/client_impl.h +++ b/source/extensions/filters/http/ext_proc/client_impl.h @@ -5,14 +5,14 @@ #include "envoy/config/core/v3/grpc_service.pb.h" #include "envoy/grpc/async_client_manager.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "envoy/stats/scope.h" #include "source/common/grpc/typed_async_client.h" #include "source/extensions/filters/http/ext_proc/client.h" -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/http/ext_proc/config.cc b/source/extensions/filters/http/ext_proc/config.cc index 1e8ec8e321c4..607126d5f93e 100644 --- a/source/extensions/filters/http/ext_proc/config.cc +++ b/source/extensions/filters/http/ext_proc/config.cc @@ -9,7 +9,7 @@ namespace HttpFilters { namespace ExternalProcessing { Http::FilterFactoryCb ExternalProcessingFilterConfig::createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor& proto_config, + const envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor& proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { const uint32_t message_timeout_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config, message_timeout, DefaultMessageTimeoutMs); @@ -28,7 +28,7 @@ Http::FilterFactoryCb ExternalProcessingFilterConfig::createFilterFactoryFromPro Router::RouteSpecificFilterConfigConstSharedPtr ExternalProcessingFilterConfig::createRouteSpecificFilterConfigTyped( - const envoy::extensions::filters::http::ext_proc::v3alpha::ExtProcPerRoute& proto_config, + const 
envoy::extensions::filters::http::ext_proc::v3::ExtProcPerRoute& proto_config, Server::Configuration::ServerFactoryContext&, ProtobufMessage::ValidationVisitor&) { return std::make_shared(proto_config); } diff --git a/source/extensions/filters/http/ext_proc/config.h b/source/extensions/filters/http/ext_proc/config.h index 9918f341c402..bf47ede88fed 100644 --- a/source/extensions/filters/http/ext_proc/config.h +++ b/source/extensions/filters/http/ext_proc/config.h @@ -2,8 +2,8 @@ #include -#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" -#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.validate.h" +#include "envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.validate.h" #include "source/extensions/filters/http/common/factory_base.h" @@ -13,9 +13,8 @@ namespace HttpFilters { namespace ExternalProcessing { class ExternalProcessingFilterConfig - : public Common::FactoryBase< - envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor, - envoy::extensions::filters::http::ext_proc::v3alpha::ExtProcPerRoute> { + : public Common::FactoryBase { public: ExternalProcessingFilterConfig() : FactoryBase("envoy.filters.http.ext_proc") {} @@ -24,11 +23,11 @@ class ExternalProcessingFilterConfig static constexpr uint64_t DefaultMessageTimeoutMs = 200; Http::FilterFactoryCb createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor& proto_config, + const envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor& proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; Router::RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfigTyped( - const envoy::extensions::filters::http::ext_proc::v3alpha::ExtProcPerRoute& proto_config, + const envoy::extensions::filters::http::ext_proc::v3::ExtProcPerRoute& proto_config, Server::Configuration::ServerFactoryContext& context, ProtobufMessage::ValidationVisitor& validator) override; }; diff --git a/source/extensions/filters/http/ext_proc/ext_proc.cc b/source/extensions/filters/http/ext_proc/ext_proc.cc index 79d34513af7e..1eca01fa4ab7 100644 --- a/source/extensions/filters/http/ext_proc/ext_proc.cc +++ b/source/extensions/filters/http/ext_proc/ext_proc.cc @@ -10,12 +10,12 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { -using envoy::extensions::filters::http::ext_proc::v3alpha::ExtProcPerRoute; -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; +using envoy::extensions::filters::http::ext_proc::v3::ExtProcPerRoute; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; -using envoy::service::ext_proc::v3alpha::ImmediateResponse; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::service::ext_proc::v3::ImmediateResponse; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; using Http::FilterDataStatus; using Http::FilterHeadersStatus; diff --git a/source/extensions/filters/http/ext_proc/ext_proc.h b/source/extensions/filters/http/ext_proc/ext_proc.h index f1fb8994f502..f831e69ee7ce 100644 --- a/source/extensions/filters/http/ext_proc/ext_proc.h +++ b/source/extensions/filters/http/ext_proc/ext_proc.h @@ -5,10 +5,10 @@ #include #include "envoy/event/timer.h" -#include 
"envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.h" #include "envoy/grpc/async_client.h" #include "envoy/http/filter.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" @@ -38,7 +38,7 @@ struct ExtProcFilterStats { class FilterConfig { public: - FilterConfig(const envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor& config, + FilterConfig(const envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor& config, const std::chrono::milliseconds message_timeout, Stats::Scope& scope, const std::string& stats_prefix) : failure_mode_allow_(config.failure_mode_allow()), message_timeout_(message_timeout), @@ -51,8 +51,7 @@ class FilterConfig { const ExtProcFilterStats& stats() const { return stats_; } - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& - processingMode() const { + const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& processingMode() const { return processing_mode_; } @@ -67,7 +66,7 @@ class FilterConfig { const std::chrono::milliseconds message_timeout_; ExtProcFilterStats stats_; - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode processing_mode_; + const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode processing_mode_; }; using FilterConfigSharedPtr = std::shared_ptr; @@ -75,20 +74,19 @@ using FilterConfigSharedPtr = std::shared_ptr; class FilterConfigPerRoute : public Router::RouteSpecificFilterConfig { public: explicit FilterConfigPerRoute( - const envoy::extensions::filters::http::ext_proc::v3alpha::ExtProcPerRoute& config); + const envoy::extensions::filters::http::ext_proc::v3::ExtProcPerRoute& config); void merge(const FilterConfigPerRoute& other); bool disabled() const { return disabled_; } - const absl::optional& + const absl::optional& processingMode() const { return processing_mode_; } private: bool disabled_; - absl::optional - processing_mode_; + absl::optional processing_mode_; }; class Filter : public Logger::Loggable, @@ -129,7 +127,7 @@ class Filter : public Logger::Loggable, // ExternalProcessorCallbacks void onReceiveMessage( - std::unique_ptr&& response) override; + std::unique_ptr&& response) override; void onGrpcError(Grpc::Status::GrpcStatus error) override; @@ -153,7 +151,7 @@ class Filter : public Logger::Loggable, void cleanUpTimers(); void clearAsyncState(); - void sendImmediateResponse(const envoy::service::ext_proc::v3alpha::ImmediateResponse& response); + void sendImmediateResponse(const envoy::service::ext_proc::v3::ImmediateResponse& response); Http::FilterHeadersStatus onHeaders(ProcessorState& state, Http::RequestOrResponseHeaderMap& headers, bool end_stream); @@ -183,7 +181,7 @@ class Filter : public Logger::Loggable, }; extern std::string responseCaseToString( - const envoy::service::ext_proc::v3alpha::ProcessingResponse::ResponseCase response_case); + const envoy::service::ext_proc::v3::ProcessingResponse::ResponseCase response_case); } // namespace ExternalProcessing } // namespace HttpFilters diff --git a/source/extensions/filters/http/ext_proc/mutation_utils.cc b/source/extensions/filters/http/ext_proc/mutation_utils.cc index 1079716b8a7c..b4d87f536e44 100644 --- a/source/extensions/filters/http/ext_proc/mutation_utils.cc +++ b/source/extensions/filters/http/ext_proc/mutation_utils.cc @@ -15,11 +15,11 @@ using 
Http::Headers; using Http::LowerCaseString; using envoy::config::core::v3::HeaderValueOption; -using envoy::service::ext_proc::v3alpha::BodyMutation; -using envoy::service::ext_proc::v3alpha::BodyResponse; -using envoy::service::ext_proc::v3alpha::CommonResponse; -using envoy::service::ext_proc::v3alpha::HeaderMutation; -using envoy::service::ext_proc::v3alpha::HeadersResponse; +using envoy::service::ext_proc::v3::BodyMutation; +using envoy::service::ext_proc::v3::BodyResponse; +using envoy::service::ext_proc::v3::CommonResponse; +using envoy::service::ext_proc::v3::HeaderMutation; +using envoy::service::ext_proc::v3::HeadersResponse; void MutationUtils::headersToProto(const Http::HeaderMap& headers_in, envoy::config::core::v3::HeaderMap& proto_out) { diff --git a/source/extensions/filters/http/ext_proc/mutation_utils.h b/source/extensions/filters/http/ext_proc/mutation_utils.h index f57c13793d8b..2776c807bfa1 100644 --- a/source/extensions/filters/http/ext_proc/mutation_utils.h +++ b/source/extensions/filters/http/ext_proc/mutation_utils.h @@ -2,7 +2,7 @@ #include "envoy/buffer/buffer.h" #include "envoy/http/header_map.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "source/common/common/logger.h" @@ -19,22 +19,21 @@ class MutationUtils : public Logger::Loggable { // Apply mutations that are common to header responses. static void - applyCommonHeaderResponse(const envoy::service::ext_proc::v3alpha::HeadersResponse& response, + applyCommonHeaderResponse(const envoy::service::ext_proc::v3::HeadersResponse& response, Http::HeaderMap& headers); // Modify header map based on a set of mutations from a protobuf - static void - applyHeaderMutations(const envoy::service::ext_proc::v3alpha::HeaderMutation& mutation, - Http::HeaderMap& headers, bool replacing_message); + static void applyHeaderMutations(const envoy::service::ext_proc::v3::HeaderMutation& mutation, + Http::HeaderMap& headers, bool replacing_message); // Apply mutations that are common to body responses. // Mutations will be applied to the header map if it is not null. - static void applyCommonBodyResponse(const envoy::service::ext_proc::v3alpha::BodyResponse& body, + static void applyCommonBodyResponse(const envoy::service::ext_proc::v3::BodyResponse& body, Http::RequestOrResponseHeaderMap* headers, Buffer::Instance& buffer); // Modify a buffer based on a set of mutations from a protobuf - static void applyBodyMutations(const envoy::service::ext_proc::v3alpha::BodyMutation& mutation, + static void applyBodyMutations(const envoy::service::ext_proc::v3::BodyMutation& mutation, Buffer::Instance& buffer); // Determine if a particular HTTP status code is valid. 
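The mutation utilities above apply header mutations carried in the external processor's response. The sketch below models the append-versus-overwrite behavior on a simple multimap-style header structure; the `Mutation` struct and its `append` flag are illustrative stand-ins rather than the proto types.

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct Mutation {
  std::string key;
  std::string value;
  bool append{false}; // false: replace any existing values for the key
};

using Headers = std::map<std::string, std::vector<std::string>>;

void applyMutations(const std::vector<Mutation>& mutations, Headers& headers) {
  for (const auto& m : mutations) {
    auto& values = headers[m.key];
    if (!m.append) {
      values.clear();
    }
    values.push_back(m.value);
  }
}

int main() {
  Headers headers{{"x-request-id", {"abc"}}, {"x-tag", {"a"}}};
  applyMutations({{"x-tag", "b", /*append=*/true}, {"x-request-id", "xyz", /*append=*/false}},
                 headers);
  for (const auto& [key, values] : headers) {
    for (const auto& value : values) {
      std::cout << key << ": " << value << "\n";
    }
  }
  // Prints: x-request-id: xyz, then x-tag: a and x-tag: b.
}
```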
diff --git a/source/extensions/filters/http/ext_proc/processor_state.cc b/source/extensions/filters/http/ext_proc/processor_state.cc index 8cc1bb21de79..0448c475e28b 100644 --- a/source/extensions/filters/http/ext_proc/processor_state.cc +++ b/source/extensions/filters/http/ext_proc/processor_state.cc @@ -10,13 +10,13 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode_BodySendMode; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode_BodySendMode; -using envoy::service::ext_proc::v3alpha::BodyResponse; -using envoy::service::ext_proc::v3alpha::CommonResponse; -using envoy::service::ext_proc::v3alpha::HeadersResponse; -using envoy::service::ext_proc::v3alpha::TrailersResponse; +using envoy::service::ext_proc::v3::BodyResponse; +using envoy::service::ext_proc::v3::CommonResponse; +using envoy::service::ext_proc::v3::HeadersResponse; +using envoy::service::ext_proc::v3::TrailersResponse; void ProcessorState::startMessageTimer(Event::TimerCb cb, std::chrono::milliseconds timeout) { if (!message_timer_) { diff --git a/source/extensions/filters/http/ext_proc/processor_state.h b/source/extensions/filters/http/ext_proc/processor_state.h index d3f74746e0bd..6c6a07bfead3 100644 --- a/source/extensions/filters/http/ext_proc/processor_state.h +++ b/source/extensions/filters/http/ext_proc/processor_state.h @@ -5,10 +5,10 @@ #include "envoy/buffer/buffer.h" #include "envoy/event/timer.h" -#include "envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3/processing_mode.pb.h" #include "envoy/http/filter.h" #include "envoy/http/header_map.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "source/common/buffer/buffer_impl.h" #include "source/common/common/logger.h" @@ -96,11 +96,10 @@ class ProcessorState : public Logger::Loggable { bool partialBodyProcessed() const { return partial_body_processed_; } virtual void setProcessingMode( - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& mode) PURE; + const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& mode) PURE; bool sendHeaders() const { return send_headers_; } bool sendTrailers() const { return send_trailers_; } - envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode_BodySendMode - bodyMode() const { + envoy::extensions::filters::http::ext_proc::v3::ProcessingMode_BodySendMode bodyMode() const { return body_mode_; } @@ -114,9 +113,9 @@ class ProcessorState : public Logger::Loggable { virtual void requestWatermark() PURE; virtual void clearWatermark() PURE; - bool handleHeadersResponse(const envoy::service::ext_proc::v3alpha::HeadersResponse& response); - bool handleBodyResponse(const envoy::service::ext_proc::v3alpha::BodyResponse& response); - bool handleTrailersResponse(const envoy::service::ext_proc::v3alpha::TrailersResponse& response); + bool handleHeadersResponse(const envoy::service::ext_proc::v3::HeadersResponse& response); + bool handleBodyResponse(const envoy::service::ext_proc::v3::BodyResponse& response); + bool handleTrailersResponse(const envoy::service::ext_proc::v3::TrailersResponse& response); virtual const Buffer::Instance* bufferedData() const PURE; bool 
hasBufferedData() const { return bufferedData() != nullptr && bufferedData()->length() > 0; } @@ -144,16 +143,16 @@ class ProcessorState : public Logger::Loggable { void continueIfNecessary(); void clearAsyncState(); - virtual envoy::service::ext_proc::v3alpha::HttpHeaders* - mutableHeaders(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const PURE; - virtual envoy::service::ext_proc::v3alpha::HttpBody* - mutableBody(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const PURE; - virtual envoy::service::ext_proc::v3alpha::HttpTrailers* - mutableTrailers(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const PURE; + virtual envoy::service::ext_proc::v3::HttpHeaders* + mutableHeaders(envoy::service::ext_proc::v3::ProcessingRequest& request) const PURE; + virtual envoy::service::ext_proc::v3::HttpBody* + mutableBody(envoy::service::ext_proc::v3::ProcessingRequest& request) const PURE; + virtual envoy::service::ext_proc::v3::HttpTrailers* + mutableTrailers(envoy::service::ext_proc::v3::ProcessingRequest& request) const PURE; protected: void setBodyMode( - envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode_BodySendMode body_mode); + envoy::extensions::filters::http::ext_proc::v3::ProcessingMode_BodySendMode body_mode); Filter& filter_; Http::StreamFilterCallbacks* filter_callbacks_; @@ -183,7 +182,7 @@ class ProcessorState : public Logger::Loggable { bool send_trailers_ : 1; // The specific mode for body handling - envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode_BodySendMode body_mode_; + envoy::extensions::filters::http::ext_proc::v3::ProcessingMode_BodySendMode body_mode_; Http::RequestOrResponseHeaderMap* headers_ = nullptr; Http::HeaderMap* trailers_ = nullptr; @@ -194,8 +193,7 @@ class ProcessorState : public Logger::Loggable { class DecodingProcessorState : public ProcessorState { public: explicit DecodingProcessorState( - Filter& filter, - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& mode) + Filter& filter, const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& mode) : ProcessorState(filter) { setProcessingModeInternal(mode); } @@ -232,23 +230,23 @@ class DecodingProcessorState : public ProcessorState { void continueProcessing() const override { decoder_callbacks_->continueDecoding(); } - envoy::service::ext_proc::v3alpha::HttpHeaders* - mutableHeaders(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const override { + envoy::service::ext_proc::v3::HttpHeaders* + mutableHeaders(envoy::service::ext_proc::v3::ProcessingRequest& request) const override { return request.mutable_request_headers(); } - envoy::service::ext_proc::v3alpha::HttpBody* - mutableBody(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const override { + envoy::service::ext_proc::v3::HttpBody* + mutableBody(envoy::service::ext_proc::v3::ProcessingRequest& request) const override { return request.mutable_request_body(); } - envoy::service::ext_proc::v3alpha::HttpTrailers* - mutableTrailers(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const override { + envoy::service::ext_proc::v3::HttpTrailers* + mutableTrailers(envoy::service::ext_proc::v3::ProcessingRequest& request) const override { return request.mutable_request_trailers(); } void setProcessingMode( - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& mode) override { + const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& mode) override { 
setProcessingModeInternal(mode); } @@ -257,7 +255,7 @@ class DecodingProcessorState : public ProcessorState { private: void setProcessingModeInternal( - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& mode); + const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& mode); Http::StreamDecoderFilterCallbacks* decoder_callbacks_{}; }; @@ -265,8 +263,7 @@ class DecodingProcessorState : public ProcessorState { class EncodingProcessorState : public ProcessorState { public: explicit EncodingProcessorState( - Filter& filter, - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& mode) + Filter& filter, const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& mode) : ProcessorState(filter) { setProcessingModeInternal(mode); } @@ -303,23 +300,23 @@ class EncodingProcessorState : public ProcessorState { void continueProcessing() const override { encoder_callbacks_->continueEncoding(); } - envoy::service::ext_proc::v3alpha::HttpHeaders* - mutableHeaders(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const override { + envoy::service::ext_proc::v3::HttpHeaders* + mutableHeaders(envoy::service::ext_proc::v3::ProcessingRequest& request) const override { return request.mutable_response_headers(); } - envoy::service::ext_proc::v3alpha::HttpBody* - mutableBody(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const override { + envoy::service::ext_proc::v3::HttpBody* + mutableBody(envoy::service::ext_proc::v3::ProcessingRequest& request) const override { return request.mutable_response_body(); } - envoy::service::ext_proc::v3alpha::HttpTrailers* - mutableTrailers(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const override { + envoy::service::ext_proc::v3::HttpTrailers* + mutableTrailers(envoy::service::ext_proc::v3::ProcessingRequest& request) const override { return request.mutable_response_trailers(); } void setProcessingMode( - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& mode) override { + const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& mode) override { setProcessingModeInternal(mode); } @@ -328,7 +325,7 @@ class EncodingProcessorState : public ProcessorState { private: void setProcessingModeInternal( - const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& mode); + const envoy::extensions::filters::http::ext_proc::v3::ProcessingMode& mode); Http::StreamEncoderFilterCallbacks* encoder_callbacks_{}; }; diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index b1b1cd319a40..3bb7f57c5637 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -62,9 +62,6 @@ using RcDetails = ConstSingleton; namespace { -constexpr absl::string_view buffer_limits_runtime_feature = - "envoy.reloadable_features.grpc_json_transcoder_adhere_to_buffer_limits"; - const Http::LowerCaseString& trailerHeader() { CONSTRUCT_ON_FIRST_USE(Http::LowerCaseString, "trailer"); } @@ -214,6 +211,7 @@ JsonTranscoderConfig::JsonTranscoderConfig( default: NOT_REACHED_GCOVR_EXCL_LINE; } + pmb.SetQueryParamUnescapePlus(proto_config.query_param_unescape_plus()); path_matcher_ = pmb.Build(); @@ -892,10 +890,6 @@ bool JsonTranscoderFilter::maybeConvertGrpcStatus(Grpc::Status::GrpcStatus grpc_ } bool 
JsonTranscoderFilter::decoderBufferLimitReached(uint64_t buffer_length) { - if (!Runtime::runtimeFeatureEnabled(buffer_limits_runtime_feature)) { - return false; - } - if (buffer_length > decoder_callbacks_->decoderBufferLimit()) { ENVOY_LOG(debug, "Request rejected because the transcoder's internal buffer size exceeds the " @@ -914,10 +908,6 @@ bool JsonTranscoderFilter::decoderBufferLimitReached(uint64_t buffer_length) { } bool JsonTranscoderFilter::encoderBufferLimitReached(uint64_t buffer_length) { - if (!Runtime::runtimeFeatureEnabled(buffer_limits_runtime_feature)) { - return false; - } - if (buffer_length > encoder_callbacks_->encoderBufferLimit()) { ENVOY_LOG(debug, "Response not transcoded because the transcoder's internal buffer size exceeds the " diff --git a/source/extensions/filters/http/health_check/BUILD b/source/extensions/filters/http/health_check/BUILD index 1ecafee6b332..3841dd7b6cc7 100644 --- a/source/extensions/filters/http/health_check/BUILD +++ b/source/extensions/filters/http/health_check/BUILD @@ -37,12 +37,6 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], - # Legacy test use. TODO(#9953) clean up. - extra_visibility = [ - "//test/common/filter:__subpackages__", - "//test/integration:__subpackages__", - "//test/server:__subpackages__", - ], deps = [ "//envoy/registry", "//source/common/http:header_utility_lib", diff --git a/source/extensions/filters/http/health_check/health_check.cc b/source/extensions/filters/http/health_check/health_check.cc index 4a15e62df9f9..08b3cb8b29c7 100644 --- a/source/extensions/filters/http/health_check/health_check.cc +++ b/source/extensions/filters/http/health_check/health_check.cc @@ -106,9 +106,7 @@ Http::FilterHeadersStatus HealthCheckFilter::encodeHeaders(Http::ResponseHeaderM headers.setEnvoyUpstreamHealthCheckedCluster(context_.localInfo().clusterName()); } - if (context_.healthCheckFailed() && - Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.health_check.immediate_failure_exclude_from_cluster")) { + if (context_.healthCheckFailed()) { headers.setReferenceEnvoyImmediateHealthCheckFail( Http::Headers::get().EnvoyImmediateHealthCheckFailValues.True); } diff --git a/source/extensions/filters/http/oauth2/BUILD b/source/extensions/filters/http/oauth2/BUILD index 37a6f9523a29..c1c51980d610 100644 --- a/source/extensions/filters/http/oauth2/BUILD +++ b/source/extensions/filters/http/oauth2/BUILD @@ -55,7 +55,7 @@ envoy_cc_library( "//source/common/protobuf:utility_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/filters/http/oauth2/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/oauth2/v3:pkg_cc_proto", ], ) @@ -67,6 +67,6 @@ envoy_cc_extension( ":oauth_lib", "//envoy/registry", "//source/extensions/filters/http/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/http/oauth2/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/oauth2/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/oauth2/config.cc b/source/extensions/filters/http/oauth2/config.cc index 74bc5567504a..15ae78ee0760 100644 --- a/source/extensions/filters/http/oauth2/config.cc +++ b/source/extensions/filters/http/oauth2/config.cc @@ -5,7 +5,7 @@ #include #include "envoy/common/exception.h" -#include "envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.validate.h" +#include "envoy/extensions/filters/http/oauth2/v3/oauth.pb.validate.h" #include 
"envoy/registry/registry.h" #include "envoy/secret/secret_manager.h" #include "envoy/secret/secret_provider.h" @@ -37,7 +37,7 @@ secretsProvider(const envoy::extensions::transport_sockets::tls::v3::SdsSecretCo } // namespace Http::FilterFactoryCb OAuth2Config::createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::oauth2::v3alpha::OAuth2& proto, + const envoy::extensions::filters::http::oauth2::v3::OAuth2& proto, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { if (!proto.has_config()) { throw EnvoyException("config must be present for global config"); diff --git a/source/extensions/filters/http/oauth2/config.h b/source/extensions/filters/http/oauth2/config.h index 10f4fbc8b84d..6db0c0d48052 100644 --- a/source/extensions/filters/http/oauth2/config.h +++ b/source/extensions/filters/http/oauth2/config.h @@ -2,8 +2,8 @@ #include -#include "envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.h" -#include "envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.validate.h" +#include "envoy/extensions/filters/http/oauth2/v3/oauth.pb.h" +#include "envoy/extensions/filters/http/oauth2/v3/oauth.pb.validate.h" #include "source/extensions/filters/http/common/factory_base.h" @@ -13,13 +13,14 @@ namespace HttpFilters { namespace Oauth2 { class OAuth2Config : public Extensions::HttpFilters::Common::FactoryBase< - envoy::extensions::filters::http::oauth2::v3alpha::OAuth2> { + envoy::extensions::filters::http::oauth2::v3::OAuth2> { public: OAuth2Config() : FactoryBase("envoy.filters.http.oauth2") {} - Http::FilterFactoryCb createFilterFactoryFromProtoTyped( - const envoy::extensions::filters::http::oauth2::v3alpha::OAuth2&, const std::string&, - Server::Configuration::FactoryContext&) override; + Http::FilterFactoryCb + createFilterFactoryFromProtoTyped(const envoy::extensions::filters::http::oauth2::v3::OAuth2&, + const std::string&, + Server::Configuration::FactoryContext&) override; }; } // namespace Oauth2 diff --git a/source/extensions/filters/http/oauth2/filter.cc b/source/extensions/filters/http/oauth2/filter.cc index d0234dd733b0..4f1def409668 100644 --- a/source/extensions/filters/http/oauth2/filter.cc +++ b/source/extensions/filters/http/oauth2/filter.cc @@ -36,11 +36,13 @@ namespace { Http::RegisterCustomInlineHeader authorization_handle(Http::CustomHeaders::get().Authorization); -constexpr absl::string_view SignoutCookieValue = - "OauthHMAC=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT"; +// Deleted OauthHMAC cookie. +constexpr const char* SignoutCookieValue = + "{}=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT"; -constexpr absl::string_view SignoutBearerTokenValue = - "BearerToken=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT"; +// Deleted BearerToken cookie. 
+constexpr const char* SignoutBearerTokenValue = + "{}=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT"; constexpr const char* CookieTailFormatString = ";version=1;path=/;Max-Age={};secure"; @@ -116,7 +118,7 @@ std::string findValue(const absl::flat_hash_map& map, } // namespace FilterConfig::FilterConfig( - const envoy::extensions::filters::http::oauth2::v3alpha::OAuth2Config& proto_config, + const envoy::extensions::filters::http::oauth2::v3::OAuth2Config& proto_config, Upstream::ClusterManager& cluster_manager, std::shared_ptr secret_reader, Stats::Scope& scope, const std::string& stats_prefix) : oauth_token_endpoint_(proto_config.token_endpoint()), @@ -130,7 +132,8 @@ FilterConfig::FilterConfig( absl::StrJoin(authScopesList(proto_config.auth_scopes()), " "), ":/=&? ")), encoded_resource_query_params_(encodeResourceList(proto_config.resources())), forward_bearer_token_(proto_config.forward_bearer_token()), - pass_through_header_matchers_(headerMatchers(proto_config.pass_through_matcher())) { + pass_through_header_matchers_(headerMatchers(proto_config.pass_through_matcher())), + cookie_names_(proto_config.credentials().cookie_names()) { if (!cluster_manager.clusters().hasCluster(oauth_token_endpoint_.cluster())) { throw EnvoyException(fmt::format("OAuth2 filter: unknown cluster '{}' in config. Please " "specify which cluster to direct OAuth requests to.", @@ -144,13 +147,14 @@ FilterStats FilterConfig::generateStats(const std::string& prefix, Stats::Scope& void OAuth2CookieValidator::setParams(const Http::RequestHeaderMap& headers, const std::string& secret) { - const auto& cookies = Http::Utility::parseCookies(headers, [](absl::string_view key) -> bool { - return key == "OauthExpires" || key == "BearerToken" || key == "OauthHMAC"; + const auto& cookies = Http::Utility::parseCookies(headers, [this](absl::string_view key) -> bool { + return key == cookie_names_.oauth_expires_ || key == cookie_names_.bearer_token_ || + key == cookie_names_.oauth_hmac_; }); - expires_ = findValue(cookies, "OauthExpires"); - token_ = findValue(cookies, "BearerToken"); - hmac_ = findValue(cookies, "OauthHMAC"); + expires_ = findValue(cookies, cookie_names_.oauth_expires_); + token_ = findValue(cookies, cookie_names_.bearer_token_); + hmac_ = findValue(cookies, cookie_names_.oauth_hmac_); host_ = headers.Host()->value().getStringView(); secret_.assign(secret.begin(), secret.end()); @@ -180,7 +184,7 @@ bool OAuth2CookieValidator::isValid() const { return hmacIsValid() && timestampI OAuth2Filter::OAuth2Filter(FilterConfigSharedPtr config, std::unique_ptr&& oauth_client, TimeSource& time_source) - : validator_(std::make_shared(time_source)), + : validator_(std::make_shared(time_source, config->cookieNames())), oauth_client_(std::move(oauth_client)), config_(std::move(config)), time_source_(time_source) { @@ -397,8 +401,12 @@ Http::FilterHeadersStatus OAuth2Filter::signOutUser(const Http::RequestHeaderMap {{Http::Headers::get().Status, std::to_string(enumToInt(Http::Code::Found))}})}; const std::string new_path = absl::StrCat(Http::Utility::getScheme(headers), "://", host_, "/"); - response_headers->addReference(Http::Headers::get().SetCookie, SignoutCookieValue); - response_headers->addReference(Http::Headers::get().SetCookie, SignoutBearerTokenValue); + response_headers->addReferenceKey( + Http::Headers::get().SetCookie, + fmt::format(SignoutCookieValue, config_->cookieNames().oauth_hmac_)); + response_headers->addReferenceKey( + Http::Headers::get().SetCookie, + fmt::format(SignoutBearerTokenValue, 
config_->cookieNames().bearer_token_)); response_headers->setLocation(new_path); decoder_callbacks_->encodeHeaders(std::move(response_headers), true, SIGN_OUT); @@ -457,18 +465,21 @@ void OAuth2Filter::finishFlow() { Http::ResponseHeaderMapPtr response_headers{Http::createHeaderMap( {{Http::Headers::get().Status, std::to_string(enumToInt(Http::Code::Found))}})}; + const CookieNames& cookie_names = config_->cookieNames(); + response_headers->addReferenceKey( Http::Headers::get().SetCookie, - absl::StrCat("OauthHMAC=", encoded_token, cookie_tail_http_only)); + absl::StrCat(cookie_names.oauth_hmac_, "=", encoded_token, cookie_tail_http_only)); response_headers->addReferenceKey( Http::Headers::get().SetCookie, - absl::StrCat("OauthExpires=", new_expires_, cookie_tail_http_only)); + absl::StrCat(cookie_names.oauth_expires_, "=", new_expires_, cookie_tail_http_only)); // If opted-in, we also create a new Bearer cookie for the authorization token provided by the // auth server. if (config_->forwardBearerToken()) { - response_headers->addReferenceKey(Http::Headers::get().SetCookie, - absl::StrCat("BearerToken=", access_token_, cookie_tail)); + response_headers->addReferenceKey( + Http::Headers::get().SetCookie, + absl::StrCat(cookie_names.bearer_token_, "=", access_token_, cookie_tail)); } response_headers->setLocation(state_); diff --git a/source/extensions/filters/http/oauth2/filter.h b/source/extensions/filters/http/oauth2/filter.h index e2e4b88be3b8..5b26f62b6446 100644 --- a/source/extensions/filters/http/oauth2/filter.h +++ b/source/extensions/filters/http/oauth2/filter.h @@ -7,7 +7,7 @@ #include "envoy/common/callback.h" #include "envoy/common/matchers.h" #include "envoy/config/core/v3/http_uri.pb.h" -#include "envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.h" +#include "envoy/extensions/filters/http/oauth2/v3/oauth.pb.h" #include "envoy/http/header_map.h" #include "envoy/server/filter_config.h" #include "envoy/stats/stats_macros.h" @@ -94,13 +94,33 @@ struct FilterStats { ALL_OAUTH_FILTER_STATS(GENERATE_COUNTER_STRUCT) }; +/** + * Helper structure to hold custom cookie names. + */ +struct CookieNames { + CookieNames(const envoy::extensions::filters::http::oauth2::v3::OAuth2Credentials::CookieNames& + cookie_names) + : CookieNames(cookie_names.bearer_token(), cookie_names.oauth_hmac(), + cookie_names.oauth_expires()) {} + + CookieNames(const std::string& bearer_token, const std::string& oauth_hmac, + const std::string& oauth_expires) + : bearer_token_(bearer_token.empty() ? "BearerToken" : bearer_token), + oauth_hmac_(oauth_hmac.empty() ? "OauthHMAC" : oauth_hmac), + oauth_expires_(oauth_expires.empty() ? "OauthExpires" : oauth_expires) {} + + const std::string bearer_token_; + const std::string oauth_hmac_; + const std::string oauth_expires_; +}; + /** * This class encapsulates all data needed for the filter to operate so that we don't pass around * raw protobufs and other arbitrary data. 
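Building on the `CookieNames` helper defined in the hunk that follows (empty configured names fall back to the historical `BearerToken`/`OauthHMAC`/`OauthExpires` defaults), here is a standalone sketch of looking up the three cookies by their configured names. The cookie parsing is deliberately simplified (split on `"; "`, no quoting rules) and the helper names are illustrative.

```cpp
#include <iostream>
#include <map>
#include <string>

struct CookieNames {
  CookieNames(const std::string& bearer_token, const std::string& oauth_hmac,
              const std::string& oauth_expires)
      : bearer_token_(bearer_token.empty() ? "BearerToken" : bearer_token),
        oauth_hmac_(oauth_hmac.empty() ? "OauthHMAC" : oauth_hmac),
        oauth_expires_(oauth_expires.empty() ? "OauthExpires" : oauth_expires) {}

  const std::string bearer_token_;
  const std::string oauth_hmac_;
  const std::string oauth_expires_;
};

// Very small Cookie-header parser for the sketch: "k=v; k2=v2; ...".
std::map<std::string, std::string> parseCookies(const std::string& header) {
  std::map<std::string, std::string> out;
  std::string::size_type start = 0;
  while (start < header.size()) {
    auto end = header.find("; ", start);
    if (end == std::string::npos) {
      end = header.size();
    }
    const std::string pair = header.substr(start, end - start);
    const auto eq = pair.find('=');
    if (eq != std::string::npos) {
      out[pair.substr(0, eq)] = pair.substr(eq + 1);
    }
    start = end + 2;
  }
  return out;
}

int main() {
  CookieNames names("", "MyHMAC", ""); // only the HMAC cookie name is customized
  auto cookies = parseCookies("BearerToken=tok123; MyHMAC=abc; OauthExpires=1700000000");
  std::cout << cookies[names.bearer_token_] << " " << cookies[names.oauth_hmac_] << " "
            << cookies[names.oauth_expires_] << "\n"; // tok123 abc 1700000000
}
```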
*/ class FilterConfig { public: - FilterConfig(const envoy::extensions::filters::http::oauth2::v3alpha::OAuth2Config& proto_config, + FilterConfig(const envoy::extensions::filters::http::oauth2::v3::OAuth2Config& proto_config, Upstream::ClusterManager& cluster_manager, std::shared_ptr secret_reader, Stats::Scope& scope, const std::string& stats_prefix); @@ -123,6 +143,7 @@ class FilterConfig { FilterStats& stats() { return stats_; } const std::string& encodedAuthScopes() const { return encoded_auth_scopes_; } const std::string& encodedResourceQueryParams() const { return encoded_resource_query_params_; } + const CookieNames& cookieNames() const { return cookie_names_; } private: static FilterStats generateStats(const std::string& prefix, Stats::Scope& scope); @@ -139,6 +160,7 @@ class FilterConfig { const std::string encoded_resource_query_params_; const bool forward_bearer_token_ : 1; const std::vector pass_through_header_matchers_; + const CookieNames cookie_names_; }; using FilterConfigSharedPtr = std::shared_ptr; @@ -164,7 +186,8 @@ class CookieValidator { class OAuth2CookieValidator : public CookieValidator { public: - explicit OAuth2CookieValidator(TimeSource& time_source) : time_source_(time_source) {} + explicit OAuth2CookieValidator(TimeSource& time_source, const CookieNames& cookie_names) + : time_source_(time_source), cookie_names_(cookie_names) {} const std::string& token() const override { return token_; } void setParams(const Http::RequestHeaderMap& headers, const std::string& secret) override; @@ -179,6 +202,7 @@ class OAuth2CookieValidator : public CookieValidator { std::vector secret_; absl::string_view host_; TimeSource& time_source_; + const CookieNames cookie_names_; }; /** diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index c007471d7779..b10e12688b0d 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -200,13 +200,13 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, if (status == Filters::Common::RateLimit::LimitStatus::OverLimit && config_->runtime().snapshot().featureEnabled("ratelimit.http_filter_enforcing", 100)) { state_ = State::Responded; + callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::RateLimited); callbacks_->sendLocalReply( Http::Code::TooManyRequests, response_body, [this](Http::HeaderMap& headers) { populateResponseHeaders(headers, /*from_local_reply=*/true); }, config_->rateLimitedGrpcStatus(), RcDetails::get().RateLimited); - callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::RateLimited); } else if (status == Filters::Common::RateLimit::LimitStatus::Error) { if (config_->failureModeAllow()) { cluster_->statsScope().counterFromStatName(stat_names.failure_mode_allowed_).inc(); @@ -216,9 +216,9 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, } } else { state_ = State::Responded; + callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::RateLimitServiceError); callbacks_->sendLocalReply(Http::Code::InternalServerError, response_body, nullptr, absl::nullopt, RcDetails::get().RateLimitError); - callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::RateLimitServiceError); } } else if (!initiating_call_) { appendRequestHeaders(req_headers_to_add); diff --git a/source/extensions/filters/listener/http_inspector/http_inspector.cc b/source/extensions/filters/listener/http_inspector/http_inspector.cc index 
59b28e1febcb..8f77cbcf771e 100644 --- a/source/extensions/filters/listener/http_inspector/http_inspector.cc +++ b/source/extensions/filters/listener/http_inspector/http_inspector.cc @@ -61,10 +61,6 @@ Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { cb.dispatcher(), [this](uint32_t events) { ENVOY_LOG(trace, "http inspector event: {}", events); - // inspector is always peeking and can never determine EOF. - // Use this event type to avoid listener timeout on the OS supporting - // FileReadyType::Closed. - bool end_stream = events & Event::FileReadyType::Closed; const ParseState parse_state = onRead(); switch (parse_state) { @@ -78,19 +74,11 @@ Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { cb_->continueFilterChain(true); break; case ParseState::Continue: - if (end_stream) { - // Parser fails to determine http but the end of stream is reached. Fallback to - // non-http. - done(false); - cb_->socket().ioHandle().resetFileEvents(); - cb_->continueFilterChain(true); - } // do nothing but wait for the next event break; } }, - Event::PlatformDefaultTriggerType, - Event::FileReadyType::Read | Event::FileReadyType::Closed); + Event::PlatformDefaultTriggerType, Event::FileReadyType::Read); return Network::FilterStatus::StopIteration; } NOT_REACHED_GCOVR_EXCL_LINE; @@ -107,6 +95,11 @@ ParseState Filter::onRead() { return ParseState::Error; } + // Remote closed + if (result.return_value_ == 0) { + return ParseState::Error; + } + const auto parse_state = parseHttpHeader(absl::string_view(reinterpret_cast(buf_), result.return_value_)); switch (parse_state) { diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc index aed47748e9ae..ee9e0c53f025 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc @@ -490,7 +490,7 @@ ReadOrParseState Filter::readProxyHeader(Network::IoHandle& io_handle) { } else { return ReadOrParseState::Error; } - } else { + } else if (nread != 0) { const auto result = io_handle.recv(buf_ + buf_off_, nread, 0); nread = result.return_value_; if (!result.ok()) { diff --git a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc index fb0f06e6c2d5..92a70d07a46f 100644 --- a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc +++ b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc @@ -93,12 +93,6 @@ Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { socket.ioHandle().initializeFileEvent( cb.dispatcher(), [this](uint32_t events) { - if (events & Event::FileReadyType::Closed) { - config_->stats().connection_closed_.inc(); - done(false); - return; - } - ASSERT(events == Event::FileReadyType::Read); ParseState parse_state = onRead(); switch (parse_state) { @@ -113,8 +107,7 @@ Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { break; } }, - Event::PlatformDefaultTriggerType, - Event::FileReadyType::Read | Event::FileReadyType::Closed); + Event::PlatformDefaultTriggerType, Event::FileReadyType::Read); return Network::FilterStatus::StopIteration; } NOT_REACHED_GCOVR_EXCL_LINE; @@ -176,6 +169,11 @@ ParseState Filter::onRead() { return ParseState::Error; } + if (result.return_value_ == 0) { + config_->stats().connection_closed_.inc(); + return ParseState::Error; + } + // Because 
we're doing a MSG_PEEK, data we've seen before gets returned every time, so // skip over what we've already processed. if (static_cast(result.return_value_) > read_) { diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.cc b/source/extensions/filters/network/dubbo_proxy/active_message.cc index e828884ff26f..bcdc2e0233d9 100644 --- a/source/extensions/filters/network/dubbo_proxy/active_message.cc +++ b/source/extensions/filters/network/dubbo_proxy/active_message.cc @@ -183,7 +183,7 @@ void ActiveMessageEncoderFilter::continueEncoding() { ActiveMessage::ActiveMessage(ConnectionManager& parent) : parent_(parent), request_timer_(std::make_unique( parent_.stats().request_time_ms_, parent.timeSystem())), - request_id_(-1), stream_id_(parent.randomGenerator().random()), + stream_id_(parent.randomGenerator().random()), stream_info_(parent.timeSystem(), parent_.connection().connectionInfoProviderSharedPtr()), pending_stream_decoded_(false), local_response_sent_(false) { parent_.stats().request_active_.inc(); @@ -346,7 +346,6 @@ FilterStatus ActiveMessage::applyEncoderFilters(ActiveMessageEncoderFilter* filt void ActiveMessage::sendLocalReply(const DubboFilters::DirectResponse& response, bool end_stream) { ASSERT(metadata_); - metadata_->setRequestId(request_id_); parent_.sendLocalReply(*metadata_, response, end_stream); if (end_stream) { diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.h b/source/extensions/filters/network/dubbo_proxy/active_message.h index 5e860be3ddc2..c310e85b8ab2 100644 --- a/source/extensions/filters/network/dubbo_proxy/active_message.h +++ b/source/extensions/filters/network/dubbo_proxy/active_message.h @@ -202,8 +202,6 @@ class ActiveMessage : public LinkedObject, std::list encoder_filters_; std::function encoder_filter_action_; - int32_t request_id_; - // This value is used in the calculation of the weighted cluster. uint64_t stream_id_; StreamInfo::StreamInfoImpl stream_info_; diff --git a/source/extensions/filters/network/ext_authz/ext_authz.cc b/source/extensions/filters/network/ext_authz/ext_authz.cc index b59a2bf04a4c..4f9da9175fc1 100644 --- a/source/extensions/filters/network/ext_authz/ext_authz.cc +++ b/source/extensions/filters/network/ext_authz/ext_authz.cc @@ -94,6 +94,12 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { !config_->failureModeAllow())) { config_->stats().cx_closed_.inc(); filter_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); + filter_callbacks_->connection().streamInfo().setResponseFlag( + StreamInfo::ResponseFlag::UnauthorizedExternalService); + filter_callbacks_->connection().streamInfo().setResponseCodeDetails( + response->status == Filters::Common::ExtAuthz::CheckStatus::Denied + ? Filters::Common::ExtAuthz::ResponseCodeDetails::get().AuthzDenied + : Filters::Common::ExtAuthz::ResponseCodeDetails::get().AuthzError); } else { // Let the filter chain continue. filter_return_ = FilterReturn::Continue; diff --git a/source/extensions/filters/network/redis_proxy/BUILD b/source/extensions/filters/network/redis_proxy/BUILD index 0902220618ac..58d689ff66fc 100644 --- a/source/extensions/filters/network/redis_proxy/BUILD +++ b/source/extensions/filters/network/redis_proxy/BUILD @@ -120,10 +120,6 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], - # TODO(#9953) clean up. 
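Related to the inspector hunks above: instead of registering for a `Closed` file event, the listener filters now treat a peek that returns zero bytes as the peer having closed. A standalone POSIX-socket sketch of that check (the enum and helper are illustrative, not Envoy code):

```cpp
#include <cerrno>
#include <cstddef>
#include <iostream>
#include <sys/socket.h>
#include <unistd.h>

enum class PeekState { Data, RemoteClosed, Again, Error };

// Peek without consuming; a zero return from recv() means orderly shutdown by the peer.
PeekState peekOnce(int fd, char* buf, size_t len) {
  const ssize_t n = ::recv(fd, buf, len, MSG_PEEK);
  if (n > 0) {
    return PeekState::Data;
  }
  if (n == 0) {
    return PeekState::RemoteClosed;
  }
  return (errno == EAGAIN || errno == EWOULDBLOCK) ? PeekState::Again : PeekState::Error;
}

int main() {
  int fds[2];
  if (::socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0) {
    return 1;
  }
  ::close(fds[1]); // simulate the remote end closing before sending anything
  char buf[64];
  std::cout << (peekOnce(fds[0], buf, sizeof(buf)) == PeekState::RemoteClosed ? "remote closed\n"
                                                                              : "unexpected\n");
  ::close(fds[0]);
}
```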
- extra_visibility = [ - "//test/integration:__subpackages__", - ], deps = [ "//envoy/upstream:upstream_interface", "//source/extensions/common/redis:cluster_refresh_manager_lib", diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD b/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD index 335b62d0192c..9549110461d3 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD @@ -20,7 +20,7 @@ envoy_cc_library( "//source/common/common:minimal_logger_lib", "//source/common/tcp_proxy", "//source/extensions/common/dynamic_forward_proxy:dns_cache_interface", - "@envoy_api//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3:pkg_cc_proto", ], ) @@ -33,6 +33,6 @@ envoy_cc_extension( "//source/extensions/common/dynamic_forward_proxy:dns_cache_manager_impl", "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.h b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.h index f2aea2c4b85f..628181f775fa 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.h +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.h @@ -1,7 +1,7 @@ #pragma once -#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.pb.h" -#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.pb.validate.h" +#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.pb.h" +#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.pb.validate.h" #include "source/extensions/filters/network/common/factory_base.h" #include "source/extensions/filters/network/well_known_names.h" @@ -12,7 +12,7 @@ namespace NetworkFilters { namespace SniDynamicForwardProxy { using FilterConfig = - envoy::extensions::filters::network::sni_dynamic_forward_proxy::v3alpha::FilterConfig; + envoy::extensions::filters::network::sni_dynamic_forward_proxy::v3::FilterConfig; /** * Config registration for the sni_dynamic_forward_proxy filter. 
@see diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h index e7f0de159d20..23785a275090 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h @@ -1,6 +1,6 @@ #pragma once -#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.pb.h" +#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.pb.h" #include "envoy/network/filter.h" #include "envoy/upstream/cluster_manager.h" @@ -13,7 +13,7 @@ namespace NetworkFilters { namespace SniDynamicForwardProxy { using FilterConfig = - envoy::extensions::filters::network::sni_dynamic_forward_proxy::v3alpha::FilterConfig; + envoy::extensions::filters::network::sni_dynamic_forward_proxy::v3::FilterConfig; class ProxyFilterConfig { public: diff --git a/source/extensions/filters/network/thrift_proxy/auto_protocol_impl.h b/source/extensions/filters/network/thrift_proxy/auto_protocol_impl.h index c15d50e3de9d..7f931199d31b 100644 --- a/source/extensions/filters/network/thrift_proxy/auto_protocol_impl.h +++ b/source/extensions/filters/network/thrift_proxy/auto_protocol_impl.h @@ -6,6 +6,7 @@ #include "source/common/common/fmt.h" #include "source/extensions/filters/network/thrift_proxy/protocol.h" +#include "source/extensions/filters/network/thrift_proxy/thrift.h" namespace Envoy { namespace Extensions { @@ -33,6 +34,9 @@ class AutoProtocolImpl : public Protocol { bool readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) override; bool readMessageEnd(Buffer::Instance& buffer) override; + bool peekReplyPayload(Buffer::Instance& buffer, ReplyType& reply_type) override { + return protocol_->peekReplyPayload(buffer, reply_type); + } bool readStructBegin(Buffer::Instance& buffer, std::string& name) override { return protocol_->readStructBegin(buffer, name); } diff --git a/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.cc b/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.cc index 805ec973e021..961f89a8277a 100644 --- a/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.cc @@ -60,6 +60,33 @@ bool BinaryProtocolImpl::readMessageEnd(Buffer::Instance& buffer) { return true; } +bool BinaryProtocolImpl::peekReplyPayload(Buffer::Instance& buffer, ReplyType& reply_type) { + // binary protocol does not transmit struct names so go straight to peek at field begin + // FieldType::Stop is encoded as 1 byte. + if (buffer.length() < 1) { + return false; + } + + FieldType type = static_cast(buffer.peekInt()); + if (type == FieldType::Stop) { + // If the first field is stop then response is void success + reply_type = ReplyType::Success; + return true; + } + + if (buffer.length() < 3) { + return false; + } + + int16_t id = buffer.peekBEInt(1); + if (id < 0) { + throw EnvoyException(absl::StrCat("invalid binary protocol field id ", id)); + } + // successful response struct in field id 0, error (IDL exception) in field id greater than 0 + reply_type = id == 0 ? 
ReplyType::Success : ReplyType::Error; + return true; +} + bool BinaryProtocolImpl::readStructBegin(Buffer::Instance& buffer, std::string& name) { UNREFERENCED_PARAMETER(buffer); name.clear(); // binary protocol does not transmit struct names diff --git a/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.h b/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.h index a3497aa70a6d..1550e8d22610 100644 --- a/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.h +++ b/source/extensions/filters/network/thrift_proxy/binary_protocol_impl.h @@ -6,6 +6,7 @@ #include "envoy/common/pure.h" #include "source/extensions/filters/network/thrift_proxy/protocol.h" +#include "source/extensions/filters/network/thrift_proxy/thrift.h" namespace Envoy { namespace Extensions { @@ -25,6 +26,7 @@ class BinaryProtocolImpl : public Protocol { ProtocolType type() const override { return ProtocolType::Binary; } bool readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) override; bool readMessageEnd(Buffer::Instance& buffer) override; + bool peekReplyPayload(Buffer::Instance& buffer, ReplyType& reply_type) override; bool readStructBegin(Buffer::Instance& buffer, std::string& name) override; bool readStructEnd(Buffer::Instance& buffer) override; bool readFieldBegin(Buffer::Instance& buffer, std::string& name, FieldType& field_type, diff --git a/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc b/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc index 4bab4da2e625..3aef9e205aa7 100644 --- a/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc @@ -79,6 +79,46 @@ bool CompactProtocolImpl::readMessageEnd(Buffer::Instance& buffer) { return true; } +bool CompactProtocolImpl::peekReplyPayload(Buffer::Instance& buffer, ReplyType& reply_type) { + // compact protocol does not transmit struct names so go straight to peek for field begin + // Minimum size: FieldType::Stop is encoded as 1 byte. + if (buffer.length() < 1) { + return false; + } + + uint8_t delta_and_type = buffer.peekInt(); + if ((delta_and_type & 0x0f) == 0) { + // Type is stop, no need to do further decoding + // If the first field is stop then response is void success + reply_type = ReplyType::Success; + return true; + } + + if ((delta_and_type >> 4) != 0) { + // field id delta is non zero and so is an IDL exception (success field id is 0) + reply_type = ReplyType::Error; + return true; + } + + int id_size = 0; + // Field ID delta is zero: this is a long-form field header, followed by zig-zag field id. + if (buffer.length() < 2) { + return false; + } + + int32_t id = BufferHelper::peekZigZagI32(buffer, 1, id_size); + if (id_size < 0) { + return false; + } + + if (id < 0 || id > std::numeric_limits::max()) { + throw EnvoyException(absl::StrCat("invalid compact protocol field id ", id)); + } + // successful response struct in field id 0, error (IDL exception) in field id greater than 0 + reply_type = id == 0 ? 
ReplyType::Success : ReplyType::Error; + return true; +} + bool CompactProtocolImpl::readStructBegin(Buffer::Instance& buffer, std::string& name) { UNREFERENCED_PARAMETER(buffer); name.clear(); // compact protocol does not transmit struct names diff --git a/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.h b/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.h index fb89e613ece3..a2c978bcf2f8 100644 --- a/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.h +++ b/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.h @@ -7,6 +7,7 @@ #include "envoy/common/pure.h" #include "source/extensions/filters/network/thrift_proxy/protocol.h" +#include "source/extensions/filters/network/thrift_proxy/thrift.h" #include "absl/types/optional.h" @@ -28,6 +29,7 @@ class CompactProtocolImpl : public Protocol { ProtocolType type() const override { return ProtocolType::Compact; } bool readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) override; bool readMessageEnd(Buffer::Instance& buffer) override; + bool peekReplyPayload(Buffer::Instance& buffer, ReplyType& reply_type) override; bool readStructBegin(Buffer::Instance& buffer, std::string& name) override; bool readStructEnd(Buffer::Instance& buffer) override; bool readFieldBegin(Buffer::Instance& buffer, std::string& name, FieldType& field_type, diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.cc b/source/extensions/filters/network/thrift_proxy/conn_manager.cc index 1e100247e59c..531340105ca0 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.cc +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.cc @@ -210,44 +210,19 @@ bool ConnectionManager::ResponseDecoder::onData(Buffer::Instance& data) { return complete_; } +FilterStatus ConnectionManager::ResponseDecoder::passthroughData(Buffer::Instance& data) { + passthrough_ = true; + return ProtocolConverter::passthroughData(data); +} + FilterStatus ConnectionManager::ResponseDecoder::messageBegin(MessageMetadataSharedPtr metadata) { metadata_ = metadata; metadata_->setSequenceId(parent_.original_sequence_id_); - first_reply_field_ = - (metadata->hasMessageType() && metadata->messageType() == MessageType::Reply); - return ProtocolConverter::messageBegin(metadata); -} - -FilterStatus ConnectionManager::ResponseDecoder::fieldBegin(absl::string_view name, - FieldType& field_type, - int16_t& field_id) { - if (first_reply_field_) { - // Reply messages contain a struct where field 0 is the call result and fields 1+ are - // exceptions, if defined. At most one field may be set. Therefore, the very first field we - // encounter in a reply is either field 0 (success) or not (IDL exception returned). - // If first fieldType is FieldType::Stop then it is a void success and handled in messageEnd() - // because decoder state machine does not call decoder event callback fieldBegin on - // FieldType::Stop. - success_ = (field_id == 0); - first_reply_field_ = false; - } - - return ProtocolConverter::fieldBegin(name, field_type, field_id); -} - -FilterStatus ConnectionManager::ResponseDecoder::messageEnd() { - if (first_reply_field_) { - // When the response is thrift void type there is never a fieldBegin call on a success - // because the response struct has no fields and so the first field type is FieldType::Stop. - // The decoder state machine handles FieldType::Stop by going immediately to structEnd, - // skipping fieldBegin callback. 
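The per-field bookkeeping being removed in this hunk is superseded by the protocol-level peek added earlier in the patch: the decoder now asks the protocol to classify a Reply before any payload bytes are consumed. A minimal standalone sketch of the binary-protocol rule, using a plain byte vector instead of Envoy's Buffer (names are illustrative):

#include <cstdint>
#include <optional>
#include <stdexcept>
#include <vector>

enum class ReplyType { Success, Error };

// Returns std::nullopt when more bytes are needed to decide.
std::optional<ReplyType> peekBinaryReplyType(const std::vector<uint8_t>& bytes) {
  if (bytes.empty()) {
    return std::nullopt; // need at least the first field-type byte
  }
  constexpr uint8_t FieldStop = 0;
  if (bytes[0] == FieldStop) {
    return ReplyType::Success; // empty struct: a void success
  }
  if (bytes.size() < 3) {
    return std::nullopt; // field type (1 byte) + big-endian field id (2 bytes)
  }
  const int16_t field_id = static_cast<int16_t>((bytes[1] << 8) | bytes[2]);
  if (field_id < 0) {
    throw std::runtime_error("invalid binary protocol field id");
  }
  // Field 0 carries the call result; any other first field is a declared IDL exception.
  return field_id == 0 ? ReplyType::Success : ReplyType::Error;
}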
Therefore if we are still waiting for the first reply field - // at end of message then it is a void success. - success_ = true; - first_reply_field_ = false; + if (metadata->hasReplyType()) { + success_ = metadata->replyType() == ReplyType::Success; } - - return ProtocolConverter::messageEnd(); + return ProtocolConverter::messageBegin(metadata); } FilterStatus ConnectionManager::ResponseDecoder::transportEnd() { @@ -275,14 +250,19 @@ FilterStatus ConnectionManager::ResponseDecoder::transportEnd() { cm.read_callbacks_->connection().write(buffer, false); cm.stats_.response_.inc(); + if (passthrough_) { + cm.stats_.response_passthrough_.inc(); + } switch (metadata_->messageType()) { case MessageType::Reply: cm.stats_.response_reply_.inc(); - if (success_.value_or(false)) { - cm.stats_.response_success_.inc(); - } else { - cm.stats_.response_error_.inc(); + if (success_) { + if (success_.value()) { + cm.stats_.response_success_.inc(); + } else { + cm.stats_.response_error_.inc(); + } } break; @@ -419,6 +399,10 @@ void ConnectionManager::ActiveRpc::finalizeRequest() { parent_.stats_.downstream_cx_max_requests_.inc(); } + if (passthrough_) { + parent_.stats_.request_passthrough_.inc(); + } + bool destroy_rpc = false; switch (original_msg_type_) { case MessageType::Call: @@ -458,6 +442,7 @@ bool ConnectionManager::ActiveRpc::passthroughSupported() const { } FilterStatus ConnectionManager::ActiveRpc::passthroughData(Buffer::Instance& data) { + passthrough_ = true; filter_context_ = &data; filter_action_ = [this](DecoderEventHandler* filter) -> FilterStatus { Buffer::Instance* data = absl::any_cast(filter_context_); diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.h b/source/extensions/filters/network/thrift_proxy/conn_manager.h index 064d29b05060..789d54aaffe3 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.h +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.h @@ -74,17 +74,15 @@ class ConnectionManager : public Network::ReadFilter, struct ResponseDecoder : public DecoderCallbacks, public ProtocolConverter { ResponseDecoder(ActiveRpc& parent, Transport& transport, Protocol& protocol) : parent_(parent), decoder_(std::make_unique(transport, protocol, *this)), - complete_(false), first_reply_field_(false) { + complete_(false), passthrough_{false} { initProtocolConverter(*parent_.parent_.protocol_, parent_.response_buffer_); } bool onData(Buffer::Instance& data); // ProtocolConverter + FilterStatus passthroughData(Buffer::Instance& data) override; FilterStatus messageBegin(MessageMetadataSharedPtr metadata) override; - FilterStatus messageEnd() override; - FilterStatus fieldBegin(absl::string_view name, FieldType& field_type, - int16_t& field_id) override; FilterStatus transportBegin(MessageMetadataSharedPtr metadata) override { UNREFERENCED_PARAMETER(metadata); return FilterStatus::Continue; @@ -101,7 +99,7 @@ class ConnectionManager : public Network::ReadFilter, MessageMetadataSharedPtr metadata_; absl::optional success_; bool complete_ : 1; - bool first_reply_field_ : 1; + bool passthrough_ : 1; }; using ResponseDecoderPtr = std::unique_ptr; @@ -155,7 +153,7 @@ class ConnectionManager : public Network::ReadFilter, stream_id_(parent_.random_generator_.random()), stream_info_(parent_.time_source_, parent_.read_callbacks_->connection().connectionInfoProviderSharedPtr()), - local_response_sent_{false}, pending_transport_end_{false} { + local_response_sent_{false}, pending_transport_end_{false}, passthrough_{false} { 
parent_.stats_.request_active_.inc(); } ~ActiveRpc() override { @@ -245,6 +243,7 @@ class ConnectionManager : public Network::ReadFilter, absl::any filter_context_; bool local_response_sent_ : 1; bool pending_transport_end_ : 1; + bool passthrough_ : 1; }; using ActiveRpcPtr = std::unique_ptr; diff --git a/source/extensions/filters/network/thrift_proxy/decoder.cc b/source/extensions/filters/network/thrift_proxy/decoder.cc index 111703e63da5..66c762a65f00 100644 --- a/source/extensions/filters/network/thrift_proxy/decoder.cc +++ b/source/extensions/filters/network/thrift_proxy/decoder.cc @@ -6,6 +6,7 @@ #include "source/common/common/assert.h" #include "source/common/common/macros.h" #include "source/extensions/filters/network/thrift_proxy/app_exception_impl.h" +#include "source/extensions/filters/network/thrift_proxy/thrift.h" namespace Envoy { namespace Extensions { @@ -26,19 +27,29 @@ DecoderStateMachine::DecoderStatus DecoderStateMachine::passthroughData(Buffer:: } // MessageBegin -> StructBegin +// MessageBegin -> ReplyPayload (reply received, get reply type) DecoderStateMachine::DecoderStatus DecoderStateMachine::messageBegin(Buffer::Instance& buffer) { const auto total = buffer.length(); if (!proto_.readMessageBegin(buffer, *metadata_)) { return {ProtocolState::WaitForData}; } + body_start_ = total - buffer.length(); stack_.clear(); stack_.emplace_back(Frame(ProtocolState::MessageEnd)); + // If a reply peek at the payload to see if success or error (IDL exception) + if (metadata_->hasMessageType() && metadata_->messageType() == MessageType::Reply) { + return {ProtocolState::ReplyPayload, FilterStatus::Continue}; + } + + return handleMessageBegin(); +} +DecoderStateMachine::DecoderStatus DecoderStateMachine::handleMessageBegin() { const auto status = handler_.messageBegin(metadata_); if (callbacks_.passthroughEnabled()) { - body_bytes_ = metadata_->frameSize() - (total - buffer.length()); + body_bytes_ = metadata_->frameSize() - body_start_; return {ProtocolState::PassthroughData, status}; } @@ -54,6 +65,17 @@ DecoderStateMachine::DecoderStatus DecoderStateMachine::messageEnd(Buffer::Insta return {ProtocolState::Done, handler_.messageEnd()}; } +// ReplyPayload -> StructBegin +DecoderStateMachine::DecoderStatus DecoderStateMachine::replyPayload(Buffer::Instance& buffer) { + ReplyType reply_type; + if (!proto_.peekReplyPayload(buffer, reply_type)) { + return {ProtocolState::WaitForData}; + } + + metadata_->setReplyType(reply_type); + return handleMessageBegin(); +} + // StructBegin -> FieldBegin DecoderStateMachine::DecoderStatus DecoderStateMachine::structBegin(Buffer::Instance& buffer) { std::string name; @@ -318,6 +340,8 @@ DecoderStateMachine::DecoderStatus DecoderStateMachine::handleState(Buffer::Inst return passthroughData(buffer); case ProtocolState::MessageBegin: return messageBegin(buffer); + case ProtocolState::ReplyPayload: + return replyPayload(buffer); case ProtocolState::StructBegin: return structBegin(buffer); case ProtocolState::StructEnd: diff --git a/source/extensions/filters/network/thrift_proxy/decoder.h b/source/extensions/filters/network/thrift_proxy/decoder.h index 99739221c510..f00bea67baba 100644 --- a/source/extensions/filters/network/thrift_proxy/decoder.h +++ b/source/extensions/filters/network/thrift_proxy/decoder.h @@ -19,6 +19,7 @@ namespace ThriftProxy { FUNCTION(PassthroughData) \ FUNCTION(MessageBegin) \ FUNCTION(MessageEnd) \ + FUNCTION(ReplyPayload) \ FUNCTION(StructBegin) \ FUNCTION(StructEnd) \ FUNCTION(FieldBegin) \ @@ -134,6 +135,7 @@ class 
DecoderStateMachine : public Logger::Loggable { DecoderStatus passthroughData(Buffer::Instance& buffer); DecoderStatus messageBegin(Buffer::Instance& buffer); DecoderStatus messageEnd(Buffer::Instance& buffer); + DecoderStatus replyPayload(Buffer::Instance& buffer); DecoderStatus structBegin(Buffer::Instance& buffer); DecoderStatus structEnd(Buffer::Instance& buffer); DecoderStatus fieldBegin(Buffer::Instance& buffer); @@ -150,6 +152,10 @@ class DecoderStateMachine : public Logger::Loggable { DecoderStatus setValue(Buffer::Instance& buffer); DecoderStatus setEnd(Buffer::Instance& buffer); + // handleMessageBegin calls the handler for messageBegin and then determines whether to + // perform payload passthrough or not + DecoderStatus handleMessageBegin(); + // handleValue represents the generic Value state from the state machine documentation. It // returns either ProtocolState::WaitForData if more data is required or the next state. For // structs, lists, maps, or sets the return_state is pushed onto the stack and the next state is @@ -171,6 +177,7 @@ class DecoderStateMachine : public Logger::Loggable { DecoderCallbacks& callbacks_; ProtocolState state_; std::vector stack_; + uint32_t body_start_{}; uint32_t body_bytes_{}; }; diff --git a/source/extensions/filters/network/thrift_proxy/metadata.h b/source/extensions/filters/network/thrift_proxy/metadata.h index 08b91a1c4f04..de44db1948b2 100644 --- a/source/extensions/filters/network/thrift_proxy/metadata.h +++ b/source/extensions/filters/network/thrift_proxy/metadata.h @@ -53,6 +53,10 @@ class MessageMetadata { copy->setMessageType(messageType()); } + if (hasReplyType()) { + copy->setReplyType(replyType()); + } + Http::HeaderMapImpl::copyFrom(copy->headers(), headers()); copy->mutableSpans().assign(spans().begin(), spans().end()); @@ -115,6 +119,10 @@ class MessageMetadata { MessageType messageType() const { return msg_type_.value(); } void setMessageType(MessageType msg_type) { msg_type_ = msg_type; } + bool hasReplyType() const { return reply_type_.has_value(); } + ReplyType replyType() const { return reply_type_.value(); } + void setReplyType(ReplyType reply_type) { reply_type_ = reply_type; } + /** * @return HeaderMap of current headers (never throws) */ @@ -168,6 +176,7 @@ class MessageMetadata { absl::optional method_name_{}; absl::optional seq_id_{}; absl::optional msg_type_{}; + absl::optional reply_type_{}; Http::HeaderMapPtr headers_{Http::RequestHeaderMapImpl::create()}; absl::optional app_ex_type_; absl::optional app_ex_msg_; diff --git a/source/extensions/filters/network/thrift_proxy/protocol.h b/source/extensions/filters/network/thrift_proxy/protocol.h index a9eae7779e1e..13b2586c1b55 100644 --- a/source/extensions/filters/network/thrift_proxy/protocol.h +++ b/source/extensions/filters/network/thrift_proxy/protocol.h @@ -73,6 +73,16 @@ class Protocol { */ virtual bool readMessageEnd(Buffer::Instance& buffer) PURE; + /** + * Peeks the start of a Thrift protocol reply payload in the buffer and updates the reply + * type parameter with the reply type of the payload. 
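For the compact protocol the same classification has to cope with Thrift's packed field header: the high nibble is the field-id delta, and a zero delta means a long-form header followed by a zig-zag varint field id. A minimal standalone sketch under those assumptions (plain byte vector, not Envoy's Buffer helpers):

#include <cstdint>
#include <limits>
#include <optional>
#include <stdexcept>
#include <vector>

enum class ReplyType { Success, Error };

std::optional<ReplyType> peekCompactReplyType(const std::vector<uint8_t>& bytes) {
  if (bytes.empty()) {
    return std::nullopt;
  }
  const uint8_t delta_and_type = bytes[0];
  if ((delta_and_type & 0x0f) == 0) {
    return ReplyType::Success; // FieldType::Stop first: a void success
  }
  if ((delta_and_type >> 4) != 0) {
    return ReplyType::Error; // non-zero delta from field id 0 => IDL exception
  }
  // Long-form header: a zig-zag varint field id follows the type byte.
  uint64_t raw = 0;
  for (size_t i = 1, shift = 0;; ++i, shift += 7) {
    if (i >= bytes.size()) {
      return std::nullopt; // varint incomplete, wait for more data
    }
    if (shift > 28) {
      throw std::runtime_error("oversized compact protocol field id varint");
    }
    raw |= static_cast<uint64_t>(bytes[i] & 0x7f) << shift;
    if ((bytes[i] & 0x80) == 0) {
      break;
    }
  }
  const int64_t id = static_cast<int64_t>(raw >> 1) ^ -static_cast<int64_t>(raw & 1);
  if (id < 0 || id > std::numeric_limits<int16_t>::max()) {
    throw std::runtime_error("invalid compact protocol field id");
  }
  return id == 0 ? ReplyType::Success : ReplyType::Error;
}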
+ * @param buffer the buffer to peek from + * @param reply_type ReplyType to set the payload's reply type to success or error + * @return true if reply type was successfully read, false if more data is required + * @throw EnvoyException if the data is not a valid payload + */ + virtual bool peekReplyPayload(Buffer::Instance& buffer, ReplyType& reply_type) PURE; + /** * Reads the start of a Thrift struct from the buffer and updates the name parameter with the * value from the struct header. If successful, the struct header is removed from the buffer. diff --git a/source/extensions/filters/network/thrift_proxy/router/config.cc b/source/extensions/filters/network/thrift_proxy/router/config.cc index a6b28cc58c05..bd93949ed747 100644 --- a/source/extensions/filters/network/thrift_proxy/router/config.cc +++ b/source/extensions/filters/network/thrift_proxy/router/config.cc @@ -18,14 +18,15 @@ ThriftFilters::FilterFactoryCb RouterFilterConfig::createFilterFactoryFromProtoT const std::string& stat_prefix, Server::Configuration::FactoryContext& context) { UNREFERENCED_PARAMETER(proto_config); - auto shadow_writer = - std::make_shared(context.clusterManager(), stat_prefix, context.scope(), - context.mainThreadDispatcher(), context.threadLocal()); + auto stats = + std::make_shared(stat_prefix, context.scope(), context.localInfo()); + auto shadow_writer = std::make_shared( + context.clusterManager(), *stats, context.mainThreadDispatcher(), context.threadLocal()); - return [&context, stat_prefix, + return [&context, stats, shadow_writer](ThriftFilters::FilterChainFactoryCallbacks& callbacks) -> void { - callbacks.addDecoderFilter(std::make_shared( - context.clusterManager(), stat_prefix, context.scope(), context.runtime(), *shadow_writer)); + callbacks.addDecoderFilter(std::make_shared(context.clusterManager(), *stats, + context.runtime(), *shadow_writer)); }; } diff --git a/source/extensions/filters/network/thrift_proxy/router/router.h b/source/extensions/filters/network/thrift_proxy/router/router.h index dfda11ac3440..dad6882aebc9 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router.h +++ b/source/extensions/filters/network/thrift_proxy/router/router.h @@ -5,6 +5,7 @@ #include #include "envoy/buffer/buffer.h" +#include "envoy/local_info/local_info.h" #include "envoy/router/router.h" #include "envoy/tcp/conn_pool.h" @@ -105,20 +106,30 @@ using ConfigConstSharedPtr = std::shared_ptr; COUNTER(route_missing) \ COUNTER(unknown_cluster) \ COUNTER(upstream_rq_maintenance_mode) \ - COUNTER(no_healthy_upstream) + COUNTER(no_healthy_upstream) \ + COUNTER(shadow_request_submit_failure) -struct RouterStats { +/** + * Struct containing named stats for the router. + */ +struct RouterNamedStats { ALL_THRIFT_ROUTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) + + static RouterNamedStats generateStats(const std::string& prefix, Stats::Scope& scope) { + return RouterNamedStats{ALL_THRIFT_ROUTER_STATS(POOL_COUNTER_PREFIX(scope, prefix), + POOL_GAUGE_PREFIX(scope, prefix), + POOL_HISTOGRAM_PREFIX(scope, prefix))}; + } }; /** - * This interface is used by an upstream request to communicate its state. + * Stats for use in the router. 
*/ -class RequestOwner : public ProtocolConverter, public Logger::Loggable { +class RouterStats { public: - RequestOwner(Upstream::ClusterManager& cluster_manager, const std::string& stat_prefix, - Stats::Scope& scope) - : cluster_manager_(cluster_manager), stats_(generateStats(stat_prefix, scope)), + RouterStats(const std::string& stat_prefix, Stats::Scope& scope, + const LocalInfo::LocalInfo& local_info) + : named_(RouterNamedStats::generateStats(stat_prefix, scope)), stat_name_set_(scope.symbolTable().makeSet("thrift_proxy")), symbol_table_(scope.symbolTable()), upstream_rq_call_(stat_name_set_->add("thrift.upstream_rq_call")), @@ -128,164 +139,284 @@ class RequestOwner : public ProtocolConverter, public Logger::Loggableadd("thrift.upstream_resp_success")), upstream_resp_reply_error_(stat_name_set_->add("thrift.upstream_resp_error")), upstream_resp_exception_(stat_name_set_->add("thrift.upstream_resp_exception")), + upstream_resp_exception_local_(stat_name_set_->add("thrift.upstream_resp_exception_local")), + upstream_resp_exception_remote_( + stat_name_set_->add("thrift.upstream_resp_exception_remote")), upstream_resp_invalid_type_(stat_name_set_->add("thrift.upstream_resp_invalid_type")), + upstream_resp_decoding_error_(stat_name_set_->add("thrift.upstream_resp_decoding_error")), upstream_rq_time_(stat_name_set_->add("thrift.upstream_rq_time")), upstream_rq_size_(stat_name_set_->add("thrift.upstream_rq_size")), - upstream_resp_size_(stat_name_set_->add("thrift.upstream_resp_size")) {} - ~RequestOwner() override = default; + upstream_resp_size_(stat_name_set_->add("thrift.upstream_resp_size")), + zone_(stat_name_set_->add("zone")), local_zone_name_(local_info.zoneStatName()) {} /** - * @return ConnectionPool::UpstreamCallbacks& the handler for upstream data. + * Increment counter for request calls. + * @param cluster Upstream::ClusterInfo& describing the upstream cluster */ - virtual Tcp::ConnectionPool::UpstreamCallbacks& upstreamCallbacks() PURE; + void incRequestCall(const Upstream::ClusterInfo& cluster) const { + incClusterScopeCounter(cluster, nullptr, upstream_rq_call_); + } /** - * @return Buffer::OwnedImpl& the buffer used to serialize the upstream request. + * Increment counter for requests that are one way only. + * @param cluster Upstream::ClusterInfo& describing the upstream cluster */ - virtual Buffer::OwnedImpl& buffer() PURE; + void incRequestOneWay(const Upstream::ClusterInfo& cluster) const { + incClusterScopeCounter(cluster, nullptr, upstream_rq_oneway_); + } /** - * @return Event::Dispatcher& the dispatcher used for timers, etc. + * Increment counter for requests that are invalid. + * @param cluster Upstream::ClusterInfo& describing the upstream cluster */ - virtual Event::Dispatcher& dispatcher() PURE; + void incRequestInvalid(const Upstream::ClusterInfo& cluster) const { + incClusterScopeCounter(cluster, nullptr, upstream_rq_invalid_type_); + } /** - * Converts message begin into the right protocol. - */ - void convertMessageBegin(MessageMetadataSharedPtr metadata) { - ProtocolConverter::messageBegin(metadata); + * Increment counter for received responses that are replies that are successful. 
+ * @param cluster Upstream::ClusterInfo& describing the upstream cluster + * @param upstream_host Upstream::HostDescriptionConstSharedPtr describing the upstream host + */ + void incResponseReplySuccess(const Upstream::ClusterInfo& cluster, + Upstream::HostDescriptionConstSharedPtr upstream_host) const { + incClusterScopeCounter(cluster, upstream_host, upstream_resp_reply_); + incClusterScopeCounter(cluster, upstream_host, upstream_resp_reply_success_); + ASSERT(upstream_host != nullptr); + upstream_host->stats().rq_success_.inc(); } /** - * Used to update the request size every time bytes are pushed out. - * - * @param size uint64_t the value of the increment. - */ - virtual void addSize(uint64_t size) PURE; + * Increment counter for received responses that are replies that are an error. + * @param cluster Upstream::ClusterInfo& describing the upstream cluster + * @param upstream_host Upstream::HostDescriptionConstSharedPtr describing the upstream host + */ + void incResponseReplyError(const Upstream::ClusterInfo& cluster, + Upstream::HostDescriptionConstSharedPtr upstream_host) const { + incClusterScopeCounter(cluster, upstream_host, upstream_resp_reply_); + incClusterScopeCounter(cluster, upstream_host, upstream_resp_reply_error_); + ASSERT(upstream_host != nullptr); + // Currently IDL exceptions are always considered endpoint error but it's possible for an error + // to have semantics matching HTTP 4xx, rather than 5xx. rq_error classification chosen + // here to match outlier detection external failure in upstream_request.cc. + upstream_host->stats().rq_error_.inc(); + } /** - * Used to continue decoding if it was previously stopped. + * Increment counter for received remote responses that are exceptions. + * @param cluster Upstream::ClusterInfo& describing the upstream cluster + * @param upstream_host Upstream::HostDescriptionConstSharedPtr describing the upstream host */ - virtual void continueDecoding() PURE; + void incResponseRemoteException(const Upstream::ClusterInfo& cluster, + Upstream::HostDescriptionConstSharedPtr upstream_host) const { + incClusterScopeCounter(cluster, upstream_host, upstream_resp_exception_); + ASSERT(upstream_host != nullptr); + incClusterScopeCounter(cluster, nullptr, upstream_resp_exception_remote_); + upstream_host->stats().rq_error_.inc(); + } /** - * Used to reset the downstream connection after an error. + * Increment counter for responses that are local exceptions, without forwarding a request + * upstream. + * @param cluster Upstream::ClusterInfo& describing the upstream cluster */ - virtual void resetDownstreamConnection() PURE; + void incResponseLocalException(const Upstream::ClusterInfo& cluster) const { + incClusterScopeCounter(cluster, nullptr, upstream_resp_exception_); + incClusterScopeCounter(cluster, nullptr, upstream_resp_exception_local_); + } /** - * Sends a locally generated response using the provided response object. - * - * @param response DirectResponse the response to send to the downstream client - * @param end_stream if true, the downstream connection should be closed after this response - */ - virtual void sendLocalReply(const ThriftProxy::DirectResponse& response, bool end_stream) PURE; + * Increment counter for received responses that are invalid. 
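It is worth spelling out how these helpers charge the upstream host: every decoded outcome except a locally generated exception (where no host may be involved) lands on one of the two per-host counters, matching the outlier-detection comment in the hunk above. A compact standalone sketch of that mapping (illustrative names):

#include <iostream>

enum class Outcome { ReplySuccess, ReplyIdlException, RemoteException, InvalidType, DecodingError };

const char* hostCounterFor(Outcome outcome) {
  switch (outcome) {
  case Outcome::ReplySuccess:
    return "rq_success";
  // Declared IDL exceptions, remote application exceptions, invalid message
  // types and decoding failures are all charged to the host as rq_error.
  case Outcome::ReplyIdlException:
  case Outcome::RemoteException:
  case Outcome::InvalidType:
  case Outcome::DecodingError:
    return "rq_error";
  }
  return "rq_error";
}

int main() {
  std::cout << hostCounterFor(Outcome::ReplySuccess) << "\n";      // rq_success
  std::cout << hostCounterFor(Outcome::ReplyIdlException) << "\n"; // rq_error
  return 0;
}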
+ * @param cluster Upstream::ClusterInfo& describing the upstream cluster + * @param upstream_host Upstream::HostDescriptionConstSharedPtr describing the upstream host + */ + void incResponseInvalidType(const Upstream::ClusterInfo& cluster, + Upstream::HostDescriptionConstSharedPtr upstream_host) const { + incClusterScopeCounter(cluster, upstream_host, upstream_resp_invalid_type_); + ASSERT(upstream_host != nullptr); + upstream_host->stats().rq_error_.inc(); + } /** - * Records the duration of the request. - * - * @param value uint64_t the value of the duration. - * @param unit Unit the unit of the duration. + * Increment counter for decoding errors during responses. + * @param cluster Upstream::ClusterInfo& describing the upstream cluster + * @param upstream_host Upstream::HostDescriptionConstSharedPtr describing the upstream host */ - virtual void recordResponseDuration(uint64_t value, Stats::Histogram::Unit unit) PURE; + void incResponseDecodingError(const Upstream::ClusterInfo& cluster, + Upstream::HostDescriptionConstSharedPtr upstream_host) const { + incClusterScopeCounter(cluster, upstream_host, upstream_resp_decoding_error_); + ASSERT(upstream_host != nullptr); + upstream_host->stats().rq_error_.inc(); + } /** - * @return Upstream::ClusterManager& the cluster manager. + * Record a value for the request size histogram. + * @param cluster Upstream::ClusterInfo& describing the upstream cluster + * @param value uint64_t size in bytes of the full request */ - Upstream::ClusterManager& clusterManager() { return cluster_manager_; } + void recordUpstreamRequestSize(const Upstream::ClusterInfo& cluster, uint64_t value) const { + recordClusterScopeHistogram(cluster, nullptr, upstream_rq_size_, Stats::Histogram::Unit::Bytes, + value); + } /** - * @return Upstream::Cluster& the upstream cluster associated with the request. + * Record a value for the response size histogram. + * @param cluster Upstream::ClusterInfo& describing the upstream cluster + * @param value uint64_t size in bytes of the full response */ - const Upstream::ClusterInfo& cluster() const { return *cluster_; } + void recordUpstreamResponseSize(const Upstream::ClusterInfo& cluster, uint64_t value) const { + recordClusterScopeHistogram(cluster, nullptr, upstream_resp_size_, + Stats::Histogram::Unit::Bytes, value); + } /** - * Common stats. + * Record a value for the response time duration histogram. 
+ * @param cluster Upstream::ClusterInfo& describing the upstream cluster + * @param upstream_host Upstream::HostDescriptionConstSharedPtr describing the upstream host + * @param value uint64_t duration in milliseconds to receive the complete response */ - RouterStats& stats() { return stats_; } + void recordUpstreamResponseTime(const Upstream::ClusterInfo& cluster, + Upstream::HostDescriptionConstSharedPtr upstream_host, + uint64_t value) const { + recordClusterScopeHistogram(cluster, upstream_host, upstream_rq_time_, + Stats::Histogram::Unit::Milliseconds, value); + } + + const RouterNamedStats named_; + +private: + void incClusterScopeCounter(const Upstream::ClusterInfo& cluster, + Upstream::HostDescriptionConstSharedPtr upstream_host, + const Stats::StatName& stat_name) const { + const Stats::SymbolTable::StoragePtr stat_name_storage = symbol_table_.join({stat_name}); + cluster.statsScope().counterFromStatName(Stats::StatName(stat_name_storage.get())).inc(); + const Stats::SymbolTable::StoragePtr zone_stat_name_storage = + upstreamZoneStatName(upstream_host, stat_name); + if (zone_stat_name_storage) { + cluster.statsScope().counterFromStatName(Stats::StatName(zone_stat_name_storage.get())).inc(); + } + } + + void recordClusterScopeHistogram(const Upstream::ClusterInfo& cluster, + Upstream::HostDescriptionConstSharedPtr upstream_host, + const Stats::StatName& stat_name, Stats::Histogram::Unit unit, + uint64_t value) const { + const Stats::SymbolTable::StoragePtr stat_name_storage = symbol_table_.join({stat_name}); + cluster.statsScope() + .histogramFromStatName(Stats::StatName(stat_name_storage.get()), unit) + .recordValue(value); + const Stats::SymbolTable::StoragePtr zone_stat_name_storage = + upstreamZoneStatName(upstream_host, stat_name); + if (zone_stat_name_storage) { + cluster.statsScope() + .histogramFromStatName(Stats::StatName(zone_stat_name_storage.get()), unit) + .recordValue(value); + } + } + + Stats::SymbolTable::StoragePtr + upstreamZoneStatName(Upstream::HostDescriptionConstSharedPtr upstream_host, + const Stats::StatName& stat_name) const { + if (!upstream_host || local_zone_name_.empty()) { + return nullptr; + } + const auto& upstream_zone_name = upstream_host->localityZoneStatName(); + if (upstream_zone_name.empty()) { + return nullptr; + } + return symbol_table_.join({zone_, local_zone_name_, upstream_zone_name, stat_name}); + } + + Stats::StatNameSetPtr stat_name_set_; + Stats::SymbolTable& symbol_table_; + const Stats::StatName upstream_rq_call_; + const Stats::StatName upstream_rq_oneway_; + const Stats::StatName upstream_rq_invalid_type_; + const Stats::StatName upstream_resp_reply_; + const Stats::StatName upstream_resp_reply_success_; + const Stats::StatName upstream_resp_reply_error_; + const Stats::StatName upstream_resp_exception_; + const Stats::StatName upstream_resp_exception_local_; + const Stats::StatName upstream_resp_exception_remote_; + const Stats::StatName upstream_resp_invalid_type_; + const Stats::StatName upstream_resp_decoding_error_; + const Stats::StatName upstream_rq_time_; + const Stats::StatName upstream_rq_size_; + const Stats::StatName upstream_resp_size_; + const Stats::StatName zone_; + const Stats::StatName local_zone_name_; +}; + +/** + * This interface is used by an upstream request to communicate its state. 
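The private helpers above only emit a zone-qualified stat when both the local zone and the upstream host's zone are known. A standalone sketch of the name that gets assembled, using plain strings instead of Stats::StatName (zone names assumed for illustration):

#include <iostream>
#include <string>
#include <vector>

// Join the pieces with dots; an empty piece means the zone stat is skipped.
std::string joinStatName(const std::vector<std::string>& parts) {
  std::string out;
  for (const auto& part : parts) {
    if (part.empty()) {
      return {}; // mirror the patch: no zone stat when either zone is unknown
    }
    if (!out.empty()) {
      out += '.';
    }
    out += part;
  }
  return out;
}

int main() {
  const std::string local_zone = "us-east-1a";    // assumed local zone
  const std::string upstream_zone = "us-east-1b"; // assumed upstream host zone
  std::cout << joinStatName({"zone", local_zone, upstream_zone,
                             "thrift.upstream_resp_reply_success"})
            << "\n"; // zone.us-east-1a.us-east-1b.thrift.upstream_resp_reply_success
  return 0;
}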
+ */ +class RequestOwner : public ProtocolConverter, public Logger::Loggable { +public: + RequestOwner(Upstream::ClusterManager& cluster_manager, const RouterStats& stats) + : cluster_manager_(cluster_manager), stats_(stats) {} + ~RequestOwner() override = default; /** - * Increment counter for received responses that are replies. + * @return ConnectionPool::UpstreamCallbacks& the handler for upstream data. */ - void incResponseReply(const Upstream::ClusterInfo& cluster) { - incClusterScopeCounter(cluster, {upstream_resp_reply_}); - } + virtual Tcp::ConnectionPool::UpstreamCallbacks& upstreamCallbacks() PURE; /** - * Increment counter for request calls. + * @return Buffer::OwnedImpl& the buffer used to serialize the upstream request. */ - void incRequestCall(const Upstream::ClusterInfo& cluster) { - incClusterScopeCounter(cluster, {upstream_rq_call_}); - } + virtual Buffer::OwnedImpl& buffer() PURE; /** - * Increment counter for requests that are one way only. + * @return Event::Dispatcher& the dispatcher used for timers, etc. */ - void incRequestOneWay(const Upstream::ClusterInfo& cluster) { - incClusterScopeCounter(cluster, {upstream_rq_oneway_}); - } + virtual Event::Dispatcher& dispatcher() PURE; /** - * Increment counter for requests that are invalid. + * Converts message begin into the right protocol. */ - void incRequestInvalid(const Upstream::ClusterInfo& cluster) { - incClusterScopeCounter(cluster, {upstream_rq_invalid_type_}); + void convertMessageBegin(MessageMetadataSharedPtr metadata) { + ProtocolConverter::messageBegin(metadata); } /** - * Increment counter for received responses that are replies that are successful. + * Used to update the request size every time bytes are pushed out. + * + * @param size uint64_t the value of the increment. */ - void incResponseReplySuccess(const Upstream::ClusterInfo& cluster) { - incClusterScopeCounter(cluster, {upstream_resp_reply_success_}); - } + virtual void addSize(uint64_t size) PURE; /** - * Increment counter for received responses that are replies that are an error. + * Used to continue decoding if it was previously stopped. */ - void incResponseReplyError(const Upstream::ClusterInfo& cluster) { - incClusterScopeCounter(cluster, {upstream_resp_reply_error_}); - } + virtual void continueDecoding() PURE; /** - * Increment counter for received responses that are exceptions. + * Used to reset the downstream connection after an error. */ - void incResponseException(const Upstream::ClusterInfo& cluster) { - incClusterScopeCounter(cluster, {upstream_resp_exception_}); - } + virtual void resetDownstreamConnection() PURE; /** - * Increment counter for received responses that are invalid. + * Sends a locally generated response using the provided response object. + * + * @param response DirectResponse the response to send to the downstream client + * @param end_stream if true, the downstream connection should be closed after this response */ - void incResponseInvalidType(const Upstream::ClusterInfo& cluster) { - incClusterScopeCounter(cluster, {upstream_resp_invalid_type_}); - } + virtual void sendLocalReply(const ThriftProxy::DirectResponse& response, bool end_stream) PURE; /** - * Record a value for the request size histogram. + * @return Upstream::ClusterManager& the cluster manager. 
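Further down, prepareUpstreamRequest() widens payload passthrough to the Header transport. As a quick reference, the eligibility test reduces to the predicate sketched below (standalone enums mirroring the transport and protocol types):

enum class TransportType { Framed, Header, Unframed };
enum class ProtocolType { Binary, Compact, Twitter };

// Passthrough requires framed or header transport on both legs, an identical
// protocol end to end, and anything but the Twitter protocol.
bool passthroughSupported(TransportType downstream_transport, TransportType upstream_transport,
                          ProtocolType downstream_protocol, ProtocolType upstream_protocol) {
  const auto framed_or_header = [](TransportType t) {
    return t == TransportType::Framed || t == TransportType::Header;
  };
  return framed_or_header(downstream_transport) && framed_or_header(upstream_transport) &&
         downstream_protocol == upstream_protocol && upstream_protocol != ProtocolType::Twitter;
}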
*/ - void recordUpstreamRequestSize(const Upstream::ClusterInfo& cluster, uint64_t value) { - recordClusterScopeHistogram(cluster, {upstream_rq_size_}, Stats::Histogram::Unit::Bytes, value); - } + Upstream::ClusterManager& clusterManager() { return cluster_manager_; } /** - * Record a value for the response size histogram. + * @return Upstream::Cluster& the upstream cluster associated with the request. */ - void recordUpstreamResponseSize(const Upstream::ClusterInfo& cluster, uint64_t value) { - recordClusterScopeHistogram(cluster, {upstream_resp_size_}, Stats::Histogram::Unit::Bytes, - value); - } + const Upstream::ClusterInfo& cluster() const { return *cluster_; } /** - * Records the duration of the request for a given cluster. - * - * @param cluster ClusterInfo the cluster to record the duration for. - * @param value uint64_t the value of the duration. - * @param unit Unit the unit of the duration. + * @return RouterStats the common router stats. */ - void recordClusterResponseDuration(const Upstream::ClusterInfo& cluster, uint64_t value, - Stats::Histogram::Unit unit) { - recordClusterScopeHistogram(cluster, {upstream_rq_time_}, unit, value); - } + const RouterStats& stats() { return stats_; } protected: struct UpstreamRequestInfo { @@ -308,7 +439,7 @@ class RequestOwner : public ProtocolConverter, public Logger::LoggablemessageType()) { case MessageType::Call: - incRequestCall(*cluster_); + stats().incRequestCall(*cluster_); break; case MessageType::Oneway: - incRequestOneWay(*cluster_); + stats().incRequestOneWay(*cluster_); break; default: - incRequestInvalid(*cluster_); + stats().incRequestInvalid(*cluster_); break; } if (cluster_->maintenanceMode()) { - stats().upstream_rq_maintenance_mode_.inc(); + stats().named_.upstream_rq_maintenance_mode_.inc(); + if (metadata->messageType() == MessageType::Call) { + stats().incResponseLocalException(*cluster_); + } return {AppException(AppExceptionType::InternalError, fmt::format("maintenance mode for cluster '{}'", cluster_name)), absl::nullopt}; @@ -350,14 +484,18 @@ class RequestOwner : public ProtocolConverter, public Logger::LoggabletcpConnPool(Upstream::ResourcePriority::Default, lb_context); if (!conn_pool_data) { - stats().no_healthy_upstream_.inc(); + stats().named_.no_healthy_upstream_.inc(); + if (metadata->messageType() == MessageType::Call) { + stats().incResponseLocalException(*cluster_); + } return {AppException(AppExceptionType::InternalError, fmt::format("no healthy upstream for '{}'", cluster_name)), absl::nullopt}; } const auto passthrough_supported = - transport == TransportType::Framed && final_transport == TransportType::Framed && + (transport == TransportType::Framed || transport == TransportType::Header) && + (final_transport == TransportType::Framed || final_transport == TransportType::Header) && protocol == final_protocol && final_protocol != ProtocolType::Twitter; UpstreamRequestInfo result = {passthrough_supported, final_transport, final_protocol, conn_pool_data}; @@ -367,42 +505,8 @@ class RequestOwner : public ProtocolConverter, public Logger::Loggableroute(); if (!route_) { ENVOY_STREAM_LOG(debug, "no route match for method '{}'", *callbacks_, metadata->methodName()); - stats().route_missing_.inc(); + stats().named_.route_missing_.inc(); callbacks_->sendLocalReply( AppException(AppExceptionType::UnknownMethod, fmt::format("no route for method '{}'", metadata->methodName())), @@ -294,7 +294,7 @@ FilterStatus Router::messageEnd() { ProtocolConverter::messageEnd(); const auto encode_size = 
upstream_request_->encodeAndWrite(upstream_request_buffer_); addSize(encode_size); - recordUpstreamRequestSize(*cluster_, request_size_); + stats().recordUpstreamRequestSize(*cluster_, request_size_); // Dispatch shadow requests, if any. // Note: if connections aren't ready, the write will happen when appropriate. diff --git a/source/extensions/filters/network/thrift_proxy/router/router_impl.h b/source/extensions/filters/network/thrift_proxy/router/router_impl.h index 0b60226f57dc..78afddfaf397 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router_impl.h +++ b/source/extensions/filters/network/thrift_proxy/router/router_impl.h @@ -6,12 +6,11 @@ #include "envoy/extensions/filters/network/thrift_proxy/v3/route.pb.h" #include "envoy/router/router.h" -#include "envoy/stats/scope.h" -#include "envoy/stats/stats_macros.h" #include "envoy/tcp/conn_pool.h" #include "envoy/upstream/load_balancer.h" #include "source/common/http/header_utility.h" +#include "source/common/router/metadatamatchcriteria_impl.h" #include "source/common/upstream/load_balancer_impl.h" #include "source/extensions/filters/network/thrift_proxy/conn_manager.h" #include "source/extensions/filters/network/thrift_proxy/filters/filter.h" @@ -217,10 +216,10 @@ class Router : public Tcp::ConnectionPool::UpstreamCallbacks, public RequestOwner, public ThriftFilters::DecoderFilter { public: - Router(Upstream::ClusterManager& cluster_manager, const std::string& stat_prefix, - Stats::Scope& scope, Runtime::Loader& runtime, ShadowWriter& shadow_writer) - : RequestOwner(cluster_manager, stat_prefix, scope), passthrough_supported_(false), - runtime_(runtime), shadow_writer_(shadow_writer) {} + Router(Upstream::ClusterManager& cluster_manager, const RouterStats& stats, + Runtime::Loader& runtime, ShadowWriter& shadow_writer) + : RequestOwner(cluster_manager, stats), passthrough_supported_(false), runtime_(runtime), + shadow_writer_(shadow_writer) {} ~Router() override = default; @@ -239,9 +238,6 @@ class Router : public Tcp::ConnectionPool::UpstreamCallbacks, void sendLocalReply(const ThriftProxy::DirectResponse& response, bool end_stream) override { callbacks_->sendLocalReply(response, end_stream); } - void recordResponseDuration(uint64_t value, Stats::Histogram::Unit unit) override { - recordClusterResponseDuration(*cluster_, value, unit); - } // RequestOwner::ProtocolConverter FilterStatus transportBegin(MessageMetadataSharedPtr metadata) override; @@ -271,7 +267,25 @@ class Router : public Tcp::ConnectionPool::UpstreamCallbacks, // Upstream::LoadBalancerContext const Network::Connection* downstreamConnection() const override; const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() override { - return route_entry_ ? route_entry_->metadataMatchCriteria() : nullptr; + const Envoy::Router::MetadataMatchCriteria* route_criteria = + (route_entry_ != nullptr) ? route_entry_->metadataMatchCriteria() : nullptr; + + // Support getting metadata match criteria from thrift request. 
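The override implemented just below merges two sources of load-balancer match criteria: the route entry's static criteria and whatever an earlier filter wrote into the request's envoy.lb dynamic metadata. A standalone sketch of the merge semantics assumed here, using plain maps instead of Envoy::Router::MetadataMatchCriteria:

#include <iostream>
#include <map>
#include <string>

using Criteria = std::map<std::string, std::string>;

Criteria mergeMatchCriteria(const Criteria* route, const Criteria& request) {
  Criteria merged = route ? *route : Criteria{};
  for (const auto& [key, value] : request) {
    merged[key] = value; // request-supplied criteria win on conflict (assumed semantics)
  }
  return merged;
}

int main() {
  const Criteria route{{"version", "v1"}, {"stage", "prod"}};
  const Criteria request{{"version", "v2"}}; // e.g. set by an earlier filter
  for (const auto& [key, value] : mergeMatchCriteria(&route, request)) {
    std::cout << key << "=" << value << "\n"; // stage=prod, version=v2
  }
  return 0;
}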
+ const auto& request_metadata = callbacks_->streamInfo().dynamicMetadata().filter_metadata(); + const auto filter_it = request_metadata.find(Envoy::Config::MetadataFilters::get().ENVOY_LB); + + if (filter_it == request_metadata.end()) { + return route_criteria; + } + + if (route_criteria != nullptr) { + metadata_match_criteria_ = route_criteria->mergeMatchCriteria(filter_it->second); + } else { + metadata_match_criteria_ = + std::make_unique(filter_it->second); + } + + return metadata_match_criteria_.get(); } // Tcp::ConnectionPool::UpstreamCallbacks @@ -287,6 +301,7 @@ class Router : public Tcp::ConnectionPool::UpstreamCallbacks, std::unique_ptr upstream_response_callbacks_{}; RouteConstSharedPtr route_{}; const RouteEntry* route_entry_{}; + Envoy::Router::MetadataMatchCriteriaConstPtr metadata_match_criteria_; std::unique_ptr upstream_request_; Buffer::OwnedImpl upstream_request_buffer_; diff --git a/source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.cc b/source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.cc index abd51e1da50f..68df0aae71bf 100644 --- a/source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.cc @@ -21,7 +21,7 @@ ShadowWriterImpl::submit(const std::string& cluster_name, MessageMetadataSharedP original_transport, original_protocol); const bool created = shadow_router->createUpstreamRequest(); if (!created) { - stats_.shadow_request_submit_failure_.inc(); + stats_.named_.shadow_request_submit_failure_.inc(); return absl::nullopt; } @@ -34,7 +34,7 @@ ShadowWriterImpl::submit(const std::string& cluster_name, MessageMetadataSharedP ShadowRouterImpl::ShadowRouterImpl(ShadowWriterImpl& parent, const std::string& cluster_name, MessageMetadataSharedPtr& metadata, TransportType transport_type, ProtocolType protocol_type) - : RequestOwner(parent.clusterManager(), parent.statPrefix(), parent.scope()), parent_(parent), + : RequestOwner(parent.clusterManager(), parent.stats()), parent_(parent), cluster_name_(cluster_name), metadata_(metadata->clone()), transport_type_(transport_type), protocol_type_(protocol_type), transport_(NamedTransportConfigFactory::getFactory(transport_type).createTransport()), @@ -224,7 +224,7 @@ FilterStatus ShadowRouterImpl::messageEnd() { ProtocolConverter::messageEnd(); const auto encode_size = upstream_request_->encodeAndWrite(upstream_request_buffer_); addSize(encode_size); - recordUpstreamRequestSize(*cluster_, request_size_); + stats().recordUpstreamRequestSize(*cluster_, request_size_); request_sent_ = true; diff --git a/source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.h b/source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.h index dc43592b58cf..98cdbe537443 100644 --- a/source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.h +++ b/source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.h @@ -4,8 +4,6 @@ #include "envoy/event/dispatcher.h" #include "envoy/router/router.h" -#include "envoy/stats/scope.h" -#include "envoy/stats/stats_macros.h" #include "envoy/tcp/conn_pool.h" #include "envoy/upstream/load_balancer.h" @@ -59,21 +57,8 @@ struct NullResponseDecoder : public DecoderCallbacks, public ProtocolConverter { // ProtocolConverter FilterStatus messageBegin(MessageMetadataSharedPtr metadata) override { metadata_ = metadata; - first_reply_field_ = - (metadata->hasMessageType() && metadata->messageType() == MessageType::Reply); - 
return FilterStatus::Continue; - } - FilterStatus messageEnd() override { - if (first_reply_field_) { - success_ = true; - first_reply_field_ = false; - } - return FilterStatus::Continue; - } - FilterStatus fieldBegin(absl::string_view, FieldType&, int16_t& field_id) override { - if (first_reply_field_) { - success_ = (field_id == 0); - first_reply_field_ = false; + if (metadata_->hasReplyType()) { + success_ = metadata_->replyType() == ReplyType::Success; } return FilterStatus::Continue; } @@ -97,7 +82,6 @@ struct NullResponseDecoder : public DecoderCallbacks, public ProtocolConverter { MessageMetadataSharedPtr metadata_; absl::optional success_; bool complete_ : 1; - bool first_reply_field_ : 1; }; using NullResponseDecoderPtr = std::unique_ptr; @@ -156,9 +140,6 @@ class ShadowRouterImpl : public ShadowRouterHandle, void continueDecoding() override { flushPendingCallbacks(); } void resetDownstreamConnection() override {} void sendLocalReply(const ThriftProxy::DirectResponse&, bool) override {} - void recordResponseDuration(uint64_t value, Stats::Histogram::Unit unit) override { - recordClusterResponseDuration(*cluster_, value, unit); - } // RequestOwner::ProtocolConverter FilterStatus transportBegin(MessageMetadataSharedPtr) override { return FilterStatus::Continue; } @@ -230,12 +211,6 @@ class ShadowRouterImpl : public ShadowRouterHandle, bool deferred_deleting_{}; }; -#define ALL_SHADOW_WRITER_STATS(COUNTER, GAUGE, HISTOGRAM) COUNTER(shadow_request_submit_failure) - -struct ShadowWriterStats { - ALL_SHADOW_WRITER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) -}; - class ActiveRouters : public ThreadLocal::ThreadLocalObject { public: ActiveRouters(Event::Dispatcher& dispatcher) : dispatcher_(dispatcher) {} @@ -260,12 +235,9 @@ class ActiveRouters : public ThreadLocal::ThreadLocalObject { class ShadowWriterImpl : public ShadowWriter, Logger::Loggable { public: - ShadowWriterImpl(Upstream::ClusterManager& cm, const std::string& stat_prefix, - Stats::Scope& scope, Event::Dispatcher& dispatcher, - ThreadLocal::SlotAllocator& tls) - : cm_(cm), stat_prefix_(stat_prefix), scope_(scope), dispatcher_(dispatcher), - stats_(generateStats(stat_prefix, scope)), tls_(tls.allocateSlot()) { - + ShadowWriterImpl(Upstream::ClusterManager& cm, const RouterStats& stats, + Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls) + : cm_(cm), stats_(stats), dispatcher_(dispatcher), tls_(tls.allocateSlot()) { tls_->set([](Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr { return std::make_shared(dispatcher); }); @@ -274,11 +246,10 @@ class ShadowWriterImpl : public ShadowWriter, Logger::LoggablegetTyped().remove(router); } + const RouterStats& stats() { return stats_; } // Router::ShadowWriter Upstream::ClusterManager& clusterManager() override { return cm_; } - const std::string& statPrefix() const override { return stat_prefix_; } - Stats::Scope& scope() override { return scope_; } Event::Dispatcher& dispatcher() override { return dispatcher_; } absl::optional> submit(const std::string& cluster_name, MessageMetadataSharedPtr metadata, @@ -287,17 +258,9 @@ class ShadowWriterImpl : public ShadowWriter, Logger::LoggablemessageType()) { case MessageType::Reply: - parent_.incResponseReply(cluster); if (callbacks.responseSuccess()) { upstream_host_->outlierDetector().putResult( Upstream::Outlier::Result::ExtOriginRequestSuccess); - parent_.incResponseReplySuccess(cluster); + stats_.incResponseReplySuccess(cluster, upstream_host_); } else { 
upstream_host_->outlierDetector().putResult( Upstream::Outlier::Result::ExtOriginRequestFailed); - parent_.incResponseReplyError(cluster); + stats_.incResponseReplyError(cluster, upstream_host_); } break; case MessageType::Exception: upstream_host_->outlierDetector().putResult( Upstream::Outlier::Result::ExtOriginRequestFailed); - parent_.incResponseException(cluster); + stats_.incResponseRemoteException(cluster, upstream_host_); break; default: - parent_.incResponseInvalidType(cluster); + stats_.incResponseInvalidType(cluster, upstream_host_); break; } onResponseComplete(); @@ -160,6 +159,7 @@ UpstreamRequest::handleRegularResponse(Buffer::Instance& data, // Note: invalid responses are not accounted in the response size histogram. ENVOY_LOG(debug, "upstream reset"); upstream_host_->outlierDetector().putResult(Upstream::Outlier::Result::ExtOriginRequestFailed); + stats_.incResponseDecodingError(cluster, upstream_host_); resetStream(); } @@ -267,6 +267,7 @@ void UpstreamRequest::onResetStream(ConnectionPool::PoolFailureReason reason) { switch (reason) { case ConnectionPool::PoolFailureReason::Overflow: + stats_.incResponseLocalException(parent_.cluster()); parent_.sendLocalReply(AppException(AppExceptionType::InternalError, "thrift upstream request: too many connections"), true); @@ -286,6 +287,7 @@ void UpstreamRequest::onResetStream(ConnectionPool::PoolFailureReason reason) { upstream_host_->outlierDetector().putResult( Upstream::Outlier::Result::LocalOriginConnectFailed); } + stats_.incResponseLocalException(parent_.cluster()); // TODO(zuercher): distinguish between these cases where appropriate (particularly timeout) if (!response_started_) { @@ -315,7 +317,7 @@ void UpstreamRequest::chargeResponseTiming() { const std::chrono::milliseconds response_time = std::chrono::duration_cast( dispatcher.timeSource().monotonicTime() - downstream_request_complete_time_); - parent_.recordResponseDuration(response_time.count(), Stats::Histogram::Unit::Milliseconds); + stats_.recordUpstreamResponseTime(parent_.cluster(), upstream_host_, response_time.count()); } } // namespace Router diff --git a/source/extensions/filters/network/thrift_proxy/router/upstream_request.h b/source/extensions/filters/network/thrift_proxy/router/upstream_request.h index 287610b6b98c..ae51ccddcff2 100644 --- a/source/extensions/filters/network/thrift_proxy/router/upstream_request.h +++ b/source/extensions/filters/network/thrift_proxy/router/upstream_request.h @@ -59,6 +59,7 @@ struct UpstreamRequest : public Tcp::ConnectionPool::Callbacks, void chargeResponseTiming(); RequestOwner& parent_; + const RouterStats& stats_; Upstream::TcpPoolData& conn_pool_data_; MessageMetadataSharedPtr metadata_; diff --git a/source/extensions/filters/network/thrift_proxy/stats.h b/source/extensions/filters/network/thrift_proxy/stats.h index 7e57db76f803..150367e9dfe0 100644 --- a/source/extensions/filters/network/thrift_proxy/stats.h +++ b/source/extensions/filters/network/thrift_proxy/stats.h @@ -22,11 +22,13 @@ namespace ThriftProxy { COUNTER(request_decoding_error) \ COUNTER(request_invalid_type) \ COUNTER(request_oneway) \ + COUNTER(request_passthrough) \ COUNTER(response) \ COUNTER(response_decoding_error) \ COUNTER(response_error) \ COUNTER(response_exception) \ COUNTER(response_invalid_type) \ + COUNTER(response_passthrough) \ COUNTER(response_reply) \ COUNTER(response_success) \ GAUGE(request_active, Accumulate) \ diff --git a/source/extensions/filters/network/thrift_proxy/thrift.h 
b/source/extensions/filters/network/thrift_proxy/thrift.h index 3092bceb11b7..bfbb31255351 100644 --- a/source/extensions/filters/network/thrift_proxy/thrift.h +++ b/source/extensions/filters/network/thrift_proxy/thrift.h @@ -118,6 +118,14 @@ enum class MessageType { LastMessageType = Oneway, }; +/** + * A Reply message is either a success or an error (IDL exception) + */ +enum class ReplyType { + Success, + Error, +}; + /** * Thrift protocol struct field types. * See https://github.com/apache/thrift/blob/master/lib/cpp/src/thrift/protocol/TProtocol.h diff --git a/source/extensions/filters/udp/dns_filter/BUILD b/source/extensions/filters/udp/dns_filter/BUILD index 8dc000cc2ea1..b6f2e60d1002 100644 --- a/source/extensions/filters/udp/dns_filter/BUILD +++ b/source/extensions/filters/udp/dns_filter/BUILD @@ -39,10 +39,11 @@ envoy_cc_library( "//source/common/config:datasource_lib", "//source/common/network:address_lib", "//source/common/network:utility_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", "//source/common/protobuf:message_validator_lib", "//source/common/runtime:runtime_lib", "//source/common/upstream:cluster_manager_lib", - "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3:pkg_cc_proto", "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", ], ) @@ -55,6 +56,6 @@ envoy_cc_extension( ":dns_filter_lib", "//envoy/registry", "//envoy/server:filter_config_interface", - "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/udp/dns_filter/config.cc b/source/extensions/filters/udp/dns_filter/config.cc index 28f00e6a445f..53745a01f8eb 100644 --- a/source/extensions/filters/udp/dns_filter/config.cc +++ b/source/extensions/filters/udp/dns_filter/config.cc @@ -9,7 +9,7 @@ Network::UdpListenerFilterFactoryCb DnsFilterConfigFactory::createFilterFactoryF const Protobuf::Message& config, Server::Configuration::ListenerFactoryContext& context) { auto shared_config = std::make_shared( context, MessageUtil::downcastAndValidate< - const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig&>( + const envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig&>( config, context.messageValidationVisitor())); return [shared_config](Network::UdpListenerFilterManager& filter_manager, @@ -19,7 +19,7 @@ Network::UdpListenerFilterFactoryCb DnsFilterConfigFactory::createFilterFactoryF } ProtobufTypes::MessagePtr DnsFilterConfigFactory::createEmptyConfigProto() { - return std::make_unique(); + return std::make_unique(); } std::string DnsFilterConfigFactory::name() const { return "envoy.filters.udp.dns_filter"; } diff --git a/source/extensions/filters/udp/dns_filter/config.h b/source/extensions/filters/udp/dns_filter/config.h index 9278199a26d6..77723946b8ab 100644 --- a/source/extensions/filters/udp/dns_filter/config.h +++ b/source/extensions/filters/udp/dns_filter/config.h @@ -1,7 +1,7 @@ #pragma once -#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h" -#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.validate.h" +#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.h" +#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.validate.h" #include "envoy/server/filter_config.h" #include "source/extensions/filters/udp/dns_filter/dns_filter.h" diff --git 
a/source/extensions/filters/udp/dns_filter/dns_filter.cc b/source/extensions/filters/udp/dns_filter/dns_filter.cc index 1e869093389d..f93343b4c7dd 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.cc +++ b/source/extensions/filters/udp/dns_filter/dns_filter.cc @@ -5,6 +5,7 @@ #include "source/common/config/datasource.h" #include "source/common/network/address_impl.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/protobuf/message_validator_impl.h" #include "source/extensions/filters/udp/dns_filter/dns_filter_utils.h" @@ -18,11 +19,11 @@ static constexpr std::chrono::seconds DEFAULT_RESOLVER_TTL{300}; DnsFilterEnvoyConfig::DnsFilterEnvoyConfig( Server::Configuration::ListenerFactoryContext& context, - const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig& config) + const envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig& config) : root_scope_(context.scope()), cluster_manager_(context.clusterManager()), api_(context.api()), stats_(generateStats(config.stat_prefix(), root_scope_)), resolver_timeout_(DEFAULT_RESOLVER_TIMEOUT), random_(context.api().randomGenerator()) { - using envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig; + using envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig; const auto& server_config = config.server_config(); @@ -159,21 +160,15 @@ DnsFilterEnvoyConfig::DnsFilterEnvoyConfig( forward_queries_ = config.has_client_config(); if (forward_queries_) { const auto& client_config = config.client_config(); - if (client_config.has_dns_resolution_config()) { - dns_resolver_options_.CopyFrom(client_config.dns_resolution_config().dns_resolver_options()); - if (!client_config.dns_resolution_config().resolvers().empty()) { - const auto& resolver_addrs = client_config.dns_resolution_config().resolvers(); - resolvers_.reserve(resolver_addrs.size()); - for (const auto& resolver_addr : resolver_addrs) { - resolvers_.push_back(Network::Utility::protobufAddressToAddress(resolver_addr)); - } - } - } - + dns_resolver_factory_ = + &Network::createDnsResolverFactoryFromProto(client_config, typed_dns_resolver_config_); // Set additional resolving options from configuration resolver_timeout_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT( client_config, resolver_timeout, DEFAULT_RESOLVER_TIMEOUT.count())); max_pending_lookups_ = client_config.max_pending_lookups(); + } else { + // In case client_config doesn't exist, create default DNS resolver factory and save it. 
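// Editor's note: a side-by-side sketch of the two halves of this refactor, namely the
// resolver factory chosen here in the config constructor and the resolver that
// DnsFilterResolver's constructor later builds from it (see below). The function
// name, parameter list, and include set are illustrative guesses, not part of the
// change; error handling is omitted.
#include "envoy/api/api.h"
#include "envoy/event/dispatcher.h"
#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.h"
#include "source/common/network/dns_resolver/dns_factory_util.h"

namespace Envoy {
Network::DnsResolverSharedPtr sketchCreateDnsResolver(
    const envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig& config,
    Event::Dispatcher& dispatcher, Api::Api& api) {
  envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config;
  const Network::DnsResolverFactory* factory;
  if (config.has_client_config()) {
    // Derive the factory (c-ares, Apple, ...) and its typed config from client_config.
    factory = &Network::createDnsResolverFactoryFromProto(config.client_config(),
                                                          typed_dns_resolver_config);
  } else {
    // No client_config: fall back to the default factory for this platform.
    factory = &Network::createDefaultDnsResolverFactory(typed_dns_resolver_config);
  }
  return factory->createDnsResolver(dispatcher, api, typed_dns_resolver_config);
}
} // namespace Envoy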
+ dns_resolver_factory_ = &Network::createDefaultDnsResolverFactory(typed_dns_resolver_config_); } } @@ -189,8 +184,8 @@ void DnsFilterEnvoyConfig::addEndpointToSuffix(const absl::string_view suffix, } bool DnsFilterEnvoyConfig::loadServerConfig( - const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig:: - ServerContextConfig& config, + const envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig::ServerContextConfig& + config, envoy::data::dns::v3::DnsTable& table) { using envoy::data::dns::v3::DnsTable; @@ -255,11 +250,12 @@ DnsFilter::DnsFilter(Network::UdpReadFilterCallbacks& callbacks, }; resolver_ = std::make_unique( - resolver_callback_, config->resolvers(), config->resolverTimeout(), listener_.dispatcher(), - config->maxPendingLookups(), config->dnsResolverOptions()); + resolver_callback_, config->resolverTimeout(), listener_.dispatcher(), + config->maxPendingLookups(), config->typedDnsResolverConfig(), config->dnsResolverFactory(), + config->api()); } -void DnsFilter::onData(Network::UdpRecvData& client_request) { +Network::FilterStatus DnsFilter::onData(Network::UdpRecvData& client_request) { config_->stats().downstream_rx_bytes_.recordValue(client_request.buffer_->length()); config_->stats().downstream_rx_queries_.inc(); @@ -275,17 +271,19 @@ void DnsFilter::onData(Network::UdpRecvData& client_request) { if (!query_context->parse_status_) { config_->stats().downstream_rx_invalid_queries_.inc(); sendDnsResponse(std::move(query_context)); - return; + return Network::FilterStatus::StopIteration; } // Resolve the requested name and respond to the client. If the return code is // External, we will respond to the client when the upstream resolver returns if (getResponseForQuery(query_context) == DnsLookupResponseCode::External) { - return; + return Network::FilterStatus::StopIteration; } // We have an answer, it might be "No Answer". 
Send it to the client sendDnsResponse(std::move(query_context)); + + return Network::FilterStatus::StopIteration; } void DnsFilter::sendDnsResponse(DnsQueryContextPtr query_context) { @@ -593,9 +591,11 @@ bool DnsFilter::resolveConfiguredService(DnsQueryContextPtr& context, const DnsQ return (targets_discovered != 0); } -void DnsFilter::onReceiveError(Api::IoError::IoErrorCode error_code) { +Network::FilterStatus DnsFilter::onReceiveError(Api::IoError::IoErrorCode error_code) { config_->stats().downstream_rx_errors_.inc(); UNREFERENCED_PARAMETER(error_code); + + return Network::FilterStatus::StopIteration; } } // namespace DnsFilter diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.h b/source/extensions/filters/udp/dns_filter/dns_filter.h index bdcfae1239fd..bd3499833db2 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.h +++ b/source/extensions/filters/udp/dns_filter/dns_filter.h @@ -1,7 +1,7 @@ #pragma once #include "envoy/event/file_event.h" -#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h" +#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.h" #include "envoy/network/dns.h" #include "envoy/network/filter.h" @@ -78,22 +78,23 @@ class DnsFilterEnvoyConfig : public Logger::Loggable { public: DnsFilterEnvoyConfig( Server::Configuration::ListenerFactoryContext& context, - const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig& config); + const envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig& config); DnsFilterStats& stats() const { return stats_; } const absl::flat_hash_map& domainTtl() const { return domain_ttl_; } - const AddressConstPtrVec& resolvers() const { return resolvers_; } bool forwardQueries() const { return forward_queries_; } const std::chrono::milliseconds resolverTimeout() const { return resolver_timeout_; } Upstream::ClusterManager& clusterManager() const { return cluster_manager_; } uint64_t retryCount() const { return retry_count_; } Random::RandomGenerator& random() const { return random_; } uint64_t maxPendingLookups() const { return max_pending_lookups_; } - const envoy::config::core::v3::DnsResolverOptions& dnsResolverOptions() const { - return dns_resolver_options_; + const envoy::config::core::v3::TypedExtensionConfig& typedDnsResolverConfig() const { + return typed_dns_resolver_config_; } + const Network::DnsResolverFactory& dnsResolverFactory() const { return *dns_resolver_factory_; } + Api::Api& api() const { return api_; } const TrieLookupTable& getDnsTrie() const { return dns_lookup_trie_; } @@ -105,9 +106,10 @@ class DnsFilterEnvoyConfig : public Logger::Loggable { POOL_HISTOGRAM_PREFIX(scope, final_prefix))}; } - bool loadServerConfig(const envoy::extensions::filters::udp::dns_filter::v3alpha:: - DnsFilterConfig::ServerContextConfig& config, - envoy::data::dns::v3::DnsTable& table); + bool loadServerConfig( + const envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig::ServerContextConfig& + config, + envoy::data::dns::v3::DnsTable& table); void addEndpointToSuffix(const absl::string_view suffix, const absl::string_view domain_name, DnsEndpointConfig& endpoint_config); @@ -123,11 +125,11 @@ class DnsFilterEnvoyConfig : public Logger::Loggable { absl::flat_hash_map domain_ttl_; bool forward_queries_; uint64_t retry_count_; - AddressConstPtrVec resolvers_; std::chrono::milliseconds resolver_timeout_; Random::RandomGenerator& random_; uint64_t max_pending_lookups_; - envoy::config::core::v3::DnsResolverOptions dns_resolver_options_; + 
envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config_; + Network::DnsResolverFactory* dns_resolver_factory_; }; using DnsFilterEnvoyConfigSharedPtr = std::shared_ptr; @@ -145,8 +147,8 @@ class DnsFilter : public Network::UdpListenerReadFilter, Logger::Loggable { public: - DnsFilterResolver(DnsFilterResolverCallback& callback, AddressConstPtrVec resolvers, - std::chrono::milliseconds timeout, Event::Dispatcher& dispatcher, - uint64_t max_pending_lookups, - const envoy::config::core::v3::DnsResolverOptions& dns_resolver_options) + DnsFilterResolver(DnsFilterResolverCallback& callback, std::chrono::milliseconds timeout, + Event::Dispatcher& dispatcher, uint64_t max_pending_lookups, + const envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config, + const Network::DnsResolverFactory& dns_resolver_factory, Api::Api& api) : timeout_(timeout), dispatcher_(dispatcher), - resolver_(dispatcher.createDnsResolver(resolvers, dns_resolver_options)), + resolver_( + dns_resolver_factory.createDnsResolver(dispatcher, api, typed_dns_resolver_config)), callback_(callback), max_pending_lookups_(max_pending_lookups) {} /** * @brief entry point to resolve the name in a DnsQueryRecord diff --git a/source/extensions/filters/udp/dns_filter/dns_filter_utils.h b/source/extensions/filters/udp/dns_filter/dns_filter_utils.h index b2e4565c6219..8294930c315d 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter_utils.h +++ b/source/extensions/filters/udp/dns_filter/dns_filter_utils.h @@ -1,6 +1,6 @@ #pragma once -#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h" +#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.h" #include "envoy/network/address.h" #include "source/extensions/filters/udp/dns_filter/dns_filter_constants.h" diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc index 4ee826eeb3f7..9be3f1003a37 100644 --- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc +++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc @@ -40,17 +40,19 @@ void UdpProxyFilter::onClusterRemoval(const std::string& cluster) { cluster_info_.reset(); } -void UdpProxyFilter::onData(Network::UdpRecvData& data) { +Network::FilterStatus UdpProxyFilter::onData(Network::UdpRecvData& data) { if (!cluster_info_.has_value()) { config_->stats().downstream_sess_no_route_.inc(); - return; + return Network::FilterStatus::StopIteration; } - cluster_info_.value().onData(data); + return cluster_info_.value().onData(data); } -void UdpProxyFilter::onReceiveError(Api::IoError::IoErrorCode) { +Network::FilterStatus UdpProxyFilter::onReceiveError(Api::IoError::IoErrorCode) { config_->stats().downstream_sess_rx_errors_.inc(); + + return Network::FilterStatus::StopIteration; } UdpProxyFilter::ClusterInfo::ClusterInfo(UdpProxyFilter& filter, @@ -83,7 +85,7 @@ UdpProxyFilter::ClusterInfo::~ClusterInfo() { ASSERT(host_to_sessions_.empty()); } -void UdpProxyFilter::ClusterInfo::onData(Network::UdpRecvData& data) { +Network::FilterStatus UdpProxyFilter::ClusterInfo::onData(Network::UdpRecvData& data) { const auto active_session_it = sessions_.find(data.addresses_); ActiveSession* active_session; if (active_session_it == sessions_.end()) { @@ -92,7 +94,7 @@ void UdpProxyFilter::ClusterInfo::onData(Network::UdpRecvData& data) { .connections() .canCreate()) { cluster_.info()->stats().upstream_cx_overflow_.inc(); - return; + return Network::FilterStatus::StopIteration; } 
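// Editor's note: this change also switches the Network::UdpListenerReadFilter
// callbacks from void to Network::FilterStatus (see the overridden signatures in
// udp_proxy_filter.h below). A minimal sketch of a filter written against the new
// interface follows; the class name is hypothetical, the include list is
// approximate, and the base-class constructor is assumed to take the callbacks
// reference, as DnsFilter's constructor above does.
#include "envoy/network/filter.h"
#include "envoy/network/listener.h"

namespace Envoy {
class NoopUdpReadFilter : public Network::UdpListenerReadFilter {
public:
  explicit NoopUdpReadFilter(Network::UdpReadFilterCallbacks& callbacks)
      : UdpListenerReadFilter(callbacks) {}

  // Consume every datagram; no further read filters run for it.
  Network::FilterStatus onData(Network::UdpRecvData&) override {
    return Network::FilterStatus::StopIteration;
  }
  Network::FilterStatus onReceiveError(Api::IoError::IoErrorCode) override {
    return Network::FilterStatus::StopIteration;
  }
};
} // namespace Envoy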
UdpLoadBalancerContext context(filter_.config_->hashPolicy(), data.addresses_.peer_); @@ -100,7 +102,7 @@ void UdpProxyFilter::ClusterInfo::onData(Network::UdpRecvData& data) { if (host == nullptr) { ENVOY_LOG(debug, "cannot find any valid host. failed to create a session."); cluster_.info()->stats().upstream_cx_none_healthy_.inc(); - return; + return Network::FilterStatus::StopIteration; } active_session = createSession(std::move(data.addresses_), host); @@ -126,6 +128,8 @@ void UdpProxyFilter::ClusterInfo::onData(Network::UdpRecvData& data) { } active_session->write(*data.buffer_); + + return Network::FilterStatus::StopIteration; } UdpProxyFilter::ActiveSession* diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h index fd20e7025dd2..727192c55e96 100644 --- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h +++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h @@ -140,8 +140,8 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, const UdpProxyFilterConfigSharedPtr& config); // Network::UdpListenerReadFilter - void onData(Network::UdpRecvData& data) override; - void onReceiveError(Api::IoError::IoErrorCode error_code) override; + Network::FilterStatus onData(Network::UdpRecvData& data) override; + Network::FilterStatus onReceiveError(Api::IoError::IoErrorCode error_code) override; private: class ClusterInfo; @@ -245,7 +245,7 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, public: ClusterInfo(UdpProxyFilter& filter, Upstream::ThreadLocalCluster& cluster); ~ClusterInfo(); - void onData(Network::UdpRecvData& data); + Network::FilterStatus onData(Network::UdpRecvData& data); void removeSession(const ActiveSession* session); UdpProxyFilter& filter_; diff --git a/source/extensions/grpc_credentials/file_based_metadata/config.cc b/source/extensions/grpc_credentials/file_based_metadata/config.cc index 81fa4aa78343..aebba3b17150 100644 --- a/source/extensions/grpc_credentials/file_based_metadata/config.cc +++ b/source/extensions/grpc_credentials/file_based_metadata/config.cc @@ -69,11 +69,12 @@ FileBasedMetadataAuthenticator::GetMetadata(grpc::string_ref, grpc::string_ref, if (!config_.header_key().empty()) { header_key = config_.header_key(); } - TRY_ASSERT_MAIN_THREAD { + // TODO(#14320): avoid using an exception here or find some way of doing this + // in the main thread. + TRY_NEEDS_AUDIT { std::string header_value = Envoy::Config::DataSource::read(config_.secret_data(), true, api_); metadata->insert(std::make_pair(header_key, header_prefix + header_value)); } - END_TRY catch (const EnvoyException& e) { return grpc::Status(grpc::StatusCode::NOT_FOUND, e.what()); } diff --git a/source/extensions/io_socket/user_space/file_event_impl.cc b/source/extensions/io_socket/user_space/file_event_impl.cc index dcb89c659529..e036525b3ba1 100644 --- a/source/extensions/io_socket/user_space/file_event_impl.cc +++ b/source/extensions/io_socket/user_space/file_event_impl.cc @@ -58,7 +58,7 @@ void FileEventImpl::setEnabled(uint32_t events) { ENVOY_LOG( trace, "User space file event {} set enabled events {} and events {} is active. Will {} reschedule.", - static_cast(this), events, was_enabled ? "not " : ""); + static_cast(this), events, events_to_notify, was_enabled ? 
"not " : ""); } void FileEventImpl::activateIfEnabled(uint32_t events) { diff --git a/source/extensions/io_socket/user_space/io_handle_impl.cc b/source/extensions/io_socket/user_space/io_handle_impl.cc index 55f425ec542c..83a22bb94d67 100644 --- a/source/extensions/io_socket/user_space/io_handle_impl.cc +++ b/source/extensions/io_socket/user_space/io_handle_impl.cc @@ -150,7 +150,7 @@ Api::IoCallUint64Result IoHandleImpl::writev(const Buffer::RawSlice* slices, uin } if (is_input_empty) { return Api::ioCallUint64ResultNoError(); - }; + } if (!isOpen()) { return {0, Api::IoErrorPtr(new Network::IoSocketError(SOCKET_ERROR_BADF), Network::IoSocketError::deleteIoError)}; diff --git a/source/extensions/network/dns_resolver/apple/BUILD b/source/extensions/network/dns_resolver/apple/BUILD new file mode 100644 index 000000000000..d8873ce661a6 --- /dev/null +++ b/source/extensions/network/dns_resolver/apple/BUILD @@ -0,0 +1,35 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_extension( + name = "config", + srcs = select({ + "//bazel:apple": ["apple_dns_impl.cc"], + "//conditions:default": [], + }), + hdrs = select({ + "//bazel:apple": ["apple_dns_impl.h"], + "//conditions:default": [], + }), + visibility = ["//visibility:public"], + deps = [ + "//envoy/event:dispatcher_interface", + "//envoy/event:file_event_interface", + "//envoy/event:timer_interface", + "//envoy/network:dns_interface", + "//source/common/common:assert_lib", + "//source/common/common:backoff_lib", + "//source/common/common:linked_object", + "//source/common/network:address_lib", + "//source/common/network:utility_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", + "//source/common/singleton:threadsafe_singleton", + ], +) diff --git a/source/common/network/apple_dns_impl.cc b/source/extensions/network/dns_resolver/apple/apple_dns_impl.cc similarity index 80% rename from source/common/network/apple_dns_impl.cc rename to source/extensions/network/dns_resolver/apple/apple_dns_impl.cc index 9a423621acbf..a7bc4d790874 100644 --- a/source/common/network/apple_dns_impl.cc +++ b/source/extensions/network/dns_resolver/apple/apple_dns_impl.cc @@ -1,4 +1,4 @@ -#include "source/common/network/apple_dns_impl.h" +#include "source/extensions/network/dns_resolver/apple/apple_dns_impl.h" #include @@ -10,10 +10,12 @@ #include "envoy/common/platform.h" #include "envoy/event/file_event.h" +#include "envoy/registry/registry.h" #include "source/common/common/assert.h" #include "source/common/common/fmt.h" #include "source/common/network/address_impl.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/network/utility.h" #include "absl/strings/str_join.h" @@ -139,7 +141,8 @@ AppleDnsResolverImpl::PendingResolution::PendingResolution(AppleDnsResolverImpl& const std::string& dns_name, DnsLookupFamily dns_lookup_family) : parent_(parent), callback_(callback), dispatcher_(dispatcher), dns_name_(dns_name), - pending_cb_({ResolutionStatus::Success, {}, {}}), dns_lookup_family_(dns_lookup_family) {} + pending_response_({ResolutionStatus::Success, {}, {}, {}}), + dns_lookup_family_(dns_lookup_family) {} AppleDnsResolverImpl::PendingResolution::~PendingResolution() { ENVOY_LOG(debug, "Destroying PendingResolution for {}", dns_name_); @@ -181,7 +184,7 @@ void AppleDnsResolverImpl::PendingResolution::onEventCallback(uint32_t events) { // Similar to receiving an error in 
onDNSServiceGetAddrInfoReply, an error while processing fd // events indicates that the sd_ref state is broken. // Therefore, finish resolving with an error. - pending_cb_.status_ = ResolutionStatus::Failure; + pending_response_.status_ = ResolutionStatus::Failure; finishResolve(); } } @@ -189,29 +192,39 @@ void AppleDnsResolverImpl::PendingResolution::onEventCallback(uint32_t events) { std::list& AppleDnsResolverImpl::PendingResolution::finalAddressList() { switch (dns_lookup_family_) { case DnsLookupFamily::V4Only: - return pending_cb_.v4_responses_; + return pending_response_.v4_responses_; case DnsLookupFamily::V6Only: - return pending_cb_.v6_responses_; + return pending_response_.v6_responses_; case DnsLookupFamily::Auto: // Per API docs only give v4 if v6 is not available. - if (pending_cb_.v6_responses_.empty()) { - return pending_cb_.v4_responses_; + if (pending_response_.v6_responses_.empty()) { + return pending_response_.v4_responses_; } - return pending_cb_.v6_responses_; + return pending_response_.v6_responses_; case DnsLookupFamily::V4Preferred: // Per API docs only give v6 if v4 is not available. - if (pending_cb_.v4_responses_.empty()) { - return pending_cb_.v6_responses_; + if (pending_response_.v4_responses_.empty()) { + return pending_response_.v6_responses_; } - return pending_cb_.v4_responses_; + return pending_response_.v4_responses_; + case DnsLookupFamily::All: + ASSERT(pending_response_.all_responses_.empty()); + pending_response_.all_responses_.insert(pending_response_.all_responses_.end(), + pending_response_.v4_responses_.begin(), + pending_response_.v4_responses_.end()); + pending_response_.all_responses_.insert(pending_response_.all_responses_.end(), + pending_response_.v6_responses_.begin(), + pending_response_.v6_responses_.end()); + return pending_response_.all_responses_; } NOT_REACHED_GCOVR_EXCL_LINE; } void AppleDnsResolverImpl::PendingResolution::finishResolve() { ENVOY_LOG_EVENT(debug, "apple_dns_resolution_complete", - "dns resolution for {} completed with status {}", dns_name_, pending_cb_.status_); - callback_(pending_cb_.status_, std::move(finalAddressList())); + "dns resolution for {} completed with status {}", dns_name_, + pending_response_.status_); + callback_(pending_response_.status_, std::move(finalAddressList())); if (owned_) { ENVOY_LOG(debug, "Resolution for {} completed (async)", dns_name_); @@ -233,7 +246,20 @@ DNSServiceErrorType AppleDnsResolverImpl::PendingResolution::dnsServiceGetAddrIn break; case DnsLookupFamily::Auto: case DnsLookupFamily::V4Preferred: - protocol = kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6; + case DnsLookupFamily::All: + /* We want to make sure we don't get any address that is not routable. Passing 0 + * to apple's `DNSServiceGetAddrInfo` will make a best attempt to filter out IPv6 + * or IPv4 addresses depending on what's routable, per Apple's documentation: + * + * If neither flag is set, the system will apply an intelligent heuristic, which + * is (currently) that it will attempt to look up both, except: + * If "hostname" is a wide-area unicast DNS hostname (i.e. not a ".local." name) but + * this host has no routable IPv6 address, then the call will not try to look up IPv6 + * addresses for "hostname", since any addresses it found would be unlikely to be of + * any use anyway. Similarly, if this host has no routable IPv4 address, the call will + * not try to look up IPv4 addresses for "hostname". 
+ */ + protocol = 0; break; } @@ -277,9 +303,9 @@ void AppleDnsResolverImpl::PendingResolution::onDNSServiceGetAddrInfoReply( if (error_code != kDNSServiceErr_NoError) { parent_.chargeGetAddrInfoErrorStats(error_code); - pending_cb_.status_ = ResolutionStatus::Failure; - pending_cb_.v4_responses_.clear(); - pending_cb_.v6_responses_.clear(); + pending_response_.status_ = ResolutionStatus::Failure; + pending_response_.v4_responses_.clear(); + pending_response_.v6_responses_.clear(); finishResolve(); // Note: Nothing can follow this call to flushPendingQueries due to deletion of this @@ -296,10 +322,10 @@ void AppleDnsResolverImpl::PendingResolution::onDNSServiceGetAddrInfoReply( ENVOY_LOG(debug, "Address to add address={}, ttl={}", dns_response.address_->ip()->addressAsString(), ttl); if (dns_response.address_->ip()->ipv4()) { - pending_cb_.v4_responses_.push_back(dns_response); + pending_response_.v4_responses_.push_back(dns_response); } else { ASSERT(dns_response.address_->ip()->ipv6()); - pending_cb_.v6_responses_.push_back(dns_response); + pending_response_.v6_responses_.push_back(dns_response); } } @@ -355,5 +381,24 @@ AppleDnsResolverImpl::PendingResolution::buildDnsResponse(const struct sockaddr* } } +// apple DNS resolver factory +class AppleDnsResolverFactory : public DnsResolverFactory { +public: + std::string name() const override { return std::string(AppleDnsResolver); } + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return ProtobufTypes::MessagePtr{ + new envoy::extensions::network::dns_resolver::apple::v3::AppleDnsResolverConfig()}; + } + DnsResolverSharedPtr + createDnsResolver(Event::Dispatcher& dispatcher, Api::Api& api, + const envoy::config::core::v3::TypedExtensionConfig&) const override { + ASSERT(dispatcher.isThreadSafe()); + return std::make_shared(dispatcher, api.rootScope()); + } +}; + +// Register the AppleDnsResolverFactory +REGISTER_FACTORY(AppleDnsResolverFactory, DnsResolverFactory); + } // namespace Network } // namespace Envoy diff --git a/source/common/network/apple_dns_impl.h b/source/extensions/network/dns_resolver/apple/apple_dns_impl.h similarity index 96% rename from source/common/network/apple_dns_impl.h rename to source/extensions/network/dns_resolver/apple/apple_dns_impl.h index 3eeaba854c16..421d14fe29e7 100644 --- a/source/common/network/apple_dns_impl.h +++ b/source/extensions/network/dns_resolver/apple/apple_dns_impl.h @@ -10,6 +10,7 @@ #include "envoy/event/file_event.h" #include "envoy/event/timer.h" #include "envoy/network/dns.h" +#include "envoy/registry/registry.h" #include "source/common/common/backoff_strategy.h" #include "source/common/common/linked_object.h" @@ -108,10 +109,11 @@ class AppleDnsResolverImpl : public DnsResolver, protected Logger::Loggable v4_responses_; std::list v6_responses_; + std::list all_responses_; }; AppleDnsResolverImpl& parent_; @@ -127,7 +129,7 @@ class AppleDnsResolverImpl : public DnsResolver, protected Logger::Loggable #include @@ -7,11 +7,14 @@ #include #include "envoy/common/platform.h" +#include "envoy/registry/registry.h" #include "source/common/common/assert.h" #include "source/common/common/fmt.h" #include "source/common/common/thread.h" #include "source/common/network/address_impl.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" +#include "source/common/network/resolver_impl.h" #include "source/common/network/utility.h" #include "absl/strings/str_join.h" @@ -104,16 +107,16 @@ void DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback(int status, i // 
ARES_ECONNREFUSED. If the PendingResolution has not been cancelled that means that the // callback_ target _should_ still be around. In that case, raise the callback_ so the target // can be done with this query and initiate a new one. - if (!cancelled_) { - ENVOY_LOG_EVENT(debug, "cares_dns_resolution_destroyed", "dns resolution for {} destroyed", - dns_name_); + ENVOY_LOG_EVENT(debug, "cares_dns_resolution_destroyed", "dns resolution for {} destroyed", + dns_name_); - callback_(ResolutionStatus::Failure, {}); - } - delete this; + // Nothing can follow a call to finishResolve due to the deletion of this object upon + // finishResolve(). + finishResolve(); return; } - if (!fallback_if_failed_) { + + if (!dual_resolution_) { completed_ = true; // If c-ares returns ARES_ECONNREFUSED and there is no fallback we assume that the channel_ is @@ -130,10 +133,9 @@ void DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback(int status, i } } - std::list address_list; - ResolutionStatus resolution_status; if (status == ARES_SUCCESS) { - resolution_status = ResolutionStatus::Success; + pending_response_.status_ = ResolutionStatus::Success; + if (addrinfo != nullptr && addrinfo->nodes != nullptr) { if (addrinfo->nodes->ai_family == AF_INET) { for (const ares_addrinfo_node* ai = addrinfo->nodes; ai != nullptr; ai = ai->ai_next) { @@ -143,7 +145,7 @@ void DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback(int status, i address.sin_port = 0; address.sin_addr = reinterpret_cast(ai->ai_addr)->sin_addr; - address_list.emplace_back( + pending_response_.address_list_.emplace_back( DnsResponse(std::make_shared(&address), std::chrono::seconds(ai->ai_ttl))); } @@ -154,21 +156,19 @@ void DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback(int status, i address.sin6_family = AF_INET6; address.sin6_port = 0; address.sin6_addr = reinterpret_cast(ai->ai_addr)->sin6_addr; - address_list.emplace_back( + pending_response_.address_list_.emplace_back( DnsResponse(std::make_shared(address), std::chrono::seconds(ai->ai_ttl))); } } } - if (!address_list.empty()) { + if (!pending_response_.address_list_.empty() && dns_lookup_family_ != DnsLookupFamily::All) { completed_ = true; } ASSERT(addrinfo != nullptr); ares_freeaddrinfo(addrinfo); - } else { - resolution_status = ResolutionStatus::Failure; } if (timeouts > 0) { @@ -176,44 +176,21 @@ void DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback(int status, i } if (completed_) { - if (!cancelled_) { - // Use a raw try here because it is used in both main thread and filter. - // Can not convert to use status code as there may be unexpected exceptions in server fuzz - // tests, which must be handled. Potential exception may come from getAddressWithPort() or - // portFromTcpUrl(). - // TODO(chaoqin-li1123): remove try catch pattern here once we figure how to handle unexpected - // exception in fuzz tests. - ENVOY_LOG_EVENT(debug, "cares_dns_resolution_complete", - "dns resolution for {} completed with status {}", dns_name_, - resolution_status); - - TRY_NEEDS_AUDIT { callback_(resolution_status, std::move(address_list)); } - catch (const EnvoyException& e) { - ENVOY_LOG(critical, "EnvoyException in c-ares callback: {}", e.what()); - dispatcher_.post([s = std::string(e.what())] { throw EnvoyException(s); }); - } - catch (const std::exception& e) { - ENVOY_LOG(critical, "std::exception in c-ares callback: {}", e.what()); - dispatcher_.post([s = std::string(e.what())] { throw EnvoyException(s); }); - } - catch (...) 
{ - ENVOY_LOG(critical, "Unknown exception in c-ares callback"); - dispatcher_.post([] { throw EnvoyException("unknown"); }); - } - } - if (owned_) { - delete this; - return; - } + finishResolve(); + // Nothing can follow a call to finishResolve due to the deletion of this object upon + // finishResolve(). + return; } - if (!completed_ && fallback_if_failed_) { - fallback_if_failed_ = false; + if (dual_resolution_) { + dual_resolution_ = false; + // Perform a second lookup for DnsLookupFamily::Auto and DnsLookupFamily::V4Preferred, given + // that the first lookup failed to return any addresses. Note that DnsLookupFamily::All issues + // both lookups concurrently so there is no need to fire a second lookup here. if (dns_lookup_family_ == DnsLookupFamily::Auto) { getAddrInfo(AF_INET); - } else { - ASSERT(dns_lookup_family_ == DnsLookupFamily::V4Preferred); + } else if (dns_lookup_family_ == DnsLookupFamily::V4Preferred) { getAddrInfo(AF_INET6); } @@ -223,6 +200,40 @@ void DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback(int status, i } } +void DnsResolverImpl::PendingResolution::finishResolve() { + if (!cancelled_) { + // Use a raw try here because it is used in both main thread and filter. + // Can not convert to use status code as there may be unexpected exceptions in server fuzz + // tests, which must be handled. Potential exception may come from getAddressWithPort() or + // portFromTcpUrl(). + // TODO(chaoqin-li1123): remove try catch pattern here once we figure how to handle unexpected + // exception in fuzz tests. + ENVOY_LOG_EVENT(debug, "cares_dns_resolution_complete", + "dns resolution for {} completed with status {}", dns_name_, + pending_response_.status_); + + TRY_NEEDS_AUDIT { + callback_(pending_response_.status_, std::move(pending_response_.address_list_)); + } + catch (const EnvoyException& e) { + ENVOY_LOG(critical, "EnvoyException in c-ares callback: {}", e.what()); + dispatcher_.post([s = std::string(e.what())] { throw EnvoyException(s); }); + } + catch (const std::exception& e) { + ENVOY_LOG(critical, "std::exception in c-ares callback: {}", e.what()); + dispatcher_.post([s = std::string(e.what())] { throw EnvoyException(s); }); + } + catch (...) { + ENVOY_LOG(critical, "Unknown exception in c-ares callback"); + dispatcher_.post([] { throw EnvoyException("unknown"); }); + } + } + if (owned_) { + delete this; + return; + } +} + void DnsResolverImpl::updateAresTimer() { // Update the timeout for events. timeval timeout; @@ -283,15 +294,28 @@ ActiveDnsQuery* DnsResolverImpl::resolve(const std::string& dns_name, auto pending_resolution = std::make_unique( *this, callback, dispatcher_, channel_, dns_name, dns_lookup_family); if (dns_lookup_family == DnsLookupFamily::Auto || - dns_lookup_family == DnsLookupFamily::V4Preferred) { - pending_resolution->fallback_if_failed_ = true; + dns_lookup_family == DnsLookupFamily::V4Preferred || + dns_lookup_family == DnsLookupFamily::All) { + pending_resolution->dual_resolution_ = true; } - if (dns_lookup_family == DnsLookupFamily::V4Only || - dns_lookup_family == DnsLookupFamily::V4Preferred) { + switch (dns_lookup_family) { + case DnsLookupFamily::V4Only: + case DnsLookupFamily::V4Preferred: + pending_resolution->getAddrInfo(AF_INET); + break; + case DnsLookupFamily::V6Only: + case DnsLookupFamily::Auto: + pending_resolution->getAddrInfo(AF_INET6); + break; + // NOTE: DnsLookupFamily::All performs both lookups concurrently as addresses from both families + // are being requested. 
+ case DnsLookupFamily::All: pending_resolution->getAddrInfo(AF_INET); - } else { pending_resolution->getAddrInfo(AF_INET6); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; } if (pending_resolution->completed_) { @@ -328,5 +352,41 @@ void DnsResolverImpl::PendingResolution::getAddrInfo(int family) { this); } +// c-ares DNS resolver factory +class CaresDnsResolverFactory : public DnsResolverFactory { +public: + std::string name() const override { return std::string(CaresDnsResolver); } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return ProtobufTypes::MessagePtr{ + new envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig()}; + } + + DnsResolverSharedPtr createDnsResolver(Event::Dispatcher& dispatcher, Api::Api&, + const envoy::config::core::v3::TypedExtensionConfig& + typed_dns_resolver_config) const override { + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + envoy::config::core::v3::DnsResolverOptions dns_resolver_options; + std::vector resolvers; + + ASSERT(dispatcher.isThreadSafe()); + // Only c-ares DNS factory will call into this function. + // Directly unpack the typed config to a c-ares object. + Envoy::MessageUtil::unpackTo(typed_dns_resolver_config.typed_config(), cares); + dns_resolver_options.MergeFrom(cares.dns_resolver_options()); + if (!cares.resolvers().empty()) { + const auto& resolver_addrs = cares.resolvers(); + resolvers.reserve(resolver_addrs.size()); + for (const auto& resolver_addr : resolver_addrs) { + resolvers.push_back(Network::Address::resolveProtoAddress(resolver_addr)); + } + } + return std::make_shared(dispatcher, resolvers, dns_resolver_options); + } +}; + +// Register the CaresDnsResolverFactory +REGISTER_FACTORY(CaresDnsResolverFactory, DnsResolverFactory); + } // namespace Network } // namespace Envoy diff --git a/source/common/network/dns_impl.h b/source/extensions/network/dns_resolver/cares/dns_impl.h similarity index 80% rename from source/common/network/dns_impl.h rename to source/extensions/network/dns_resolver/cares/dns_impl.h index 059d82073f58..cfb637b5b172 100644 --- a/source/common/network/dns_impl.h +++ b/source/extensions/network/dns_resolver/cares/dns_impl.h @@ -7,6 +7,7 @@ #include "envoy/event/dispatcher.h" #include "envoy/event/file_event.h" #include "envoy/network/dns.h" +#include "envoy/registry/registry.h" #include "source/common/common/linked_object.h" #include "source/common/common/logger.h" @@ -65,6 +66,15 @@ class DnsResolverImpl : public DnsResolver, protected Logger::Loggable address_list_; + }; + DnsResolverImpl& parent_; // Caller supplied callback to invoke on query completion or error. const ResolveCb callback_; @@ -77,12 +87,18 @@ class DnsResolverImpl : public DnsResolver, protected Logger::Loggable resolvers_csv_; }; +DECLARE_FACTORY(CaresDnsResolverFactory); + } // namespace Network } // namespace Envoy diff --git a/source/extensions/stat_sinks/common/statsd/statsd.cc b/source/extensions/stat_sinks/common/statsd/statsd.cc index fab256cc7c21..2bcd37f19312 100644 --- a/source/extensions/stat_sinks/common/statsd/statsd.cc +++ b/source/extensions/stat_sinks/common/statsd/statsd.cc @@ -112,12 +112,22 @@ void UdpStatsdSink::onHistogramComplete(const Stats::Histogram& histogram, uint6 // are timers but record in units other than milliseconds, it may make sense to scale the value to // milliseconds here and potentially suffix the names accordingly (minus the pre-existing ones for // backwards compatibility). 
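// Editor's note: the hunk just below teaches UdpStatsdSink::onHistogramComplete()
// to emit percent-unit histograms as scaled floats ("|h") while keeping the
// millisecond timer format ("|ms") for everything else. A standalone illustration
// of the two wire formats (the function name is illustrative and the scale constant
// is passed in instead of reading Stats::Histogram::PercentScale; the tag handling
// done by buildMessage() is omitted):
#include <cstdint>
#include <string>

#include "absl/strings/str_cat.h"

std::string sketchStatsdHistogramLine(const std::string& name, uint64_t raw_value, bool is_percent,
                                      uint64_t percent_scale) {
  if (is_percent) {
    // Percent samples arrive pre-scaled; divide back down to a float histogram sample.
    const float scaled = static_cast<float>(raw_value) / static_cast<float>(percent_scale);
    return absl::StrCat(name, ":", scaled, "|h");
  }
  // All other histograms are interpreted as millisecond timers.
  return absl::StrCat(name, ":", raw_value, "|ms");
}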
- const std::string message = - buildMessage(histogram, std::chrono::milliseconds(value).count(), "|ms"); + std::string message; + if (histogram.unit() == Stats::Histogram::Unit::Percent) { + // 32-bit floating point values should have plenty of range for these values, and are faster to + // operate on than 64-bit doubles. + constexpr float divisor = Stats::Histogram::PercentScale; + const float float_value = value; + const float scaled = float_value / divisor; + message = buildMessage(histogram, scaled, "|h"); + } else { + message = buildMessage(histogram, std::chrono::milliseconds(value).count(), "|ms"); + } tls_->getTyped().write(message); } -const std::string UdpStatsdSink::buildMessage(const Stats::Metric& metric, uint64_t value, +template +const std::string UdpStatsdSink::buildMessage(const Stats::Metric& metric, ValueType value, const std::string& type) const { switch (tag_format_.tag_position) { case Statsd::TagPosition::TagAfterValue: { @@ -200,6 +210,21 @@ void TcpStatsdSink::flush(Stats::MetricSnapshot& snapshot) { tls_sink.endFlush(true); } +void TcpStatsdSink::onHistogramComplete(const Stats::Histogram& histogram, uint64_t value) { + // For statsd histograms are all timers except percents. + if (histogram.unit() == Stats::Histogram::Unit::Percent) { + // 32-bit floating point values should have plenty of range for these values, and are faster to + // operate on than 64-bit doubles. + constexpr float divisor = Stats::Histogram::PercentScale; + const float float_value = value; + const float scaled = float_value / divisor; + tls_->getTyped().onPercentHistogramComplete(histogram.name(), scaled); + } else { + tls_->getTyped().onTimespanComplete(histogram.name(), + std::chrono::milliseconds(value)); + } +} + TcpStatsdSink::TlsSink::TlsSink(TcpStatsdSink& parent, Event::Dispatcher& dispatcher) : parent_(parent), dispatcher_(dispatcher) {} @@ -288,6 +313,12 @@ void TcpStatsdSink::TlsSink::onTimespanComplete(const std::string& name, write(buffer); } +void TcpStatsdSink::TlsSink::onPercentHistogramComplete(const std::string& name, float value) { + ASSERT(current_slice_mem_ == nullptr); + Buffer::OwnedImpl buffer(fmt::format("{}.{}:{}|h\n", parent_.getPrefix().c_str(), name, value)); + write(buffer); +} + void TcpStatsdSink::TlsSink::write(Buffer::Instance& buffer) { // Guard against the stats connection backing up. 
In this case we probably have no visibility // into what is going on externally, but we also increment a stat that should be viewable diff --git a/source/extensions/stat_sinks/common/statsd/statsd.h b/source/extensions/stat_sinks/common/statsd/statsd.h index 9237b0368811..e1a5cd4e46ae 100644 --- a/source/extensions/stat_sinks/common/statsd/statsd.h +++ b/source/extensions/stat_sinks/common/statsd/statsd.h @@ -85,7 +85,8 @@ class UdpStatsdSink : public Stats::Sink { void flushBuffer(Buffer::OwnedImpl& buffer, Writer& writer) const; void writeBuffer(Buffer::OwnedImpl& buffer, Writer& writer, const std::string& data) const; - const std::string buildMessage(const Stats::Metric& metric, uint64_t value, + template + const std::string buildMessage(const Stats::Metric& metric, ValueType value, const std::string& type) const; const std::string getName(const Stats::Metric& metric) const; const std::string buildTagStr(const std::vector& tags) const; @@ -110,11 +111,7 @@ class TcpStatsdSink : public Stats::Sink { // Stats::Sink void flush(Stats::MetricSnapshot& snapshot) override; - void onHistogramComplete(const Stats::Histogram& histogram, uint64_t value) override { - // For statsd histograms are all timers. - tls_->getTyped().onTimespanComplete(histogram.name(), - std::chrono::milliseconds(value)); - } + void onHistogramComplete(const Stats::Histogram& histogram, uint64_t value) override; const std::string& getPrefix() { return prefix_; } @@ -129,6 +126,7 @@ class TcpStatsdSink : public Stats::Sink { void flushGauge(const std::string& name, uint64_t value); void endFlush(bool do_write); void onTimespanComplete(const std::string& name, std::chrono::milliseconds ms); + void onPercentHistogramComplete(const std::string& name, float value); uint64_t usedBuffer() const; void write(Buffer::Instance& buffer); diff --git a/source/extensions/tracers/xray/BUILD b/source/extensions/tracers/xray/BUILD index b3044a98df67..e13753344d6a 100644 --- a/source/extensions/tracers/xray/BUILD +++ b/source/extensions/tracers/xray/BUILD @@ -52,6 +52,7 @@ envoy_cc_library( "//source/common/protobuf:utility_lib", "//source/common/runtime:runtime_lib", "//source/common/tracing:common_values_lib", + "//source/common/tracing:http_tracer_lib", "//source/common/tracing:null_span_lib", ], ) diff --git a/source/extensions/tracers/xray/localized_sampling.cc b/source/extensions/tracers/xray/localized_sampling.cc index e56cfc52b393..a621daad1614 100644 --- a/source/extensions/tracers/xray/localized_sampling.cc +++ b/source/extensions/tracers/xray/localized_sampling.cc @@ -177,16 +177,16 @@ LocalizedSamplingManifest::LocalizedSamplingManifest(const std::string& rule_jso } bool LocalizedSamplingStrategy::shouldTrace(const SamplingRequest& sampling_request) { - if (!custom_manifest_.hasCustomRules()) { - return shouldTrace(default_manifest_.defaultRule()); + if (!manifest_.hasCustomRules()) { + return shouldTrace(manifest_.defaultRule()); } - for (auto&& rule : custom_manifest_.customRules()) { + for (auto&& rule : manifest_.customRules()) { if (rule.appliesTo(sampling_request)) { return shouldTrace(rule); } } - return shouldTrace(custom_manifest_.defaultRule()); + return shouldTrace(manifest_.defaultRule()); } bool LocalizedSamplingStrategy::shouldTrace(LocalizedSamplingRule& rule) { diff --git a/source/extensions/tracers/xray/localized_sampling.h b/source/extensions/tracers/xray/localized_sampling.h index dc9cde196047..c72bde3f9420 100644 --- a/source/extensions/tracers/xray/localized_sampling.h +++ 
b/source/extensions/tracers/xray/localized_sampling.h @@ -138,33 +138,21 @@ class LocalizedSamplingStrategy : public SamplingStrategy { public: LocalizedSamplingStrategy(const std::string& sampling_rules_json, Random::RandomGenerator& rng, TimeSource& time_source) - : SamplingStrategy(rng), default_manifest_(LocalizedSamplingManifest::createDefault()), - custom_manifest_(sampling_rules_json), time_source_(time_source), - use_default_(!custom_manifest_.hasCustomRules()) {} + : SamplingStrategy(rng), manifest_(sampling_rules_json), time_source_(time_source) {} /** - * Determines if an incoming request matches one of the sampling rules in the local manifests. + * Determines if an incoming request matches one of the sampling rules in the local manifest. * If a match is found, then the request might be traced based on the sampling percentages etc. * determined by the matching rule. */ bool shouldTrace(const SamplingRequest& sampling_request) override; - /** - * Determines whether default rules are in effect. Mainly for unit testing purposes. - */ - bool usingDefaultManifest() const { return use_default_; } - - /** - * @return the default manifest. Mainly for unit testing purposes. - */ - const LocalizedSamplingManifest& defaultManifest() const { return default_manifest_; } + const LocalizedSamplingManifest manifest() const { return manifest_; } private: bool shouldTrace(LocalizedSamplingRule& rule); - LocalizedSamplingManifest default_manifest_; - LocalizedSamplingManifest custom_manifest_; + LocalizedSamplingManifest manifest_; TimeSource& time_source_; - bool use_default_; }; } // namespace XRay diff --git a/source/extensions/tracers/xray/tracer.cc b/source/extensions/tracers/xray/tracer.cc index 76792fb4cc16..418b3766748f 100644 --- a/source/extensions/tracers/xray/tracer.cc +++ b/source/extensions/tracers/xray/tracer.cc @@ -10,6 +10,7 @@ #include "source/common/common/assert.h" #include "source/common/common/fmt.h" #include "source/common/protobuf/utility.h" +#include "source/common/tracing/http_tracer_impl.h" #include "source/extensions/tracers/xray/daemon.pb.validate.h" namespace Envoy { @@ -18,7 +19,8 @@ namespace Tracers { namespace XRay { namespace { -constexpr auto XRaySerializationVersion = "1"; +constexpr absl::string_view XRaySerializationVersion = "1"; +constexpr absl::string_view DirectionKey = "direction"; // X-Ray Trace ID Format // @@ -35,19 +37,14 @@ constexpr auto XRaySerializationVersion = "1"; std::string generateTraceId(SystemTime point_in_time, Random::RandomGenerator& random) { using std::chrono::seconds; using std::chrono::time_point_cast; - const auto epoch = time_point_cast(point_in_time).time_since_epoch().count(); - std::string out; - out.reserve(35); - out += XRaySerializationVersion; - out.push_back('-'); // epoch in seconds represented as 8 hexadecimal characters - out += Hex::uint32ToHex(epoch); - out.push_back('-'); + const auto epoch = time_point_cast(point_in_time).time_since_epoch().count(); std::string uuid = random.uuid(); // unique id represented as 24 hexadecimal digits and no dashes uuid.erase(std::remove(uuid.begin(), uuid.end(), '-'), uuid.end()); ASSERT(uuid.length() >= 24); - out += uuid.substr(0, 24); + const std::string out = + absl::StrCat(XRaySerializationVersion, "-", Hex::uint32ToHex(epoch), "-", uuid.substr(0, 24)); return out; } @@ -93,6 +90,8 @@ void Span::finishSpan() { for (const auto& item : custom_annotations_) { s.mutable_annotations()->insert({item.first, item.second}); } + // `direction` will be either "ingress" or "egress" + 
s.mutable_annotations()->insert({std::string(DirectionKey), direction()}); const std::string json = MessageUtil::getJsonStringFromMessageOrDie( s, false /* pretty_print */, false /* always_print_primitive_fields */); @@ -106,11 +105,12 @@ void Span::injectContext(Tracing::TraceContext& trace_context) { trace_context.setByReferenceKey(XRayTraceHeader, xray_header_value); } -Tracing::SpanPtr Span::spawnChild(const Tracing::Config&, const std::string& operation_name, +Tracing::SpanPtr Span::spawnChild(const Tracing::Config& config, const std::string& operation_name, Envoy::SystemTime start_time) { auto child_span = std::make_unique(time_source_, random_, broker_); child_span->setName(name()); child_span->setOperation(operation_name); + child_span->setDirection(Tracing::HttpTracerUtility::toString(config.operationName())); child_span->setStartTime(start_time); child_span->setParentId(id()); child_span->setTraceId(traceId()); @@ -118,12 +118,14 @@ Tracing::SpanPtr Span::spawnChild(const Tracing::Config&, const std::string& ope return child_span; } -Tracing::SpanPtr Tracer::startSpan(const std::string& operation_name, Envoy::SystemTime start_time, +Tracing::SpanPtr Tracer::startSpan(const Tracing::Config& config, const std::string& operation_name, + Envoy::SystemTime start_time, const absl::optional& xray_header) { auto span_ptr = std::make_unique(time_source_, random_, *daemon_broker_); span_ptr->setName(segment_name_); span_ptr->setOperation(operation_name); + span_ptr->setDirection(Tracing::HttpTracerUtility::toString(config.operationName())); // Even though we have a TimeSource member in the tracer, we assume the start_time argument has a // more precise value than calling the systemTime() at this point in time. span_ptr->setStartTime(start_time); diff --git a/source/extensions/tracers/xray/tracer.h b/source/extensions/tracers/xray/tracer.h index 623eddd88638..f53c4ca91a12 100644 --- a/source/extensions/tracers/xray/tracer.h +++ b/source/extensions/tracers/xray/tracer.h @@ -64,6 +64,12 @@ class Span : public Tracing::Span, Logger::Loggable { operation_name_ = std::string(operation); } + /** + * Sets the current direction on the Span. + * This information will be included in the X-Ray span's annotation. + */ + void setDirection(absl::string_view direction) { direction_ = std::string(direction); } + /** * Sets the name of the Span. */ @@ -140,8 +146,16 @@ class Span : public Tracing::Span, Logger::Loggable { */ const std::string& id() const { return id_; } + /** + * Gets this Span's parent ID. + */ const std::string& parentId() const { return parent_segment_id_; } + /** + * Gets this Span's direction. + */ + const std::string& direction() const { return direction_; } + /** * Gets this Span's name. */ @@ -196,6 +210,7 @@ class Span : public Tracing::Span, Logger::Loggable { DaemonBroker& broker_; Envoy::SystemTime start_time_; std::string operation_name_; + std::string direction_; std::string id_; std::string trace_id_; std::string parent_segment_id_; @@ -222,7 +237,8 @@ class Tracer { /** * Starts a tracing span for X-Ray */ - Tracing::SpanPtr startSpan(const std::string& operation_name, Envoy::SystemTime start_time, + Tracing::SpanPtr startSpan(const Tracing::Config&, const std::string& operation_name, + Envoy::SystemTime start_time, const absl::optional& xray_header); /** * Creates a Span that is marked as not-sampled. 
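For readers following the trace-id refactor in tracer.cc above: the rewritten generateTraceId() assembles "1-<epoch seconds as 8 hex characters>-<24 hex characters of the dash-stripped uuid>" with a single absl::StrCat. A minimal, self-contained sketch of that layout (the function name and the caller-supplied unique_part are illustrative; the real code derives the unique part from random.uuid()):

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <string>

std::string sketchXRayTraceId(std::chrono::system_clock::time_point point_in_time,
                              const std::string& unique_part) {
  const auto epoch = static_cast<uint32_t>(
      std::chrono::duration_cast<std::chrono::seconds>(point_in_time.time_since_epoch()).count());
  char epoch_hex[9];
  // Epoch in seconds rendered as exactly 8 hexadecimal characters.
  std::snprintf(epoch_hex, sizeof(epoch_hex), "%08x", static_cast<unsigned int>(epoch));
  return "1-" + std::string(epoch_hex) + "-" + unique_part.substr(0, 24);
}

Calling it with the current time and a 24-character hex string yields the 35-character id in the format documented by the comment above.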
diff --git a/source/extensions/tracers/xray/xray_tracer_impl.cc b/source/extensions/tracers/xray/xray_tracer_impl.cc index 34d260d15668..29e377229baa 100644 --- a/source/extensions/tracers/xray/xray_tracer_impl.cc +++ b/source/extensions/tracers/xray/xray_tracer_impl.cc @@ -74,7 +74,6 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, // If we have a XRay TraceID in the headers, then we create a SpanContext to pass that trace-id // around if no TraceID (which means no x-ray header) then this is a brand new span. - UNREFERENCED_PARAMETER(config); // TODO(marcomagdy) - how do we factor this into the logic above UNREFERENCED_PARAMETER(tracing_decision); const auto header = trace_context.getByKey(XRayTraceHeader); @@ -106,7 +105,7 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, auto* tracer = tls_slot_ptr_->getTyped().tracer_.get(); if (should_trace.value()) { - return tracer->startSpan(operation_name, start_time, + return tracer->startSpan(config, operation_name, start_time, header.has_value() ? absl::optional(xray_header) : absl::nullopt); } diff --git a/source/extensions/transport_sockets/tcp_stats/BUILD b/source/extensions/transport_sockets/tcp_stats/BUILD new file mode 100644 index 000000000000..87d14cb75071 --- /dev/null +++ b/source/extensions/transport_sockets/tcp_stats/BUILD @@ -0,0 +1,39 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "tcp_stats_lib", + srcs = ["tcp_stats.cc"], + hdrs = ["tcp_stats.h"], + deps = [ + "//envoy/buffer:buffer_interface", + "//envoy/event:timer_interface", + "//envoy/network:transport_socket_interface", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/protobuf:utility_lib", + "//source/extensions/transport_sockets/common:passthrough_lib", + "@envoy_api//envoy/extensions/transport_sockets/tcp_stats/v3:pkg_cc_proto", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + ":tcp_stats_lib", + "//envoy/registry", + "//envoy/server:transport_socket_config_interface", + "//source/common/config:utility_lib", + "@envoy_api//envoy/extensions/transport_sockets/tcp_stats/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/transport_sockets/tcp_stats/config.cc b/source/extensions/transport_sockets/tcp_stats/config.cc new file mode 100644 index 000000000000..a293dd5b977c --- /dev/null +++ b/source/extensions/transport_sockets/tcp_stats/config.cc @@ -0,0 +1,117 @@ +#include "source/extensions/transport_sockets/tcp_stats/config.h" + +#include "envoy/extensions/transport_sockets/tcp_stats/v3/tcp_stats.pb.validate.h" +#include "envoy/registry/registry.h" +#include "envoy/server/transport_socket_config.h" + +#include "source/common/config/utility.h" +#include "source/extensions/transport_sockets/tcp_stats/tcp_stats.h" + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace TcpStats { + +TcpStatsSocketFactory::TcpStatsSocketFactory( + Server::Configuration::TransportSocketFactoryContext& context, + const envoy::extensions::transport_sockets::tcp_stats::v3::Config& config, + Network::TransportSocketFactoryPtr&& inner_factory) + : inner_factory_(std::move(inner_factory)) { +#if defined(__linux__) + config_ = std::make_shared(config, context.scope()); +#else + UNREFERENCED_PARAMETER(config); + UNREFERENCED_PARAMETER(context); + 
throw EnvoyException("envoy.transport_sockets.tcp_stats is not supported on this platform."); +#endif +} + +Network::TransportSocketPtr TcpStatsSocketFactory::createTransportSocket( + Network::TransportSocketOptionsConstSharedPtr options) const { +#if defined(__linux__) + auto inner_socket = inner_factory_->createTransportSocket(options); + if (inner_socket == nullptr) { + return nullptr; + } + return std::make_unique(config_, std::move(inner_socket)); +#else + UNREFERENCED_PARAMETER(options); + return nullptr; +#endif +} + +bool TcpStatsSocketFactory::implementsSecureTransport() const { + return inner_factory_->implementsSecureTransport(); +} + +bool TcpStatsSocketFactory::usesProxyProtocolOptions() const { + return inner_factory_->usesProxyProtocolOptions(); +} + +class TcpStatsConfigFactory : public virtual Server::Configuration::TransportSocketConfigFactory { +public: + std::string name() const override { return "envoy.transport_sockets.tcp_stats"; } + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } +}; + +class UpstreamTcpStatsConfigFactory + : public Server::Configuration::UpstreamTransportSocketConfigFactory, + public TcpStatsConfigFactory { +public: + Network::TransportSocketFactoryPtr createTransportSocketFactory( + const Protobuf::Message& config, + Server::Configuration::TransportSocketFactoryContext& context) override { + const auto& outer_config = MessageUtil::downcastAndValidate< + const envoy::extensions::transport_sockets::tcp_stats::v3::Config&>( + config, context.messageValidationVisitor()); + auto& inner_config_factory = Envoy::Config::Utility::getAndCheckFactory< + Server::Configuration::UpstreamTransportSocketConfigFactory>( + outer_config.transport_socket()); + ProtobufTypes::MessagePtr inner_factory_config = + Envoy::Config::Utility::translateToFactoryConfig(outer_config.transport_socket(), + context.messageValidationVisitor(), + inner_config_factory); + auto inner_transport_factory = + inner_config_factory.createTransportSocketFactory(*inner_factory_config, context); + return std::make_unique(context, outer_config, + std::move(inner_transport_factory)); + } +}; + +class DownstreamTcpStatsConfigFactory + : public Server::Configuration::DownstreamTransportSocketConfigFactory, + public TcpStatsConfigFactory { +public: + Network::TransportSocketFactoryPtr + createTransportSocketFactory(const Protobuf::Message& config, + Server::Configuration::TransportSocketFactoryContext& context, + const std::vector& server_names) override { + const auto& outer_config = MessageUtil::downcastAndValidate< + const envoy::extensions::transport_sockets::tcp_stats::v3::Config&>( + config, context.messageValidationVisitor()); + auto& inner_config_factory = Envoy::Config::Utility::getAndCheckFactory< + Server::Configuration::DownstreamTransportSocketConfigFactory>( + outer_config.transport_socket()); + ProtobufTypes::MessagePtr inner_factory_config = + Envoy::Config::Utility::translateToFactoryConfig(outer_config.transport_socket(), + context.messageValidationVisitor(), + inner_config_factory); + auto inner_transport_factory = inner_config_factory.createTransportSocketFactory( + *inner_factory_config, context, server_names); + return std::make_unique(context, outer_config, + std::move(inner_transport_factory)); + } +}; + +REGISTER_FACTORY(UpstreamTcpStatsConfigFactory, + Server::Configuration::UpstreamTransportSocketConfigFactory); + +REGISTER_FACTORY(DownstreamTcpStatsConfigFactory, + 
Server::Configuration::DownstreamTransportSocketConfigFactory); + +} // namespace TcpStats +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/transport_sockets/tcp_stats/config.h b/source/extensions/transport_sockets/tcp_stats/config.h new file mode 100644 index 000000000000..1b5fd20c1038 --- /dev/null +++ b/source/extensions/transport_sockets/tcp_stats/config.h @@ -0,0 +1,34 @@ +#pragma once + +#include "envoy/extensions/transport_sockets/tcp_stats/v3/tcp_stats.pb.h" +#include "envoy/server/transport_socket_config.h" + +#include "source/extensions/transport_sockets/tcp_stats/tcp_stats.h" + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace TcpStats { + +class TcpStatsSocketFactory : public Network::TransportSocketFactory { +public: + TcpStatsSocketFactory(Server::Configuration::TransportSocketFactoryContext& context, + const envoy::extensions::transport_sockets::tcp_stats::v3::Config& config, + Network::TransportSocketFactoryPtr&& inner_factory); + + Network::TransportSocketPtr + createTransportSocket(Network::TransportSocketOptionsConstSharedPtr options) const override; + bool implementsSecureTransport() const override; + bool usesProxyProtocolOptions() const override; + +private: + Network::TransportSocketFactoryPtr inner_factory_; +#if defined(__linux__) + ConfigConstSharedPtr config_; +#endif +}; + +} // namespace TcpStats +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/transport_sockets/tcp_stats/tcp_stats.cc b/source/extensions/transport_sockets/tcp_stats/tcp_stats.cc new file mode 100644 index 000000000000..464e32be9531 --- /dev/null +++ b/source/extensions/transport_sockets/tcp_stats/tcp_stats.cc @@ -0,0 +1,158 @@ +#if defined(__linux__) + +// `struct tcp_info` is defined in two places: /usr/include/netinet/tcp.h (included from +// envoy/common/platform.h) and /usr/include/linux/tcp.h. The former version is older and doesn't +// contain all the fields needed. Including both headers results in a compilation error due to the +// duplicate (and different) definitions of `struct tcp_info`. To work around this, define +// `DO_NOT_INCLUDE_NETINET_TCP_H` to prevent inclusion of the wrong version. 
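// Editor's note: a Linux-only, standalone sketch (not part of this file) of the raw
// getsockopt() query that querySocketInfo() below performs through the connection's
// IoHandle; the helper name is illustrative. It uses the same linux/tcp.h
// definition of `struct tcp_info` that this file relies on.
#include <linux/tcp.h>   // struct tcp_info and TCP_INFO
#include <netinet/in.h>  // IPPROTO_TCP
#include <sys/socket.h>  // getsockopt(), socklen_t

#include <cstring>

bool sketchQueryTcpInfo(int fd, tcp_info& info) {
  std::memset(&info, 0, sizeof(info));
  socklen_t optlen = sizeof(info);
  // Success requires both a zero return code and a fully populated structure.
  return ::getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &optlen) == 0 && optlen >= sizeof(info);
}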
+#define DO_NOT_INCLUDE_NETINET_TCP_H 1 + +#include "source/extensions/transport_sockets/tcp_stats/tcp_stats.h" + +#include <linux/tcp.h> + +#include "envoy/buffer/buffer.h" +#include "envoy/network/connection.h" + +#include "source/common/common/assert.h" +#include "source/common/protobuf/utility.h" + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace TcpStats { + +Config::Config(const envoy::extensions::transport_sockets::tcp_stats::v3::Config& config_proto, + Stats::Scope& scope) + : stats_(generateStats(scope)), + update_period_(PROTOBUF_GET_OPTIONAL_MS(config_proto, update_period)) {} + +TcpStats Config::generateStats(Stats::Scope& scope) { + const std::string prefix("tcp_stats"); + return TcpStats{ALL_TCP_STATS(POOL_COUNTER_PREFIX(scope, prefix), + POOL_GAUGE_PREFIX(scope, prefix), + POOL_HISTOGRAM_PREFIX(scope, prefix))}; +} + +TcpStatsSocket::TcpStatsSocket(ConfigConstSharedPtr config, + Network::TransportSocketPtr inner_socket) + : PassthroughSocket(std::move(inner_socket)), config_(std::move(config)) {} + +void TcpStatsSocket::setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) { + callbacks_ = &callbacks; + transport_socket_->setTransportSocketCallbacks(callbacks); +} + +void TcpStatsSocket::onConnected() { + if (config_->update_period_.has_value()) { + timer_ = callbacks_->connection().dispatcher().createTimer([this]() { + recordStats(); + timer_->enableTimer(config_->update_period_.value()); + }); + timer_->enableTimer(config_->update_period_.value()); + } + + transport_socket_->onConnected(); +} + +void TcpStatsSocket::closeSocket(Network::ConnectionEvent event) { + // Record final values. + recordStats(); + + // Ensure gauges are zero'd out at the end of a connection no matter what the OS told us. + if (last_cx_tx_unsent_bytes_ > 0) { + config_->stats_.cx_tx_unsent_bytes_.sub(last_cx_tx_unsent_bytes_); + } + if (last_cx_tx_unacked_segments_ > 0) { + config_->stats_.cx_tx_unacked_segments_.sub(last_cx_tx_unacked_segments_); + } + + if (timer_ != nullptr) { + timer_->disableTimer(); + } + + transport_socket_->closeSocket(event); +} + +absl::optional<struct tcp_info> TcpStatsSocket::querySocketInfo() { + struct tcp_info info; + memset(&info, 0, sizeof(info)); + socklen_t optlen = sizeof(info); + const auto result = callbacks_->ioHandle().getOption(IPPROTO_TCP, TCP_INFO, &info, &optlen); + if ((result.return_value_ != 0) || (optlen < sizeof(info))) { + ENVOY_LOG(debug, "Failed getsockopt(IPPROTO_TCP, TCP_INFO): rc {} errno {} optlen {}", + result.return_value_, result.errno_, optlen); + return absl::nullopt; + } else { + return info; + } +} + +void TcpStatsSocket::recordStats() { + absl::optional<struct tcp_info> tcp_info = querySocketInfo(); + if (!tcp_info.has_value()) { + return; + } + + auto update_counter = [](Stats::Counter& counter, auto& last_value, auto current_value) { + int64_t diff = static_cast<int64_t>(current_value) - static_cast<int64_t>(last_value); + ASSERT(diff >= 0); + if (diff > 0) { + counter.add(diff); + } + last_value = current_value; + }; + + auto update_gauge = [](Stats::Gauge& gauge, auto& last_value, auto current_value) { + static_assert(sizeof(last_value) == sizeof(current_value)); + int64_t diff = static_cast<int64_t>(current_value) - static_cast<int64_t>(last_value); + gauge.add(diff); + last_value = current_value; + }; + + // This is before the update to `cx_tx_data_segments_` and `cx_tx_retransmitted_segments_` because + // they use the same metrics, and `update_counter` will update `last_...`, so this needs to use + // those `last_...` values (and not update them) first. 
+ // + // Don't record a value if the numerator is negative, or the denominator is zero or negative + // (prevent divide-by-zero). + if ((tcp_info->tcpi_data_segs_out > last_cx_tx_data_segments_) && + (tcp_info->tcpi_total_retrans >= last_cx_tx_retransmitted_segments_)) { + // uint32 * uint32 cannot overflow a uint64, so this can safely be done as integer math + // instead of floating point. + static_assert((sizeof(tcp_info->tcpi_total_retrans) == sizeof(uint32_t)) && + (Stats::Histogram::PercentScale < UINT32_MAX)); + + const uint32_t data_segs_out_diff = tcp_info->tcpi_data_segs_out - last_cx_tx_data_segments_; + const uint32_t retransmitted_segs_diff = + tcp_info->tcpi_total_retrans - last_cx_tx_retransmitted_segments_; + const uint64_t percent_retransmissions = + (static_cast<uint64_t>(retransmitted_segs_diff) * + static_cast<uint64_t>(Stats::Histogram::PercentScale)) / + static_cast<uint64_t>(data_segs_out_diff); + config_->stats_.cx_tx_percent_retransmitted_segments_.recordValue(percent_retransmissions); + } + + update_counter(config_->stats_.cx_tx_segments_, last_cx_tx_segments_, tcp_info->tcpi_segs_out); + update_counter(config_->stats_.cx_rx_segments_, last_cx_rx_segments_, tcp_info->tcpi_segs_in); + update_counter(config_->stats_.cx_tx_data_segments_, last_cx_tx_data_segments_, + tcp_info->tcpi_data_segs_out); + update_counter(config_->stats_.cx_rx_data_segments_, last_cx_rx_data_segments_, + tcp_info->tcpi_data_segs_in); + update_counter(config_->stats_.cx_tx_retransmitted_segments_, last_cx_tx_retransmitted_segments_, + tcp_info->tcpi_total_retrans); + + update_gauge(config_->stats_.cx_tx_unsent_bytes_, last_cx_tx_unsent_bytes_, + tcp_info->tcpi_notsent_bytes); + update_gauge(config_->stats_.cx_tx_unacked_segments_, last_cx_tx_unacked_segments_, + tcp_info->tcpi_unacked); + + config_->stats_.cx_rtt_us_.recordValue(tcp_info->tcpi_rtt); + config_->stats_.cx_rtt_variance_us_.recordValue(tcp_info->tcpi_rttvar); +} + +} // namespace TcpStats +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy +#endif // defined(__linux__) diff --git a/source/extensions/transport_sockets/tcp_stats/tcp_stats.h b/source/extensions/transport_sockets/tcp_stats/tcp_stats.h new file mode 100644 index 000000000000..a5066575f8b7 --- /dev/null +++ b/source/extensions/transport_sockets/tcp_stats/tcp_stats.h @@ -0,0 +1,85 @@ +#pragma once + +#if defined(__linux__) + +#include "envoy/event/timer.h" +#include "envoy/extensions/transport_sockets/tcp_stats/v3/tcp_stats.pb.h" +#include "envoy/network/connection.h" +#include "envoy/network/filter.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +#include "source/common/common/logger.h" +#include "source/extensions/transport_sockets/common/passthrough.h" + +// Defined in /usr/include/linux/tcp.h. 
+struct tcp_info; + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace TcpStats { + +#define ALL_TCP_STATS(COUNTER, GAUGE, HISTOGRAM) \ + COUNTER(cx_tx_segments) \ + COUNTER(cx_rx_segments) \ + COUNTER(cx_tx_data_segments) \ + COUNTER(cx_rx_data_segments) \ + COUNTER(cx_tx_retransmitted_segments) \ + GAUGE(cx_tx_unsent_bytes, Accumulate) \ + GAUGE(cx_tx_unacked_segments, Accumulate) \ + HISTOGRAM(cx_tx_percent_retransmitted_segments, Percent) \ + HISTOGRAM(cx_rtt_us, Microseconds) \ + HISTOGRAM(cx_rtt_variance_us, Microseconds) + +struct TcpStats { + ALL_TCP_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) +}; + +class Config { +public: + Config(const envoy::extensions::transport_sockets::tcp_stats::v3::Config& config_proto, + Stats::Scope& scope); + + TcpStats stats_; + const absl::optional<std::chrono::milliseconds> update_period_; + +private: + TcpStats generateStats(Stats::Scope& scope); +}; + +using ConfigConstSharedPtr = std::shared_ptr<const Config>; + +class TcpStatsSocket : public TransportSockets::PassthroughSocket, + Logger::Loggable<Logger::Id::connection> { +public: + TcpStatsSocket(ConfigConstSharedPtr config, Network::TransportSocketPtr inner_socket); + + // Network::TransportSocket + void setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) override; + void onConnected() override; + void closeSocket(Network::ConnectionEvent event) override; + +private: + absl::optional<struct tcp_info> querySocketInfo(); + void recordStats(); + + const ConfigConstSharedPtr config_; + Network::TransportSocketCallbacks* callbacks_{}; + Event::TimerPtr timer_; + + uint32_t last_cx_tx_segments_{}; + uint32_t last_cx_rx_segments_{}; + uint32_t last_cx_tx_data_segments_{}; + uint32_t last_cx_rx_data_segments_{}; + uint32_t last_cx_tx_retransmitted_segments_{}; + uint32_t last_cx_tx_unsent_bytes_{}; + uint32_t last_cx_tx_unacked_segments_{}; +}; + +} // namespace TcpStats +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy + +#endif // defined(__linux__) diff --git a/source/extensions/transport_sockets/tls/BUILD b/source/extensions/transport_sockets/tls/BUILD index 91deb5c164bf..f7912127a864 100644 --- a/source/extensions/transport_sockets/tls/BUILD +++ b/source/extensions/transport_sockets/tls/BUILD @@ -26,12 +26,26 @@ envoy_cc_extension( ], ) +envoy_cc_library( + name = "connection_info_impl_base_lib", + srcs = ["connection_info_impl_base.cc"], + hdrs = ["connection_info_impl_base.h"], + external_deps = ["ssl"], + visibility = ["//visibility:public"], + deps = [ + ":context_lib", + ":utility_lib", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + ], +) + envoy_cc_library( name = "ssl_handshaker_lib", srcs = ["ssl_handshaker.cc"], hdrs = ["ssl_handshaker.h"], - external_deps = ["ssl"], deps = [ + ":connection_info_impl_base_lib", ":context_lib", ":utility_lib", "//envoy/network:connection_interface", diff --git a/source/extensions/transport_sockets/tls/cert_validator/BUILD b/source/extensions/transport_sockets/tls/cert_validator/BUILD index 6fcffd4d9e5f..ce92df41e80c 100644 --- a/source/extensions/transport_sockets/tls/cert_validator/BUILD +++ b/source/extensions/transport_sockets/tls/cert_validator/BUILD @@ -33,6 +33,7 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/common:base64_lib", "//source/common/common:hex_lib", + "//source/common/common:minimal_logger_lib", "//source/common/common:utility_lib", "//source/common/stats:symbol_table_lib", "//source/common/stats:utility_lib", diff --git 
a/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc b/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc index 6cd624f2e38f..691b5018189d 100644 --- a/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc +++ b/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc @@ -195,6 +195,7 @@ int DefaultCertValidator::doVerifyCertChain( if (ret <= 0) { stats_.fail_verify_error_.inc(); + ENVOY_LOG(debug, "{}", Utility::getX509VerificationErrorInfo(store_ctx)); return allow_untrusted_certificate_ ? 1 : ret; } } @@ -277,7 +278,7 @@ bool DefaultCertValidator::verifySubjectAltName(X509* cert, for (const GENERAL_NAME* general_name : san_names.get()) { const std::string san = Utility::generalNameAsString(general_name); for (auto& config_san : subject_alt_names) { - if (general_name->type == GEN_DNS ? dnsNameMatch(config_san, san.c_str()) + if (general_name->type == GEN_DNS ? Utility::dnsNameMatch(config_san, san.c_str()) : config_san == san) { return true; } @@ -286,27 +287,6 @@ bool DefaultCertValidator::verifySubjectAltName(X509* cert, return false; } -bool DefaultCertValidator::dnsNameMatch(const absl::string_view dns_name, - const absl::string_view pattern) { - const std::string lower_case_dns_name = absl::AsciiStrToLower(dns_name); - const std::string lower_case_pattern = absl::AsciiStrToLower(pattern); - if (lower_case_dns_name == lower_case_pattern) { - return true; - } - - size_t pattern_len = lower_case_pattern.length(); - if (pattern_len > 1 && lower_case_pattern[0] == '*' && lower_case_pattern[1] == '.') { - if (lower_case_dns_name.length() > pattern_len - 1) { - const size_t off = lower_case_dns_name.length() - pattern_len + 1; - return lower_case_dns_name.substr(0, off).find('.') == std::string::npos && - lower_case_dns_name.substr(off, pattern_len - 1) == - lower_case_pattern.substr(1, pattern_len - 1); - } - } - - return false; -} - bool DefaultCertValidator::matchSubjectAltName( X509* cert, const std::vector>& @@ -323,7 +303,7 @@ bool DefaultCertValidator::matchSubjectAltName( if (general_name->type == GEN_DNS && config_san_matcher.matcher().match_pattern_case() == envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kExact - ? dnsNameMatch(config_san_matcher.matcher().exact(), absl::string_view(san)) + ? 
Utility::dnsNameMatch(config_san_matcher.matcher().exact(), absl::string_view(san)) : config_san_matcher.match(san)) { return true; } diff --git a/source/extensions/transport_sockets/tls/cert_validator/default_validator.h b/source/extensions/transport_sockets/tls/cert_validator/default_validator.h index 4d5daaf0205e..0cfe2766d137 100644 --- a/source/extensions/transport_sockets/tls/cert_validator/default_validator.h +++ b/source/extensions/transport_sockets/tls/cert_validator/default_validator.h @@ -14,6 +14,7 @@ #include "envoy/ssl/private_key/private_key.h" #include "envoy/ssl/ssl_socket_extended_info.h" +#include "source/common/common/logger.h" #include "source/common/common/matchers.h" #include "source/common/stats/symbol_table_impl.h" #include "source/extensions/transport_sockets/tls/cert_validator/cert_validator.h" @@ -28,7 +29,7 @@ namespace Extensions { namespace TransportSockets { namespace Tls { -class DefaultCertValidator : public CertValidator { +class DefaultCertValidator : public CertValidator, Logger::Loggable { public: DefaultCertValidator(const Envoy::Ssl::CertificateValidationContextConfig* config, SslStats& stats, TimeSource& time_source); @@ -87,15 +88,6 @@ class DefaultCertValidator : public CertValidator { */ static bool verifySubjectAltName(X509* cert, const std::vector& subject_alt_names); - /** - * Determines whether the given name matches 'pattern' which may optionally begin with a wildcard. - * NOTE: public for testing - * @param dns_name the DNS name to match - * @param pattern the pattern to match against (*.example.com) - * @return true if the san matches pattern - */ - static bool dnsNameMatch(const absl::string_view dns_name, const absl::string_view pattern); - /** * Performs subjectAltName matching with the provided matchers. * @param ssl the certificate to verify diff --git a/source/extensions/transport_sockets/tls/connection_info_impl_base.cc b/source/extensions/transport_sockets/tls/connection_info_impl_base.cc new file mode 100644 index 000000000000..de692e42fff4 --- /dev/null +++ b/source/extensions/transport_sockets/tls/connection_info_impl_base.cc @@ -0,0 +1,280 @@ +#include "source/extensions/transport_sockets/tls/connection_info_impl_base.h" + +#include "source/common/common/hex.h" + +#include "absl/strings/str_replace.h" +#include "openssl/err.h" +#include "openssl/x509v3.h" + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace Tls { + +bool ConnectionInfoImplBase::peerCertificatePresented() const { + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + return cert != nullptr; +} + +absl::Span ConnectionInfoImplBase::uriSanLocalCertificate() const { + if (!cached_uri_san_local_certificate_.empty()) { + return cached_uri_san_local_certificate_; + } + + // The cert object is not owned. 
+ X509* cert = SSL_get_certificate(ssl()); + if (!cert) { + ASSERT(cached_uri_san_local_certificate_.empty()); + return cached_uri_san_local_certificate_; + } + cached_uri_san_local_certificate_ = Utility::getSubjectAltNames(*cert, GEN_URI); + return cached_uri_san_local_certificate_; +} + +absl::Span ConnectionInfoImplBase::dnsSansLocalCertificate() const { + if (!cached_dns_san_local_certificate_.empty()) { + return cached_dns_san_local_certificate_; + } + + X509* cert = SSL_get_certificate(ssl()); + if (!cert) { + ASSERT(cached_dns_san_local_certificate_.empty()); + return cached_dns_san_local_certificate_; + } + cached_dns_san_local_certificate_ = Utility::getSubjectAltNames(*cert, GEN_DNS); + return cached_dns_san_local_certificate_; +} + +const std::string& ConnectionInfoImplBase::sha256PeerCertificateDigest() const { + if (!cached_sha_256_peer_certificate_digest_.empty()) { + return cached_sha_256_peer_certificate_digest_; + } + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_sha_256_peer_certificate_digest_.empty()); + return cached_sha_256_peer_certificate_digest_; + } + + std::vector computed_hash(SHA256_DIGEST_LENGTH); + unsigned int n; + X509_digest(cert.get(), EVP_sha256(), computed_hash.data(), &n); + RELEASE_ASSERT(n == computed_hash.size(), ""); + cached_sha_256_peer_certificate_digest_ = Hex::encode(computed_hash); + return cached_sha_256_peer_certificate_digest_; +} + +const std::string& ConnectionInfoImplBase::sha1PeerCertificateDigest() const { + if (!cached_sha_1_peer_certificate_digest_.empty()) { + return cached_sha_1_peer_certificate_digest_; + } + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_sha_1_peer_certificate_digest_.empty()); + return cached_sha_1_peer_certificate_digest_; + } + + std::vector computed_hash(SHA_DIGEST_LENGTH); + unsigned int n; + X509_digest(cert.get(), EVP_sha1(), computed_hash.data(), &n); + RELEASE_ASSERT(n == computed_hash.size(), ""); + cached_sha_1_peer_certificate_digest_ = Hex::encode(computed_hash); + return cached_sha_1_peer_certificate_digest_; +} + +const std::string& ConnectionInfoImplBase::urlEncodedPemEncodedPeerCertificate() const { + if (!cached_url_encoded_pem_encoded_peer_certificate_.empty()) { + return cached_url_encoded_pem_encoded_peer_certificate_; + } + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_url_encoded_pem_encoded_peer_certificate_.empty()); + return cached_url_encoded_pem_encoded_peer_certificate_; + } + + bssl::UniquePtr buf(BIO_new(BIO_s_mem())); + RELEASE_ASSERT(buf != nullptr, ""); + RELEASE_ASSERT(PEM_write_bio_X509(buf.get(), cert.get()) == 1, ""); + const uint8_t* output; + size_t length; + RELEASE_ASSERT(BIO_mem_contents(buf.get(), &output, &length) == 1, ""); + absl::string_view pem(reinterpret_cast(output), length); + cached_url_encoded_pem_encoded_peer_certificate_ = absl::StrReplaceAll( + pem, {{"\n", "%0A"}, {" ", "%20"}, {"+", "%2B"}, {"/", "%2F"}, {"=", "%3D"}}); + return cached_url_encoded_pem_encoded_peer_certificate_; +} + +const std::string& ConnectionInfoImplBase::urlEncodedPemEncodedPeerCertificateChain() const { + if (!cached_url_encoded_pem_encoded_peer_cert_chain_.empty()) { + return cached_url_encoded_pem_encoded_peer_cert_chain_; + } + + STACK_OF(X509)* cert_chain = SSL_get_peer_full_cert_chain(ssl()); + if (cert_chain == nullptr) { + ASSERT(cached_url_encoded_pem_encoded_peer_cert_chain_.empty()); + return cached_url_encoded_pem_encoded_peer_cert_chain_; + } + + for 
(uint64_t i = 0; i < sk_X509_num(cert_chain); i++) { + X509* cert = sk_X509_value(cert_chain, i); + + bssl::UniquePtr buf(BIO_new(BIO_s_mem())); + RELEASE_ASSERT(buf != nullptr, ""); + RELEASE_ASSERT(PEM_write_bio_X509(buf.get(), cert) == 1, ""); + const uint8_t* output; + size_t length; + RELEASE_ASSERT(BIO_mem_contents(buf.get(), &output, &length) == 1, ""); + + absl::string_view pem(reinterpret_cast(output), length); + cached_url_encoded_pem_encoded_peer_cert_chain_ = absl::StrCat( + cached_url_encoded_pem_encoded_peer_cert_chain_, + absl::StrReplaceAll( + pem, {{"\n", "%0A"}, {" ", "%20"}, {"+", "%2B"}, {"/", "%2F"}, {"=", "%3D"}})); + } + return cached_url_encoded_pem_encoded_peer_cert_chain_; +} + +absl::Span ConnectionInfoImplBase::uriSanPeerCertificate() const { + if (!cached_uri_san_peer_certificate_.empty()) { + return cached_uri_san_peer_certificate_; + } + + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_uri_san_peer_certificate_.empty()); + return cached_uri_san_peer_certificate_; + } + cached_uri_san_peer_certificate_ = Utility::getSubjectAltNames(*cert, GEN_URI); + return cached_uri_san_peer_certificate_; +} + +absl::Span ConnectionInfoImplBase::dnsSansPeerCertificate() const { + if (!cached_dns_san_peer_certificate_.empty()) { + return cached_dns_san_peer_certificate_; + } + + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_dns_san_peer_certificate_.empty()); + return cached_dns_san_peer_certificate_; + } + cached_dns_san_peer_certificate_ = Utility::getSubjectAltNames(*cert, GEN_DNS); + return cached_dns_san_peer_certificate_; +} + +uint16_t ConnectionInfoImplBase::ciphersuiteId() const { + const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl()); + if (cipher == nullptr) { + return 0xffff; + } + + // From the OpenSSL docs: + // SSL_CIPHER_get_id returns |cipher|'s id. It may be cast to a |uint16_t| to + // get the cipher suite value. 
+ return static_cast(SSL_CIPHER_get_id(cipher)); +} + +std::string ConnectionInfoImplBase::ciphersuiteString() const { + const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl()); + if (cipher == nullptr) { + return {}; + } + + return SSL_CIPHER_get_name(cipher); +} + +const std::string& ConnectionInfoImplBase::tlsVersion() const { + if (!cached_tls_version_.empty()) { + return cached_tls_version_; + } + cached_tls_version_ = SSL_get_version(ssl()); + return cached_tls_version_; +} + +const std::string& ConnectionInfoImplBase::serialNumberPeerCertificate() const { + if (!cached_serial_number_peer_certificate_.empty()) { + return cached_serial_number_peer_certificate_; + } + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_serial_number_peer_certificate_.empty()); + return cached_serial_number_peer_certificate_; + } + cached_serial_number_peer_certificate_ = Utility::getSerialNumberFromCertificate(*cert.get()); + return cached_serial_number_peer_certificate_; +} + +const std::string& ConnectionInfoImplBase::issuerPeerCertificate() const { + if (!cached_issuer_peer_certificate_.empty()) { + return cached_issuer_peer_certificate_; + } + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_issuer_peer_certificate_.empty()); + return cached_issuer_peer_certificate_; + } + cached_issuer_peer_certificate_ = Utility::getIssuerFromCertificate(*cert); + return cached_issuer_peer_certificate_; +} + +const std::string& ConnectionInfoImplBase::subjectPeerCertificate() const { + if (!cached_subject_peer_certificate_.empty()) { + return cached_subject_peer_certificate_; + } + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_subject_peer_certificate_.empty()); + return cached_subject_peer_certificate_; + } + cached_subject_peer_certificate_ = Utility::getSubjectFromCertificate(*cert); + return cached_subject_peer_certificate_; +} + +const std::string& ConnectionInfoImplBase::subjectLocalCertificate() const { + if (!cached_subject_local_certificate_.empty()) { + return cached_subject_local_certificate_; + } + X509* cert = SSL_get_certificate(ssl()); + if (!cert) { + ASSERT(cached_subject_local_certificate_.empty()); + return cached_subject_local_certificate_; + } + cached_subject_local_certificate_ = Utility::getSubjectFromCertificate(*cert); + return cached_subject_local_certificate_; +} + +absl::optional ConnectionInfoImplBase::validFromPeerCertificate() const { + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + return absl::nullopt; + } + return Utility::getValidFrom(*cert); +} + +absl::optional ConnectionInfoImplBase::expirationPeerCertificate() const { + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + return absl::nullopt; + } + return Utility::getExpirationTime(*cert); +} + +const std::string& ConnectionInfoImplBase::sessionId() const { + if (!cached_session_id_.empty()) { + return cached_session_id_; + } + SSL_SESSION* session = SSL_get_session(ssl()); + if (session == nullptr) { + ASSERT(cached_session_id_.empty()); + return cached_session_id_; + } + + unsigned int session_id_length = 0; + const uint8_t* session_id = SSL_SESSION_get_id(session, &session_id_length); + cached_session_id_ = Hex::encode(session_id, session_id_length); + return cached_session_id_; +} + +} // namespace Tls +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/transport_sockets/tls/connection_info_impl_base.h 
b/source/extensions/transport_sockets/tls/connection_info_impl_base.h new file mode 100644 index 000000000000..b591b4733f10 --- /dev/null +++ b/source/extensions/transport_sockets/tls/connection_info_impl_base.h @@ -0,0 +1,64 @@ +#pragma once + +#include <string> + +#include "envoy/ssl/connection.h" + +#include "source/common/common/logger.h" +#include "source/extensions/transport_sockets/tls/utility.h" + +#include "absl/types/optional.h" +#include "openssl/ssl.h" + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace Tls { + +// An implementation wraps struct SSL in BoringSSL. +class ConnectionInfoImplBase : public Ssl::ConnectionInfo { +public: + // Ssl::ConnectionInfo + bool peerCertificatePresented() const override; + absl::Span<const std::string> uriSanLocalCertificate() const override; + const std::string& sha256PeerCertificateDigest() const override; + const std::string& sha1PeerCertificateDigest() const override; + const std::string& serialNumberPeerCertificate() const override; + const std::string& issuerPeerCertificate() const override; + const std::string& subjectPeerCertificate() const override; + const std::string& subjectLocalCertificate() const override; + absl::Span<const std::string> uriSanPeerCertificate() const override; + const std::string& urlEncodedPemEncodedPeerCertificate() const override; + const std::string& urlEncodedPemEncodedPeerCertificateChain() const override; + absl::Span<const std::string> dnsSansPeerCertificate() const override; + absl::Span<const std::string> dnsSansLocalCertificate() const override; + absl::optional<SystemTime> validFromPeerCertificate() const override; + absl::optional<SystemTime> expirationPeerCertificate() const override; + const std::string& sessionId() const override; + uint16_t ciphersuiteId() const override; + std::string ciphersuiteString() const override; + const std::string& tlsVersion() const override; + + virtual SSL* ssl() const PURE; + +protected: + mutable std::vector<std::string> cached_uri_san_local_certificate_; + mutable std::string cached_sha_256_peer_certificate_digest_; + mutable std::string cached_sha_1_peer_certificate_digest_; + mutable std::string cached_serial_number_peer_certificate_; + mutable std::string cached_issuer_peer_certificate_; + mutable std::string cached_subject_peer_certificate_; + mutable std::string cached_subject_local_certificate_; + mutable std::vector<std::string> cached_uri_san_peer_certificate_; + mutable std::string cached_url_encoded_pem_encoded_peer_certificate_; + mutable std::string cached_url_encoded_pem_encoded_peer_cert_chain_; + mutable std::vector<std::string> cached_dns_san_peer_certificate_; + mutable std::vector<std::string> cached_dns_san_local_certificate_; + mutable std::string cached_session_id_; + mutable std::string cached_tls_version_; +}; + +} // namespace Tls +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/transport_sockets/tls/context_config_impl.cc b/source/extensions/transport_sockets/tls/context_config_impl.cc index 4c83c222efdd..aa5fa77be56c 100644 --- a/source/extensions/transport_sockets/tls/context_config_impl.cc +++ b/source/extensions/transport_sockets/tls/context_config_impl.cc @@ -169,7 +169,7 @@ ContextConfigImpl::ContextConfigImpl( const unsigned default_min_protocol_version, const unsigned default_max_protocol_version, const std::string& default_cipher_suites, const std::string& default_curves, Server::Configuration::TransportSocketFactoryContext& factory_context) - : api_(factory_context.api()), + : api_(factory_context.api()), options_(factory_context.options()), 
alpn_protocols_(RepeatedPtrUtil::join(config.alpn_protocols(), ",")), cipher_suites_(StringUtil::nonEmptyStringOrDefault( RepeatedPtrUtil::join(config.tls_params().cipher_suites(), ":"), default_cipher_suites)), @@ -218,7 +218,7 @@ ContextConfigImpl::ContextConfigImpl( } } - HandshakerFactoryContextImpl handshaker_factory_context(api_, alpn_protocols_); + HandshakerFactoryContextImpl handshaker_factory_context(api_, options_, alpn_protocols_); Ssl::HandshakerFactory* handshaker_factory; if (config.has_custom_handshaker()) { // If a custom handshaker is configured, derive the factory from the config. diff --git a/source/extensions/transport_sockets/tls/context_config_impl.h b/source/extensions/transport_sockets/tls/context_config_impl.h index 691f9148a8bd..cc36b0e3aa5f 100644 --- a/source/extensions/transport_sockets/tls/context_config_impl.h +++ b/source/extensions/transport_sockets/tls/context_config_impl.h @@ -68,6 +68,7 @@ class ContextConfigImpl : public virtual Ssl::ContextConfig { const std::string& default_cipher_suites, const std::string& default_curves, Server::Configuration::TransportSocketFactoryContext& factory_context); Api::Api& api_; + const Server::Options& options_; private: static unsigned tlsVersionFromProto( diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index f8870d4dddc8..0afc83d5a67a 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ b/source/extensions/transport_sockets/tls/context_impl.cc @@ -1171,14 +1171,19 @@ bool ContextImpl::verifyCertChain(X509& leaf_cert, STACK_OF(X509) & intermediate error_details = "Failed to verify certificate chain: X509_STORE_CTX_init"; return false; } + // Currently this method is only used to verify server certs, so hard-code "ssl_server" for now. + if (!X509_STORE_CTX_set_default(ctx.get(), "ssl_server") || + !X509_VERIFY_PARAM_set1(X509_STORE_CTX_get0_param(ctx.get()), + SSL_CTX_get0_param(const_cast(ssl_ctx)))) { + error_details = + "Failed to verify certificate chain: fail to setup X509_STORE_CTX or its param."; + return false; + } int res = cert_validator_->doVerifyCertChain(ctx.get(), nullptr, leaf_cert, nullptr); // If |SSL_VERIFY_NONE|, the error is non-fatal, but we keep the error details. 
if (res <= 0 && SSL_CTX_get_verify_mode(ssl_ctx) != SSL_VERIFY_NONE) { - const int n = X509_STORE_CTX_get_error(ctx.get()); - const int depth = X509_STORE_CTX_get_error_depth(ctx.get()); - error_details = absl::StrCat("X509_verify_cert: certificate verification error at depth ", - depth, ": ", X509_verify_cert_error_string(n)); + error_details = Utility::getX509VerificationErrorInfo(ctx.get()); return false; } return true; diff --git a/source/extensions/transport_sockets/tls/ssl_handshaker.cc b/source/extensions/transport_sockets/tls/ssl_handshaker.cc index 5361d7b42bc4..714899cf5f47 100644 --- a/source/extensions/transport_sockets/tls/ssl_handshaker.cc +++ b/source/extensions/transport_sockets/tls/ssl_handshaker.cc @@ -4,14 +4,9 @@ #include "source/common/common/assert.h" #include "source/common/common/empty_string.h" -#include "source/common/common/hex.h" #include "source/common/http/headers.h" #include "source/extensions/transport_sockets/tls/utility.h" -#include "absl/strings/str_replace.h" -#include "openssl/err.h" -#include "openssl/x509v3.h" - using Envoy::Network::PostIoAction; namespace Envoy { @@ -35,190 +30,11 @@ SslHandshakerImpl::SslHandshakerImpl(bssl::UniquePtr ssl, int ssl_extended_ SSL_set_ex_data(ssl_.get(), ssl_extended_socket_info_index, &(this->extended_socket_info_)); } -bool SslHandshakerImpl::peerCertificatePresented() const { - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - return cert != nullptr; -} - bool SslHandshakerImpl::peerCertificateValidated() const { return extended_socket_info_.certificateValidationStatus() == Envoy::Ssl::ClientValidationStatus::Validated; } -absl::Span SslHandshakerImpl::uriSanLocalCertificate() const { - if (!cached_uri_san_local_certificate_.empty()) { - return cached_uri_san_local_certificate_; - } - - // The cert object is not owned. 
- X509* cert = SSL_get_certificate(ssl()); - if (!cert) { - ASSERT(cached_uri_san_local_certificate_.empty()); - return cached_uri_san_local_certificate_; - } - cached_uri_san_local_certificate_ = Utility::getSubjectAltNames(*cert, GEN_URI); - return cached_uri_san_local_certificate_; -} - -absl::Span SslHandshakerImpl::dnsSansLocalCertificate() const { - if (!cached_dns_san_local_certificate_.empty()) { - return cached_dns_san_local_certificate_; - } - - X509* cert = SSL_get_certificate(ssl()); - if (!cert) { - ASSERT(cached_dns_san_local_certificate_.empty()); - return cached_dns_san_local_certificate_; - } - cached_dns_san_local_certificate_ = Utility::getSubjectAltNames(*cert, GEN_DNS); - return cached_dns_san_local_certificate_; -} - -const std::string& SslHandshakerImpl::sha256PeerCertificateDigest() const { - if (!cached_sha_256_peer_certificate_digest_.empty()) { - return cached_sha_256_peer_certificate_digest_; - } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - ASSERT(cached_sha_256_peer_certificate_digest_.empty()); - return cached_sha_256_peer_certificate_digest_; - } - - std::vector computed_hash(SHA256_DIGEST_LENGTH); - unsigned int n; - X509_digest(cert.get(), EVP_sha256(), computed_hash.data(), &n); - RELEASE_ASSERT(n == computed_hash.size(), ""); - cached_sha_256_peer_certificate_digest_ = Hex::encode(computed_hash); - return cached_sha_256_peer_certificate_digest_; -} - -const std::string& SslHandshakerImpl::sha1PeerCertificateDigest() const { - if (!cached_sha_1_peer_certificate_digest_.empty()) { - return cached_sha_1_peer_certificate_digest_; - } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - ASSERT(cached_sha_1_peer_certificate_digest_.empty()); - return cached_sha_1_peer_certificate_digest_; - } - - std::vector computed_hash(SHA_DIGEST_LENGTH); - unsigned int n; - X509_digest(cert.get(), EVP_sha1(), computed_hash.data(), &n); - RELEASE_ASSERT(n == computed_hash.size(), ""); - cached_sha_1_peer_certificate_digest_ = Hex::encode(computed_hash); - return cached_sha_1_peer_certificate_digest_; -} - -const std::string& SslHandshakerImpl::urlEncodedPemEncodedPeerCertificate() const { - if (!cached_url_encoded_pem_encoded_peer_certificate_.empty()) { - return cached_url_encoded_pem_encoded_peer_certificate_; - } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - ASSERT(cached_url_encoded_pem_encoded_peer_certificate_.empty()); - return cached_url_encoded_pem_encoded_peer_certificate_; - } - - bssl::UniquePtr buf(BIO_new(BIO_s_mem())); - RELEASE_ASSERT(buf != nullptr, ""); - RELEASE_ASSERT(PEM_write_bio_X509(buf.get(), cert.get()) == 1, ""); - const uint8_t* output; - size_t length; - RELEASE_ASSERT(BIO_mem_contents(buf.get(), &output, &length) == 1, ""); - absl::string_view pem(reinterpret_cast(output), length); - cached_url_encoded_pem_encoded_peer_certificate_ = absl::StrReplaceAll( - pem, {{"\n", "%0A"}, {" ", "%20"}, {"+", "%2B"}, {"/", "%2F"}, {"=", "%3D"}}); - return cached_url_encoded_pem_encoded_peer_certificate_; -} - -const std::string& SslHandshakerImpl::urlEncodedPemEncodedPeerCertificateChain() const { - if (!cached_url_encoded_pem_encoded_peer_cert_chain_.empty()) { - return cached_url_encoded_pem_encoded_peer_cert_chain_; - } - - STACK_OF(X509)* cert_chain = SSL_get_peer_full_cert_chain(ssl()); - if (cert_chain == nullptr) { - ASSERT(cached_url_encoded_pem_encoded_peer_cert_chain_.empty()); - return cached_url_encoded_pem_encoded_peer_cert_chain_; - } - - for (uint64_t i = 0; i < 
sk_X509_num(cert_chain); i++) { - X509* cert = sk_X509_value(cert_chain, i); - - bssl::UniquePtr buf(BIO_new(BIO_s_mem())); - RELEASE_ASSERT(buf != nullptr, ""); - RELEASE_ASSERT(PEM_write_bio_X509(buf.get(), cert) == 1, ""); - const uint8_t* output; - size_t length; - RELEASE_ASSERT(BIO_mem_contents(buf.get(), &output, &length) == 1, ""); - - absl::string_view pem(reinterpret_cast(output), length); - cached_url_encoded_pem_encoded_peer_cert_chain_ = absl::StrCat( - cached_url_encoded_pem_encoded_peer_cert_chain_, - absl::StrReplaceAll( - pem, {{"\n", "%0A"}, {" ", "%20"}, {"+", "%2B"}, {"/", "%2F"}, {"=", "%3D"}})); - } - return cached_url_encoded_pem_encoded_peer_cert_chain_; -} - -absl::Span SslHandshakerImpl::uriSanPeerCertificate() const { - if (!cached_uri_san_peer_certificate_.empty()) { - return cached_uri_san_peer_certificate_; - } - - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - ASSERT(cached_uri_san_peer_certificate_.empty()); - return cached_uri_san_peer_certificate_; - } - cached_uri_san_peer_certificate_ = Utility::getSubjectAltNames(*cert, GEN_URI); - return cached_uri_san_peer_certificate_; -} - -absl::Span SslHandshakerImpl::dnsSansPeerCertificate() const { - if (!cached_dns_san_peer_certificate_.empty()) { - return cached_dns_san_peer_certificate_; - } - - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - ASSERT(cached_dns_san_peer_certificate_.empty()); - return cached_dns_san_peer_certificate_; - } - cached_dns_san_peer_certificate_ = Utility::getSubjectAltNames(*cert, GEN_DNS); - return cached_dns_san_peer_certificate_; -} - -uint16_t SslHandshakerImpl::ciphersuiteId() const { - const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl()); - if (cipher == nullptr) { - return 0xffff; - } - - // From the OpenSSL docs: - // SSL_CIPHER_get_id returns |cipher|'s id. It may be cast to a |uint16_t| to - // get the cipher suite value. 
- return static_cast(SSL_CIPHER_get_id(cipher)); -} - -std::string SslHandshakerImpl::ciphersuiteString() const { - const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl()); - if (cipher == nullptr) { - return {}; - } - - return SSL_CIPHER_get_name(cipher); -} - -const std::string& SslHandshakerImpl::tlsVersion() const { - if (!cached_tls_version_.empty()) { - return cached_tls_version_; - } - cached_tls_version_ = SSL_get_version(ssl()); - return cached_tls_version_; -} - Network::PostIoAction SslHandshakerImpl::doHandshake() { ASSERT(state_ != Ssl::SocketState::HandshakeComplete && state_ != Ssl::SocketState::ShutdownSent); int rc = SSL_do_handshake(ssl()); @@ -248,90 +64,6 @@ Network::PostIoAction SslHandshakerImpl::doHandshake() { } } -const std::string& SslHandshakerImpl::serialNumberPeerCertificate() const { - if (!cached_serial_number_peer_certificate_.empty()) { - return cached_serial_number_peer_certificate_; - } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - ASSERT(cached_serial_number_peer_certificate_.empty()); - return cached_serial_number_peer_certificate_; - } - cached_serial_number_peer_certificate_ = Utility::getSerialNumberFromCertificate(*cert.get()); - return cached_serial_number_peer_certificate_; -} - -const std::string& SslHandshakerImpl::issuerPeerCertificate() const { - if (!cached_issuer_peer_certificate_.empty()) { - return cached_issuer_peer_certificate_; - } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - ASSERT(cached_issuer_peer_certificate_.empty()); - return cached_issuer_peer_certificate_; - } - cached_issuer_peer_certificate_ = Utility::getIssuerFromCertificate(*cert); - return cached_issuer_peer_certificate_; -} - -const std::string& SslHandshakerImpl::subjectPeerCertificate() const { - if (!cached_subject_peer_certificate_.empty()) { - return cached_subject_peer_certificate_; - } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - ASSERT(cached_subject_peer_certificate_.empty()); - return cached_subject_peer_certificate_; - } - cached_subject_peer_certificate_ = Utility::getSubjectFromCertificate(*cert); - return cached_subject_peer_certificate_; -} - -const std::string& SslHandshakerImpl::subjectLocalCertificate() const { - if (!cached_subject_local_certificate_.empty()) { - return cached_subject_local_certificate_; - } - X509* cert = SSL_get_certificate(ssl()); - if (!cert) { - ASSERT(cached_subject_local_certificate_.empty()); - return cached_subject_local_certificate_; - } - cached_subject_local_certificate_ = Utility::getSubjectFromCertificate(*cert); - return cached_subject_local_certificate_; -} - -absl::optional SslHandshakerImpl::validFromPeerCertificate() const { - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - return absl::nullopt; - } - return Utility::getValidFrom(*cert); -} - -absl::optional SslHandshakerImpl::expirationPeerCertificate() const { - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); - if (!cert) { - return absl::nullopt; - } - return Utility::getExpirationTime(*cert); -} - -const std::string& SslHandshakerImpl::sessionId() const { - if (!cached_session_id_.empty()) { - return cached_session_id_; - } - SSL_SESSION* session = SSL_get_session(ssl()); - if (session == nullptr) { - ASSERT(cached_session_id_.empty()); - return cached_session_id_; - } - - unsigned int session_id_length = 0; - const uint8_t* session_id = SSL_SESSION_get_id(session, &session_id_length); - cached_session_id_ = Hex::encode(session_id, 
session_id_length); - return cached_session_id_; -} - } // namespace Tls } // namespace TransportSockets } // namespace Extensions diff --git a/source/extensions/transport_sockets/tls/ssl_handshaker.h b/source/extensions/transport_sockets/tls/ssl_handshaker.h index 81577c818174..d20799efa67e 100644 --- a/source/extensions/transport_sockets/tls/ssl_handshaker.h +++ b/source/extensions/transport_sockets/tls/ssl_handshaker.h @@ -1,11 +1,11 @@ #pragma once #include -#include #include "envoy/network/connection.h" #include "envoy/network/transport_socket.h" #include "envoy/secret/secret_callbacks.h" +#include "envoy/server/options.h" #include "envoy/ssl/handshaker.h" #include "envoy/ssl/private_key/private_key_callbacks.h" #include "envoy/ssl/ssl_socket_extended_info.h" @@ -14,6 +14,7 @@ #include "envoy/stats/stats_macros.h" #include "source/common/common/logger.h" +#include "source/extensions/transport_sockets/tls/connection_info_impl_base.h" #include "source/extensions/transport_sockets/tls/utility.h" #include "absl/container/node_hash_map.h" @@ -36,7 +37,7 @@ class SslExtendedSocketInfoImpl : public Envoy::Ssl::SslExtendedSocketInfo { Envoy::Ssl::ClientValidationStatus::NotValidated}; }; -class SslHandshakerImpl : public Ssl::ConnectionInfo, +class SslHandshakerImpl : public ConnectionInfoImplBase, public Ssl::Handshaker, protected Logger::Loggable { public: @@ -44,33 +45,16 @@ class SslHandshakerImpl : public Ssl::ConnectionInfo, Ssl::HandshakeCallbacks* handshake_callbacks); // Ssl::ConnectionInfo - bool peerCertificatePresented() const override; bool peerCertificateValidated() const override; - absl::Span uriSanLocalCertificate() const override; - const std::string& sha256PeerCertificateDigest() const override; - const std::string& sha1PeerCertificateDigest() const override; - const std::string& serialNumberPeerCertificate() const override; - const std::string& issuerPeerCertificate() const override; - const std::string& subjectPeerCertificate() const override; - const std::string& subjectLocalCertificate() const override; - absl::Span uriSanPeerCertificate() const override; - const std::string& urlEncodedPemEncodedPeerCertificate() const override; - const std::string& urlEncodedPemEncodedPeerCertificateChain() const override; - absl::Span dnsSansPeerCertificate() const override; - absl::Span dnsSansLocalCertificate() const override; - absl::optional validFromPeerCertificate() const override; - absl::optional expirationPeerCertificate() const override; - const std::string& sessionId() const override; - uint16_t ciphersuiteId() const override; - std::string ciphersuiteString() const override; - const std::string& tlsVersion() const override; + + // ConnectionInfoImplBase + SSL* ssl() const override { return ssl_.get(); } // Ssl::Handshaker Network::PostIoAction doHandshake() override; Ssl::SocketState state() const { return state_; } void setState(Ssl::SocketState state) { state_ = state; } - SSL* ssl() const { return ssl_.get(); } Ssl::HandshakeCallbacks* handshakeCallbacks() { return handshake_callbacks_; } bssl::UniquePtr ssl_; @@ -79,20 +63,6 @@ class SslHandshakerImpl : public Ssl::ConnectionInfo, Ssl::HandshakeCallbacks* handshake_callbacks_; Ssl::SocketState state_; - mutable std::vector cached_uri_san_local_certificate_; - mutable std::string cached_sha_256_peer_certificate_digest_; - mutable std::string cached_sha_1_peer_certificate_digest_; - mutable std::string cached_serial_number_peer_certificate_; - mutable std::string cached_issuer_peer_certificate_; - mutable std::string 
cached_subject_peer_certificate_; - mutable std::string cached_subject_local_certificate_; - mutable std::vector cached_uri_san_peer_certificate_; - mutable std::string cached_url_encoded_pem_encoded_peer_certificate_; - mutable std::string cached_url_encoded_pem_encoded_peer_cert_chain_; - mutable std::vector cached_dns_san_peer_certificate_; - mutable std::vector cached_dns_san_local_certificate_; - mutable std::string cached_session_id_; - mutable std::string cached_tls_version_; mutable SslExtendedSocketInfoImpl extended_socket_info_; }; @@ -100,15 +70,18 @@ using SslHandshakerImplSharedPtr = std::shared_ptr; class HandshakerFactoryContextImpl : public Ssl::HandshakerFactoryContext { public: - HandshakerFactoryContextImpl(Api::Api& api, absl::string_view alpn_protocols) - : api_(api), alpn_protocols_(alpn_protocols) {} + HandshakerFactoryContextImpl(Api::Api& api, const Server::Options& options, + absl::string_view alpn_protocols) + : api_(api), options_(options), alpn_protocols_(alpn_protocols) {} // HandshakerFactoryContext Api::Api& api() override { return api_; } + const Server::Options& options() const override { return options_; } absl::string_view alpnProtocols() const override { return alpn_protocols_; } private: Api::Api& api_; + const Server::Options& options_; const std::string alpn_protocols_; }; diff --git a/source/extensions/transport_sockets/tls/ssl_socket.cc b/source/extensions/transport_sockets/tls/ssl_socket.cc index c5056786ee9a..31ed7fd52f01 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.cc +++ b/source/extensions/transport_sockets/tls/ssl_socket.cc @@ -164,7 +164,7 @@ Network::IoResult SslSocket::doRead(Buffer::Instance& read_buffer) { } void SslSocket::onPrivateKeyMethodComplete() { - ASSERT(isThreadSafe()); + ASSERT(callbacks_ != nullptr && callbacks_->connection().dispatcher().isThreadSafe()); ASSERT(info_->state() == Ssl::SocketState::HandshakeInProgress); // Resume handshake. 
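The ssl_handshaker.cc/.h changes above move every cached certificate and TLS accessor (SANs, digests, subject, issuer, serial number, session id, cipher suite, TLS version, PEM encodings) into the new ConnectionInfoImplBase; a subclass only has to expose the raw handle via ssl(), while peerCertificateValidated() stays in SslHandshakerImpl because the base class does not track validation state. A minimal sketch of how another Ssl::ConnectionInfo could reuse the base class follows; the class name ExampleConnectionInfo and its direct ownership of the SSL object are hypothetical illustrations, not part of this patch.

#include <utility>

#include "source/extensions/transport_sockets/tls/connection_info_impl_base.h"

#include "openssl/ssl.h"

namespace Envoy {
namespace Extensions {
namespace TransportSockets {
namespace Tls {

// Sketch only: a ConnectionInfo that reuses ConnectionInfoImplBase. All cached
// accessors are inherited; the subclass supplies the SSL handle and the
// validation status.
class ExampleConnectionInfo : public ConnectionInfoImplBase {
public:
  explicit ExampleConnectionInfo(bssl::UniquePtr<SSL> ssl) : ssl_(std::move(ssl)) {}

  // ConnectionInfoImplBase
  SSL* ssl() const override { return ssl_.get(); }

  // Ssl::ConnectionInfo; validation state is not tracked by the base class.
  bool peerCertificateValidated() const override { return false; }

private:
  bssl::UniquePtr<SSL> ssl_;
};

} // namespace Tls
} // namespace TransportSockets
} // namespace Extensions
} // namespace Envoy

Because each accessor in the base class computes its value on first use and memoizes it in the mutable cached_* members, the BoringSSL calls run at most once per connection, which matches the behavior SslHandshakerImpl had before this refactor.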
diff --git a/source/extensions/transport_sockets/tls/ssl_socket.h b/source/extensions/transport_sockets/tls/ssl_socket.h index 186bebbabc06..d7ce778387f6 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.h +++ b/source/extensions/transport_sockets/tls/ssl_socket.h @@ -73,7 +73,7 @@ class SslSocket : public Network::TransportSocket, SSL* rawSslForTest() const { return rawSsl(); } protected: - SSL* rawSsl() const { return info_->ssl_.get(); } + SSL* rawSsl() const { return info_->ssl(); } private: struct ReadResult { @@ -86,9 +86,6 @@ class SslSocket : public Network::TransportSocket, void drainErrorQueue(); void shutdownSsl(); void shutdownBasic(); - bool isThreadSafe() const { - return callbacks_ != nullptr && callbacks_->connection().dispatcher().isThreadSafe(); - } const Network::TransportSocketOptionsConstSharedPtr transport_socket_options_; Network::TransportSocketCallbacks* callbacks_{}; diff --git a/source/extensions/transport_sockets/tls/utility.cc b/source/extensions/transport_sockets/tls/utility.cc index cbc6739c0299..e2b220c9468e 100644 --- a/source/extensions/transport_sockets/tls/utility.cc +++ b/source/extensions/transport_sockets/tls/utility.cc @@ -71,6 +71,26 @@ Envoy::Ssl::CertificateDetailsPtr Utility::certificateDetails(X509* cert, const return certificate_details; } +bool Utility::dnsNameMatch(absl::string_view dns_name, absl::string_view pattern) { + const std::string lower_case_dns_name = absl::AsciiStrToLower(dns_name); + const std::string lower_case_pattern = absl::AsciiStrToLower(pattern); + if (lower_case_dns_name == lower_case_pattern) { + return true; + } + + size_t pattern_len = lower_case_pattern.length(); + if (pattern_len > 1 && lower_case_pattern[0] == '*' && lower_case_pattern[1] == '.') { + if (lower_case_dns_name.length() > pattern_len - 1) { + const size_t off = lower_case_dns_name.length() - pattern_len + 1; + return lower_case_dns_name.substr(0, off).find('.') == std::string::npos && + lower_case_dns_name.substr(off, pattern_len - 1) == + lower_case_pattern.substr(1, pattern_len - 1); + } + } + + return false; +} + namespace { enum class CertName { Issuer, Subject }; @@ -331,6 +351,15 @@ absl::string_view Utility::getErrorDescription(int err) { return SSL_ERROR_UNKNOWN_ERROR_MESSAGE; } +std::string Utility::getX509VerificationErrorInfo(X509_STORE_CTX* ctx) { + const int n = X509_STORE_CTX_get_error(ctx); + const int depth = X509_STORE_CTX_get_error_depth(ctx); + std::string error_details = + absl::StrCat("X509_verify_cert: certificate verification error at depth ", depth, ": ", + X509_verify_cert_error_string(n)); + return error_details; +} + } // namespace Tls } // namespace TransportSockets } // namespace Extensions diff --git a/source/extensions/transport_sockets/tls/utility.h b/source/extensions/transport_sockets/tls/utility.h index 76824328d46b..e38589e30d88 100644 --- a/source/extensions/transport_sockets/tls/utility.h +++ b/source/extensions/transport_sockets/tls/utility.h @@ -20,6 +20,14 @@ namespace Utility { Envoy::Ssl::CertificateDetailsPtr certificateDetails(X509* cert, const std::string& path, TimeSource& time_source); +/** + * Determines whether the given name matches 'pattern' which may optionally begin with a wildcard. + * @param dns_name the DNS name to match + * @param pattern the pattern to match against (*.example.com) + * @return true if the san matches pattern + */ +bool dnsNameMatch(absl::string_view dns_name, absl::string_view pattern); + /** * Retrieves the serial number of a certificate. 
* @param cert the certificate @@ -101,6 +109,14 @@ absl::optional getLastCryptoError(); */ absl::string_view getErrorDescription(int err); +/** + * Extracts the X509 certificate validation error information. + * + * @param ctx the store context + * @return the error details + */ +std::string getX509VerificationErrorInfo(X509_STORE_CTX* ctx); + } // namespace Utility } // namespace Tls } // namespace TransportSockets diff --git a/source/extensions/upstreams/http/http/upstream_request.h b/source/extensions/upstreams/http/http/upstream_request.h index a49dc2875890..7136e709964e 100644 --- a/source/extensions/upstreams/http/http/upstream_request.h +++ b/source/extensions/upstreams/http/http/upstream_request.h @@ -80,8 +80,9 @@ class HttpUpstream : public Router::GenericUpstream, public Envoy::Http::StreamC void readDisable(bool disable) override { request_encoder_->getStream().readDisable(disable); } void resetStream() override { - request_encoder_->getStream().removeCallbacks(*this); - request_encoder_->getStream().resetStream(Envoy::Http::StreamResetReason::LocalReset); + auto& stream = request_encoder_->getStream(); + stream.removeCallbacks(*this); + stream.resetStream(Envoy::Http::StreamResetReason::LocalReset); } void setAccount(Buffer::BufferMemoryAccountSharedPtr account) override { @@ -102,6 +103,10 @@ class HttpUpstream : public Router::GenericUpstream, public Envoy::Http::StreamC upstream_request_.onBelowWriteBufferLowWatermark(); } + const StreamInfo::BytesMeterSharedPtr& bytesMeter() override { + return request_encoder_->getStream().bytesMeter(); + } + private: Router::UpstreamToDownstream& upstream_request_; Envoy::Http::RequestEncoder* request_encoder_{}; diff --git a/source/extensions/upstreams/http/tcp/upstream_request.h b/source/extensions/upstreams/http/tcp/upstream_request.h index ce947b05f943..51f5ed688bad 100644 --- a/source/extensions/upstreams/http/tcp/upstream_request.h +++ b/source/extensions/upstreams/http/tcp/upstream_request.h @@ -84,10 +84,12 @@ class TcpUpstream : public Router::GenericUpstream, void onEvent(Network::ConnectionEvent event) override; void onAboveWriteBufferHighWatermark() override; void onBelowWriteBufferLowWatermark() override; + const StreamInfo::BytesMeterSharedPtr& bytesMeter() override { return bytes_meter_; } private: Router::UpstreamToDownstream* upstream_request_; Envoy::Tcp::ConnectionPool::ConnectionDataPtr upstream_conn_data_; + StreamInfo::BytesMeterSharedPtr bytes_meter_{std::make_shared()}; }; } // namespace Tcp diff --git a/source/extensions/watchdog/profile_action/BUILD b/source/extensions/watchdog/profile_action/BUILD index 1de6bb89d075..5f01f35e2ff2 100644 --- a/source/extensions/watchdog/profile_action/BUILD +++ b/source/extensions/watchdog/profile_action/BUILD @@ -25,7 +25,7 @@ envoy_cc_library( "//source/common/profiler:profiler_lib", "//source/common/protobuf:utility_lib", "//source/common/stats:symbol_table_lib", - "@envoy_api//envoy/extensions/watchdog/profile_action/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/watchdog/profile_action/v3:pkg_cc_proto", ], ) @@ -40,6 +40,6 @@ envoy_cc_extension( "//source/common/config:utility_lib", "//source/common/protobuf", "//source/common/protobuf:message_validator_lib", - "@envoy_api//envoy/extensions/watchdog/profile_action/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/watchdog/profile_action/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/watchdog/profile_action/config.h b/source/extensions/watchdog/profile_action/config.h index de821d48fced..a6d208fed4dc 
100644 --- a/source/extensions/watchdog/profile_action/config.h +++ b/source/extensions/watchdog/profile_action/config.h @@ -1,6 +1,6 @@ #pragma once -#include "envoy/extensions/watchdog/profile_action/v3alpha/profile_action.pb.h" +#include "envoy/extensions/watchdog/profile_action/v3/profile_action.pb.h" #include "envoy/server/guarddog_config.h" #include "source/common/protobuf/protobuf.h" @@ -25,8 +25,7 @@ class ProfileActionFactory : public Server::Configuration::GuardDogActionFactory std::string name() const override { return "envoy.watchdog.profile_action"; } private: - using ProfileActionConfig = - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig; + using ProfileActionConfig = envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig; }; } // namespace ProfileAction diff --git a/source/extensions/watchdog/profile_action/profile_action.cc b/source/extensions/watchdog/profile_action/profile_action.cc index 3d1fc5adc465..3f0e556a3cde 100644 --- a/source/extensions/watchdog/profile_action/profile_action.cc +++ b/source/extensions/watchdog/profile_action/profile_action.cc @@ -27,7 +27,7 @@ std::string generateProfileFilePath(const std::string& directory, TimeSource& ti } // namespace ProfileAction::ProfileAction( - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig& config, + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig& config, Server::Configuration::GuardDogActionFactoryContext& context) : path_(config.profile_path()), duration_( diff --git a/source/extensions/watchdog/profile_action/profile_action.h b/source/extensions/watchdog/profile_action/profile_action.h index 144f6b9861ff..5414ee32ee3e 100644 --- a/source/extensions/watchdog/profile_action/profile_action.h +++ b/source/extensions/watchdog/profile_action/profile_action.h @@ -2,7 +2,7 @@ #include -#include "envoy/extensions/watchdog/profile_action/v3alpha/profile_action.pb.h" +#include "envoy/extensions/watchdog/profile_action/v3/profile_action.pb.h" #include "envoy/server/guarddog_config.h" #include "envoy/thread/thread.h" @@ -18,7 +18,7 @@ namespace ProfileAction { */ class ProfileAction : public Server::Configuration::GuardDogAction { public: - ProfileAction(envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig& config, + ProfileAction(envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig& config, Server::Configuration::GuardDogActionFactoryContext& context); void run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent event, diff --git a/source/server/BUILD b/source/server/BUILD index d78e4c163193..fe5c9056f655 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -272,7 +272,7 @@ envoy_cc_library( "//source/common/stats:symbol_table_lib", "//source/common/watchdog:abort_action_config", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", - "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3:pkg_cc_proto", ], ) @@ -455,7 +455,9 @@ envoy_cc_library( ":lds_api_lib", ":transport_socket_config_lib", "//envoy/access_log:access_log_interface", + "//envoy/config:typed_metadata_interface", "//envoy/network:connection_interface", + "//envoy/network:listener_interface", "//envoy/server:api_listener_interface", "//envoy/server:filter_config_interface", "//envoy/server:listener_manager_interface", @@ -465,6 +467,7 @@ envoy_cc_library( "//source/common/common:basic_resource_lib", "//source/common/common:empty_string", "//source/common/config:utility_lib", + 
"//source/common/config:metadata_lib", "//source/common/http:conn_manager_lib", "//source/common/init:manager_lib", "//source/common/init:target_lib", @@ -500,6 +503,7 @@ envoy_cc_library( hdrs = ["filter_chain_manager_impl.h"], deps = [ ":filter_chain_factory_context_callback", + "//envoy/config:typed_metadata_interface", "//envoy/server:instance_interface", "//envoy/server:listener_manager_interface", "//envoy/server:transport_socket_config_interface", @@ -588,6 +592,7 @@ envoy_cc_library( "//source/common/config:new_grpc_mux_lib", "//source/common/config:utility_lib", "//source/common/config:xds_resource_lib", + "//source/common/config/xds_mux:grpc_mux_lib", "//source/common/grpc:async_client_manager_lib", "//source/common/grpc:context_lib", "//source/common/http:codes_lib", diff --git a/source/server/active_udp_listener.cc b/source/server/active_udp_listener.cc index 9cb3fc4d4bd8..e0d4c64f73f8 100644 --- a/source/server/active_udp_listener.cc +++ b/source/server/active_udp_listener.cc @@ -103,7 +103,7 @@ ActiveRawUdpListener::ActiveRawUdpListener(uint32_t worker_index, uint32_t concu // If filter is nullptr warn that we will be dropping packets. This is an edge case and should // only happen due to a bad factory. It's not worth adding per-worker error handling for this. - if (read_filter_ == nullptr) { + if (read_filters_.empty()) { ENVOY_LOG(warn, "UDP listener has no filters. Packets will be dropped."); } @@ -113,8 +113,11 @@ ActiveRawUdpListener::ActiveRawUdpListener(uint32_t worker_index, uint32_t concu } void ActiveRawUdpListener::onDataWorker(Network::UdpRecvData&& data) { - if (read_filter_ != nullptr) { - read_filter_->onData(data); + for (auto& read_filter : read_filters_) { + Network::FilterStatus status = read_filter->onData(data); + if (status == Network::FilterStatus::StopIteration) { + return; + } } } @@ -130,14 +133,16 @@ void ActiveRawUdpListener::onWriteReady(const Network::Socket&) { } void ActiveRawUdpListener::onReceiveError(Api::IoError::IoErrorCode error_code) { - if (read_filter_ != nullptr) { - read_filter_->onReceiveError(error_code); + for (auto& read_filter : read_filters_) { + Network::FilterStatus status = read_filter->onReceiveError(error_code); + if (status == Network::FilterStatus::StopIteration) { + return; + } } } void ActiveRawUdpListener::addReadFilter(Network::UdpListenerReadFilterPtr&& filter) { - ASSERT(read_filter_ == nullptr, "Cannot add a 2nd UDP read filter"); - read_filter_ = std::move(filter); + read_filters_.emplace_back(std::move(filter)); } Network::UdpListener& ActiveRawUdpListener::udpListener() { return *udp_listener_; } diff --git a/source/server/active_udp_listener.h b/source/server/active_udp_listener.h index 68918ffc3930..e3dd74bfb95f 100644 --- a/source/server/active_udp_listener.h +++ b/source/server/active_udp_listener.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include "envoy/network/connection_handler.h" @@ -100,7 +101,7 @@ class ActiveRawUdpListener : public ActiveUdpListenerBase, // The read filter refers to the UDP listener to send packets to downstream. // If the UDP listener is deleted before the read filter, the read filter may try to use it // after deletion. 
- read_filter_.reset(); + read_filters_.clear(); udp_listener_.reset(); } // These two are unreachable because a config will be rejected if it configures both this listener @@ -117,7 +118,7 @@ class ActiveRawUdpListener : public ActiveUdpListenerBase, Network::UdpListener& udpListener() override; private: - Network::UdpListenerReadFilterPtr read_filter_; + std::list read_filters_; Network::UdpPacketWriterPtr udp_packet_writer_; }; diff --git a/source/server/config_validation/dispatcher.cc b/source/server/config_validation/dispatcher.cc index 36010fd3107a..155b482709e2 100644 --- a/source/server/config_validation/dispatcher.cc +++ b/source/server/config_validation/dispatcher.cc @@ -15,12 +15,6 @@ Network::ClientConnectionPtr ValidationDispatcher::createClientConnection( std::move(transport_socket), options); } -Network::DnsResolverSharedPtr ValidationDispatcher::createDnsResolver( - const std::vector&, - const envoy::config::core::v3::DnsResolverOptions&) { - return dns_resolver_; -} - Network::ListenerPtr ValidationDispatcher::createListener(Network::SocketSharedPtr&&, Network::TcpListenerCallbacks&, bool) { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; diff --git a/source/server/config_validation/dispatcher.h b/source/server/config_validation/dispatcher.h index 2d2f1a21416b..e16829cdb0b6 100644 --- a/source/server/config_validation/dispatcher.h +++ b/source/server/config_validation/dispatcher.h @@ -23,15 +23,8 @@ class ValidationDispatcher : public DispatcherImpl { createClientConnection(Network::Address::InstanceConstSharedPtr, Network::Address::InstanceConstSharedPtr, Network::TransportSocketPtr&&, const Network::ConnectionSocket::OptionsSharedPtr& options) override; - Network::DnsResolverSharedPtr createDnsResolver( - const std::vector& resolvers, - const envoy::config::core::v3::DnsResolverOptions& dns_resolver_options) override; Network::ListenerPtr createListener(Network::SocketSharedPtr&&, Network::TcpListenerCallbacks&, bool bind_to_port) override; - -protected: - std::shared_ptr dns_resolver_{ - std::make_shared()}; }; } // namespace Event diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h index 4509400fcc90..b86959385c3c 100644 --- a/source/server/config_validation/server.h +++ b/source/server/config_validation/server.h @@ -16,6 +16,7 @@ #include "source/common/common/assert.h" #include "source/common/common/random_generator.h" #include "source/common/grpc/common.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/protobuf/message_validator_impl.h" #include "source/common/quic/quic_stat_names.h" #include "source/common/router/context_impl.h" @@ -76,7 +77,10 @@ class ValidationInstance final : Logger::Loggable, Ssl::ContextManager& sslContextManager() override { return *ssl_context_manager_; } Event::Dispatcher& dispatcher() override { return *dispatcher_; } Network::DnsResolverSharedPtr dnsResolver() override { - return dispatcher().createDnsResolver({}, envoy::config::core::v3::DnsResolverOptions()); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + Network::DnsResolverFactory& dns_resolver_factory = + Network::createDefaultDnsResolverFactory(typed_dns_resolver_config); + return dns_resolver_factory.createDnsResolver(dispatcher(), api(), typed_dns_resolver_config); } void drainListeners() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } DrainManager& drainManager() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } diff --git a/source/server/factory_context_base_impl.h 
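The active_udp_listener hunks above replace the single read_filter_ slot with a list (the element type is stripped in this rendering, but the removed member shows it was Network::UdpListenerReadFilterPtr) and turn per-packet dispatch into a short filter chain that stops as soon as a filter returns StopIteration. A minimal sketch of that dispatch loop, with simplified stand-in types:

#include <list>
#include <memory>
#include <utility>

enum class FilterStatus { Continue, StopIteration };

struct UdpRecvData {};

class UdpListenerReadFilter {
public:
  virtual ~UdpListenerReadFilter() = default;
  virtual FilterStatus onData(UdpRecvData& data) = 0;
};
using UdpListenerReadFilterPtr = std::unique_ptr<UdpListenerReadFilter>;

class RawUdpListener {
public:
  void addReadFilter(UdpListenerReadFilterPtr&& filter) {
    // No longer asserts on a second filter; every added filter joins the chain.
    read_filters_.emplace_back(std::move(filter));
  }

  void onDataWorker(UdpRecvData&& data) {
    for (auto& filter : read_filters_) {
      if (filter->onData(data) == FilterStatus::StopIteration) {
        return; // a filter claimed the packet; later filters never see it
      }
    }
  }

private:
  std::list<UdpListenerReadFilterPtr> read_filters_;
};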
b/source/server/factory_context_base_impl.h index 4b56a110ccf2..0579f86a4eb5 100644 --- a/source/server/factory_context_base_impl.h +++ b/source/server/factory_context_base_impl.h @@ -18,6 +18,13 @@ class FactoryContextBaseImpl : public Configuration::FactoryContextBase { singleton_manager_(singleton_manager), validation_visitor_(validation_visitor), scope_(scope), thread_local_(local) {} + FactoryContextBaseImpl(Configuration::FactoryContextBase& config) + : options_(config.options()), main_thread_dispatcher_(config.mainThreadDispatcher()), + api_(config.api()), local_info_(config.localInfo()), admin_(config.admin()), + runtime_(config.runtime()), singleton_manager_(config.singletonManager()), + validation_visitor_(config.messageValidationVisitor()), scope_(config.scope()), + thread_local_(config.threadLocal()) {} + // FactoryContextBase const Options& options() override { return options_; }; Event::Dispatcher& mainThreadDispatcher() override { return main_thread_dispatcher_; }; @@ -30,6 +37,7 @@ class FactoryContextBaseImpl : public Configuration::FactoryContextBase { return validation_visitor_; }; Stats::Scope& scope() override { return scope_; }; + Stats::Scope& serverScope() override { return scope_; } ThreadLocal::SlotAllocator& threadLocal() override { return thread_local_; }; private: @@ -41,8 +49,8 @@ class FactoryContextBaseImpl : public Configuration::FactoryContextBase { Runtime::Loader& runtime_; Singleton::Manager& singleton_manager_; ProtobufMessage::ValidationVisitor& validation_visitor_; - Stats::Store& scope_; - ThreadLocal::Instance& thread_local_; + Stats::Scope& scope_; + ThreadLocal::SlotAllocator& thread_local_; }; } // namespace Server diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index f99ed1f1858c..bff85725036f 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -31,7 +31,9 @@ Network::Address::InstanceConstSharedPtr fakeAddress() { PerFilterChainFactoryContextImpl::PerFilterChainFactoryContextImpl( Configuration::FactoryContext& parent_context, Init::Manager& init_manager) - : parent_context_(parent_context), init_manager_(init_manager) {} + : parent_context_(parent_context), scope_(parent_context_.scope().createScope("")), + filter_chain_scope_(parent_context_.listenerScope().createScope("")), + init_manager_(init_manager) {} bool PerFilterChainFactoryContextImpl::drainClose() const { return is_draining_.load() || parent_context_.drainDecision().drainClose(); @@ -50,6 +52,11 @@ PerFilterChainFactoryContextImpl::listenerMetadata() const { return parent_context_.listenerMetadata(); } +const Envoy::Config::TypedMetadata& +PerFilterChainFactoryContextImpl::listenerTypedMetadata() const { + return parent_context_.listenerTypedMetadata(); +} + envoy::config::core::v3::TrafficDirection PerFilterChainFactoryContextImpl::direction() const { return parent_context_.direction(); } @@ -101,7 +108,7 @@ Envoy::Runtime::Loader& PerFilterChainFactoryContextImpl::runtime() { return parent_context_.runtime(); } -Stats::Scope& PerFilterChainFactoryContextImpl::scope() { return parent_context_.scope(); } +Stats::Scope& PerFilterChainFactoryContextImpl::scope() { return *scope_; } Singleton::Manager& PerFilterChainFactoryContextImpl::singletonManager() { return parent_context_.singletonManager(); @@ -135,9 +142,7 @@ PerFilterChainFactoryContextImpl::getTransportSocketFactoryContext() const { return parent_context_.getTransportSocketFactoryContext(); } -Stats::Scope& 
PerFilterChainFactoryContextImpl::listenerScope() { - return parent_context_.listenerScope(); -} +Stats::Scope& PerFilterChainFactoryContextImpl::listenerScope() { return *filter_chain_scope_; } bool PerFilterChainFactoryContextImpl::isQuicListener() const { return parent_context_.isQuicListener(); @@ -791,6 +796,10 @@ FactoryContextImpl::getTransportSocketFactoryContext() const { const envoy::config::core::v3::Metadata& FactoryContextImpl::listenerMetadata() const { return config_.metadata(); } +const Envoy::Config::TypedMetadata& FactoryContextImpl::listenerTypedMetadata() const { + // TODO(nareddyt): Needs an implementation for this context. Currently not used. + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; +} envoy::config::core::v3::TrafficDirection FactoryContextImpl::direction() const { return config_.traffic_direction(); } diff --git a/source/server/filter_chain_manager_impl.h b/source/server/filter_chain_manager_impl.h index af3be80f7752..14485d60108f 100644 --- a/source/server/filter_chain_manager_impl.h +++ b/source/server/filter_chain_manager_impl.h @@ -5,6 +5,7 @@ #include #include "envoy/config/listener/v3/listener_components.pb.h" +#include "envoy/config/typed_metadata.h" #include "envoy/network/drain_decision.h" #include "envoy/server/filter_config.h" #include "envoy/server/instance.h" @@ -66,11 +67,13 @@ class PerFilterChainFactoryContextImpl : public Configuration::FilterChainFactor const LocalInfo::LocalInfo& localInfo() const override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; + Stats::Scope& serverScope() override { return parent_context_.serverScope(); } Singleton::Manager& singletonManager() override; OverloadManager& overloadManager() override; ThreadLocal::SlotAllocator& threadLocal() override; Admin& admin() override; const envoy::config::core::v3::Metadata& listenerMetadata() const override; + const Envoy::Config::TypedMetadata& listenerTypedMetadata() const override; envoy::config::core::v3::TrafficDirection direction() const override; TimeSource& timeSource() override; ProtobufMessage::ValidationVisitor& messageValidationVisitor() override; @@ -87,6 +90,10 @@ class PerFilterChainFactoryContextImpl : public Configuration::FilterChainFactor private: Configuration::FactoryContext& parent_context_; + // The scope that has empty prefix. + Stats::ScopePtr scope_; + // filter_chain_scope_ has the same prefix as listener owners scope. 
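The scope changes above give each filter chain context its own stats scopes: createScope("") keeps the parent's stat-name prefix but returns a distinct, separately owned scope object, presumably so that stats created through a filter chain can be dropped together with that filter chain rather than living as long as the listener. A toy sketch of why an empty-suffix child scope changes ownership without changing stat names (this is not Envoy's stats library, only the idea):

#include <iostream>
#include <memory>
#include <string>

// Toy stand-in for a stats scope: only prefix and ownership semantics are modeled.
class Scope {
public:
  explicit Scope(std::string prefix) : prefix_(std::move(prefix)) {}
  std::unique_ptr<Scope> createScope(const std::string& suffix) {
    return std::make_unique<Scope>(prefix_ + suffix);
  }
  std::string counterName(const std::string& name) const { return prefix_ + name; }

private:
  std::string prefix_;
};

int main() {
  Scope listener_scope("listener.0.0.0.0_8080.");
  // Same prefix, but a separate object that the filter chain owns and can release on drain.
  std::unique_ptr<Scope> filter_chain_scope = listener_scope.createScope("");
  std::cout << listener_scope.counterName("downstream_cx_total") << "\n";
  std::cout << filter_chain_scope->counterName("downstream_cx_total") << "\n"; // identical name
  return 0;
}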
+ Stats::ScopePtr filter_chain_scope_; Init::Manager& init_manager_; std::atomic is_draining_{false}; }; @@ -151,6 +158,7 @@ class FactoryContextImpl : public Configuration::FactoryContext { const LocalInfo::LocalInfo& localInfo() const override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; + Stats::Scope& serverScope() override { return server_.stats(); } Singleton::Manager& singletonManager() override; OverloadManager& overloadManager() override; ThreadLocal::SlotAllocator& threadLocal() override; @@ -164,6 +172,7 @@ class FactoryContextImpl : public Configuration::FactoryContext { Configuration::ServerFactoryContext& getServerFactoryContext() const override; Configuration::TransportSocketFactoryContext& getTransportSocketFactoryContext() const override; const envoy::config::core::v3::Metadata& listenerMetadata() const override; + const Envoy::Config::TypedMetadata& listenerTypedMetadata() const override; envoy::config::core::v3::TrafficDirection direction() const override; Network::DrainDecision& drainDecision() override; Stats::Scope& listenerScope() override; diff --git a/source/server/guarddog_impl.cc b/source/server/guarddog_impl.cc index 9aafc65b714f..cfc8256d5420 100644 --- a/source/server/guarddog_impl.cc +++ b/source/server/guarddog_impl.cc @@ -13,7 +13,7 @@ #include "envoy/server/guarddog.h" #include "envoy/server/guarddog_config.h" #include "envoy/stats/scope.h" -#include "envoy/watchdog/v3alpha/abort_action.pb.h" +#include "envoy/watchdog/v3/abort_action.pb.h" #include "source/common/common/assert.h" #include "source/common/common/fmt.h" @@ -69,14 +69,14 @@ GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuratio // Add default abort_action if kill and/or multi-kill is enabled. if (config.killTimeout().count() > 0) { - envoy::watchdog::v3alpha::AbortActionConfig abort_config; + envoy::watchdog::v3::AbortActionConfig abort_config; WatchDogAction* abort_action_config = actions.Add(); abort_action_config->set_event(WatchDogAction::KILL); abort_action_config->mutable_config()->mutable_typed_config()->PackFrom(abort_config); } if (config.multiKillTimeout().count() > 0) { - envoy::watchdog::v3alpha::AbortActionConfig abort_config; + envoy::watchdog::v3::AbortActionConfig abort_config; WatchDogAction* abort_action_config = actions.Add(); abort_action_config->set_event(WatchDogAction::MULTIKILL); abort_action_config->mutable_config()->mutable_typed_config()->PackFrom(abort_config); diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index 943ba20d4c18..e7bd3366702d 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -207,8 +207,8 @@ void ListenSocketFactoryImpl::doFinalPreWorkerInit() { ListenerFactoryContextBaseImpl::ListenerFactoryContextBaseImpl( Envoy::Server::Instance& server, ProtobufMessage::ValidationVisitor& validation_visitor, const envoy::config::listener::v3::Listener& config, DrainManagerPtr drain_manager) - : server_(server), metadata_(config.metadata()), direction_(config.traffic_direction()), - global_scope_(server.stats().createScope("")), + : server_(server), metadata_(config.metadata()), typed_metadata_(config.metadata()), + direction_(config.traffic_direction()), global_scope_(server.stats().createScope("")), listener_scope_(server_.stats().createScope( fmt::format("listener.{}.", !config.stat_prefix().empty() @@ -249,6 +249,9 @@ Admin& ListenerFactoryContextBaseImpl::admin() { return server_.admin(); } const envoy::config::core::v3::Metadata& 
ListenerFactoryContextBaseImpl::listenerMetadata() const { return metadata_; }; +const Envoy::Config::TypedMetadata& ListenerFactoryContextBaseImpl::listenerTypedMetadata() const { + return typed_metadata_; +} envoy::config::core::v3::TrafficDirection ListenerFactoryContextBaseImpl::direction() const { return direction_; }; @@ -518,12 +521,6 @@ void ListenerImpl::createListenerFilterFactories(Network::Socket::Type socket_ty if (!config_.listener_filters().empty()) { switch (socket_type) { case Network::Socket::Type::Datagram: - if (config_.listener_filters().size() > 1) { - // Currently supports only 1 UDP listener filter. - throw EnvoyException(fmt::format( - "error adding listener '{}': Only 1 UDP listener filter per listener supported", - address_->asString())); - } udp_listener_filter_factories_ = parent_.factory_.createUdpListenerFilterFactoryList( config_.listener_filters(), *listener_factory_context_); break; @@ -686,6 +683,9 @@ Admin& PerListenerFactoryContextImpl::admin() { return listener_factory_context_ const envoy::config::core::v3::Metadata& PerListenerFactoryContextImpl::listenerMetadata() const { return listener_factory_context_base_->listenerMetadata(); }; +const Envoy::Config::TypedMetadata& PerListenerFactoryContextImpl::listenerTypedMetadata() const { + return listener_factory_context_base_->listenerTypedMetadata(); +} envoy::config::core::v3::TrafficDirection PerListenerFactoryContextImpl::direction() const { return listener_factory_context_base_->direction(); }; @@ -740,7 +740,8 @@ void ListenerImpl::createUdpListenerFilterChain(Network::UdpListenerFilterManage void ListenerImpl::debugLog(const std::string& message) { UNREFERENCED_PARAMETER(message); - ENVOY_LOG(debug, "{}: name={}, hash={}, address={}", message, name_, hash_, address_->asString()); + ENVOY_LOG(debug, "{}: name={}, hash={}, tag={}, address={}", message, name_, hash_, listener_tag_, + address_->asString()); } void ListenerImpl::initialize() { diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index 46292ec42269..969393478d99 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -5,8 +5,10 @@ #include "envoy/access_log/access_log.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/config/typed_metadata.h" #include "envoy/network/drain_decision.h" #include "envoy/network/filter.h" +#include "envoy/network/listener.h" #include "envoy/server/drain_manager.h" #include "envoy/server/filter_config.h" #include "envoy/server/instance.h" @@ -15,6 +17,7 @@ #include "source/common/common/basic_resource_impl.h" #include "source/common/common/logger.h" +#include "source/common/config/metadata.h" #include "source/common/init/manager_impl.h" #include "source/common/init/target_impl.h" #include "source/common/quic/quic_stat_names.h" @@ -114,12 +117,14 @@ class ListenerFactoryContextBaseImpl final : public Configuration::FactoryContex Init::Manager& initManager() override; const LocalInfo::LocalInfo& localInfo() const override; Envoy::Runtime::Loader& runtime() override; + Stats::Scope& serverScope() override { return server_.stats(); } Stats::Scope& scope() override; Singleton::Manager& singletonManager() override; OverloadManager& overloadManager() override; ThreadLocal::Instance& threadLocal() override; Admin& admin() override; const envoy::config::core::v3::Metadata& listenerMetadata() const override; + const Envoy::Config::TypedMetadata& listenerTypedMetadata() const override; 
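The listenerTypedMetadata() accessors threaded through the contexts in these hunks expose metadata that was converted into typed C++ objects once at configuration load, so consumers can look results up by key instead of re-parsing the opaque proto metadata on every use. A hypothetical miniature of that parse-once, read-many store follows; Envoy's real TypedMetadata and factory interfaces are richer than this sketch.

#include <map>
#include <memory>
#include <string>
#include <utility>

// Hypothetical miniature of a typed-metadata store; names here are illustrative only.
struct TypedObject {
  virtual ~TypedObject() = default;
};

class TypedMetadataStore {
public:
  // Populated once, at config load time, by factories that parse the raw proto metadata.
  void set(const std::string& key, std::unique_ptr<TypedObject> value) {
    data_[key] = std::move(value);
  }
  // Consumers fetch the already-parsed object; no proto parsing on the request path.
  template <typename T> const T* get(const std::string& key) const {
    auto it = data_.find(key);
    return it == data_.end() ? nullptr : dynamic_cast<const T*>(it->second.get());
  }

private:
  std::map<std::string, std::unique_ptr<TypedObject>> data_;
};

// Example typed object a hypothetical filter's factory might register.
struct MyListenerInfo : public TypedObject {
  std::string team_owner;
};

In these hunks the base listener context builds one such store from config.metadata() (the typed_metadata_ member), and the per-listener and per-filter-chain contexts simply forward to it.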
envoy::config::core::v3::TrafficDirection direction() const override; TimeSource& timeSource() override; ProtobufMessage::ValidationContext& messageValidationContext() override; @@ -145,6 +150,8 @@ class ListenerFactoryContextBaseImpl final : public Configuration::FactoryContex private: Envoy::Server::Instance& server_; const envoy::config::core::v3::Metadata metadata_; + const Envoy::Config::TypedMetadataImpl + typed_metadata_; envoy::config::core::v3::TrafficDirection direction_; Stats::ScopePtr global_scope_; Stats::ScopePtr listener_scope_; // Stats with listener named scope. @@ -188,11 +195,13 @@ class PerListenerFactoryContextImpl : public Configuration::ListenerFactoryConte const LocalInfo::LocalInfo& localInfo() const override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; + Stats::Scope& serverScope() override { return listener_factory_context_base_->serverScope(); } Singleton::Manager& singletonManager() override; OverloadManager& overloadManager() override; ThreadLocal::Instance& threadLocal() override; Admin& admin() override; const envoy::config::core::v3::Metadata& listenerMetadata() const override; + const Envoy::Config::TypedMetadata& listenerTypedMetadata() const override; envoy::config::core::v3::TrafficDirection direction() const override; TimeSource& timeSource() override; ProtobufMessage::ValidationContext& messageValidationContext() override; diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index afd6ebd142d6..c1e27ee9db29 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -984,6 +984,23 @@ void ListenerManagerImpl::setNewOrDrainingSocketFactory( if (existing_draining_listener != draining_listeners_.cend()) { draining_listen_socket_factory = &existing_draining_listener->listener_->getSocketFactory(); + existing_draining_listener->listener_->debugLog("clones listener sockets"); + } else { + auto existing_draining_filter_chain = std::find_if( + draining_filter_chains_manager_.cbegin(), draining_filter_chains_manager_.cend(), + [&listener](const DrainingFilterChainsManager& draining_filter_chain) { + return draining_filter_chain.getDrainingListener() + .listenSocketFactory() + .getListenSocket(0) + ->isOpen() && + listener.hasCompatibleAddress(draining_filter_chain.getDrainingListener()); + }); + + if (existing_draining_filter_chain != draining_filter_chains_manager_.cend()) { + draining_listen_socket_factory = + &existing_draining_filter_chain->getDrainingListener().getSocketFactory(); + existing_draining_filter_chain->getDrainingListener().debugLog("clones listener socket"); + } } listener.setSocketFactory(draining_listen_socket_factory != nullptr diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index ad8505467195..3401b71ad43a 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -143,7 +143,7 @@ class DrainingFilterChainsManager { const std::list& getDrainingFilterChains() const { return draining_filter_chains_; } - ListenerImpl& getDrainingListener() { return *draining_listener_; } + ListenerImpl& getDrainingListener() const { return *draining_listener_; } uint64_t decWorkersPendingRemoval() { return --workers_pending_removal_; } // Schedule listener destroy. 
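The listener-manager hunk above extends listen-socket reuse to the case where the previous listener is only draining its filter chains: if a still-open, address-compatible socket is found among the draining filter chain managers, the new listener clones that socket factory instead of binding a new socket, presumably to avoid bind conflicts and dropped connections during in-place updates. A condensed sketch of the lookup with stand-in types; note the const accessor returning a mutable reference, mirroring the getDrainingListener() signature change above:

#include <algorithm>
#include <string>
#include <vector>

// Stand-ins: only the shape of the search matters here.
struct SocketFactory {
  bool open{true};
};

struct DrainingListener {
  std::string address;
  SocketFactory factory;
  bool hasCompatibleAddress(const std::string& addr) const { return addr == address; }
};

struct DrainingFilterChains {
  DrainingListener* draining_listener{nullptr};
  // A const method returning a mutable reference, like getDrainingListener() const above.
  DrainingListener& getDrainingListener() const { return *draining_listener; }
};

// Returns the factory to clone, or nullptr if the new listener must bind its own socket.
SocketFactory* findReusableSocketFactory(const std::vector<DrainingFilterChains>& draining,
                                         const std::string& new_address) {
  auto it = std::find_if(draining.cbegin(), draining.cend(),
                         [&new_address](const DrainingFilterChains& d) {
                           return d.getDrainingListener().factory.open &&
                                  d.getDrainingListener().hasCompatibleAddress(new_address);
                         });
  return it == draining.cend() ? nullptr : &it->getDrainingListener().factory;
}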
diff --git a/source/server/proto_descriptors.cc b/source/server/proto_descriptors.cc index 9638b84ba4f3..e94492c531cc 100644 --- a/source/server/proto_descriptors.cc +++ b/source/server/proto_descriptors.cc @@ -20,6 +20,7 @@ void validateProtoDescriptors() { "envoy.service.endpoint.v3.EndpointDiscoveryService.FetchEndpoints", "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints", "envoy.service.endpoint.v3.EndpointDiscoveryService.DeltaEndpoints", + "envoy.service.endpoint.v3.LocalityEndpointDiscoveryService.DeltaLocalityEndpoints", "envoy.service.health.v3.HealthDiscoveryService.FetchHealthCheck", "envoy.service.health.v3.HealthDiscoveryService.StreamHealthCheck", "envoy.service.listener.v3.ListenerDiscoveryService.FetchListeners", @@ -39,9 +40,10 @@ void validateProtoDescriptors() { } const auto types = { - "envoy.config.cluster.v3.Cluster", "envoy.config.endpoint.v3.ClusterLoadAssignment", - "envoy.config.listener.v3.Listener", "envoy.config.route.v3.RouteConfiguration", - "envoy.config.route.v3.VirtualHost", "envoy.extensions.transport_sockets.tls.v3.Secret", + "envoy.config.cluster.v3.Cluster", "envoy.config.endpoint.v3.ClusterLoadAssignment", + "envoy.config.listener.v3.Listener", "envoy.config.route.v3.RouteConfiguration", + "envoy.config.route.v3.VirtualHost", "envoy.extensions.transport_sockets.tls.v3.Secret", + "envoy.config.endpoint.v3.LbEndpoint", }; for (const auto& type : types) { diff --git a/source/server/server.cc b/source/server/server.cc index 658b77a5734b..6af174a4bd4e 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -31,12 +31,14 @@ #include "source/common/config/grpc_mux_impl.h" #include "source/common/config/new_grpc_mux_impl.h" #include "source/common/config/utility.h" +#include "source/common/config/xds_mux/grpc_mux_impl.h" #include "source/common/config/xds_resource.h" #include "source/common/http/codes.h" #include "source/common/http/headers.h" #include "source/common/local_info/local_info_impl.h" #include "source/common/memory/stats.h" #include "source/common/network/address_impl.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/network/socket_interface.h" #include "source/common/network/socket_interface_impl.h" #include "source/common/network/tcp_listener_impl.h" @@ -106,7 +108,7 @@ InstanceImpl::InstanceImpl( restarter_.initialize(*dispatcher_, *this); drain_manager_ = component_factory.createDrainManager(*this); - initialize(options, std::move(local_address), component_factory); + initialize(std::move(local_address), component_factory); } END_TRY catch (const EnvoyException& e) { @@ -362,11 +364,10 @@ void InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& MessageUtil::validate(bootstrap, validation_visitor); } -void InstanceImpl::initialize(const Options& options, - Network::Address::InstanceConstSharedPtr local_address, +void InstanceImpl::initialize(Network::Address::InstanceConstSharedPtr local_address, ComponentFactory& component_factory) { ENVOY_LOG(info, "initializing epoch {} (base id={}, hot restart version={})", - options.restartEpoch(), restarter_.baseId(), restarter_.version()); + options_.restartEpoch(), restarter_.baseId(), restarter_.version()); ENVOY_LOG(info, "statically linked extensions:"); for (const auto& ext : Envoy::Registry::FactoryCategoryRegistry::registeredFactories()) { @@ -374,7 +375,7 @@ void InstanceImpl::initialize(const Options& options, } // Handle configuration that needs to take place prior to the main configuration load. 
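For context on the proto_descriptors.cc hunk above: validateProtoDescriptors() only checks that the listed service methods and resource types resolve in protobuf's generated descriptor pool, which succeeds only when the generated code for them is linked into the binary. A tiny self-contained illustration of that kind of check, using a well-known protobuf type instead of an Envoy one:

#include <iostream>

#include "google/protobuf/descriptor.h"
#include "google/protobuf/duration.pb.h"

int main() {
  // Touch the generated type so its descriptor is registered with the generated pool.
  google::protobuf::Duration unused;
  (void)unused;

  const google::protobuf::DescriptorPool* pool =
      google::protobuf::DescriptorPool::generated_pool();
  const auto* descriptor = pool->FindMessageTypeByName("google.protobuf.Duration");
  std::cout << (descriptor != nullptr ? "descriptor linked" : "descriptor missing") << "\n";
  return 0;
}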
- InstanceUtil::loadBootstrapConfig(bootstrap_, options, + InstanceUtil::loadBootstrapConfig(bootstrap_, options_, messageValidationContext().staticValidationVisitor(), *api_); bootstrap_config_update_time_ = time_source_.systemTime(); @@ -413,10 +414,9 @@ void InstanceImpl::initialize(const Options& options, POOL_COUNTER_PREFIX(stats_store_, server_compilation_settings_stats_prefix), POOL_GAUGE_PREFIX(stats_store_, server_compilation_settings_stats_prefix), POOL_HISTOGRAM_PREFIX(stats_store_, server_compilation_settings_stats_prefix))}); - validation_context_.staticWarningValidationVisitor().setUnknownCounter( - server_stats_->static_unknown_fields_); - validation_context_.dynamicWarningValidationVisitor().setUnknownCounter( - server_stats_->dynamic_unknown_fields_); + validation_context_.setCounters(server_stats_->static_unknown_fields_, + server_stats_->dynamic_unknown_fields_, + server_stats_->wip_protos_); initialization_timer_ = std::make_unique( server_stats_->initialization_time_ms_, timeSource()); @@ -472,7 +472,7 @@ void InstanceImpl::initialize(const Options& options, local_info_ = std::make_unique( stats().symbolTable(), bootstrap_.node(), bootstrap_.node_context_params(), local_address, - options.serviceZone(), options.serviceClusterName(), options.serviceNodeName()); + options_.serviceZone(), options_.serviceClusterName(), options_.serviceNodeName()); Configuration::InitialImpl initial_config(bootstrap_); @@ -496,7 +496,7 @@ void InstanceImpl::initialize(const Options& options, // Initialize the overload manager early so other modules can register for actions. overload_manager_ = std::make_unique( *dispatcher_, stats_store_, thread_local_, bootstrap_.overload_manager(), - messageValidationContext().staticValidationVisitor(), *api_, options); + messageValidationContext().staticValidationVisitor(), *api_, options_); heap_shrinker_ = std::make_unique(*dispatcher_, *overload_manager_, stats_store_); @@ -579,7 +579,7 @@ void InstanceImpl::initialize(const Options& options, initial_config.initAdminAccessLog(bootstrap_, *this); if (initial_config.admin().address()) { - admin_->startHttpListener(initial_config.admin().accessLogs(), options.adminAddressPath(), + admin_->startHttpListener(initial_config.admin().accessLogs(), options_.adminAddressPath(), initial_config.admin().address(), initial_config.admin().socketOptions(), stats_store_.createScope("listener.admin.")); @@ -595,23 +595,11 @@ void InstanceImpl::initialize(const Options& options, // Once we have runtime we can initialize the SSL context manager. ssl_context_manager_ = createContextManager("ssl_context_manager", time_source_); - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - std::vector resolvers; - if (bootstrap_.has_dns_resolution_config()) { - dns_resolver_options.CopyFrom(bootstrap_.dns_resolution_config().dns_resolver_options()); - if (!bootstrap_.dns_resolution_config().resolvers().empty()) { - const auto& resolver_addrs = bootstrap_.dns_resolution_config().resolvers(); - resolvers.reserve(resolver_addrs.size()); - for (const auto& resolver_addr : resolver_addrs) { - resolvers.push_back(Network::Address::resolveProtoAddress(resolver_addr)); - } - } - } else { - // Field bool `use_tcp_for_dns_lookups` will be deprecated in future. To be backward compatible - // utilize bootstrap_.use_tcp_for_dns_lookups() if `bootstrap_.dns_resolver_options` is not set. 
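Both the earlier watchdog hunk (packing a default AbortActionConfig into the action's typed_config) and the DNS change in the hunk below (selecting a resolver factory from a TypedExtensionConfig rather than the hand-rolled branching on bootstrap fields shown above) lean on the same mechanism: a google.protobuf.Any whose type URL identifies the concrete config, and therefore the registered factory, to use. A self-contained pack/unpack round trip with a well-known type, shown only to illustrate the mechanism:

#include <iostream>

#include "google/protobuf/any.pb.h"
#include "google/protobuf/duration.pb.h"

int main() {
  // Pack a concrete config message into an opaque Any, as the typed_config fields do.
  google::protobuf::Duration timeout;
  timeout.set_seconds(5);

  google::protobuf::Any typed_config;
  typed_config.PackFrom(timeout);

  // A consumer inspects the type URL (or uses Is<T>()) to pick the matching factory,
  // then unpacks into the concrete type it expects.
  std::cout << "type_url: " << typed_config.type_url() << "\n";
  if (typed_config.Is<google::protobuf::Duration>()) {
    google::protobuf::Duration unpacked;
    if (typed_config.UnpackTo(&unpacked)) {
      std::cout << "seconds: " << unpacked.seconds() << "\n";
    }
  }
  return 0;
}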
- dns_resolver_options.set_use_tcp_for_dns_lookups(bootstrap_.use_tcp_for_dns_lookups()); - } - dns_resolver_ = dispatcher_->createDnsResolver(resolvers, dns_resolver_options); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + Network::DnsResolverFactory& dns_resolver_factory = + Network::createDnsResolverFactoryFromProto(bootstrap_, typed_dns_resolver_config); + dns_resolver_ = + dns_resolver_factory.createDnsResolver(dispatcher(), api(), typed_dns_resolver_config); cluster_manager_factory_ = std::make_unique( *admin_, Runtime::LoaderSingleton::get(), stats_store_, thread_local_, dns_resolver_, @@ -862,6 +850,8 @@ void InstanceImpl::terminate() { // TODO: figure out the correct fix: https://github.com/envoyproxy/envoy/issues/15072. Config::GrpcMuxImpl::shutdownAll(); Config::NewGrpcMuxImpl::shutdownAll(); + Config::XdsMux::GrpcMuxSotw::shutdownAll(); + Config::XdsMux::GrpcMuxDelta::shutdownAll(); if (overload_manager_) { overload_manager_->stop(); @@ -930,7 +920,7 @@ InstanceImpl::registerCallback(Stage stage, StageCallbackWithCompletion callback } void InstanceImpl::notifyCallbacksForStage(Stage stage, Event::PostCb completion_cb) { - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); const auto it = stage_callbacks_.find(stage); if (it != stage_callbacks_.end()) { for (const StageCallback& callback : it->second) { diff --git a/source/server/server.h b/source/server/server.h index d855b5ae46ec..19ac1b5ce169 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -71,6 +71,7 @@ struct ServerCompilationSettingsStats { COUNTER(envoy_bug_failures) \ COUNTER(dynamic_unknown_fields) \ COUNTER(static_unknown_fields) \ + COUNTER(wip_protos) \ COUNTER(dropped_stat_flushes) \ GAUGE(concurrency, NeverImport) \ GAUGE(days_until_first_cert_expiring, NeverImport) \ @@ -182,6 +183,7 @@ class ServerFactoryContextImpl : public Configuration::ServerFactoryContext, } Envoy::Runtime::Loader& runtime() override { return server_.runtime(); } Stats::Scope& scope() override { return *server_scope_; } + Stats::Scope& serverScope() override { return *server_scope_; } Singleton::Manager& singletonManager() override { return server_.singletonManager(); } ThreadLocal::Instance& threadLocal() override { return server_.threadLocal(); } Admin& admin() override { return server_.admin(); } @@ -302,7 +304,7 @@ class InstanceImpl final : Logger::Loggable, ProtobufTypes::MessagePtr dumpBootstrapConfig(); void flushStatsInternal(); void updateServerStats(); - void initialize(const Options& options, Network::Address::InstanceConstSharedPtr local_address, + void initialize(Network::Address::InstanceConstSharedPtr local_address, ComponentFactory& component_factory); void loadServerFlags(const absl::optional& flags_path); void startWorkers(); diff --git a/test/benchmark/main.cc b/test/benchmark/main.cc index 6ab63fb6a51d..3af910496320 100644 --- a/test/benchmark/main.cc +++ b/test/benchmark/main.cc @@ -43,6 +43,7 @@ int main(int argc, char** argv) { } TestEnvironment::initializeTestMain(argv[0]); + Thread::TestThread test_thread; // Suppressing non-error messages in benchmark tests. 
This hides warning // messages that appear when using a runtime feature when there isn't an initialized diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index f309fb942a7e..07b03106064b 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -120,7 +120,7 @@ name: accesslog path: /dev/null log_format: text_format_source: - inline_string: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %ROUTE_NAME% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n" + inline_string: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %ROUTE_NAME% %BYTES_RECEIVED% %BYTES_SENT% %UPSTREAM_WIRE_BYTES_RECEIVED% %UPSTREAM_WIRE_BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n" )EOF"; InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); @@ -135,10 +135,10 @@ name: accesslog log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); - EXPECT_EQ( - "[1999-01-01T00:00:00.000Z] \"GET / HTTP/1.1\" 0 UF route-test-name 1 2 3 - \"x.x.x.x\" " - "\"user-agent-set\" \"id\" \"host\"\n", - output_); + EXPECT_EQ("[1999-01-01T00:00:00.000Z] \"GET / HTTP/1.1\" 0 UF route-test-name 1 2 0 0 3 - " + "\"x.x.x.x\" " + "\"user-agent-set\" \"id\" \"host\"\n", + output_); } TEST_F(AccessLogImplTest, HeadersBytes) { diff --git a/test/common/common/BUILD b/test/common/common/BUILD index a4705d1bc45f..b5fb9a2ad472 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -103,6 +103,16 @@ envoy_cc_test( deps = ["//source/common/common:cleanup_lib"], ) +envoy_cc_test( + name = "dns_utils_test", + srcs = ["dns_utils_test.cc"], + deps = [ + "//source/common/common:dns_utils_lib", + "//test/test_common:test_runtime_lib", + "//test/test_common:utility_lib", + ], +) + envoy_cc_test( name = "mem_block_builder_test", srcs = ["mem_block_builder_test.cc"], diff --git a/test/common/common/backoff_strategy_test.cc b/test/common/common/backoff_strategy_test.cc index 67225242cde9..a5111809bf44 100644 --- a/test/common/common/backoff_strategy_test.cc +++ b/test/common/common/backoff_strategy_test.cc @@ -23,11 +23,15 @@ TEST(ExponentialBackOffStrategyTest, JitteredBackOffBasicReset) { ON_CALL(random, random()).WillByDefault(Return(27)); JitteredExponentialBackOffStrategy jittered_back_off(25, 30, random); - EXPECT_EQ(2, jittered_back_off.nextBackOffMs()); + EXPECT_EQ(2, jittered_back_off.nextBackOffMs()); // 25 % 27 EXPECT_EQ(27, jittered_back_off.nextBackOffMs()); jittered_back_off.reset(); EXPECT_EQ(2, jittered_back_off.nextBackOffMs()); // Should start from start + EXPECT_EQ(27, jittered_back_off.nextBackOffMs()); + + jittered_back_off.reset(26); + EXPECT_EQ(1, jittered_back_off.nextBackOffMs()); // 26 % 27 } TEST(ExponentialBackOffStrategyTest, JitteredBackOffDoesntOverflow) { @@ -77,6 +81,9 @@ TEST(ExponentialBackOffStrategyTest, JitteredBackOffWithMaxIntervalReset) { EXPECT_EQ(79, jittered_back_off.nextBackOffMs()); EXPECT_EQ(99, jittered_back_off.nextBackOffMs()); // Should return Max here EXPECT_EQ(99, jittered_back_off.nextBackOffMs()); + + jittered_back_off.reset(4); + EXPECT_EQ(3, 
jittered_back_off.nextBackOffMs()); } TEST(LowerBoundBackOffStrategyTest, JitteredBackOffWithLowRandomValue) { @@ -102,6 +109,9 @@ TEST(FixedBackOffStrategyTest, FixedBackOffBasicReset) { fixed_back_off.reset(); EXPECT_EQ(30, fixed_back_off.nextBackOffMs()); + + fixed_back_off.reset(20); + EXPECT_EQ(20, fixed_back_off.nextBackOffMs()); } } // namespace Envoy diff --git a/test/common/common/dns_utils_test.cc b/test/common/common/dns_utils_test.cc new file mode 100644 index 000000000000..3ea82592e71b --- /dev/null +++ b/test/common/common/dns_utils_test.cc @@ -0,0 +1,60 @@ +#include "source/common/common/dns_utils.h" +#include "source/common/network/utility.h" + +#include "test/test_common/test_runtime.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { + +TEST(DnsUtils, LegacyGenerateTest) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.allow_multiple_dns_addresses", "false"}}); + + std::list responses = + TestUtility::makeDnsResponse({"10.0.0.1", "10.0.0.2"}); + std::vector addresses = + DnsUtils::generateAddressList(responses, 2); + EXPECT_EQ(0, addresses.size()); +} + +TEST(DnsUtils, MultipleGenerateTest) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.allow_multiple_dns_addresses", "true"}}); + + std::list responses = + TestUtility::makeDnsResponse({"10.0.0.1", "10.0.0.2"}); + std::vector addresses = + DnsUtils::generateAddressList(responses, 2); + ASSERT_EQ(2, addresses.size()); + EXPECT_EQ(addresses[0]->asString(), "10.0.0.1:2"); + EXPECT_EQ(addresses[1]->asString(), "10.0.0.2:2"); +} + +TEST(DnsUtils, ListChanged) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.allow_multiple_dns_addresses", "true"}}); + + Network::Address::InstanceConstSharedPtr address1 = + Network::Utility::parseInternetAddress("10.0.0.1"); + Network::Address::InstanceConstSharedPtr address1_dup = + Network::Utility::parseInternetAddress("10.0.0.1"); + Network::Address::InstanceConstSharedPtr address2 = + Network::Utility::parseInternetAddress("10.0.0.2"); + + std::vector addresses1 = {address1, address2}; + std::vector addresses2 = {address1_dup, address2}; + EXPECT_FALSE(DnsUtils::listChanged(addresses1, addresses2)); + + std::vector addresses3 = {address2, address1}; + EXPECT_TRUE(DnsUtils::listChanged(addresses1, addresses3)); + EXPECT_TRUE(DnsUtils::listChanged(addresses1, {address2})); +} + +} // namespace +} // namespace Envoy diff --git a/test/common/common/stl_helpers_test.cc b/test/common/common/stl_helpers_test.cc index 4123ec9a0548..af98384ab3b4 100644 --- a/test/common/common/stl_helpers_test.cc +++ b/test/common/common/stl_helpers_test.cc @@ -6,7 +6,14 @@ namespace Envoy { -TEST(StlHelpersTest, TestOutputToStreamOperator) { +TEST(StlHelpersTest, TestPairOutputToStreamOperator) { + std::stringstream os; + std::pair v{10, "five"}; + os << v; + EXPECT_EQ("pair(10, five)", os.str()); +} + +TEST(StlHelpersTest, TestVectorOutputToStreamOperator) { std::stringstream os; std::vector v{1, 2, 3, 4, 5}; os << v; diff --git a/test/common/config/BUILD b/test/common/config/BUILD index bc783732c18c..639b2d44d394 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -75,6 +75,27 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "delta_subscription_state_old_test", + srcs = ["delta_subscription_state_old_test.cc"], + deps = 
[ + "//source/common/config:delta_subscription_state_lib", + "//source/common/config:grpc_subscription_lib", + "//source/common/config:new_grpc_mux_lib", + "//source/common/stats:isolated_store_lib", + "//test/mocks:common_lib", + "//test/mocks/config:config_mocks", + "//test/mocks/event:event_mocks", + "//test/mocks/grpc:grpc_mocks", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/test_common:logging_lib", + "//test/test_common:test_runtime_lib", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "sotw_subscription_state_test", srcs = ["sotw_subscription_state_test.cc"], @@ -240,6 +261,7 @@ envoy_cc_test_library( "//source/common/config:api_version_lib", "//source/common/config:grpc_mux_lib", "//source/common/config:grpc_subscription_lib", + "//source/common/config/xds_mux:grpc_mux_lib", "//test/mocks/config:config_mocks", "//test/mocks/event:event_mocks", "//test/mocks/grpc:grpc_mocks", @@ -260,6 +282,7 @@ envoy_cc_test_library( ":subscription_test_harness", "//source/common/common:utility_lib", "//source/common/config:new_grpc_mux_lib", + "//source/common/config/xds_mux:grpc_mux_lib", "//source/common/grpc:common_lib", "//test/mocks/config:config_mocks", "//test/mocks/event:event_mocks", @@ -421,6 +444,7 @@ envoy_cc_test( "//test/test_common:logging_lib", "//test/test_common:utility_lib", "@com_github_cncf_udpa//udpa/type/v1:pkg_cc_proto", + "@com_github_cncf_udpa//xds/type/v3:pkg_cc_proto", "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", diff --git a/test/common/config/datasource_test.cc b/test/common/config/datasource_test.cc index 8a5d0a632c97..070ff1631024 100644 --- a/test/common/config/datasource_test.cc +++ b/test/common/config/datasource_test.cc @@ -1,6 +1,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/core/v3/base.pb.validate.h" +#include "source/common/common/cleanup.h" #include "source/common/common/empty_string.h" #include "source/common/config/datasource.h" #include "source/common/http/message_impl.h" @@ -9,6 +10,7 @@ #include "test/mocks/event/mocks.h" #include "test/mocks/init/mocks.h" #include "test/mocks/upstream/cluster_manager.h" +#include "test/test_common/environment.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -586,6 +588,71 @@ TEST_F(AsyncDataSourceTest, BaseIntervalTest) { EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, config), EnvoyException); } +TEST(DataSourceTest, WellKnownEnvironmentVariableTest) { + envoy::config::core::v3::DataSource config; + + const std::string yaml = R"EOF( + environment_variable: + PATH + )EOF"; + TestUtility::loadFromYamlAndValidate(yaml, config); + + EXPECT_EQ(envoy::config::core::v3::DataSource::SpecifierCase::kEnvironmentVariable, + config.specifier_case()); + EXPECT_EQ(config.environment_variable(), "PATH"); + Api::ApiPtr api = Api::createApiForTest(); + const auto path_data = DataSource::read(config, false, *api); + EXPECT_FALSE(path_data.empty()); +} + +TEST(DataSourceTest, MissingEnvironmentVariableTest) { + envoy::config::core::v3::DataSource config; + + const std::string yaml = R"EOF( + environment_variable: + ThisVariableDoesntExist + )EOF"; + TestUtility::loadFromYamlAndValidate(yaml, config); + + EXPECT_EQ(envoy::config::core::v3::DataSource::SpecifierCase::kEnvironmentVariable, + config.specifier_case()); + 
EXPECT_EQ(config.environment_variable(), "ThisVariableDoesntExist"); + Api::ApiPtr api = Api::createApiForTest(); + EXPECT_THROW_WITH_MESSAGE(DataSource::read(config, false, *api), EnvoyException, + "Environment variable doesn't exist: ThisVariableDoesntExist"); + EXPECT_THROW_WITH_MESSAGE(DataSource::read(config, true, *api), EnvoyException, + "Environment variable doesn't exist: ThisVariableDoesntExist"); +} + +TEST(DataSourceTest, EmptyEnvironmentVariableTest) { + envoy::config::core::v3::DataSource config; + TestEnvironment::setEnvVar("ThisVariableIsEmpty", "", 1); + Envoy::Cleanup cleanup([]() { TestEnvironment::unsetEnvVar("ThisVariableIsEmpty"); }); + + const std::string yaml = R"EOF( + environment_variable: + ThisVariableIsEmpty + )EOF"; + TestUtility::loadFromYamlAndValidate(yaml, config); + + EXPECT_EQ(envoy::config::core::v3::DataSource::SpecifierCase::kEnvironmentVariable, + config.specifier_case()); + EXPECT_EQ(config.environment_variable(), "ThisVariableIsEmpty"); + Api::ApiPtr api = Api::createApiForTest(); +#ifdef WIN32 + // Windows doesn't support empty environment variables. + EXPECT_THROW_WITH_MESSAGE(DataSource::read(config, false, *api), EnvoyException, + "Environment variable doesn't exist: ThisVariableIsEmpty"); + EXPECT_THROW_WITH_MESSAGE(DataSource::read(config, true, *api), EnvoyException, + "Environment variable doesn't exist: ThisVariableIsEmpty"); +#else + EXPECT_THROW_WITH_MESSAGE(DataSource::read(config, false, *api), EnvoyException, + "DataSource cannot be empty"); + const auto environment_variable = DataSource::read(config, true, *api); + EXPECT_TRUE(environment_variable.empty()); +#endif +} + } // namespace } // namespace Config } // namespace Envoy diff --git a/test/common/config/delta_subscription_impl_test.cc b/test/common/config/delta_subscription_impl_test.cc index 13ab2522e325..159cef64cfae 100644 --- a/test/common/config/delta_subscription_impl_test.cc +++ b/test/common/config/delta_subscription_impl_test.cc @@ -11,9 +11,10 @@ namespace Envoy { namespace Config { namespace { -class DeltaSubscriptionImplTest : public DeltaSubscriptionTestHarness, public testing::Test { +class DeltaSubscriptionImplTest : public DeltaSubscriptionTestHarness, + public testing::TestWithParam { protected: - DeltaSubscriptionImplTest() = default; + DeltaSubscriptionImplTest() : DeltaSubscriptionTestHarness(GetParam()){}; // We need to destroy the subscription before the test's destruction, because the subscription's // destructor removes its watch from the NewGrpcMuxImpl, and that removal process involves @@ -21,7 +22,10 @@ class DeltaSubscriptionImplTest : public DeltaSubscriptionTestHarness, public te void TearDown() override { doSubscriptionTearDown(); } }; -TEST_F(DeltaSubscriptionImplTest, UpdateResourcesCausesRequest) { +INSTANTIATE_TEST_SUITE_P(DeltaSubscriptionImplTest, DeltaSubscriptionImplTest, + testing::ValuesIn({LegacyOrUnified::Legacy, LegacyOrUnified::Unified})); + +TEST_P(DeltaSubscriptionImplTest, UpdateResourcesCausesRequest) { startSubscription({"name1", "name2", "name3"}); expectSendMessage({"name4"}, {"name1", "name2"}, Grpc::Status::WellKnownGrpcStatus::Ok, "", {}); subscription_->updateResourceInterest({"name3", "name4"}); @@ -39,7 +43,7 @@ TEST_F(DeltaSubscriptionImplTest, UpdateResourcesCausesRequest) { // Also demonstrates the collapsing of subscription interest updates into a single // request. 
(This collapsing happens any time multiple updates arrive before a request // can be sent, not just with pausing: rate limiting or a down gRPC stream would also do it). -TEST_F(DeltaSubscriptionImplTest, PauseHoldsRequest) { +TEST_P(DeltaSubscriptionImplTest, PauseHoldsRequest) { startSubscription({"name1", "name2", "name3"}); auto resume_sub = subscription_->pause(); // If nested pause wasn't handled correctly, the single expectedSendMessage below would be @@ -56,14 +60,14 @@ TEST_F(DeltaSubscriptionImplTest, PauseHoldsRequest) { subscription_->updateResourceInterest({"name3", "name4"}); } -TEST_F(DeltaSubscriptionImplTest, ResponseCausesAck) { +TEST_P(DeltaSubscriptionImplTest, ResponseCausesAck) { startSubscription({"name1"}); deliverConfigUpdate({"name1"}, "someversion", true); } // Checks that after a pause(), no ACK requests are sent until resume(), but that after the // resume, *all* ACKs that arrived during the pause are sent (in order). -TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { +TEST_P(DeltaSubscriptionImplTest, PauseQueuesAcks) { startSubscription({"name1", "name2", "name3"}); auto resume_sub = subscription_->pause(); // The server gives us our first version of resource name1. @@ -77,8 +81,7 @@ TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { message->set_nonce(nonce); message->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment); nonce_acks_required_.push(nonce); - static_cast(subscription_->grpcMux().get()) - ->onDiscoveryResponse(std::move(message), control_plane_stats_); + onDiscoveryResponse(std::move(message)); } // The server gives us our first version of resource name2. // subscription_ now wants to ACK name1 and then name2 (but can't due to pause). @@ -91,8 +94,7 @@ TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { message->set_nonce(nonce); message->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment); nonce_acks_required_.push(nonce); - static_cast(subscription_->grpcMux().get()) - ->onDiscoveryResponse(std::move(message), control_plane_stats_); + onDiscoveryResponse(std::move(message)); } // The server gives us an updated version of resource name1. // subscription_ now wants to ACK name1A, then name2, then name1B (but can't due to pause). @@ -105,8 +107,7 @@ TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { message->set_nonce(nonce); message->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment); nonce_acks_required_.push(nonce); - static_cast(subscription_->grpcMux().get()) - ->onDiscoveryResponse(std::move(message), control_plane_stats_); + onDiscoveryResponse(std::move(message)); } // All ACK sendMessage()s will happen upon calling resume(). EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)) @@ -122,7 +123,11 @@ TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { // in the correct order. 
} -TEST(DeltaSubscriptionImplFixturelessTest, NoGrpcStream) { +class DeltaSubscriptionNoGrpcStreamTest : public testing::TestWithParam {}; +INSTANTIATE_TEST_SUITE_P(DeltaSubscriptionNoGrpcStreamTest, DeltaSubscriptionNoGrpcStreamTest, + testing::ValuesIn({LegacyOrUnified::Legacy, LegacyOrUnified::Unified})); + +TEST_P(DeltaSubscriptionNoGrpcStreamTest, NoGrpcStream) { Stats::IsolatedStoreImpl stats_store; SubscriptionStats stats(Utility::generateStats(stats_store)); @@ -141,9 +146,16 @@ TEST(DeltaSubscriptionImplFixturelessTest, NoGrpcStream) { const Protobuf::MethodDescriptor* method_descriptor = Protobuf::DescriptorPool::generated_pool()->FindMethodByName( "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints"); - NewGrpcMuxImplSharedPtr xds_context = std::make_shared( - std::unique_ptr(async_client), dispatcher, *method_descriptor, random, - stats_store, rate_limit_settings, local_info); + GrpcMuxSharedPtr xds_context; + if (GetParam() == LegacyOrUnified::Unified) { + xds_context = std::make_shared( + std::unique_ptr(async_client), dispatcher, *method_descriptor, + random, stats_store, rate_limit_settings, local_info, false); + } else { + xds_context = std::make_shared( + std::unique_ptr(async_client), dispatcher, *method_descriptor, + random, stats_store, rate_limit_settings, local_info); + } GrpcSubscriptionImplPtr subscription = std::make_unique( xds_context, callbacks, resource_decoder, stats, Config::TypeUrl::get().ClusterLoadAssignment, diff --git a/test/common/config/delta_subscription_state_old_test.cc b/test/common/config/delta_subscription_state_old_test.cc new file mode 100644 index 000000000000..6d7ceda982ca --- /dev/null +++ b/test/common/config/delta_subscription_state_old_test.cc @@ -0,0 +1,702 @@ +#include + +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/service/discovery/v3/discovery.pb.h" + +#include "source/common/config/delta_subscription_state.h" +#include "source/common/config/utility.h" +#include "source/common/stats/isolated_store_impl.h" + +#include "test/mocks/config/mocks.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/local_info/mocks.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::IsSubstring; +using testing::NiceMock; +using testing::Throw; +using testing::UnorderedElementsAre; +using testing::UnorderedElementsAreArray; + +namespace Envoy { +namespace Config { +namespace { + +const char TypeUrl[] = "type.googleapis.com/envoy.config.cluster.v3.Cluster"; + +class OldDeltaSubscriptionStateTestBase : public testing::Test { +protected: + OldDeltaSubscriptionStateTestBase(const std::string& type_url, + const absl::flat_hash_set initial_resources = { + "name1", "name2", "name3"}) { + ttl_timer_ = new Event::MockTimer(&dispatcher_); + + // Disable the explicit wildcard resource feature, so OldDeltaSubscriptionState will be picked + // up. 
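The TEST_F to TEST_P conversion above reruns each delta-subscription test against both the legacy mux and the new unified xds_mux implementation by parameterizing the fixture on an enum (the TestWithParam template argument is stripped in this rendering, but the ValuesIn list shows it is the LegacyOrUnified selector). A minimal, self-contained illustration of that gtest pattern, with a stand-in enum of the same name:

#include <string>

#include "gtest/gtest.h"

// Stand-in for the harness's selector enum; the real one lives in the test harness headers.
enum class LegacyOrUnified { Legacy, Unified };

std::string describe(LegacyOrUnified mode) {
  return mode == LegacyOrUnified::Unified ? "unified" : "legacy";
}

class MuxFlavorTest : public testing::TestWithParam<LegacyOrUnified> {};

// Each TEST_P body runs once per value below, so both code paths share one test suite.
INSTANTIATE_TEST_SUITE_P(MuxFlavors, MuxFlavorTest,
                         testing::Values(LegacyOrUnified::Legacy, LegacyOrUnified::Unified));

TEST_P(MuxFlavorTest, DescribesItself) { EXPECT_FALSE(describe(GetParam()).empty()); }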
+ { + TestScopedRuntime scoped_runtime_; + Runtime::LoaderSingleton::getExisting()->mergeValues({ + {"envoy.restart_features.explicit_wildcard_resource", "false"}, + }); + state_ = std::make_unique(type_url, callbacks_, + local_info_, dispatcher_); + } + updateSubscriptionInterest(initial_resources, {}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), + // UnorderedElementsAre("name1", "name2", "name3")); + UnorderedElementsAreArray(initial_resources.cbegin(), initial_resources.cend())); + } + + void updateSubscriptionInterest(const absl::flat_hash_set& cur_added, + const absl::flat_hash_set& cur_removed) { + state_->updateSubscriptionInterest(cur_added, cur_removed); + } + + std::unique_ptr getNextRequestAckless() { + return std::make_unique( + state_->getNextRequestAckless()); + } + + UpdateAck + handleResponse(const envoy::service::discovery::v3::DeltaDiscoveryResponse& response_proto) { + return state_->handleResponse(response_proto); + } + + UpdateAck deliverDiscoveryResponse( + const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& version_info, absl::optional nonce = absl::nullopt, + bool expect_config_update_call = true, absl::optional updated_resources = {}) { + envoy::service::discovery::v3::DeltaDiscoveryResponse message; + *message.mutable_resources() = added_resources; + *message.mutable_removed_resources() = removed_resources; + message.set_system_version_info(version_info); + if (nonce.has_value()) { + message.set_nonce(nonce.value()); + } + EXPECT_CALL(callbacks_, onConfigUpdate(_, _, _)) + .Times(expect_config_update_call ? 1 : 0) + .WillRepeatedly(Invoke([updated_resources](const auto& added, const auto&, const auto&) { + if (updated_resources) { + EXPECT_EQ(added.size(), *updated_resources); + } + })); + return handleResponse(message); + } + + UpdateAck deliverBadDiscoveryResponse( + const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& version_info, std::string nonce, std::string error_message) { + envoy::service::discovery::v3::DeltaDiscoveryResponse message; + *message.mutable_resources() = added_resources; + *message.mutable_removed_resources() = removed_resources; + message.set_system_version_info(version_info); + message.set_nonce(nonce); + EXPECT_CALL(callbacks_, onConfigUpdate(_, _, _)).WillOnce(Throw(EnvoyException(error_message))); + return handleResponse(message); + } + + void markStreamFresh() { state_->markStreamFresh(); } + + bool subscriptionUpdatePending() { return state_->subscriptionUpdatePending(); } + + NiceMock callbacks_; + NiceMock local_info_; + NiceMock dispatcher_; + Event::MockTimer* ttl_timer_; + // We start out interested in three resources: name1, name2, and name3. + std::unique_ptr state_; +}; + +Protobuf::RepeatedPtrField +populateRepeatedResource(std::vector> items) { + Protobuf::RepeatedPtrField add_to; + for (const auto& item : items) { + auto* resource = add_to.Add(); + resource->set_name(item.first); + resource->set_version(item.second); + } + return add_to; +} + +class OldDeltaSubscriptionStateTest : public OldDeltaSubscriptionStateTestBase { +public: + OldDeltaSubscriptionStateTest() : OldDeltaSubscriptionStateTestBase(TypeUrl) {} +}; + +// Delta subscription state of a wildcard subscription request. 
+class OldWildcardDeltaSubscriptionStateTest : public OldDeltaSubscriptionStateTestBase { +public: + OldWildcardDeltaSubscriptionStateTest() : OldDeltaSubscriptionStateTestBase(TypeUrl, {}) {} +}; + +// Basic gaining/losing interest in resources should lead to subscription updates. +TEST_F(OldDeltaSubscriptionStateTest, SubscribeAndUnsubscribe) { + { + updateSubscriptionInterest({"name4"}, {"name1"}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("name4")); + EXPECT_THAT(cur_request->resource_names_unsubscribe(), UnorderedElementsAre("name1")); + } + { + updateSubscriptionInterest({"name1"}, {"name3", "name4"}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("name1")); + EXPECT_THAT(cur_request->resource_names_unsubscribe(), UnorderedElementsAre("name3", "name4")); + } +} + +// Resources has no subscriptions should not be tracked. +TEST_F(OldDeltaSubscriptionStateTest, NewPushDoesntAddUntrackedResources) { + { // Add "name4", "name5", "name6" and remove "name1", "name2", "name3". + updateSubscriptionInterest({"name4", "name5", "name6"}, {"name1", "name2", "name3"}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), + UnorderedElementsAre("name4", "name5", "name6")); + EXPECT_THAT(cur_request->resource_names_unsubscribe(), + UnorderedElementsAre("name1", "name2", "name3")); + } + { + // On Reconnection, only "name4", "name5", "name6" are sent. + markStreamFresh(); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), + UnorderedElementsAre("name4", "name5", "name6")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + EXPECT_TRUE(cur_request->initial_resource_versions().empty()); + } + // The xDS server's first response includes removed items name1 and 2, and a + // completely unrelated resource "bluhbluh". + { + Protobuf::RepeatedPtrField added_resources = + populateRepeatedResource({{"name1", "version1A"}, + {"bluhbluh", "bluh"}, + {"name6", "version6A"}, + {"name2", "version2A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, "debug1", "nonce1"); + EXPECT_EQ("nonce1", ack.nonce_); + EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); + } + { // Simulate a stream reconnection, just to see the current resource_state_. + markStreamFresh(); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), + UnorderedElementsAre("name4", "name5", "name6")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + ASSERT_EQ(cur_request->initial_resource_versions().size(), 1); + EXPECT_TRUE(cur_request->initial_resource_versions().contains("name6")); + EXPECT_EQ(cur_request->initial_resource_versions().at("name6"), "version6A"); + } +} + +// Delta xDS reliably queues up and sends all discovery requests, even in situations where it isn't +// strictly necessary. E.g.: if you subscribe but then unsubscribe to a given resource, all before a +// request was able to be sent, two requests will be sent. The following tests demonstrate this. +// +// If Envoy decided it wasn't interested in a resource and then (before a request was sent) decided +// it was again, for all we know, it dropped that resource in between and needs to retrieve it +// again. So, we *should* send a request "re-"subscribing. 
This means that the server needs to +// interpret the resource_names_subscribe field as "send these resources even if you think Envoy +// already has them". +TEST_F(OldDeltaSubscriptionStateTest, RemoveThenAdd) { + updateSubscriptionInterest({}, {"name3"}); + updateSubscriptionInterest({"name3"}, {}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("name3")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); +} + +// Due to how our implementation provides the required behavior tested in RemoveThenAdd, the +// add-then-remove case *also* causes the resource to be referred to in the request (as an +// unsubscribe). +// Unlike the remove-then-add case, this one really is unnecessary, and ideally we would have +// the request simply not include any mention of the resource. Oh well. +// This test is just here to illustrate that this behavior exists, not to enforce that it +// should be like this. What *is* important: the server must happily and cleanly ignore +// "unsubscribe from [resource name I have never before referred to]" requests. +TEST_F(OldDeltaSubscriptionStateTest, AddThenRemove) { + updateSubscriptionInterest({"name4"}, {}); + updateSubscriptionInterest({}, {"name4"}); + auto cur_request = getNextRequestAckless(); + EXPECT_TRUE(cur_request->resource_names_subscribe().empty()); + EXPECT_THAT(cur_request->resource_names_unsubscribe(), UnorderedElementsAre("name4")); +} + +// add/remove/add == add. +TEST_F(OldDeltaSubscriptionStateTest, AddRemoveAdd) { + updateSubscriptionInterest({"name4"}, {}); + updateSubscriptionInterest({}, {"name4"}); + updateSubscriptionInterest({"name4"}, {}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("name4")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); +} + +// remove/add/remove == remove. +TEST_F(OldDeltaSubscriptionStateTest, RemoveAddRemove) { + updateSubscriptionInterest({}, {"name3"}); + updateSubscriptionInterest({"name3"}, {}); + updateSubscriptionInterest({}, {"name3"}); + auto cur_request = getNextRequestAckless(); + EXPECT_TRUE(cur_request->resource_names_subscribe().empty()); + EXPECT_THAT(cur_request->resource_names_unsubscribe(), UnorderedElementsAre("name3")); +} + +// Starts with 1,2,3. 4 is added/removed/added. In those same updates, 1,2,3 are +// removed/added/removed. End result should be 4 added and 1,2,3 removed. +TEST_F(OldDeltaSubscriptionStateTest, BothAddAndRemove) { + updateSubscriptionInterest({"name4"}, {"name1", "name2", "name3"}); + updateSubscriptionInterest({"name1", "name2", "name3"}, {"name4"}); + updateSubscriptionInterest({"name4"}, {"name1", "name2", "name3"}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("name4")); + EXPECT_THAT(cur_request->resource_names_unsubscribe(), + UnorderedElementsAre("name1", "name2", "name3")); +} + +TEST_F(OldDeltaSubscriptionStateTest, CumulativeUpdates) { + updateSubscriptionInterest({"name4"}, {}); + updateSubscriptionInterest({"name5"}, {}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("name4", "name5")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); +} + +// Verifies that a sequence of good and bad responses from the server all get the appropriate +// ACKs/NACKs from Envoy. 
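The AckGenerated test below asserts exactly this shape: an accepted response produces an ACK that carries the response nonce with an OK status, while a rejected response produces a NACK whose (possibly huge) error detail gets truncated. The following sketch is plain C++17 with invented names (ToyAck, buildAck) and an arbitrary 64-byte cap; it only illustrates the expected behavior and is not Envoy's actual UpdateAck handling.

```cpp
#include <cassert>
#include <cstddef>
#include <optional>
#include <string>

// Toy ACK shape: nonce plus an error detail that stays empty on acceptance.
struct ToyAck {
  std::string nonce;
  int error_code = 0;        // 0 plays the role of Grpc::Status::WellKnownGrpcStatus::Ok.
  std::string error_message; // Empty for an ACK, populated (possibly truncated) for a NACK.
};

// `rejection` holds the exception message when the consuming onConfigUpdate() rejected the
// config. The 64-byte cap is an illustrative stand-in for truncation, not the real limit.
ToyAck buildAck(const std::string& nonce, const std::optional<std::string>& rejection,
                std::size_t max_error_bytes = 64) {
  ToyAck ack;
  ack.nonce = nonce;
  if (rejection.has_value()) {
    ack.error_code = 13; // Any non-OK code marks the response as NACKed.
    ack.error_message = rejection->substr(0, max_error_bytes);
    if (rejection->size() > max_error_bytes) {
      ack.error_message += "...(truncated)";
    }
  }
  return ack;
}

int main() {
  assert(buildAck("nonce1", std::nullopt).error_code == 0); // Accepted update -> ACK.
  const ToyAck nack = buildAck("nonce3", std::string(1 << 20, 'A'));
  assert(nack.error_code != 0);                   // Rejected update -> NACK.
  assert(nack.error_message.size() < (1u << 20)); // Oversized error detail got truncated.
  return 0;
}
```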
+TEST_F(OldDeltaSubscriptionStateTest, AckGenerated) { + // The xDS server's first response includes items for name1 and 2, but not 3. + { + Protobuf::RepeatedPtrField added_resources = + populateRepeatedResource({{"name1", "version1A"}, {"name2", "version2A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, "debug1", "nonce1"); + EXPECT_EQ("nonce1", ack.nonce_); + EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); + } + // The next response updates 1 and 2, and adds 3. + { + Protobuf::RepeatedPtrField added_resources = + populateRepeatedResource( + {{"name1", "version1B"}, {"name2", "version2B"}, {"name3", "version3A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, "debug2", "nonce2"); + EXPECT_EQ("nonce2", ack.nonce_); + EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); + } + // The next response tries but fails to update all 3, and so should produce a NACK. + { + Protobuf::RepeatedPtrField added_resources = + populateRepeatedResource( + {{"name1", "version1C"}, {"name2", "version2C"}, {"name3", "version3B"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + UpdateAck ack = deliverBadDiscoveryResponse(added_resources, {}, "debug3", "nonce3", "oh no"); + EXPECT_EQ("nonce3", ack.nonce_); + EXPECT_NE(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); + } + // The last response successfully updates all 3. + { + Protobuf::RepeatedPtrField added_resources = + populateRepeatedResource( + {{"name1", "version1D"}, {"name2", "version2D"}, {"name3", "version3C"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, "debug4", "nonce4"); + EXPECT_EQ("nonce4", ack.nonce_); + EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); + } + // Bad response error detail is truncated if it's too large. + { + const std::string very_large_error_message(1 << 20, 'A'); + Protobuf::RepeatedPtrField added_resources = + populateRepeatedResource( + {{"name1", "version1D"}, {"name2", "version2D"}, {"name3", "version3D"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + UpdateAck ack = deliverBadDiscoveryResponse(added_resources, {}, "debug5", "nonce5", + very_large_error_message); + EXPECT_EQ("nonce5", ack.nonce_); + EXPECT_NE(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); + EXPECT_TRUE(absl::EndsWith(ack.error_detail_.message(), "AAAAAAA...(truncated)")); + EXPECT_LT(ack.error_detail_.message().length(), very_large_error_message.length()); + } +} + +// Tests population of the initial_resource_versions map in the first request of a new stream. +// Tests that +// 1) resources we have a version of are present in the map, +// 2) resources we are interested in but don't have are not present, and +// 3) resources we have lost interest in are not present. +TEST_F(OldDeltaSubscriptionStateTest, ResourceGoneLeadsToBlankInitialVersion) { + { + // The xDS server's first update includes items for name1 and 2, but not 3. 
+ Protobuf::RepeatedPtrField add1_2 = + populateRepeatedResource({{"name1", "version1A"}, {"name2", "version2A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + deliverDiscoveryResponse(add1_2, {}, "debugversion1"); + markStreamFresh(); // simulate a stream reconnection + auto cur_request = getNextRequestAckless(); + EXPECT_EQ("version1A", cur_request->initial_resource_versions().at("name1")); + EXPECT_EQ("version2A", cur_request->initial_resource_versions().at("name2")); + EXPECT_EQ(cur_request->initial_resource_versions().end(), + cur_request->initial_resource_versions().find("name3")); + } + + { + // The next update updates 1, removes 2, and adds 3. The map should then have 1 and 3. + Protobuf::RepeatedPtrField add1_3 = + populateRepeatedResource({{"name1", "version1B"}, {"name3", "version3A"}}); + Protobuf::RepeatedPtrField remove2; + *remove2.Add() = "name2"; + EXPECT_CALL(*ttl_timer_, disableTimer()).Times(2); + deliverDiscoveryResponse(add1_3, remove2, "debugversion2"); + markStreamFresh(); // simulate a stream reconnection + auto cur_request = getNextRequestAckless(); + EXPECT_EQ("version1B", cur_request->initial_resource_versions().at("name1")); + EXPECT_EQ(cur_request->initial_resource_versions().end(), + cur_request->initial_resource_versions().find("name2")); + EXPECT_EQ("version3A", cur_request->initial_resource_versions().at("name3")); + } + + { + // The next update removes 1 and 3. The map we send the server should be empty... + Protobuf::RepeatedPtrField remove1_3; + *remove1_3.Add() = "name1"; + *remove1_3.Add() = "name3"; + deliverDiscoveryResponse({}, remove1_3, "debugversion3"); + markStreamFresh(); // simulate a stream reconnection + auto cur_request = getNextRequestAckless(); + EXPECT_TRUE(cur_request->initial_resource_versions().empty()); + } + + { + // ...but our own map should remember our interest. In particular, losing interest in a + // resource should cause its name to appear in the next request's resource_names_unsubscribe. + updateSubscriptionInterest({"name4"}, {"name1", "name2"}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("name4")); + EXPECT_THAT(cur_request->resource_names_unsubscribe(), UnorderedElementsAre("name1", "name2")); + } +} + +// For non-wildcard subscription, upon a reconnection, the server is supposed to assume a +// blank slate for the Envoy's state (hence the need for initial_resource_versions). +// The resource_names_subscribe of the first message must therefore be every resource the +// Envoy is interested in. +// +// resource_names_unsubscribe, on the other hand, is always blank in the first request - even if, +// in between the last request of the last stream and the first request of the new stream, Envoy +// lost interest in a resource. The unsubscription implicitly takes effect by simply saying +// nothing about the resource in the newly reconnected stream. +TEST_F(OldDeltaSubscriptionStateTest, SubscribeAndUnsubscribeAfterReconnect) { + Protobuf::RepeatedPtrField add1_2 = + populateRepeatedResource({{"name1", "version1A"}, {"name2", "version2A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + deliverDiscoveryResponse(add1_2, {}, "debugversion1"); + + updateSubscriptionInterest({"name4"}, {"name1"}); + markStreamFresh(); // simulate a stream reconnection + auto cur_request = getNextRequestAckless(); + // Regarding the resource_names_subscribe field: + // name1: do not include: we lost interest. 
+ // name2: yes do include: we are interested, its non-wildcard, and we have a version of it. + // name3: yes do include: even though we don't have a version of it, we are interested. + // name4: yes do include: we are newly interested. (If this wasn't a stream reconnect, only + // name4 would belong in this subscribe field). + EXPECT_THAT(cur_request->resource_names_subscribe(), + UnorderedElementsAre("name2", "name3", "name4")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); +} + +// For wildcard subscription, upon a reconnection, the server is supposed to assume a +// blank slate for the Envoy's state (hence the need for initial_resource_versions), and +// the resource_names_subscribe and resource_names_unsubscribe must be empty (as is expected +// of every wildcard first message). This is true even if in between the last request of the +// last stream and the first request of the new stream, Envoy gained or lost interest in a +// resource. The subscription & unsubscription implicitly takes effect by simply requesting a +// wildcard subscription in the newly reconnected stream. +TEST_F(OldWildcardDeltaSubscriptionStateTest, SubscribeAndUnsubscribeAfterReconnect) { + Protobuf::RepeatedPtrField add1_2 = + populateRepeatedResource({{"name1", "version1A"}, {"name2", "version2A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + deliverDiscoveryResponse(add1_2, {}, "debugversion1"); + + updateSubscriptionInterest({"name3"}, {"name1"}); + markStreamFresh(); // simulate a stream reconnection + auto cur_request = getNextRequestAckless(); + // Regarding the resource_names_subscribe field: + // name1: do not include: we lost interest. + // name2: do not include: we are interested, but for wildcard it shouldn't be provided. + // name4: do not include: although we are newly interested, an initial wildcard request + // must be with no resources. + EXPECT_TRUE(cur_request->resource_names_subscribe().empty()); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); +} + +// All resources from the server should be tracked. +TEST_F(OldWildcardDeltaSubscriptionStateTest, AllResourcesFromServerAreTrackedInWildcardXDS) { + { // Add "name4", "name5", "name6" and remove "name1", "name2", "name3". + updateSubscriptionInterest({"name4", "name5", "name6"}, {"name1", "name2", "name3"}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), + UnorderedElementsAre("name4", "name5", "name6")); + EXPECT_THAT(cur_request->resource_names_unsubscribe(), + UnorderedElementsAre("name1", "name2", "name3")); + } + { + // On Reconnection, only "name4", "name5", "name6" are sent. + markStreamFresh(); + auto cur_request = getNextRequestAckless(); + EXPECT_TRUE(cur_request->resource_names_subscribe().empty()); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + EXPECT_TRUE(cur_request->initial_resource_versions().empty()); + } + // The xDS server's first response includes removed items name1 and 2, and a + // completely unrelated resource "bluhbluh". 
+ { + Protobuf::RepeatedPtrField added_resources = + populateRepeatedResource({{"name1", "version1A"}, + {"bluhbluh", "bluh"}, + {"name6", "version6A"}, + {"name2", "version2A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, "debug1", "nonce1"); + EXPECT_EQ("nonce1", ack.nonce_); + EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); + } + { // Simulate a stream reconnection, just to see the current resource_state_. + markStreamFresh(); + auto cur_request = getNextRequestAckless(); + EXPECT_TRUE(cur_request->resource_names_subscribe().empty()); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + ASSERT_EQ(cur_request->initial_resource_versions().size(), 4); + EXPECT_EQ(cur_request->initial_resource_versions().at("name1"), "version1A"); + EXPECT_EQ(cur_request->initial_resource_versions().at("bluhbluh"), "bluh"); + EXPECT_EQ(cur_request->initial_resource_versions().at("name6"), "version6A"); + EXPECT_EQ(cur_request->initial_resource_versions().at("name2"), "version2A"); + } +} + +// initial_resource_versions should not be present on messages after the first in a stream. +TEST_F(OldDeltaSubscriptionStateTest, InitialVersionMapFirstMessageOnly) { + // First, verify that the first message of a new stream sends initial versions. + { + // The xDS server's first update gives us all three resources. + Protobuf::RepeatedPtrField add_all = + populateRepeatedResource( + {{"name1", "version1A"}, {"name2", "version2A"}, {"name3", "version3A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + deliverDiscoveryResponse(add_all, {}, "debugversion1"); + markStreamFresh(); // simulate a stream reconnection + auto cur_request = getNextRequestAckless(); + EXPECT_EQ("version1A", cur_request->initial_resource_versions().at("name1")); + EXPECT_EQ("version2A", cur_request->initial_resource_versions().at("name2")); + EXPECT_EQ("version3A", cur_request->initial_resource_versions().at("name3")); + } + // Then, after updating the resources but not reconnecting the stream, verify that initial + // versions are not sent. + { + updateSubscriptionInterest({"name4"}, {}); + // The xDS server updates our resources, and gives us our newly requested one too. + Protobuf::RepeatedPtrField add_all = + populateRepeatedResource({{"name1", "version1B"}, + {"name2", "version2B"}, + {"name3", "version3B"}, + {"name4", "version4A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + deliverDiscoveryResponse(add_all, {}, "debugversion2"); + auto cur_request = getNextRequestAckless(); + EXPECT_TRUE(cur_request->initial_resource_versions().empty()); + } +} + +TEST_F(OldDeltaSubscriptionStateTest, CheckUpdatePending) { + // Note that the test fixture ctor causes the first request to be "sent", so we start in the + // middle of a stream, with our initially interested resources having been requested already. 
+ EXPECT_FALSE(subscriptionUpdatePending()); + updateSubscriptionInterest({}, {}); // no change + EXPECT_FALSE(subscriptionUpdatePending()); + markStreamFresh(); + EXPECT_TRUE(subscriptionUpdatePending()); // no change, BUT fresh stream + updateSubscriptionInterest({}, {"name3"}); // one removed + EXPECT_TRUE(subscriptionUpdatePending()); + updateSubscriptionInterest({"name3"}, {}); // one added + EXPECT_TRUE(subscriptionUpdatePending()); +} + +// The next three tests test that duplicate resource names (whether additions or removals) cause +// DeltaSubscriptionState to reject the update without even trying to hand it to the consuming +// API's onConfigUpdate(). +TEST_F(OldDeltaSubscriptionStateTest, DuplicatedAdd) { + Protobuf::RepeatedPtrField additions = + populateRepeatedResource({{"name1", "version1A"}, {"name1", "sdfsdfsdfds"}}); + UpdateAck ack = deliverDiscoveryResponse(additions, {}, "debugversion1", absl::nullopt, false); + EXPECT_EQ("duplicate name name1 found among added/updated resources", + ack.error_detail_.message()); +} + +TEST_F(OldDeltaSubscriptionStateTest, DuplicatedRemove) { + Protobuf::RepeatedPtrField removals; + *removals.Add() = "name1"; + *removals.Add() = "name1"; + UpdateAck ack = deliverDiscoveryResponse({}, removals, "debugversion1", absl::nullopt, false); + EXPECT_EQ("duplicate name name1 found in the union of added+removed resources", + ack.error_detail_.message()); +} + +TEST_F(OldDeltaSubscriptionStateTest, AddedAndRemoved) { + Protobuf::RepeatedPtrField additions = + populateRepeatedResource({{"name1", "version1A"}}); + Protobuf::RepeatedPtrField removals; + *removals.Add() = "name1"; + UpdateAck ack = + deliverDiscoveryResponse(additions, removals, "debugversion1", absl::nullopt, false); + EXPECT_EQ("duplicate name name1 found in the union of added+removed resources", + ack.error_detail_.message()); +} + +TEST_F(OldDeltaSubscriptionStateTest, ResourceTTL) { + Event::SimulatedTimeSystem time_system; + time_system.setSystemTime(std::chrono::milliseconds(0)); + + auto create_resource_with_ttl = [](absl::optional ttl_s, + bool include_resource) { + Protobuf::RepeatedPtrField added_resources; + auto* resource = added_resources.Add(); + resource->set_name("name1"); + resource->set_version("version1A"); + + if (include_resource) { + resource->mutable_resource(); + } + + if (ttl_s) { + ProtobufWkt::Duration ttl; + ttl.set_seconds(ttl_s->count()); + resource->mutable_ttl()->CopyFrom(ttl); + } + + return added_resources; + }; + + { + EXPECT_CALL(*ttl_timer_, enabled()); + EXPECT_CALL(*ttl_timer_, enableTimer(std::chrono::milliseconds(1000), _)); + deliverDiscoveryResponse(create_resource_with_ttl(std::chrono::seconds(1), true), {}, "debug1", + "nonce1"); + } + + { + // Increase the TTL. + EXPECT_CALL(*ttl_timer_, enabled()); + EXPECT_CALL(*ttl_timer_, enableTimer(std::chrono::milliseconds(2000), _)); + deliverDiscoveryResponse(create_resource_with_ttl(std::chrono::seconds(2), true), {}, "debug1", + "nonce1", true, 1); + } + + { + // Refresh the TTL with a heartbeat. The resource should not be passed to the update callbacks. + EXPECT_CALL(*ttl_timer_, enabled()); + deliverDiscoveryResponse(create_resource_with_ttl(std::chrono::seconds(2), false), {}, "debug1", + "nonce1", true, 0); + } + + // Remove the TTL. + EXPECT_CALL(*ttl_timer_, disableTimer()); + deliverDiscoveryResponse(create_resource_with_ttl(absl::nullopt, true), {}, "debug1", "nonce1", + true, 1); + + // Add back the TTL. 
+ EXPECT_CALL(*ttl_timer_, enabled()); + EXPECT_CALL(*ttl_timer_, enableTimer(_, _)); + deliverDiscoveryResponse(create_resource_with_ttl(std::chrono::seconds(2), true), {}, "debug1", + "nonce1"); + + EXPECT_CALL(callbacks_, onConfigUpdate(_, _, _)); + EXPECT_CALL(*ttl_timer_, disableTimer()); + time_system.setSystemTime(std::chrono::seconds(2)); + + // Invoke the TTL. + ttl_timer_->invokeCallback(); +} + +TEST_F(OldDeltaSubscriptionStateTest, TypeUrlMismatch) { + envoy::service::discovery::v3::DeltaDiscoveryResponse message; + + Protobuf::RepeatedPtrField additions; + auto* resource = additions.Add(); + resource->set_name("name1"); + resource->set_version("version1"); + resource->mutable_resource()->set_type_url("foo"); + + *message.mutable_resources() = additions; + *message.mutable_removed_resources() = {}; + message.set_system_version_info("version1"); + message.set_nonce("nonce1"); + message.set_type_url("bar"); + + EXPECT_CALL(callbacks_, + onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _)) + .WillOnce(Invoke([](Envoy::Config::ConfigUpdateFailureReason, const EnvoyException* e) { + EXPECT_TRUE(IsSubstring("", "", + "type URL foo embedded in an individual Any does not match the " + "message-wide type URL bar", + e->what())); + })); + handleResponse(message); +} + +class OldVhdsDeltaSubscriptionStateTest : public OldDeltaSubscriptionStateTestBase { +public: + OldVhdsDeltaSubscriptionStateTest() + : OldDeltaSubscriptionStateTestBase("envoy.config.route.v3.VirtualHost") {} +}; + +TEST_F(OldVhdsDeltaSubscriptionStateTest, ResourceTTL) { + Event::SimulatedTimeSystem time_system; + time_system.setSystemTime(std::chrono::milliseconds(0)); + + TestScopedRuntime scoped_runtime; + + auto create_resource_with_ttl = [](bool include_resource) { + Protobuf::RepeatedPtrField added_resources; + auto* resource = added_resources.Add(); + resource->set_name("name1"); + resource->set_version("version1A"); + + if (include_resource) { + resource->mutable_resource(); + } + + ProtobufWkt::Duration ttl; + ttl.set_seconds(1); + resource->mutable_ttl()->CopyFrom(ttl); + + return added_resources; + }; + + EXPECT_CALL(*ttl_timer_, enabled()); + EXPECT_CALL(*ttl_timer_, enableTimer(std::chrono::milliseconds(1000), _)); + deliverDiscoveryResponse(create_resource_with_ttl(true), {}, "debug1", "nonce1", true, 1); + + // Heartbeat update should not be propagated to the subscription callback. + EXPECT_CALL(*ttl_timer_, enabled()); + deliverDiscoveryResponse(create_resource_with_ttl(false), {}, "debug1", "nonce1", true, 0); + + // When runtime flag is disabled, maintain old behavior where we do propagate + // the update to the subscription callback. 
+ Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.vhds_heartbeats", "false"}}); + + EXPECT_CALL(*ttl_timer_, enabled()); + deliverDiscoveryResponse(create_resource_with_ttl(false), {}, "debug1", "nonce1", true, 1); +} + +} // namespace +} // namespace Config +} // namespace Envoy diff --git a/test/common/config/delta_subscription_state_test.cc b/test/common/config/delta_subscription_state_test.cc index 5c816a194b72..e908cbd08047 100644 --- a/test/common/config/delta_subscription_state_test.cc +++ b/test/common/config/delta_subscription_state_test.cc @@ -19,6 +19,7 @@ using testing::IsSubstring; using testing::NiceMock; +using testing::Pair; using testing::Throw; using testing::UnorderedElementsAre; using testing::UnorderedElementsAreArray; @@ -29,27 +30,41 @@ namespace { const char TypeUrl[] = "type.googleapis.com/envoy.config.cluster.v3.Cluster"; enum class LegacyOrUnified { Legacy, Unified }; +const auto WildcardStr = std::string(Wildcard); + +Protobuf::RepeatedPtrField +populateRepeatedResource(std::vector> items) { + Protobuf::RepeatedPtrField add_to; + for (const auto& item : items) { + auto* resource = add_to.Add(); + resource->set_name(item.first); + resource->set_version(item.second); + } + return add_to; +} + +Protobuf::RepeatedPtrField populateRepeatedString(std::vector items) { + Protobuf::RepeatedPtrField add_to; + for (const auto& item : items) { + auto* str = add_to.Add(); + *str = item; + } + return add_to; +} class DeltaSubscriptionStateTestBase : public testing::TestWithParam { protected: - DeltaSubscriptionStateTestBase( - const std::string& type_url, const bool wildcard, LegacyOrUnified legacy_or_unified, - const absl::flat_hash_set initial_resources = {"name1", "name2", "name3"}) + DeltaSubscriptionStateTestBase(const std::string& type_url, LegacyOrUnified legacy_or_unified) : should_use_unified_(legacy_or_unified == LegacyOrUnified::Unified) { ttl_timer_ = new Event::MockTimer(&dispatcher_); if (should_use_unified_) { - state_ = std::make_unique( - type_url, callbacks_, dispatcher_, wildcard); + state_ = std::make_unique(type_url, callbacks_, + dispatcher_); } else { - state_ = std::make_unique( - type_url, callbacks_, local_info_, dispatcher_, wildcard); + state_ = std::make_unique(type_url, callbacks_, + local_info_, dispatcher_); } - updateSubscriptionInterest(initial_resources, {}); - auto cur_request = getNextRequestAckless(); - EXPECT_THAT(cur_request->resource_names_subscribe(), - // UnorderedElementsAre("name1", "name2", "name3")); - UnorderedElementsAreArray(initial_resources.cbegin(), initial_resources.cend())); } void updateSubscriptionInterest(const absl::flat_hash_set& cur_added, @@ -112,6 +127,16 @@ class DeltaSubscriptionStateTestBase : public testing::TestWithParam> added_resources, + std::vector removed_resources, + const std::string& version_info) { + EXPECT_CALL(*ttl_timer_, disableTimer()); + auto add = populateRepeatedResource(added_resources); + auto remove = populateRepeatedString(removed_resources); + return deliverDiscoveryResponse(add, remove, version_info); + } + void markStreamFresh() { if (should_use_unified_) { absl::get<1>(state_)->markStreamFresh(); @@ -138,30 +163,335 @@ class DeltaSubscriptionStateTestBase : public testing::TestWithParam -populateRepeatedResource(std::vector> items) { - Protobuf::RepeatedPtrField add_to; - for (const auto& item : items) { - auto* resource = add_to.Add(); - resource->set_name(item.first); - resource->set_version(item.second); +class DeltaSubscriptionStateTestBlank : 
public DeltaSubscriptionStateTestBase {
+public:
+  DeltaSubscriptionStateTestBlank() : DeltaSubscriptionStateTestBase(TypeUrl, GetParam()) {}
+};
+
+INSTANTIATE_TEST_SUITE_P(DeltaSubscriptionStateTestBlank, DeltaSubscriptionStateTestBlank,
+                         testing::ValuesIn({LegacyOrUnified::Legacy, LegacyOrUnified::Unified}));
+
+// Checks that subscriptionUpdatePending returns the correct value in each scenario.
+TEST_P(DeltaSubscriptionStateTestBlank, SubscriptionPendingTest) {
+  // We should send a request, because nothing has been sent out yet. Note that this means
+  // subscribing to the wildcard resource.
+  EXPECT_TRUE(subscriptionUpdatePending());
+  getNextRequestAckless();
+
+  // We should not be sending any requests if nothing has changed since the last time we sent a
+  // request, or if our subscription interest was not modified.
+  EXPECT_FALSE(subscriptionUpdatePending());
+  updateSubscriptionInterest({}, {});
+  EXPECT_FALSE(subscriptionUpdatePending());
+
+  // We should send a request, because our interest changed (we are interested in foo now).
+  updateSubscriptionInterest({"foo"}, {});
+  EXPECT_TRUE(subscriptionUpdatePending());
+  getNextRequestAckless();
+
+  // We should send a request after a new stream is established if we are interested in some
+  // resource.
+  EXPECT_FALSE(subscriptionUpdatePending());
+  markStreamFresh();
+  EXPECT_TRUE(subscriptionUpdatePending());
+  getNextRequestAckless();
+
+  // We should send a request, because our interest changed (we are no longer interested in foo
+  // or in the wildcard resource).
+  EXPECT_FALSE(subscriptionUpdatePending());
+  updateSubscriptionInterest({}, {WildcardStr, "foo"});
+  EXPECT_TRUE(subscriptionUpdatePending());
+  getNextRequestAckless();
+
+  // We should not be sending anything after the stream is reestablished, because we are not
+  // interested in anything.
+  markStreamFresh();
+  EXPECT_FALSE(subscriptionUpdatePending());
+}
+
+// Check that requested resources are dropped from the cache immediately after losing interest in
+// them when we don't have a wildcard subscription. In such a case there is no ambiguity about
+// whether a dropped resource could have come from the wildcard subscription.
+//
+// Dropping from the cache can be seen through the initial_resource_versions field in the initial
+// request.
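A minimal sketch of the bookkeeping described above, using an invented ToyDeltaState (an interest set plus a version map) rather than the real DeltaSubscriptionState: once interest in a requested resource is dropped and no wildcard subscription is in play, its version can be erased right away, so it never reappears in initial_resource_versions. The test below exercises the real implementation through discovery responses and stream resets.

```cpp
#include <cassert>
#include <map>
#include <set>
#include <string>

// Toy bookkeeping: explicit interest plus the versions learned from the server.
struct ToyDeltaState {
  std::set<std::string> interest;
  std::map<std::string, std::string> versions;

  void unsubscribe(const std::string& name) {
    interest.erase(name);
    // Without a wildcard subscription nothing else could have supplied this resource,
    // so it can be forgotten immediately.
    versions.erase(name);
  }

  // What the first request on a fresh stream would report in initial_resource_versions.
  const std::map<std::string, std::string>& initialResourceVersions() const { return versions; }
};

int main() {
  ToyDeltaState state;
  state.interest = {"foo", "bar"};
  state.versions = {{"foo", "1"}, {"bar", "1"}}; // As if the server had answered both.
  state.unsubscribe("foo");
  const std::map<std::string, std::string> expected{{"bar", "1"}};
  assert(state.initialResourceVersions() == expected); // "foo" is no longer reported.
  return 0;
}
```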
+TEST_P(DeltaSubscriptionStateTestBlank, ResourceTransitionNonWildcardFromRequestedToDropped) { + updateSubscriptionInterest({"foo", "bar"}, {}); + auto req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), UnorderedElementsAre("foo", "bar")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + EXPECT_TRUE(req->initial_resource_versions().empty()); + + deliverSimpleDiscoveryResponse({{"foo", "1"}, {"bar", "1"}}, {}, "d1"); + markStreamFresh(); + req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), UnorderedElementsAre("foo", "bar")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + EXPECT_THAT(req->initial_resource_versions(), + UnorderedElementsAre(Pair("foo", "1"), Pair("bar", "1"))); + + updateSubscriptionInterest({}, {"foo"}); + req = getNextRequestAckless(); + EXPECT_TRUE(req->resource_names_subscribe().empty()); + EXPECT_THAT(req->resource_names_unsubscribe(), UnorderedElementsAre("foo")); + deliverSimpleDiscoveryResponse({}, {"foo"}, "d2"); + + markStreamFresh(); + req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), UnorderedElementsAre("bar")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + EXPECT_THAT(req->initial_resource_versions(), UnorderedElementsAre(Pair("bar", "1"))); +} + +// Check if we keep foo resource in cache even if we lost interest in it. It could be a part of the +// wildcard subscription. +TEST_P(DeltaSubscriptionStateTestBlank, ResourceTransitionWithWildcardFromRequestedToAmbiguous) { + // subscribe to foo and make sure we have it. + updateSubscriptionInterest({WildcardStr, "foo", "bar"}, {}); + auto req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), UnorderedElementsAre(WildcardStr, "foo", "bar")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + EXPECT_TRUE(req->initial_resource_versions().empty()); + deliverSimpleDiscoveryResponse({{"foo", "1"}, {"bar", "1"}, {"wild1", "1"}}, {}, "d1"); + + // ensure that foo is a part of resource versions + markStreamFresh(); + req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), UnorderedElementsAre(WildcardStr, "foo", "bar")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + EXPECT_THAT(req->initial_resource_versions(), + UnorderedElementsAre(Pair("foo", "1"), Pair("bar", "1"), Pair("wild1", "1"))); + + // unsubscribe from foo just before the stream breaks, make sure we still send the foo initial + // version + updateSubscriptionInterest({}, {"foo"}); + req = getNextRequestAckless(); + EXPECT_TRUE(req->resource_names_subscribe().empty()); + EXPECT_THAT(req->resource_names_unsubscribe(), UnorderedElementsAre("foo")); + // didn't receive a reply + markStreamFresh(); + req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), UnorderedElementsAre(WildcardStr, "bar")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + EXPECT_THAT(req->initial_resource_versions(), + UnorderedElementsAre(Pair("foo", "1"), Pair("bar", "1"), Pair("wild1", "1"))); +} + +// Check that foo and bar do not appear in initial versions after we lost interest. Foo won't +// appear, because we got a reply from server confirming dropping the resource. Bar won't appear +// because we never got a reply from server with a version of it. +TEST_P(DeltaSubscriptionStateTestBlank, ResourceTransitionWithWildcardFromRequestedToDropped) { + // subscribe to foo and bar and make sure we have it. 
+ updateSubscriptionInterest({WildcardStr, "foo", "bar", "baz"}, {}); + auto req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), + UnorderedElementsAre(WildcardStr, "foo", "bar", "baz")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + EXPECT_TRUE(req->initial_resource_versions().empty()); + deliverSimpleDiscoveryResponse({{"foo", "1"}, {"baz", "1"}, {"wild1", "1"}}, {}, "d1"); + + // ensure that foo is a part of resource versions, bar won't be, because we don't have its version + markStreamFresh(); + req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), + UnorderedElementsAre(WildcardStr, "foo", "bar", "baz")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + EXPECT_THAT(req->initial_resource_versions(), + UnorderedElementsAre(Pair("foo", "1"), Pair("baz", "1"), Pair("wild1", "1"))); + + // unsubscribe from foo and bar, and receive an confirmation about dropping foo. Now neither will + // appear in initial versions in the initial request after breaking the stream. + updateSubscriptionInterest({}, {"foo", "bar"}); + req = getNextRequestAckless(); + EXPECT_TRUE(req->resource_names_subscribe().empty()); + EXPECT_THAT(req->resource_names_unsubscribe(), UnorderedElementsAre("foo", "bar")); + deliverSimpleDiscoveryResponse({}, {"foo"}, "d2"); + markStreamFresh(); + req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), UnorderedElementsAre(WildcardStr, "baz")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + EXPECT_THAT(req->initial_resource_versions(), + UnorderedElementsAre(Pair("baz", "1"), Pair("wild1", "1"))); +} + +// Check that we move the resource from wildcard subscription to requested without losing version +// information about it. +TEST_P(DeltaSubscriptionStateTestBlank, ResourceTransitionWithWildcardFromWildcardToRequested) { + updateSubscriptionInterest({}, {}); + auto req = getNextRequestAckless(); + EXPECT_TRUE(req->resource_names_subscribe().empty()); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + EXPECT_TRUE(req->initial_resource_versions().empty()); + deliverSimpleDiscoveryResponse({{"foo", "1"}, {"wild1", "1"}}, {}, "d1"); + + updateSubscriptionInterest({"foo"}, {}); + markStreamFresh(); + req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), UnorderedElementsAre(WildcardStr, "foo")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + EXPECT_THAT(req->initial_resource_versions(), + UnorderedElementsAre(Pair("foo", "1"), Pair("wild1", "1"))); +} + +// Check that we move the ambiguous resource to requested without losing version information about +// it. 
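A rough model of that transition, using an invented Interest enum instead of the real per-resource state: downgrading a resource to "ambiguous" (it might still be served by the wildcard subscription) keeps its cached version, so re-requesting it later still yields the last known version in initial_resource_versions. The test below checks this against the actual implementation.

```cpp
#include <cassert>
#include <map>
#include <string>

// Invented interest modes: Requested (explicitly asked for) vs. Ambiguous (no longer
// explicitly wanted, but possibly still delivered by the wildcard subscription).
enum class Interest { Requested, Ambiguous };

struct TrackedResource {
  Interest interest;
  std::string version;
};

int main() {
  std::map<std::string, TrackedResource> tracked;
  tracked["foo"] = {Interest::Requested, "1"}; // Version learned from the server.

  // Losing explicit interest while a wildcard subscription exists only downgrades the
  // resource to Ambiguous; its version is kept.
  tracked["foo"].interest = Interest::Ambiguous;

  // Re-requesting the resource upgrades it back without losing the cached version, so a
  // later fresh-stream request can still report {"foo": "1"} in initial_resource_versions.
  tracked["foo"].interest = Interest::Requested;
  assert(tracked["foo"].version == "1");
  return 0;
}
```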
+TEST_P(DeltaSubscriptionStateTestBlank, ResourceTransitionWithWildcardFromAmbiguousToRequested) { + updateSubscriptionInterest({WildcardStr, "foo"}, {}); + auto req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), UnorderedElementsAre(WildcardStr, "foo")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + EXPECT_TRUE(req->initial_resource_versions().empty()); + deliverSimpleDiscoveryResponse({{"foo", "1"}, {"wild1", "1"}}, {}, "d1"); + + // make foo ambiguous and request it again + updateSubscriptionInterest({}, {"foo"}); + updateSubscriptionInterest({"foo"}, {}); + markStreamFresh(); + req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), UnorderedElementsAre(WildcardStr, "foo")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + EXPECT_THAT(req->initial_resource_versions(), + UnorderedElementsAre(Pair("foo", "1"), Pair("wild1", "1"))); +} + +// Check if we correctly decide to send a legacy wildcard initial request. +TEST_P(DeltaSubscriptionStateTestBlank, LegacyWildcardInitialRequests) { + updateSubscriptionInterest({}, {}); + auto req = getNextRequestAckless(); + EXPECT_TRUE(req->resource_names_subscribe().empty()); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + deliverSimpleDiscoveryResponse({{"wild1", "1"}}, {}, "d1"); + + // unsubscribing from unknown resource should keep the legacy + // wildcard mode + updateSubscriptionInterest({}, {"unknown"}); + markStreamFresh(); + req = getNextRequestAckless(); + EXPECT_TRUE(req->resource_names_subscribe().empty()); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + + updateSubscriptionInterest({"foo"}, {}); + req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), UnorderedElementsAre("foo")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + deliverSimpleDiscoveryResponse({{"foo", "1"}}, {}, "d1"); + updateSubscriptionInterest({}, {"foo"}); + req = getNextRequestAckless(); + EXPECT_TRUE(req->resource_names_subscribe().empty()); + EXPECT_THAT(req->resource_names_unsubscribe(), UnorderedElementsAre("foo")); + deliverSimpleDiscoveryResponse({}, {"foo"}, "d1"); + + markStreamFresh(); + req = getNextRequestAckless(); + EXPECT_TRUE(req->resource_names_subscribe().empty()); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); +} + +// Check that ambiguous resources may also receive a heartbeat message. 
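A standalone illustration of the heartbeat convention the TTL tests rely on, with an invented ToyResourceEntry that only loosely mirrors a DeltaDiscoveryResponse entry: a resource carrying a TTL but no resource body is a heartbeat, which refreshes the timer without being handed to onConfigUpdate.

```cpp
#include <cassert>
#include <optional>
#include <string>

// Loose stand-in for one entry of a DeltaDiscoveryResponse.
struct ToyResourceEntry {
  std::string name;
  std::string version;
  bool has_resource_body = false;
  std::optional<int> ttl_seconds;
};

bool isHeartbeat(const ToyResourceEntry& entry) {
  return entry.ttl_seconds.has_value() && !entry.has_resource_body;
}

int main() {
  const ToyResourceEntry full{"foo", "1", /*has_resource_body=*/true, 1};
  const ToyResourceEntry beat{"foo", "1", /*has_resource_body=*/false, 1};
  assert(!isHeartbeat(full)); // Delivered to the subscription callbacks.
  assert(isHeartbeat(beat));  // Only refreshes the TTL timer; callbacks see no update.
  return 0;
}
```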
+TEST_P(DeltaSubscriptionStateTestBlank, AmbiguousResourceTTL) { + Event::SimulatedTimeSystem time_system; + time_system.setSystemTime(std::chrono::milliseconds(0)); + + auto create_resource_with_ttl = [](absl::string_view name, absl::string_view version, + absl::optional ttl_s, + bool include_resource) { + Protobuf::RepeatedPtrField added_resources; + auto* resource = added_resources.Add(); + resource->set_name(std::string(name)); + resource->set_version(std::string(version)); + + if (include_resource) { + resource->mutable_resource(); + } + + if (ttl_s) { + ProtobufWkt::Duration ttl; + ttl.set_seconds(ttl_s->count()); + resource->mutable_ttl()->CopyFrom(ttl); + } + + return added_resources; + }; + + updateSubscriptionInterest({WildcardStr, "foo"}, {}); + auto req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), UnorderedElementsAre(WildcardStr, "foo")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + { + EXPECT_CALL(*ttl_timer_, enabled()); + EXPECT_CALL(*ttl_timer_, enableTimer(std::chrono::milliseconds(1000), _)); + deliverDiscoveryResponse(create_resource_with_ttl("foo", "1", std::chrono::seconds(1), true), + {}, "debug1", "nonce1"); } - return add_to; + + // make foo ambiguous + updateSubscriptionInterest({}, {"foo"}); + req = getNextRequestAckless(); + EXPECT_TRUE(req->resource_names_subscribe().empty()); + EXPECT_THAT(req->resource_names_unsubscribe(), UnorderedElementsAre("foo")); + { + // Refresh the TTL with a heartbeat. The resource should not be passed to the update callbacks. + EXPECT_CALL(*ttl_timer_, enabled()); + deliverDiscoveryResponse(create_resource_with_ttl("foo", "1", std::chrono::seconds(1), false), + {}, "debug1", "nonce1", true, 0); + } + + EXPECT_CALL(callbacks_, onConfigUpdate(_, _, _)); + EXPECT_CALL(*ttl_timer_, disableTimer()); + time_system.setSystemTime(std::chrono::seconds(2)); + + // Invoke the TTL. + ttl_timer_->invokeCallback(); } -class DeltaSubscriptionStateTest : public DeltaSubscriptionStateTestBase { +// Checks that we ignore resources that we haven't asked for. +TEST_P(DeltaSubscriptionStateTestBlank, IgnoreSuperfluousResources) { + updateSubscriptionInterest({"foo", "bar"}, {}); + auto req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), UnorderedElementsAre("foo", "bar")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + EXPECT_TRUE(req->initial_resource_versions().empty()); + deliverSimpleDiscoveryResponse({{"foo", "1"}, {"bar", "1"}, {"did-not-want", "1"}, {"spam", "1"}}, + {}, "d1"); + + // Force a reconnection and resending of the "initial" message. If the initial_resource_versions + // in the message contains resources like did-not-want or spam, we haven't ignored that as we + // should. 
+ markStreamFresh(); + req = getNextRequestAckless(); + EXPECT_THAT(req->resource_names_subscribe(), UnorderedElementsAre("foo", "bar")); + EXPECT_TRUE(req->resource_names_unsubscribe().empty()); + EXPECT_THAT(req->initial_resource_versions(), + UnorderedElementsAre(Pair("foo", "1"), Pair("bar", "1"))); +} + +class DeltaSubscriptionStateTestWithResources : public DeltaSubscriptionStateTestBase { +protected: + DeltaSubscriptionStateTestWithResources( + const std::string& type_url, LegacyOrUnified legacy_or_unified, + const absl::flat_hash_set initial_resources = {"name1", "name2", "name3"}) + : DeltaSubscriptionStateTestBase(type_url, legacy_or_unified) { + updateSubscriptionInterest(initial_resources, {}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), + // UnorderedElementsAre("name1", "name2", "name3")); + UnorderedElementsAreArray(initial_resources.cbegin(), initial_resources.cend())); + } +}; + +class DeltaSubscriptionStateTest : public DeltaSubscriptionStateTestWithResources { public: - DeltaSubscriptionStateTest() : DeltaSubscriptionStateTestBase(TypeUrl, false, GetParam()) {} + DeltaSubscriptionStateTest() : DeltaSubscriptionStateTestWithResources(TypeUrl, GetParam()) {} }; INSTANTIATE_TEST_SUITE_P(DeltaSubscriptionStateTest, DeltaSubscriptionStateTest, testing::ValuesIn({LegacyOrUnified::Legacy, LegacyOrUnified::Unified})); // Delta subscription state of a wildcard subscription request. -class WildcardDeltaSubscriptionStateTest : public DeltaSubscriptionStateTestBase { +class WildcardDeltaSubscriptionStateTest : public DeltaSubscriptionStateTestWithResources { public: WildcardDeltaSubscriptionStateTest() - : DeltaSubscriptionStateTestBase(TypeUrl, true, GetParam(), {}) {} + : DeltaSubscriptionStateTestWithResources(TypeUrl, GetParam(), {}) {} }; INSTANTIATE_TEST_SUITE_P(WildcardDeltaSubscriptionStateTest, WildcardDeltaSubscriptionStateTest, @@ -183,6 +513,50 @@ TEST_P(DeltaSubscriptionStateTest, SubscribeAndUnsubscribe) { } } +// Resources has no subscriptions should not be tracked. +TEST_P(DeltaSubscriptionStateTest, NewPushDoesntAddUntrackedResources) { + { // Add "name4", "name5", "name6" and remove "name1", "name2", "name3". + updateSubscriptionInterest({"name4", "name5", "name6"}, {"name1", "name2", "name3"}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), + UnorderedElementsAre("name4", "name5", "name6")); + EXPECT_THAT(cur_request->resource_names_unsubscribe(), + UnorderedElementsAre("name1", "name2", "name3")); + } + { + // On Reconnection, only "name4", "name5", "name6" are sent. + markStreamFresh(); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), + UnorderedElementsAre("name4", "name5", "name6")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + EXPECT_TRUE(cur_request->initial_resource_versions().empty()); + } + // The xDS server's first response includes removed items name1 and 2, and a + // completely unrelated resource "bluhbluh". 
+ { + Protobuf::RepeatedPtrField added_resources = + populateRepeatedResource({{"name1", "version1A"}, + {"bluhbluh", "bluh"}, + {"name6", "version6A"}, + {"name2", "version2A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, "debug1", "nonce1"); + EXPECT_EQ("nonce1", ack.nonce_); + EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); + } + { // Simulate a stream reconnection, just to see the current resource_state_. + markStreamFresh(); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), + UnorderedElementsAre("name4", "name5", "name6")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + ASSERT_EQ(cur_request->initial_resource_versions().size(), 1); + EXPECT_TRUE(cur_request->initial_resource_versions().contains("name6")); + EXPECT_EQ(cur_request->initial_resource_versions().at("name6"), "version6A"); + } +} + // Delta xDS reliably queues up and sends all discovery requests, even in situations where it isn't // strictly necessary. E.g.: if you subscribe but then unsubscribe to a given resource, all before a // request was able to be sent, two requests will be sent. The following tests demonstrate this. @@ -400,31 +774,266 @@ TEST_P(DeltaSubscriptionStateTest, SubscribeAndUnsubscribeAfterReconnect) { EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); } -// For wildcard subscription, upon a reconnection, the server is supposed to assume a -// blank slate for the Envoy's state (hence the need for initial_resource_versions), and -// the resource_names_subscribe and resource_names_unsubscribe must be empty (as is expected -// of every wildcard first message). This is true even if in between the last request of the -// last stream and the first request of the new stream, Envoy gained or lost interest in a -// resource. The subscription & unsubscription implicitly takes effect by simply requesting a -// wildcard subscription in the newly reconnected stream. -TEST_P(WildcardDeltaSubscriptionStateTest, SubscribeAndUnsubscribeAfterReconnect) { +// Check that switching into wildcard subscription after initial +// request switches us into the explicit wildcard mode. +TEST_P(DeltaSubscriptionStateTest, SwitchIntoWildcardMode) { + Protobuf::RepeatedPtrField add1_2 = + populateRepeatedResource({{"name1", "version1A"}, {"name2", "version2A"}}); + // We call deliverDiscoveryResponse twice in this test. + EXPECT_CALL(*ttl_timer_, disableTimer()).Times(2); + deliverDiscoveryResponse(add1_2, {}, "debugversion1"); + + // switch into wildcard mode + updateSubscriptionInterest({"name4", WildcardStr}, {"name1"}); + markStreamFresh(); // simulate a stream reconnection + auto cur_request = getNextRequestAckless(); + // Regarding the resource_names_subscribe field: + // name1: do not include: we lost interest. 
+ // name2: yes do include: we are explicitly interested (from test's base constructor) + // name3: yes do include: we are explicitly interested (from test's base constructor) + // name4: yes do include: we are explicitly interested + // *: explicit wildcard subscription + EXPECT_THAT(cur_request->resource_names_subscribe(), + UnorderedElementsAre("name2", "name3", "name4", Wildcard)); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + + Protobuf::RepeatedPtrField add4_5 = + populateRepeatedResource({{"name4", "version4A"}, {"name5", "version5A"}}); + deliverDiscoveryResponse(add4_5, {}, "debugversion1"); + + markStreamFresh(); // simulate a stream reconnection + cur_request = getNextRequestAckless(); + // Regarding the resource_names_subscribe field: + // name1: do not include: we lost interest. + // name2: yes do include: we are explicitly interested (from test's base constructor) + // name3: yes do include: we are explicitly interested (from test's base constructor) + // name4: yes do include: we are explicitly interested + // name5: do not include: we are implicitly interested, so this resource should not appear on the + // initial request + // *: explicit wildcard subscription + EXPECT_THAT(cur_request->resource_names_subscribe(), + UnorderedElementsAre("name2", "name3", "name4", Wildcard)); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); +} + +// For wildcard subscription, upon a reconnection, the server is supposed to assume a blank slate +// for the Envoy's state (hence the need for initial_resource_versions), and the +// resource_names_subscribe and resource_names_unsubscribe must be empty if we haven't gained any +// new explicit interest in a resource. In such case, the client should send an empty request. +TEST_P(WildcardDeltaSubscriptionStateTest, SubscribeAndUnsubscribeAfterReconnectImplicit) { Protobuf::RepeatedPtrField add1_2 = populateRepeatedResource({{"name1", "version1A"}, {"name2", "version2A"}}); EXPECT_CALL(*ttl_timer_, disableTimer()); deliverDiscoveryResponse(add1_2, {}, "debugversion1"); - updateSubscriptionInterest({"name3"}, {"name1"}); markStreamFresh(); // simulate a stream reconnection auto cur_request = getNextRequestAckless(); // Regarding the resource_names_subscribe field: // name1: do not include: we lost interest. - // name2: do not include: we are interested, but for wildcard it shouldn't be provided. - // name4: do not include: although we are newly interested, an initial wildcard request - // must be with no resources. + // name2: do not include: we are implicitly interested, but for wildcard it shouldn't be provided. EXPECT_TRUE(cur_request->resource_names_subscribe().empty()); EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); } +// For wildcard subscription, upon a reconnection, the server is supposed to assume a blank slate +// for the Envoy's state (hence the need for initial_resource_versions). The +// resource_names_unsubscribe must be empty (as is expected of every wildcard first message). The +// resource_names_subscribe should contain all the resources we are explicitly interested in and a +// special resource denoting a wildcard subscription. 
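A small sketch, assuming an invented firstRequestSubscribe helper, of what the first request on a fresh stream is expected to carry in explicit-wildcard mode: every explicitly requested name plus the special "*" entry, with an empty unsubscribe list. The test below verifies this against the real request builder.

```cpp
#include <cassert>
#include <set>
#include <string>
#include <vector>

// resource_names_subscribe of the first request on a fresh stream in explicit-wildcard
// mode: every explicitly requested name plus the special "*" entry.
std::vector<std::string> firstRequestSubscribe(const std::set<std::string>& explicit_names,
                                               bool explicit_wildcard) {
  std::vector<std::string> subscribe(explicit_names.begin(), explicit_names.end());
  if (explicit_wildcard) {
    subscribe.push_back("*"); // The special wildcard resource name.
  }
  return subscribe;
}

int main() {
  const auto subscribe = firstRequestSubscribe({"name3"}, /*explicit_wildcard=*/true);
  assert(subscribe.size() == 2); // {"name3", "*"}; resources known only implicitly are omitted.
  return 0;
}
```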
+TEST_P(WildcardDeltaSubscriptionStateTest, SubscribeAndUnsubscribeAfterReconnectExplicit) { + Protobuf::RepeatedPtrField add1_2 = + populateRepeatedResource({{"name1", "version1A"}, {"name2", "version2A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + deliverDiscoveryResponse(add1_2, {}, "debugversion1"); + + updateSubscriptionInterest({"name3"}, {}); + markStreamFresh(); // simulate a stream reconnection + auto cur_request = getNextRequestAckless(); + // Regarding the resource_names_subscribe field: + // name1: do not include: see below + // name2: do not include: we are implicitly interested, but for wildcard it shouldn't be provided. + // name3: yes do include: we are explicitly interested. + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre(Wildcard, "name3")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); +} + +// Check the contents of the requests after cancelling the wildcard +// subscription and then reconnection. The second request should look +// like a non-wildcard request, so mention all the known resources in +// the initial request. +TEST_P(WildcardDeltaSubscriptionStateTest, CancellingImplicitWildcardSubscription) { + Protobuf::RepeatedPtrField add1_2 = + populateRepeatedResource({{"name1", "version1A"}, {"name2", "version2A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + deliverDiscoveryResponse(add1_2, {}, "debugversion1"); + + updateSubscriptionInterest({"name3"}, {WildcardStr}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("name3")); + EXPECT_THAT(cur_request->resource_names_unsubscribe(), UnorderedElementsAre(Wildcard)); + markStreamFresh(); // simulate a stream reconnection + // Regarding the resource_names_subscribe field: + // name1: do not include, see below + // name2: do not include: it came from wildcard subscription we lost interest in, so we are not + // interested in name2 too + // name3: yes do include: we are interested + cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("name3")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); +} + +// Check the contents of the requests after cancelling the wildcard +// subscription and then reconnection. The second request should look +// like a non-wildcard request, so mention all the known resources in +// the initial request. 
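A toy illustration of the cleanup described above (afterWildcardCancel is an invented helper, not the real implementation): once the wildcard subscription is cancelled, resources known only through the wildcard are forgotten, while explicitly requested ones keep their versions for the next fresh-stream request.

```cpp
#include <cassert>
#include <map>
#include <set>
#include <string>

// Keep only the versions of resources that were explicitly requested; anything that was
// known purely through the now-cancelled wildcard subscription is dropped.
std::map<std::string, std::string>
afterWildcardCancel(const std::map<std::string, std::string>& known_versions,
                    const std::set<std::string>& explicitly_requested) {
  std::map<std::string, std::string> kept;
  for (const auto& [name, version] : known_versions) {
    if (explicitly_requested.count(name) != 0) {
      kept.emplace(name, version);
    }
  }
  return kept;
}

int main() {
  const auto kept = afterWildcardCancel({{"name2", "version2A"}, {"name3", "version3A"}},
                                        {"name3", "name4"});
  assert(kept.count("name2") == 0); // Came only via the wildcard: dropped.
  assert(kept.count("name3") == 1); // Explicitly requested: kept for the next fresh stream.
  return 0;
}
```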
+TEST_P(WildcardDeltaSubscriptionStateTest, CancellingExplicitWildcardSubscription) { + Protobuf::RepeatedPtrField add1_2 = + populateRepeatedResource({{"name1", "version1A"}, {"name2", "version2A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()); + deliverDiscoveryResponse(add1_2, {}, "debugversion1"); + // switch to explicit wildcard subscription + updateSubscriptionInterest({"name3"}, {}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("name3")); + + // cancel wildcard subscription + updateSubscriptionInterest({"name4"}, {WildcardStr}); + cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("name4")); + EXPECT_THAT(cur_request->resource_names_unsubscribe(), UnorderedElementsAre(Wildcard)); + markStreamFresh(); // simulate a stream reconnection + // Regarding the resource_names_subscribe field: + // name1: do not include: see name2 + // name2: do not include: it came as a part of wildcard subscription we cancelled, so we are not + // interested in this resource name3: yes do include: we are interested, and it's not wildcard. + // name4: yes do include: we are interested, and it's not wildcard. + cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("name3", "name4")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); +} + +// Check that resource changes from being interested in implicitly to explicitly when we update the +// subscription interest. Such resources will show up in the initial wildcard requests +// too. Receiving the update on such resource will not change their interest mode. +TEST_P(WildcardDeltaSubscriptionStateTest, ExplicitInterestOverridesImplicit) { + Protobuf::RepeatedPtrField add1_2_a = + populateRepeatedResource({{"name1", "version1A"}, {"name2", "version2A"}}); + EXPECT_CALL(*ttl_timer_, disableTimer()).Times(2); + deliverDiscoveryResponse(add1_2_a, {}, "debugversion1"); + + // verify that neither name1 nor name2 appears in the initial request (they are of implicit + // interest and initial wildcard request should not contain those). + markStreamFresh(); // simulate a stream reconnection + auto cur_request = getNextRequestAckless(); + EXPECT_TRUE(cur_request->resource_names_subscribe().empty()); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + + // express the interest in name1 explicitly and verify that the follow-up request will contain it + // (this also switches the wildcard mode to explicit, but we won't see * in resource names, + // because we already are in wildcard mode). + updateSubscriptionInterest({"name1"}, {}); + cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("name1")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + + // verify that name1 and * appear in the initial request (name1 is of explicit interest and we are + // in explicit wildcard mode). 
+ markStreamFresh(); // simulate a stream reconnection + cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("name1", Wildcard)); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + + // verify that getting an update on name1 will keep name1 in the explicit interest mode + Protobuf::RepeatedPtrField add1_2_b = + populateRepeatedResource({{"name1", "version1B"}, {"name2", "version2B"}}); + deliverDiscoveryResponse(add1_2_b, {}, "debugversion1"); + markStreamFresh(); // simulate a stream reconnection + cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("name1", Wildcard)); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); +} + +// Check that resource changes from being interested in implicitly to explicitly when we update the +// subscription interest. Such resources will show up in the initial wildcard requests +// too. Receiving the update on such resource will not change their interest mode. +TEST_P(WildcardDeltaSubscriptionStateTest, ResetToLegacyWildcardBehaviorOnStreamReset) { + // verify that we will send the legacy wildcard subscription request + // after stream reset + updateSubscriptionInterest({"resource"}, {}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("resource")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + updateSubscriptionInterest({}, {"resource"}); + cur_request = getNextRequestAckless(); + EXPECT_TRUE(cur_request->resource_names_subscribe().empty()); + EXPECT_THAT(cur_request->resource_names_unsubscribe(), UnorderedElementsAre("resource")); + markStreamFresh(); // simulate a stream reconnection + cur_request = getNextRequestAckless(); + EXPECT_TRUE(cur_request->resource_names_subscribe().empty()); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + + // verify that we will send the legacy wildcard subscription request + // after stream reset and confirming our subscription interest + updateSubscriptionInterest({"resource"}, {}); + cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), UnorderedElementsAre("resource")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + updateSubscriptionInterest({}, {"resource"}); + cur_request = getNextRequestAckless(); + EXPECT_TRUE(cur_request->resource_names_subscribe().empty()); + EXPECT_THAT(cur_request->resource_names_unsubscribe(), UnorderedElementsAre("resource")); + markStreamFresh(); // simulate a stream reconnection + updateSubscriptionInterest({}, {}); + cur_request = getNextRequestAckless(); + EXPECT_TRUE(cur_request->resource_names_subscribe().empty()); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); +} + +// All resources from the server should be tracked. +TEST_P(WildcardDeltaSubscriptionStateTest, AllResourcesFromServerAreTrackedInWildcardXDS) { + { // Add "name4", "name5", "name6" + updateSubscriptionInterest({"name4", "name5", "name6"}, {}); + auto cur_request = getNextRequestAckless(); + EXPECT_THAT(cur_request->resource_names_subscribe(), + UnorderedElementsAre("name4", "name5", "name6")); + EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty()); + } + { + // On Reconnection, only "name4", "name5", "name6" and wildcard resource are sent. 
+    markStreamFresh();
+    auto cur_request = getNextRequestAckless();
+    EXPECT_THAT(cur_request->resource_names_subscribe(),
+                UnorderedElementsAre(WildcardStr, "name4", "name5", "name6"));
+    EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty());
+    EXPECT_TRUE(cur_request->initial_resource_versions().empty());
+  }
+  // The xDS server's first response includes name1 and name2, which were never explicitly
+  // requested, and a completely unrelated resource "bluhbluh".
+  {
+    Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> added_resources =
+        populateRepeatedResource({{"name1", "version1A"},
+                                  {"bluhbluh", "bluh"},
+                                  {"name6", "version6A"},
+                                  {"name2", "version2A"}});
+    EXPECT_CALL(*ttl_timer_, disableTimer());
+    UpdateAck ack = deliverDiscoveryResponse(added_resources, {}, "debug1", "nonce1");
+    EXPECT_EQ("nonce1", ack.nonce_);
+    EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code());
+  }
+  { // Simulate a stream reconnection, just to see the current resource_state_.
+    markStreamFresh();
+    auto cur_request = getNextRequestAckless();
+    EXPECT_THAT(cur_request->resource_names_subscribe(),
+                UnorderedElementsAre(WildcardStr, "name4", "name5", "name6"));
+    EXPECT_TRUE(cur_request->resource_names_unsubscribe().empty());
+    ASSERT_EQ(cur_request->initial_resource_versions().size(), 4);
+    EXPECT_EQ(cur_request->initial_resource_versions().at("name1"), "version1A");
+    EXPECT_EQ(cur_request->initial_resource_versions().at("bluhbluh"), "bluh");
+    EXPECT_EQ(cur_request->initial_resource_versions().at("name6"), "version6A");
+    EXPECT_EQ(cur_request->initial_resource_versions().at("name2"), "version2A");
+  }
+}
+
 // initial_resource_versions should not be present on messages after the first in a stream.
 TEST_P(DeltaSubscriptionStateTest, InitialVersionMapFirstMessageOnly) {
   // First, verify that the first message of a new stream sends initial versions.
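The wildcard tests above assemble their delta responses with the populateRepeatedResource() helper, which is defined earlier in this test file and not shown in these hunks. As a rough, hypothetical sketch only, assuming the helper takes name/version pairs and builds the envoy::service::discovery::v3::Resource entries that deliverDiscoveryResponse() consumes, it would look roughly like this:

  Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource>
  populateRepeatedResource(std::vector<std::pair<std::string, std::string>> items) {
    Protobuf::RepeatedPtrField<envoy::service::discovery::v3::Resource> add_to;
    for (const auto& item : items) {
      auto* resource = add_to.Add();      // append one Resource entry to the repeated field
      resource->set_name(item.first);     // resource name, e.g. "name1"
      resource->set_version(item.second); // resource version, e.g. "version1A"
    }
    return add_to;
  }

Each name/version pair passed to the helper therefore corresponds to one resource in the delta discovery response that the subscription state processes.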
@@ -594,10 +1203,10 @@ TEST_P(DeltaSubscriptionStateTest, TypeUrlMismatch) { handleResponse(message); } -class VhdsDeltaSubscriptionStateTest : public DeltaSubscriptionStateTestBase { +class VhdsDeltaSubscriptionStateTest : public DeltaSubscriptionStateTestWithResources { public: VhdsDeltaSubscriptionStateTest() - : DeltaSubscriptionStateTestBase("envoy.config.route.v3.VirtualHost", false, GetParam()) {} + : DeltaSubscriptionStateTestWithResources("envoy.config.route.v3.VirtualHost", GetParam()) {} }; INSTANTIATE_TEST_SUITE_P(VhdsDeltaSubscriptionStateTest, VhdsDeltaSubscriptionStateTest, diff --git a/test/common/config/delta_subscription_test_harness.h b/test/common/config/delta_subscription_test_harness.h index f184a721ea35..2c5d5750d1ba 100644 --- a/test/common/config/delta_subscription_test_harness.h +++ b/test/common/config/delta_subscription_test_harness.h @@ -9,6 +9,7 @@ #include "source/common/config/grpc_subscription_impl.h" #include "source/common/config/new_grpc_mux_impl.h" +#include "source/common/config/xds_mux/grpc_mux_impl.h" #include "source/common/grpc/common.h" #include "test/common/config/subscription_test_harness.h" @@ -32,17 +33,26 @@ namespace { class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { public: - DeltaSubscriptionTestHarness() : DeltaSubscriptionTestHarness(std::chrono::milliseconds(0)) {} - DeltaSubscriptionTestHarness(std::chrono::milliseconds init_fetch_timeout) + DeltaSubscriptionTestHarness(Envoy::Config::LegacyOrUnified legacy_or_unified) + : DeltaSubscriptionTestHarness(legacy_or_unified, std::chrono::milliseconds(0)) {} + DeltaSubscriptionTestHarness(Envoy::Config::LegacyOrUnified legacy_or_unified, + std::chrono::milliseconds init_fetch_timeout) : method_descriptor_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName( "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints")), - async_client_(new Grpc::MockAsyncClient()) { + async_client_(new Grpc::MockAsyncClient()), + should_use_unified_(legacy_or_unified == Envoy::Config::LegacyOrUnified::Unified) { node_.set_id("fo0"); EXPECT_CALL(local_info_, node()).WillRepeatedly(testing::ReturnRef(node_)); EXPECT_CALL(dispatcher_, createTimer_(_)).Times(2); - xds_context_ = std::make_shared( - std::unique_ptr(async_client_), dispatcher_, *method_descriptor_, - random_, stats_store_, rate_limit_settings_, local_info_); + if (should_use_unified_) { + xds_context_ = std::make_shared( + std::unique_ptr(async_client_), dispatcher_, *method_descriptor_, + random_, stats_store_, rate_limit_settings_, local_info_, false); + } else { + xds_context_ = std::make_shared( + std::unique_ptr(async_client_), dispatcher_, *method_descriptor_, + random_, stats_store_, rate_limit_settings_, local_info_); + } subscription_ = std::make_unique( xds_context_, callbacks_, resource_decoder_, stats_, Config::TypeUrl::get().ClusterLoadAssignment, dispatcher_, init_fetch_timeout, false, @@ -126,6 +136,17 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { }); } + void onDiscoveryResponse( + std::unique_ptr&& response) { + if (should_use_unified_) { + dynamic_cast(subscription_->grpcMux().get()) + ->onDiscoveryResponse(std::move(response), control_plane_stats_); + } else { + dynamic_cast(subscription_->grpcMux().get()) + ->onDiscoveryResponse(std::move(response), control_plane_stats_); + } + } + void deliverConfigUpdate(const std::vector& cluster_names, const std::string& version, bool accept) override { auto response = std::make_unique(); @@ -155,8 +176,7 @@ class 
DeltaSubscriptionTestHarness : public SubscriptionTestHarness { Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _)); expectSendMessage({}, {}, Grpc::Status::WellKnownGrpcStatus::Internal, "bad config", {}); } - static_cast(subscription_->grpcMux().get()) - ->onDiscoveryResponse(std::move(response), control_plane_stats_); + onDiscoveryResponse(std::move(response)); Mock::VerifyAndClearExpectations(&async_stream_); } @@ -196,7 +216,7 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { NiceMock random_; NiceMock local_info_; Grpc::MockAsyncStream async_stream_; - NewGrpcMuxImplSharedPtr xds_context_; + GrpcMuxSharedPtr xds_context_; GrpcSubscriptionImplPtr subscription_; std::string last_response_nonce_; std::set last_cluster_names_; @@ -209,6 +229,7 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { std::queue nonce_acks_required_; std::queue nonce_acks_sent_; bool subscription_started_{}; + bool should_use_unified_; }; } // namespace diff --git a/test/common/config/grpc_stream_test.cc b/test/common/config/grpc_stream_test.cc index 760b24a305bb..fcc438a14d4a 100644 --- a/test/common/config/grpc_stream_test.cc +++ b/test/common/config/grpc_stream_test.cc @@ -108,11 +108,10 @@ TEST_F(GrpcStreamTest, LogClose) { Grpc::Status::WellKnownGrpcStatus::Unavailable); // Different retriable failure: warn. - time_system_.advanceTimeWait(std::chrono::milliseconds(1000)); + time_system_.advanceTimeWait(std::chrono::seconds(1)); EXPECT_CALL(callbacks_, onEstablishmentFailure()); EXPECT_LOG_CONTAINS( - "warn", "stream closed: 4, Deadline Exceeded (previously 14, Unavailable since 1000ms ago)", - { + "warn", "stream closed: 4, Deadline Exceeded (previously 14, Unavailable since 1s ago)", { grpc_stream_.onRemoteClose(Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded, "Deadline Exceeded"); }); @@ -120,7 +119,7 @@ TEST_F(GrpcStreamTest, LogClose) { Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded); // Same retriable failure after a short amount of time: debug. - time_system_.advanceTimeWait(std::chrono::milliseconds(1000)); + time_system_.advanceTimeWait(std::chrono::seconds(1)); EXPECT_CALL(callbacks_, onEstablishmentFailure()); EXPECT_LOG_CONTAINS("debug", "gRPC config stream closed", { grpc_stream_.onRemoteClose(Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded, @@ -130,24 +129,35 @@ TEST_F(GrpcStreamTest, LogClose) { Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded); // Same retriable failure after a long time: warn. - time_system_.advanceTimeWait(std::chrono::milliseconds(100000)); + time_system_.advanceTimeWait(std::chrono::seconds(100)); EXPECT_CALL(callbacks_, onEstablishmentFailure()); - EXPECT_LOG_CONTAINS("warn", "gRPC config stream closed since 101000ms ago", { + EXPECT_LOG_CONTAINS("warn", "gRPC config stream closed since 101s ago: 4, Deadline Exceeded", { grpc_stream_.onRemoteClose(Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded, "Deadline Exceeded"); }); EXPECT_EQ(grpc_stream_.getCloseStatus().value(), Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded); - // Warn again. - time_system_.advanceTimeWait(std::chrono::milliseconds(1000)); + // Warn again, using the newest message. 
+ time_system_.advanceTimeWait(std::chrono::seconds(1)); EXPECT_CALL(callbacks_, onEstablishmentFailure()); - EXPECT_LOG_CONTAINS("warn", "gRPC config stream closed since 102000ms ago", { + EXPECT_LOG_CONTAINS("warn", "gRPC config stream closed since 102s ago: 4, new message", { grpc_stream_.onRemoteClose(Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded, - "Deadline Exceeded"); + "new message"); }); EXPECT_EQ(grpc_stream_.getCloseStatus().value(), Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded); + + // Different retriable failure, using the most recent error message from the previous one. + time_system_.advanceTimeWait(std::chrono::seconds(1)); + EXPECT_CALL(callbacks_, onEstablishmentFailure()); + EXPECT_LOG_CONTAINS( + "warn", + "gRPC config stream closed: 14, Unavailable (previously 4, new message since 103s ago)", { + grpc_stream_.onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Unavailable, "Unavailable"); + }); + EXPECT_EQ(grpc_stream_.getCloseStatus().value(), + Grpc::Status::WellKnownGrpcStatus::Unavailable); } // Successfully receiving a message clears close status. @@ -158,7 +168,7 @@ TEST_F(GrpcStreamTest, LogClose) { EXPECT_TRUE(grpc_stream_.grpcStreamAvailable()); // Status isn't cleared yet. EXPECT_EQ(grpc_stream_.getCloseStatus().value(), - Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded); + Grpc::Status::WellKnownGrpcStatus::Unavailable); auto response = std::make_unique(); grpc_stream_.onReceiveMessage(std::move(response)); diff --git a/test/common/config/grpc_subscription_impl_test.cc b/test/common/config/grpc_subscription_impl_test.cc index 74860c19e859..44505dabd013 100644 --- a/test/common/config/grpc_subscription_impl_test.cc +++ b/test/common/config/grpc_subscription_impl_test.cc @@ -8,10 +8,17 @@ namespace Envoy { namespace Config { namespace { -class GrpcSubscriptionImplTest : public testing::Test, public GrpcSubscriptionTestHarness {}; +class GrpcSubscriptionImplTest : public testing::TestWithParam, + public GrpcSubscriptionTestHarness { +public: + GrpcSubscriptionImplTest() : GrpcSubscriptionTestHarness(GetParam()) {} +}; + +INSTANTIATE_TEST_SUITE_P(GrpcSubscriptionImplTest, GrpcSubscriptionImplTest, + testing::ValuesIn({LegacyOrUnified::Legacy, LegacyOrUnified::Unified})); // Validate that stream creation results in a timer based retry and can recover. -TEST_F(GrpcSubscriptionImplTest, StreamCreationFailure) { +TEST_P(GrpcSubscriptionImplTest, StreamCreationFailure) { InSequence s; EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(nullptr)); @@ -37,7 +44,7 @@ TEST_F(GrpcSubscriptionImplTest, StreamCreationFailure) { } // Validate that the client can recover from a remote stream closure via retry. -TEST_F(GrpcSubscriptionImplTest, RemoteStreamClose) { +TEST_P(GrpcSubscriptionImplTest, RemoteStreamClose) { startSubscription({"cluster0", "cluster1"}); EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); // onConfigUpdateFailed() should not be called for gRPC stream connection failure @@ -46,7 +53,7 @@ TEST_F(GrpcSubscriptionImplTest, RemoteStreamClose) { .Times(0); EXPECT_CALL(*timer_, enableTimer(_, _)); EXPECT_CALL(random_, random()); - mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); + onRemoteClose(); EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, "")); verifyControlPlaneStats(0); @@ -59,7 +66,7 @@ TEST_F(GrpcSubscriptionImplTest, RemoteStreamClose) { // Validate that When the management server gets multiple requests for the same version, it can // ignore later ones. This allows the nonce to be used. 
-TEST_F(GrpcSubscriptionImplTest, RepeatedNonce) { +TEST_P(GrpcSubscriptionImplTest, RepeatedNonce) { InSequence s; startSubscription({"cluster0", "cluster1"}); EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); @@ -79,7 +86,7 @@ TEST_F(GrpcSubscriptionImplTest, RepeatedNonce) { EXPECT_TRUE(statsAre(7, 2, 2, 0, 0, TEST_TIME_MILLIS, 7919287270473417401, "42")); } -TEST_F(GrpcSubscriptionImplTest, UpdateTimeNotChangedOnUpdateReject) { +TEST_P(GrpcSubscriptionImplTest, UpdateTimeNotChangedOnUpdateReject) { InSequence s; startSubscription({"cluster0", "cluster1"}); EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); @@ -87,7 +94,7 @@ TEST_F(GrpcSubscriptionImplTest, UpdateTimeNotChangedOnUpdateReject) { EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, "")); } -TEST_F(GrpcSubscriptionImplTest, UpdateTimeChangedOnUpdateSuccess) { +TEST_P(GrpcSubscriptionImplTest, UpdateTimeChangedOnUpdateSuccess) { InSequence s; startSubscription({"cluster0", "cluster1"}); EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); diff --git a/test/common/config/grpc_subscription_test_harness.h b/test/common/config/grpc_subscription_test_harness.h index 5d49e74b2eff..85bbcb5c6c2f 100644 --- a/test/common/config/grpc_subscription_test_harness.h +++ b/test/common/config/grpc_subscription_test_harness.h @@ -11,6 +11,7 @@ #include "source/common/config/api_version.h" #include "source/common/config/grpc_mux_impl.h" #include "source/common/config/grpc_subscription_impl.h" +#include "source/common/config/xds_mux/grpc_mux_impl.h" #include "test/common/config/subscription_test_harness.h" #include "test/mocks/config/mocks.h" @@ -34,21 +35,30 @@ namespace Config { class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { public: - GrpcSubscriptionTestHarness() : GrpcSubscriptionTestHarness(std::chrono::milliseconds(0)) {} + GrpcSubscriptionTestHarness(Envoy::Config::LegacyOrUnified legacy_or_unified) + : GrpcSubscriptionTestHarness(legacy_or_unified, std::chrono::milliseconds(0)) {} - GrpcSubscriptionTestHarness(std::chrono::milliseconds init_fetch_timeout) + GrpcSubscriptionTestHarness(Envoy::Config::LegacyOrUnified legacy_or_unified, + std::chrono::milliseconds init_fetch_timeout) : method_descriptor_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName( "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints")), - async_client_(new NiceMock()) { + async_client_(new NiceMock()), + should_use_unified_(legacy_or_unified == Envoy::Config::LegacyOrUnified::Unified) { node_.set_id("fo0"); EXPECT_CALL(local_info_, node()).WillRepeatedly(testing::ReturnRef(node_)); ttl_timer_ = new NiceMock(&dispatcher_); timer_ = new Event::MockTimer(&dispatcher_); - mux_ = std::make_shared( - local_info_, std::unique_ptr(async_client_), dispatcher_, - *method_descriptor_, random_, stats_store_, rate_limit_settings_, true); + if (should_use_unified_) { + mux_ = std::make_shared( + std::unique_ptr(async_client_), dispatcher_, *method_descriptor_, + random_, stats_store_, rate_limit_settings_, local_info_, true); + } else { + mux_ = std::make_shared( + local_info_, std::unique_ptr(async_client_), dispatcher_, + *method_descriptor_, random_, stats_store_, rate_limit_settings_, true); + } subscription_ = std::make_unique( mux_, callbacks_, resource_decoder_, stats_, Config::TypeUrl::get().ClusterLoadAssignment, dispatcher_, init_fetch_timeout, false, SubscriptionOptions()); @@ -87,7 +97,9 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { error_detail->set_code(error_code); error_detail->set_message(error_message); } - 
EXPECT_CALL(async_stream_, sendMessageRaw_(Grpc::ProtoBufferEq(expected_request), false)); + EXPECT_CALL( + async_stream_, + sendMessageRaw_(Grpc::ProtoBufferEqIgnoreRepeatedFieldOrdering(expected_request), false)); } void startSubscription(const std::set& cluster_names) override { @@ -97,6 +109,29 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { subscription_->start(flattenResources(cluster_names)); } + void onDiscoveryResponse( + std::unique_ptr&& response) { + if (should_use_unified_) { + dynamic_cast(mux_.get()) + ->onDiscoveryResponse(std::move(response), control_plane_stats_); + return; + } + dynamic_cast(mux_.get()) + ->onDiscoveryResponse(std::move(response), control_plane_stats_); + } + + void onRemoteClose() { + if (should_use_unified_) { + dynamic_cast(mux_.get()) + ->grpcStreamForTest() + .onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); + return; + } + dynamic_cast(mux_.get()) + ->grpcStreamForTest() + .onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); + } + void deliverConfigUpdate(const std::vector& cluster_names, const std::string& version, bool accept) override { std::unique_ptr response( @@ -131,24 +166,27 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { expectSendMessage(last_cluster_names_, version_, false, Grpc::Status::WellKnownGrpcStatus::Internal, "bad config"); } - mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + + onDiscoveryResponse(std::move(response)); EXPECT_EQ(control_plane_stats_.identifier_.value(), "ground_control_foo123"); Mock::VerifyAndClearExpectations(&async_stream_); } void updateResourceInterest(const std::set& cluster_names) override { - // The "watch" mechanism means that updates that lose interest in a resource - // will first generate a request for [still watched resources, i.e. without newly unwatched - // ones] before generating the request for all of cluster_names. - // TODO(fredlas) this unnecessary second request will stop happening once the watch mechanism is - // no longer internally used by GrpcSubscriptionImpl. - std::set both; - for (const auto& n : cluster_names) { - if (last_cluster_names_.find(n) != last_cluster_names_.end()) { - both.insert(n); + if (!should_use_unified_) { + // The "watch" mechanism means that updates that lose interest in a resource + // will first generate a request for [still watched resources, i.e. without newly unwatched + // ones] before generating the request for all of cluster_names. + // TODO(fredlas) this unnecessary second request will stop happening once the watch mechanism + // is no longer internally used by GrpcSubscriptionImpl. 
+ std::set both; + for (const auto& n : cluster_names) { + if (last_cluster_names_.find(n) != last_cluster_names_.end()) { + both.insert(n); + } } + expectSendMessage(both, version_); } - expectSendMessage(both, version_); expectSendMessage(cluster_names, version_); subscription_->updateResourceInterest(flattenResources(cluster_names)); last_cluster_names_ = cluster_names; @@ -183,12 +221,13 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { resource_decoder_{"cluster_name"}; NiceMock local_info_; NiceMock async_stream_; - GrpcMuxImplSharedPtr mux_; + GrpcMuxSharedPtr mux_; GrpcSubscriptionImplPtr subscription_; std::string last_response_nonce_; std::set last_cluster_names_; Envoy::Config::RateLimitSettings rate_limit_settings_; Event::MockTimer* init_timeout_timer_; + bool should_use_unified_; }; // TODO(danielhochman): test with RDS and ensure version_info is same as what API returned diff --git a/test/common/config/new_grpc_mux_impl_test.cc b/test/common/config/new_grpc_mux_impl_test.cc index 6fca22264265..f02373044613 100644 --- a/test/common/config/new_grpc_mux_impl_test.cc +++ b/test/common/config/new_grpc_mux_impl_test.cc @@ -61,8 +61,7 @@ class NewGrpcMuxImplTestBase : public testing::TestWithParam { std::unique_ptr(async_client_), dispatcher_, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), - envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_, - local_info_, false); + random_, stats_, rate_limit_settings_, local_info_, false); return; } grpc_mux_ = std::make_unique( diff --git a/test/common/config/sotw_subscription_state_test.cc b/test/common/config/sotw_subscription_state_test.cc index 8347d511fae6..ce25427690eb 100644 --- a/test/common/config/sotw_subscription_state_test.cc +++ b/test/common/config/sotw_subscription_state_test.cc @@ -14,6 +14,7 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" +using testing::An; using testing::IsSubstring; using testing::NiceMock; using testing::Throw; @@ -74,7 +75,8 @@ class SotwSubscriptionStateTest : public testing::Test { for (const auto& resource_name : resource_names) { response.add_resources()->PackFrom(resource(resource_name)); } - EXPECT_CALL(callbacks_, onConfigUpdate(_, version_info)); + EXPECT_CALL(callbacks_, + onConfigUpdate(An&>(), version_info)); return state_->handleResponse(response); } @@ -87,7 +89,8 @@ class SotwSubscriptionStateTest : public testing::Test { response.set_nonce(nonce); response.set_type_url(Config::getTypeUrl()); response.add_resources()->PackFrom(resource); - EXPECT_CALL(callbacks_, onConfigUpdate(_, version_info)); + EXPECT_CALL(callbacks_, + onConfigUpdate(An&>(), version_info)); return state_->handleResponse(response); } @@ -95,7 +98,8 @@ class SotwSubscriptionStateTest : public testing::Test { envoy::service::discovery::v3::DiscoveryResponse message; message.set_version_info(version_info); message.set_nonce(nonce); - EXPECT_CALL(callbacks_, onConfigUpdate(_, _)).WillOnce(Throw(EnvoyException("oh no"))); + EXPECT_CALL(callbacks_, onConfigUpdate(An&>(), _)) + .WillOnce(Throw(EnvoyException("oh no"))); return state_->handleResponse(message); } diff --git a/test/common/config/subscription_factory_impl_test.cc b/test/common/config/subscription_factory_impl_test.cc index 48c263af84c8..0aa9a84786d2 100644 --- a/test/common/config/subscription_factory_impl_test.cc +++ b/test/common/config/subscription_factory_impl_test.cc @@ -36,6 +36,8 @@ namespace Envoy { 
namespace Config { namespace { +enum class LegacyOrUnified { Legacy, Unified }; + class SubscriptionFactoryTest : public testing::Test { public: SubscriptionFactoryTest() @@ -69,14 +71,31 @@ class SubscriptionFactoryTest : public testing::Test { NiceMock local_info_; NiceMock validation_visitor_; Api::ApiPtr api_; - NiceMock runtime_; SubscriptionFactoryImpl subscription_factory_; }; +class SubscriptionFactoryTestUnifiedOrLegacyMux + : public SubscriptionFactoryTest, + public testing::WithParamInterface { +public: + SubscriptionFactoryTestUnifiedOrLegacyMux() { + if (GetParam() == LegacyOrUnified::Unified) { + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.unified_mux", "true"}}); + } + } + + TestScopedRuntime scoped_runtime_; +}; + class SubscriptionFactoryTestApiConfigSource : public SubscriptionFactoryTest, public testing::WithParamInterface {}; +INSTANTIATE_TEST_SUITE_P(SubscriptionFactoryTestUnifiedOrLegacyMux, + SubscriptionFactoryTestUnifiedOrLegacyMux, + ::testing::Values(LegacyOrUnified::Unified, LegacyOrUnified::Legacy)); + TEST_F(SubscriptionFactoryTest, NoConfigSpecifier) { envoy::config::core::v3::ConfigSource config; EXPECT_THROW_WITH_MESSAGE( @@ -95,7 +114,7 @@ TEST_F(SubscriptionFactoryTest, RestClusterEmpty) { "API configs must have either a gRPC service or a cluster name defined:"); } -TEST_F(SubscriptionFactoryTest, GrpcClusterEmpty) { +TEST_P(SubscriptionFactoryTestUnifiedOrLegacyMux, GrpcClusterEmpty) { envoy::config::core::v3::ConfigSource config; Upstream::ClusterManager::ClusterSet primary_clusters; @@ -121,7 +140,7 @@ TEST_F(SubscriptionFactoryTest, RestClusterSingleton) { subscriptionFromConfigSource(config); } -TEST_F(SubscriptionFactoryTest, GrpcClusterSingleton) { +TEST_P(SubscriptionFactoryTestUnifiedOrLegacyMux, GrpcClusterSingleton) { envoy::config::core::v3::ConfigSource config; Upstream::ClusterManager::ClusterSet primary_clusters; @@ -169,7 +188,7 @@ TEST_F(SubscriptionFactoryTest, RestClusterMultiton) { config.mutable_api_config_source()->GetTypeName())); } -TEST_F(SubscriptionFactoryTest, GrpcClusterMultiton) { +TEST_P(SubscriptionFactoryTestUnifiedOrLegacyMux, GrpcClusterMultiton) { envoy::config::core::v3::ConfigSource config; Upstream::ClusterManager::ClusterSet primary_clusters; @@ -290,7 +309,7 @@ TEST_F(SubscriptionFactoryTest, HttpSubscriptionNoRefreshDelay) { "refresh_delay is required for REST API configuration sources"); } -TEST_F(SubscriptionFactoryTest, GrpcSubscription) { +TEST_P(SubscriptionFactoryTestUnifiedOrLegacyMux, GrpcSubscription) { envoy::config::core::v3::ConfigSource config; auto* api_config_source = config.mutable_api_config_source(); api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); @@ -318,14 +337,14 @@ TEST_F(SubscriptionFactoryTest, GrpcSubscription) { subscriptionFromConfigSource(config)->start({"static_cluster"}); } -TEST_F(SubscriptionFactoryTest, GrpcCollectionSubscriptionBadType) { +TEST_P(SubscriptionFactoryTestUnifiedOrLegacyMux, GrpcCollectionSubscriptionBadType) { EXPECT_THROW_WITH_MESSAGE(collectionSubscriptionFromUrl("xdstp:///foo", {})->start({}), EnvoyException, "xdstp:// type does not match " "envoy.config.endpoint.v3.ClusterLoadAssignment in xdstp:///foo"); } -TEST_F(SubscriptionFactoryTest, GrpcCollectionSubscriptionUnsupportedApiType) { +TEST_P(SubscriptionFactoryTestUnifiedOrLegacyMux, GrpcCollectionSubscriptionUnsupportedApiType) { envoy::config::core::v3::ConfigSource config; auto* api_config_source = config.mutable_api_config_source(); 
api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); @@ -341,7 +360,20 @@ TEST_F(SubscriptionFactoryTest, GrpcCollectionSubscriptionUnsupportedApiType) { EnvoyException, "Unknown xdstp:// transport API type in api_type: GRPC"); } -TEST_F(SubscriptionFactoryTest, GrpcCollectionAggregatedSubscription) { +TEST_P(SubscriptionFactoryTestUnifiedOrLegacyMux, + GrpcCollectionSubscriptionUnsupportedConfigSpecifierType) { + envoy::config::core::v3::ConfigSource config; + config.set_path("/path/foo/bar"); + EXPECT_THROW_WITH_REGEX( + collectionSubscriptionFromUrl( + "xdstp://foo/envoy.config.endpoint.v3.ClusterLoadAssignment/bar", config) + ->start({}), + EnvoyException, + "Missing or not supported config source specifier in envoy::config::core::v3::ConfigSource " + "for a collection. Only ADS and gRPC in delta-xDS mode are supported."); +} + +TEST_P(SubscriptionFactoryTestUnifiedOrLegacyMux, GrpcCollectionAggregatedSubscription) { envoy::config::core::v3::ConfigSource config; auto* api_config_source = config.mutable_api_config_source(); api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::AGGREGATED_DELTA_GRPC); @@ -360,7 +392,7 @@ TEST_F(SubscriptionFactoryTest, GrpcCollectionAggregatedSubscription) { ->start({}); } -TEST_F(SubscriptionFactoryTest, GrpcCollectionDeltaSubscription) { +TEST_P(SubscriptionFactoryTestUnifiedOrLegacyMux, GrpcCollectionDeltaSubscription) { envoy::config::core::v3::ConfigSource config; auto* api_config_source = config.mutable_api_config_source(); api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::DELTA_GRPC); diff --git a/test/common/config/subscription_impl_test.cc b/test/common/config/subscription_impl_test.cc index 4076319aa80e..542d518232cc 100644 --- a/test/common/config/subscription_impl_test.cc +++ b/test/common/config/subscription_impl_test.cc @@ -16,6 +16,8 @@ namespace { enum class SubscriptionType { Grpc, DeltaGrpc, + UnifiedGrpc, + UnifiedDeltaGrpc, Http, Filesystem, }; @@ -48,10 +50,20 @@ class SubscriptionImplTest : public testing::TestWithParam { void initialize(std::chrono::milliseconds init_fetch_timeout = std::chrono::milliseconds(0)) { switch (GetParam()) { case SubscriptionType::Grpc: - test_harness_ = std::make_unique(init_fetch_timeout); + test_harness_ = std::make_unique(LegacyOrUnified::Legacy, + init_fetch_timeout); break; case SubscriptionType::DeltaGrpc: - test_harness_ = std::make_unique(init_fetch_timeout); + test_harness_ = std::make_unique(LegacyOrUnified::Legacy, + init_fetch_timeout); + break; + case SubscriptionType::UnifiedGrpc: + test_harness_ = std::make_unique(LegacyOrUnified::Unified, + init_fetch_timeout); + break; + case SubscriptionType::UnifiedDeltaGrpc: + test_harness_ = std::make_unique(LegacyOrUnified::Unified, + init_fetch_timeout); break; case SubscriptionType::Http: test_harness_ = std::make_unique(init_fetch_timeout); diff --git a/test/common/config/subscription_test_harness.h b/test/common/config/subscription_test_harness.h index e1b5421da711..3802babd4b1b 100644 --- a/test/common/config/subscription_test_harness.h +++ b/test/common/config/subscription_test_harness.h @@ -11,6 +11,7 @@ namespace Envoy { namespace Config { +enum class LegacyOrUnified { Legacy, Unified }; const uint64_t TEST_TIME_MILLIS = 42000; /** diff --git a/test/common/config/utility_test.cc b/test/common/config/utility_test.cc index 95bf82487922..f822f37e8cc8 100644 --- a/test/common/config/utility_test.cc +++ b/test/common/config/utility_test.cc @@ -25,6 +25,7 @@ #include 
"gmock/gmock.h" #include "gtest/gtest.h" #include "udpa/type/v1/typed_struct.pb.h" +#include "xds/type/v3/typed_struct.pb.h" using testing::Ref; using testing::Return; @@ -328,20 +329,27 @@ TEST(UtilityTest, TranslateAnyToFactoryConfig) { EXPECT_THAT(*config, ProtoEq(source_duration)); } -void packTypedStructIntoAny(ProtobufWkt::Any& typed_config, const Protobuf::Message& inner) { - udpa::type::v1::TypedStruct typed_struct; - (*typed_struct.mutable_type_url()) = - absl::StrCat("type.googleapis.com/", inner.GetDescriptor()->full_name()); - MessageUtil::jsonConvert(inner, *typed_struct.mutable_value()); - typed_config.PackFrom(typed_struct); -} +template class UtilityTypedStructTest : public ::testing::Test { +public: + static void packTypedStructIntoAny(ProtobufWkt::Any& typed_config, + const Protobuf::Message& inner) { + T typed_struct; + (*typed_struct.mutable_type_url()) = + absl::StrCat("type.googleapis.com/", inner.GetDescriptor()->full_name()); + MessageUtil::jsonConvert(inner, *typed_struct.mutable_value()); + typed_config.PackFrom(typed_struct); + } +}; + +using TypedStructTypes = ::testing::Types; +TYPED_TEST_SUITE(UtilityTypedStructTest, TypedStructTypes); -// Verify that udpa.type.v1.TypedStruct can be translated into google.protobuf.Struct -TEST(UtilityTest, TypedStructToStruct) { +// Verify that TypedStruct can be translated into google.protobuf.Struct +TYPED_TEST(UtilityTypedStructTest, TypedStructToStruct) { ProtobufWkt::Any typed_config; ProtobufWkt::Struct untyped_struct; (*untyped_struct.mutable_fields())["foo"].set_string_value("bar"); - packTypedStructIntoAny(typed_config, untyped_struct); + this->packTypedStructIntoAny(typed_config, untyped_struct); ProtobufWkt::Struct out; Utility::translateOpaqueConfig(typed_config, ProtobufMessage::getStrictValidationVisitor(), out); @@ -349,16 +357,16 @@ TEST(UtilityTest, TypedStructToStruct) { EXPECT_THAT(out, ProtoEq(untyped_struct)); } -// Verify that udpa.type.v1.TypedStruct can be translated into an arbitrary message of correct type +// Verify that TypedStruct can be translated into an arbitrary message of correct type // (v2 API, no upgrading). -TEST(UtilityTest, TypedStructToClusterV2) { +TYPED_TEST(UtilityTypedStructTest, TypedStructToClusterV2) { ProtobufWkt::Any typed_config; API_NO_BOOST(envoy::api::v2::Cluster) cluster; const std::string cluster_config_yaml = R"EOF( drain_connections_on_host_removal: true )EOF"; TestUtility::loadFromYaml(cluster_config_yaml, cluster); - packTypedStructIntoAny(typed_config, cluster); + this->packTypedStructIntoAny(typed_config, cluster); { API_NO_BOOST(envoy::api::v2::Cluster) out; @@ -373,16 +381,16 @@ TEST(UtilityTest, TypedStructToClusterV2) { } } -// Verify that udpa.type.v1.TypedStruct can be translated into an arbitrary message of correct type +// Verify that TypedStruct can be translated into an arbitrary message of correct type // (v3 API, upgrading). 
-TEST(UtilityTest, TypedStructToClusterV3) { +TYPED_TEST(UtilityTypedStructTest, TypedStructToClusterV3) { ProtobufWkt::Any typed_config; API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster; const std::string cluster_config_yaml = R"EOF( ignore_health_on_host_removal: true )EOF"; TestUtility::loadFromYaml(cluster_config_yaml, cluster); - packTypedStructIntoAny(typed_config, cluster); + this->packTypedStructIntoAny(typed_config, cluster); { API_NO_BOOST(envoy::config::cluster::v3::Cluster) out; @@ -397,6 +405,30 @@ TEST(UtilityTest, TypedStructToClusterV3) { } } +// Verify that translation from TypedStruct into message of incorrect type fails +TYPED_TEST(UtilityTypedStructTest, TypedStructToInvalidType) { + ProtobufWkt::Any typed_config; + envoy::config::bootstrap::v3::Bootstrap bootstrap; + const std::string bootstrap_config_yaml = R"EOF( + admin: + access_log: + - name: envoy.access_loggers.file + typed_config: + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/null + address: + pipe: + path: "/" + )EOF"; + TestUtility::loadFromYaml(bootstrap_config_yaml, bootstrap); + this->packTypedStructIntoAny(typed_config, bootstrap); + + ProtobufWkt::Any out; + EXPECT_THROW_WITH_REGEX(Utility::translateOpaqueConfig( + typed_config, ProtobufMessage::getStrictValidationVisitor(), out), + EnvoyException, "Unable to parse JSON as proto"); +} + // Verify that Any can be translated into an arbitrary message of correct type // (v2 API, no upgrading). TEST(UtilityTest, AnyToClusterV2) { @@ -429,30 +461,6 @@ TEST(UtilityTest, AnyToClusterV3) { EXPECT_THAT(out, ProtoEq(cluster)); } -// Verify that translation from udpa.type.v1.TypedStruct into message of incorrect type fails -TEST(UtilityTest, TypedStructToInvalidType) { - ProtobufWkt::Any typed_config; - envoy::config::bootstrap::v3::Bootstrap bootstrap; - const std::string bootstrap_config_yaml = R"EOF( - admin: - access_log: - - name: envoy.access_loggers.file - typed_config: - "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog - path: /dev/null - address: - pipe: - path: "/" - )EOF"; - TestUtility::loadFromYaml(bootstrap_config_yaml, bootstrap); - packTypedStructIntoAny(typed_config, bootstrap); - - ProtobufWkt::Any out; - EXPECT_THROW_WITH_REGEX(Utility::translateOpaqueConfig( - typed_config, ProtobufMessage::getStrictValidationVisitor(), out), - EnvoyException, "Unable to parse JSON as proto"); -} - // Verify that ProtobufWkt::Empty can load into a typed factory with an empty config proto TEST(UtilityTest, EmptyToEmptyConfig) { ProtobufWkt::Any typed_config; diff --git a/test/common/config/xds_grpc_mux_impl_test.cc b/test/common/config/xds_grpc_mux_impl_test.cc index 70bde3f3fe7c..0943e469cb09 100644 --- a/test/common/config/xds_grpc_mux_impl_test.cc +++ b/test/common/config/xds_grpc_mux_impl_test.cc @@ -61,8 +61,7 @@ class GrpcMuxImplTestBase : public testing::Test { std::unique_ptr(async_client_), dispatcher_, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), - envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_, - local_info_, true); + random_, stats_, rate_limit_settings_, local_info_, true); } void setup(const RateLimitSettings& custom_rate_limit_settings) { @@ -70,8 +69,7 @@ class GrpcMuxImplTestBase : public testing::Test { std::unique_ptr(async_client_), dispatcher_, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( 
"envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), - envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, custom_rate_limit_settings, - local_info_, true); + random_, stats_, custom_rate_limit_settings, local_info_, true); } void expectSendMessage(const std::string& type_url, @@ -890,8 +888,7 @@ TEST_F(GrpcMuxImplTest, BadLocalInfoEmptyClusterName) { std::unique_ptr(async_client_), dispatcher_, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), - envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_, - local_info_, true), + random_, stats_, rate_limit_settings_, local_info_, true), EnvoyException, "ads: node 'id' and 'cluster' are required. Set it either in 'node' config or via " "--service-node and --service-cluster options."); @@ -904,8 +901,7 @@ TEST_F(GrpcMuxImplTest, BadLocalInfoEmptyNodeName) { std::unique_ptr(async_client_), dispatcher_, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), - envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_, - local_info_, true), + random_, stats_, rate_limit_settings_, local_info_, true), EnvoyException, "ads: node 'id' and 'cluster' are required. Set it either in 'node' config or via " "--service-node and --service-cluster options."); @@ -934,6 +930,55 @@ TEST_F(GrpcMuxImplTest, DynamicContextParameters) { expectSendMessage("foo", {}, "", false); } +TEST_F(GrpcMuxImplTest, AllMuxesStateTest) { + setup(); + auto grpc_mux_1 = std::make_unique( + std::unique_ptr(), dispatcher_, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), + random_, stats_, rate_limit_settings_, local_info_, true); + + Config::XdsMux::GrpcMuxSotw::shutdownAll(); + + EXPECT_TRUE(grpc_mux_->isShutdown()); + EXPECT_TRUE(grpc_mux_1->isShutdown()); +} + +class NullGrpcMuxImplTest : public testing::Test { +public: + NullGrpcMuxImplTest() : null_mux_(std::make_unique()) {} + Config::GrpcMuxPtr null_mux_; + NiceMock callbacks_; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder_{"cluster_name"}; +}; + +TEST_F(NullGrpcMuxImplTest, StartImplemented) { EXPECT_NO_THROW(null_mux_->start()); } + +TEST_F(NullGrpcMuxImplTest, PauseImplemented) { + ScopedResume scoped; + EXPECT_NO_THROW(scoped = null_mux_->pause("ignored")); +} + +TEST_F(NullGrpcMuxImplTest, PauseMultipleArgsImplemented) { + ScopedResume scoped; + const std::vector params = {"ignored", "another_ignored"}; + EXPECT_NO_THROW(scoped = null_mux_->pause(params)); +} + +TEST_F(NullGrpcMuxImplTest, RequestOnDemandNotImplemented) { + EXPECT_DEATH(null_mux_->requestOnDemandUpdate("type_url", {"for_update"}), "not implemented"); +} + +TEST_F(NullGrpcMuxImplTest, AddWatchRaisesException) { + NiceMock callbacks; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder{"cluster_name"}; + + EXPECT_THROW_WITH_REGEX(null_mux_->addWatch("type_url", {}, callbacks, resource_decoder, {}), + EnvoyException, "ADS must be configured to support an ADS config source"); +} + } // namespace } // namespace XdsMux } // namespace Config diff --git a/test/common/conn_pool/conn_pool_base_test.cc b/test/common/conn_pool/conn_pool_base_test.cc index 17dcfe7e8473..90ec7ceb6456 100644 --- a/test/common/conn_pool/conn_pool_base_test.cc +++ 
b/test/common/conn_pool/conn_pool_base_test.cc @@ -4,6 +4,7 @@ #include "test/mocks/event/mocks.h" #include "test/mocks/upstream/cluster_info.h" #include "test/mocks/upstream/host.h" +#include "test/test_common/simulated_time_system.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -25,6 +26,15 @@ class TestActiveClient : public ActiveClient { bool closingWithIncompleteStream() const override { return false; } uint32_t numActiveStreams() const override { return active_streams_; } absl::optional protocol() const override { return absl::nullopt; } + void onEvent(Network::ConnectionEvent event) override { + parent_.onConnectionEvent(*this, "", event); + } + + static void incrementActiveStreams(ActiveClient& client) { + TestActiveClient* testClient = dynamic_cast(&client); + ASSERT_TRUE(testClient != nullptr); + testClient->active_streams_++; + } uint32_t active_streams_{}; }; @@ -67,8 +77,8 @@ class ConnPoolImplBaseTest : public testing::Test { return ret; })); ON_CALL(pool_, onPoolReady(_, _)) - .WillByDefault(Invoke([](ActiveClient& client, AttachContext&) -> void { - ++(reinterpret_cast(&client)->active_streams_); + .WillByDefault(Invoke([](ActiveClient& client, AttachContext&) { + TestActiveClient::incrementActiveStreams(client); })); } @@ -92,6 +102,112 @@ class ConnPoolImplBaseTest : public testing::Test { std::vector clients_; }; +class ConnPoolImplDispatcherBaseTest : public testing::Test { +public: + ConnPoolImplDispatcherBaseTest() + : api_(Api::createApiForTest(time_system_)), + dispatcher_(api_->allocateDispatcher("test_thread")), + pool_(host_, Upstream::ResourcePriority::Default, *dispatcher_, nullptr, nullptr, state_) { + // Default connections to 1024 because the tests shouldn't be relying on the + // connection resource limit for most tests. + cluster_->resetResourceManager(1024, 1024, 1024, 1, 1); + ON_CALL(pool_, instantiateActiveClient).WillByDefault(Invoke([&]() -> ActiveClientPtr { + auto ret = + std::make_unique>(pool_, stream_limit_, concurrent_streams_); + clients_.push_back(ret.get()); + ret->real_host_description_ = descr_; + return ret; + })); + ON_CALL(pool_, onPoolReady(_, _)) + .WillByDefault(Invoke([](ActiveClient& client, AttachContext&) { + TestActiveClient::incrementActiveStreams(client); + })); + } + + void newConnectingClient() { + ON_CALL(*cluster_, maxConnectionDuration).WillByDefault(Return(max_connection_duration_opt_)); + + // Create a new stream using the pool + EXPECT_CALL(pool_, instantiateActiveClient); + pool_.newStreamImpl(context_); + ASSERT_EQ(1, clients_.size()); + EXPECT_EQ(ActiveClient::State::CONNECTING, clients_.back()->state()); + + // Verify that the connection duration timer isn't set yet. This shouldn't happen + // until after connect. + EXPECT_EQ(nullptr, clients_.back()->connection_duration_timer_); + } + + void newActiveClientAndStream(ActiveClient::State expected_state = ActiveClient::State::BUSY) { + // Start with a connecting client + newConnectingClient(); + + // Connect and expect the expected state. 
+ EXPECT_CALL(pool_, onPoolReady); + clients_.back()->onEvent(Network::ConnectionEvent::Connected); + EXPECT_EQ(expected_state, clients_.back()->state()); + + // Verify that the connect duration timer is consistent with the max connection duration opt + if (max_connection_duration_opt_.has_value()) { + EXPECT_TRUE(clients_.back()->connection_duration_timer_ != nullptr); + EXPECT_TRUE(clients_.back()->connection_duration_timer_->enabled()); + } else { + EXPECT_EQ(nullptr, clients_.back()->connection_duration_timer_); + } + } + + void newDrainingClient() { + // Use a stream limit of 1 to force draining. Then, connect and expect draining. + stream_limit_ = 1; + newActiveClientAndStream(ActiveClient::State::DRAINING); + } + + void newClosedClient() { + // Start with a draining client. Then, close the stream. This will result in the client being + // closed. + newDrainingClient(); + closeStream(); + } + + // Advance time and block until the next event + void advanceTimeAndRun(uint32_t duration_ms) { + time_system_.advanceTimeAndRun(std::chrono::milliseconds(duration_ms), *dispatcher_, + Event::Dispatcher::RunType::Block); + } + + // Close the active stream + void closeStream() { + clients_.back()->active_streams_ = 0; + pool_.onStreamClosed(*clients_.back(), false); + } + + void closeStreamAndDrainClient() { + // Close the active stream and expect the client to be ready. + closeStream(); + EXPECT_EQ(ActiveClient::State::READY, clients_.back()->state()); + + // The client is still ready. So, to clean up, we have to drain the pool manually. + pool_.drainConnectionsImpl(Envoy::ConnectionPool::DrainBehavior::DrainAndDelete); + } + + Event::SimulatedTimeSystemHelper time_system_; + Api::ApiPtr api_; + Event::DispatcherPtr dispatcher_; + uint32_t max_connection_duration_ = 5000; + absl::optional max_connection_duration_opt_{max_connection_duration_}; + uint32_t stream_limit_ = 100; + uint32_t concurrent_streams_ = 1; + Upstream::ClusterConnectivityState state_; + std::shared_ptr> descr_{ + new NiceMock()}; + std::shared_ptr cluster_{new NiceMock()}; + Upstream::HostSharedPtr host_{ + Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80", dispatcher_->timeSource())}; + TestConnPoolImplBase pool_; + AttachContext context_; + std::vector clients_; +}; + TEST_F(ConnPoolImplBaseTest, DumpState) { std::stringstream out; pool_.dumpState(out, 0); @@ -202,6 +318,105 @@ TEST_F(ConnPoolImplBaseTest, ExplicitPreconnectNotHealthy) { EXPECT_FALSE(pool_.maybePreconnectImpl(1)); } +TEST_F(ConnPoolImplDispatcherBaseTest, MaxConnectionDurationTimerNull) { + // Force a null max connection duration optional. + // newActiveClientAndStream() will expect the connection duration timer to remain null. + max_connection_duration_opt_ = absl::nullopt; + newActiveClientAndStream(); + closeStreamAndDrainClient(); +} + +TEST_F(ConnPoolImplDispatcherBaseTest, MaxConnectionDurationTimerEnabled) { + // Use the default max connection duration opt. + // newActiveClientAndStream() will expect the connection duration timer to be non-null. + newActiveClientAndStream(); + closeStreamAndDrainClient(); +} + +TEST_F(ConnPoolImplDispatcherBaseTest, MaxConnectionDurationBusy) { + newActiveClientAndStream(); + + // Verify that advancing to just before the connection duration timeout doesn't drain the + // connection. 
+ advanceTimeAndRun(max_connection_duration_ - 1); + EXPECT_EQ(0, pool_.host()->cluster().stats().upstream_cx_max_duration_reached_.value()); + EXPECT_EQ(ActiveClient::State::BUSY, clients_.back()->state()); + + // Verify that advancing past the connection duration timeout drains the connection, + // because there's a busy client. + advanceTimeAndRun(2); + EXPECT_EQ(1, pool_.host()->cluster().stats().upstream_cx_max_duration_reached_.value()); + EXPECT_EQ(ActiveClient::State::DRAINING, clients_.back()->state()); + closeStream(); +} + +TEST_F(ConnPoolImplDispatcherBaseTest, MaxConnectionDurationReady) { + newActiveClientAndStream(); + + // Close active stream and expect that the client goes back to ready + closeStream(); + EXPECT_EQ(ActiveClient::State::READY, clients_.back()->state()); + + // Verify that advancing to just before the connection duration timeout doesn't close the + // connection. + advanceTimeAndRun(max_connection_duration_ - 1); + EXPECT_EQ(0, pool_.host()->cluster().stats().upstream_cx_max_duration_reached_.value()); + EXPECT_EQ(ActiveClient::State::READY, clients_.back()->state()); + + // Verify that advancing past the connection duration timeout closes the connection, + // because there's nothing to drain. + advanceTimeAndRun(2); + EXPECT_EQ(1, pool_.host()->cluster().stats().upstream_cx_max_duration_reached_.value()); +} + +TEST_F(ConnPoolImplDispatcherBaseTest, MaxConnectionDurationAlreadyDraining) { + // Start with a client that is already draining. + newDrainingClient(); + + // Verify that advancing past the connection duration timeout does nothing to an active client + // that is already draining. + advanceTimeAndRun(max_connection_duration_ + 1); + EXPECT_EQ(0, pool_.host()->cluster().stats().upstream_cx_max_duration_reached_.value()); + EXPECT_EQ(ActiveClient::State::DRAINING, clients_.back()->state()); + closeStream(); +} + +TEST_F(ConnPoolImplDispatcherBaseTest, MaxConnectionDurationAlreadyClosed) { + // Start with a client that is already closed. + newClosedClient(); + + // Verify that advancing past the connection duration timeout does nothing to the active + // client that is already closed. + advanceTimeAndRun(max_connection_duration_ + 1); + EXPECT_EQ(0, pool_.host()->cluster().stats().upstream_cx_max_duration_reached_.value()); +} + +TEST_F(ConnPoolImplDispatcherBaseTest, MaxConnectionDurationCallbackWhileClosedBug) { + // Start with a connecting client + newClosedClient(); + + // Expect an ENVOY_BUG if the connection duration callback fires while in the CLOSED state. + // We forcibly call the connection duration callback here because under normal circumstances there + // is no timer set up. + EXPECT_ENVOY_BUG(clients_.back()->onConnectionDurationTimeout(), + "max connection duration reached while closed"); +} + +TEST_F(ConnPoolImplDispatcherBaseTest, MaxConnectionDurationCallbackWhileConnectingBug) { + // Start with a connecting client + newConnectingClient(); + + // Expect an ENVOY_BUG if the connection duration callback fires while still in the CONNECTING + // state. We forcibly call the connection duration callback here because under normal + // circumstances there is no timer set up. + EXPECT_ENVOY_BUG(clients_.back()->onConnectionDurationTimeout(), + "max connection duration reached while connecting"); + + // Finish the test as if the connection was never successful. + EXPECT_CALL(pool_, onPoolFailure); + pool_.destructAllConnections(); +} + // Remote close simulates the peer closing the connection. 
TEST_F(ConnPoolImplBaseTest, PoolIdleCallbackTriggeredRemoteClose) { EXPECT_CALL(dispatcher_, createTimer_(_)).Times(AnyNumber()); diff --git a/test/common/event/file_event_impl_test.cc b/test/common/event/file_event_impl_test.cc index 6bcd002d3c39..d3a842ada010 100644 --- a/test/common/event/file_event_impl_test.cc +++ b/test/common/event/file_event_impl_test.cc @@ -88,8 +88,6 @@ TEST_P(FileEventImplActivateTest, Activate) { EXPECT_CALL(read_event, ready()); ReadyWatcher write_event; EXPECT_CALL(write_event, ready()); - ReadyWatcher closed_event; - EXPECT_CALL(closed_event, ready()); const FileTriggerType trigger = Event::PlatformDefaultTriggerType; @@ -103,14 +101,10 @@ TEST_P(FileEventImplActivateTest, Activate) { if (events & FileReadyType::Write) { write_event.ready(); } - - if (events & FileReadyType::Closed) { - closed_event.ready(); - } }, - trigger, FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed); + trigger, FileReadyType::Read | FileReadyType::Write); - file_event->activate(FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed); + file_event->activate(FileReadyType::Read | FileReadyType::Write); dispatcher->run(Event::Dispatcher::RunType::NonBlock); os_sys_calls_.close(fd); @@ -125,7 +119,6 @@ TEST_P(FileEventImplActivateTest, ActivateChaining) { ReadyWatcher fd_event; ReadyWatcher read_event; ReadyWatcher write_event; - ReadyWatcher closed_event; ReadyWatcher prepare_watcher; evwatch_prepare_new(&static_cast(dispatcher.get())->base(), onWatcherReady, @@ -140,19 +133,13 @@ TEST_P(FileEventImplActivateTest, ActivateChaining) { if (events & FileReadyType::Read) { read_event.ready(); file_event->activate(FileReadyType::Write); - file_event->activate(FileReadyType::Closed); } if (events & FileReadyType::Write) { write_event.ready(); - file_event->activate(FileReadyType::Closed); - } - - if (events & FileReadyType::Closed) { - closed_event.ready(); } }, - trigger, FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed); + trigger, FileReadyType::Read | FileReadyType::Write); testing::InSequence s; // First loop iteration: handle scheduled read event and the real write event produced by poll. @@ -166,13 +153,10 @@ TEST_P(FileEventImplActivateTest, ActivateChaining) { EXPECT_CALL(prepare_watcher, ready()); EXPECT_CALL(fd_event, ready()); EXPECT_CALL(write_event, ready()); - EXPECT_CALL(closed_event, ready()); - // Third loop iteration: handle close event scheduled while handling write. - EXPECT_CALL(prepare_watcher, ready()); - EXPECT_CALL(fd_event, ready()); - EXPECT_CALL(closed_event, ready()); - // Fourth loop iteration: poll returned no new real events. - EXPECT_CALL(prepare_watcher, ready()); + if constexpr (Event::PlatformDefaultTriggerType != Event::FileTriggerType::EmulatedEdge) { + // Third loop iteration: poll returned no new real events. 
+ EXPECT_CALL(prepare_watcher, ready()); + } file_event->activate(FileReadyType::Read); dispatcher->run(Event::Dispatcher::RunType::NonBlock); @@ -189,7 +173,6 @@ TEST_P(FileEventImplActivateTest, SetEnableCancelsActivate) { ReadyWatcher fd_event; ReadyWatcher read_event; ReadyWatcher write_event; - ReadyWatcher closed_event; ReadyWatcher prepare_watcher; evwatch_prepare_new(&static_cast(dispatcher.get())->base(), onWatcherReady, @@ -210,12 +193,8 @@ TEST_P(FileEventImplActivateTest, SetEnableCancelsActivate) { if (events & FileReadyType::Write) { write_event.ready(); } - - if (events & FileReadyType::Closed) { - closed_event.ready(); - } }, - trigger, FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed); + trigger, FileReadyType::Read | FileReadyType::Write); testing::InSequence s; // First loop iteration: handle scheduled read event and the real write event produced by poll. diff --git a/test/common/filter/BUILD b/test/common/filter/BUILD index 4d9b44b7f211..1bc00f1e8c84 100644 --- a/test/common/filter/BUILD +++ b/test/common/filter/BUILD @@ -15,8 +15,8 @@ envoy_cc_test( "//source/common/config:utility_lib", "//source/common/filter:config_discovery_lib", "//source/common/json:json_loader_lib", - "//source/extensions/filters/http/health_check:config", "//source/extensions/filters/http/router:config", + "//test/integration/filters:add_body_filter_config_lib", "//test/mocks/local_info:local_info_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/server:server_mocks", diff --git a/test/common/filter/config_discovery_impl_test.cc b/test/common/filter/config_discovery_impl_test.cc index ede45f257725..7eebb2b237a0 100644 --- a/test/common/filter/config_discovery_impl_test.cc +++ b/test/common/filter/config_discovery_impl_test.cc @@ -361,8 +361,8 @@ TEST_F(FilterConfigDiscoveryImplTest, DualProvidersInvalid) { - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig name: foo typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck - pass_through_mode: false + "@type": type.googleapis.com/test.integration.filters.AddBodyFilterConfig + body_size: 10 )EOF"; const auto response = TestUtility::parseYaml(response_yaml); @@ -372,8 +372,7 @@ TEST_F(FilterConfigDiscoveryImplTest, DualProvidersInvalid) { EXPECT_THROW_WITH_MESSAGE( callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()), EnvoyException, - "Error: filter config has type URL envoy.extensions.filters.http.health_check.v3.HealthCheck " - "but " + "Error: filter config has type URL test.integration.filters.AddBodyFilterConfig but " "expect envoy.extensions.filters.http.router.v3.Router."); EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); } diff --git a/test/common/formatter/substitution_formatter_test.cc b/test/common/formatter/substitution_formatter_test.cc index 6ca626f78d36..e2b9217e21ef 100644 --- a/test/common/formatter/substitution_formatter_test.cc +++ b/test/common/formatter/substitution_formatter_test.cc @@ -361,6 +361,20 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::numberValue(1.0))); } + { + StreamInfo::BytesMeterSharedPtr upstream_bytes_meter{ + std::make_shared()}; + upstream_bytes_meter->addWireBytesReceived(1); + StreamInfoFormatter wire_bytes_received_format("UPSTREAM_WIRE_BYTES_RECEIVED"); + EXPECT_CALL(stream_info, getUpstreamBytesMeter()) + .WillRepeatedly(ReturnRef(upstream_bytes_meter)); + EXPECT_EQ("1", 
wire_bytes_received_format.format(request_headers, response_headers, + response_trailers, stream_info, body)); + EXPECT_THAT(wire_bytes_received_format.formatValue(request_headers, response_headers, + response_trailers, stream_info, body), + ProtoEq(ValueUtil::numberValue(1.0))); + } + { StreamInfoFormatter protocol_format("PROTOCOL"); absl::optional protocol = Http::Protocol::Http11; @@ -450,6 +464,20 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::numberValue(1.0))); } + { + StreamInfo::BytesMeterSharedPtr upstream_bytes_meter{ + std::make_shared()}; + upstream_bytes_meter->addWireBytesSent(1); + StreamInfoFormatter wire_bytes_sent_format("UPSTREAM_WIRE_BYTES_SENT"); + EXPECT_CALL(stream_info, getUpstreamBytesMeter()) + .WillRepeatedly(ReturnRef(upstream_bytes_meter)); + EXPECT_EQ("1", wire_bytes_sent_format.format(request_headers, response_headers, + response_trailers, stream_info, body)); + EXPECT_THAT(wire_bytes_sent_format.formatValue(request_headers, response_headers, + response_trailers, stream_info, body), + ProtoEq(ValueUtil::numberValue(1.0))); + } + { StreamInfoFormatter duration_format("DURATION"); absl::optional dur = std::chrono::nanoseconds(15000000); diff --git a/test/common/grpc/async_client_impl_test.cc b/test/common/grpc/async_client_impl_test.cc index f30efdd16d95..c6f2b3400668 100644 --- a/test/common/grpc/async_client_impl_test.cc +++ b/test/common/grpc/async_client_impl_test.cc @@ -175,11 +175,12 @@ TEST_F(EnvoyAsyncClientImplTest, RequestHttpStartFail) { Tracing::MockSpan active_span; Tracing::MockSpan* child_span{new Tracing::MockSpan()}; - EXPECT_CALL(active_span, spawnChild_(_, "async test_cluster egress", _)) + EXPECT_CALL(active_span, spawnChild_(_, "async helloworld.Greeter.SayHello egress", _)) .WillOnce(Return(child_span)); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("test_cluster"))); + EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq("test_cluster"))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq("14"))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); EXPECT_CALL(*child_span, finishSpan()); @@ -242,11 +243,12 @@ TEST_F(EnvoyAsyncClientImplTest, RequestHttpSendHeadersFail) { Tracing::MockSpan active_span; Tracing::MockSpan* child_span{new Tracing::MockSpan()}; - EXPECT_CALL(active_span, spawnChild_(_, "async test_cluster egress", _)) + EXPECT_CALL(active_span, spawnChild_(_, "async helloworld.Greeter.SayHello egress", _)) .WillOnce(Return(child_span)); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("test_cluster"))); + EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq("test_cluster"))); EXPECT_CALL(*child_span, injectContext(_)); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq("13"))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); diff --git a/test/common/grpc/google_async_client_impl_test.cc b/test/common/grpc/google_async_client_impl_test.cc index e0eb023f3ef1..70fa4bf3e6f5 100644 --- a/test/common/grpc/google_async_client_impl_test.cc +++ b/test/common/grpc/google_async_client_impl_test.cc @@ -160,11 +160,12 @@ TEST_F(EnvoyGoogleAsyncClientImplTest, 
RequestHttpStartFail) { Tracing::MockSpan active_span; Tracing::MockSpan* child_span{new Tracing::MockSpan()}; - EXPECT_CALL(active_span, spawnChild_(_, "async test_cluster egress", _)) + EXPECT_CALL(active_span, spawnChild_(_, "async helloworld.Greeter.SayHello egress", _)) .WillOnce(Return(child_span)); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("test_cluster"))); + EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().UpstreamAddress), Eq("fake_address"))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq("14"))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); EXPECT_CALL(*child_span, finishSpan()); diff --git a/test/common/grpc/grpc_client_integration.h b/test/common/grpc/grpc_client_integration.h index 00aebe7cefdf..372e0abd1d38 100644 --- a/test/common/grpc/grpc_client_integration.h +++ b/test/common/grpc/grpc_client_integration.h @@ -13,7 +13,10 @@ namespace Envoy { namespace Grpc { // Support parameterizing over state-of-the-world xDS vs delta xDS. -enum class SotwOrDelta { Sotw, Delta }; +enum class SotwOrDelta { Sotw, Delta, UnifiedSotw, UnifiedDelta }; + +// Unified or Legacy grpc mux implementation +enum class LegacyOrUnified { Legacy, Unified }; class BaseGrpcClientIntegrationParamTest { public: @@ -57,6 +60,26 @@ class GrpcClientIntegrationParamTest ClientType clientType() const override { return std::get<1>(GetParam()); } }; +class UnifiedOrLegacyMuxIntegrationParamTest + : public BaseGrpcClientIntegrationParamTest, + public testing::TestWithParam< + std::tuple> { +public: + ~UnifiedOrLegacyMuxIntegrationParamTest() override = default; + static std::string protocolTestParamsToString( + const ::testing::TestParamInfo< + std::tuple>& p) { + return fmt::format("{}_{}_{}", + std::get<0>(p.param) == Network::Address::IpVersion::v4 ? "IPv4" : "IPv6", + std::get<1>(p.param) == ClientType::GoogleGrpc ? "GoogleGrpc" : "EnvoyGrpc", + std::get<2>(p.param) == LegacyOrUnified::Legacy ? 
"Legacy" : "Unified"); + } + Network::Address::IpVersion ipVersion() const override { return std::get<0>(GetParam()); } + ClientType clientType() const override { return std::get<1>(GetParam()); } + LegacyOrUnified unifiedOrLegacy() const { return std::get<2>(GetParam()); } + bool isUnified() const { return std::get<2>(GetParam()) == LegacyOrUnified::Unified; } +}; + class DeltaSotwIntegrationParamTest : public BaseGrpcClientIntegrationParamTest, public testing::TestWithParam< @@ -95,6 +118,10 @@ class DeltaSotwIntegrationParamTest testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ testing::ValuesIn(TestEnvironment::getsGrpcVersionsForTest()), \ testing::Values(Grpc::SotwOrDelta::Sotw, Grpc::SotwOrDelta::Delta)) +#define UNIFIED_LEGACY_GRPC_CLIENT_INTEGRATION_PARAMS \ + testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ + testing::ValuesIn(TestEnvironment::getsGrpcVersionsForTest()), \ + testing::Values(Grpc::LegacyOrUnified::Legacy, Grpc::LegacyOrUnified::Unified)) } // namespace Grpc } // namespace Envoy diff --git a/test/common/grpc/grpc_client_integration_test_harness.h b/test/common/grpc/grpc_client_integration_test_harness.h index 852bfb3f2425..0ac701714938 100644 --- a/test/common/grpc/grpc_client_integration_test_harness.h +++ b/test/common/grpc/grpc_client_integration_test_harness.h @@ -47,10 +47,13 @@ using testing::_; using testing::AtLeast; +using testing::AtMost; using testing::Eq; using testing::Invoke; using testing::InvokeWithoutArgs; +using testing::IsEmpty; using testing::NiceMock; +using testing::Not; using testing::Return; using testing::ReturnRef; @@ -373,10 +376,13 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { request_msg.set_name(HELLO_REQUEST); Tracing::MockSpan active_span; - EXPECT_CALL(active_span, spawnChild_(_, "async fake_cluster egress", _)) + EXPECT_CALL(active_span, spawnChild_(_, "async helloworld.Greeter.SayHello egress", _)) .WillOnce(Return(request->child_span_)); EXPECT_CALL(*request->child_span_, setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq("fake_cluster"))); + EXPECT_CALL(*request->child_span_, + setTag(Eq(Tracing::Tags::get().UpstreamAddress), Not(IsEmpty()))) + .Times(AtMost(1)); EXPECT_CALL(*request->child_span_, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); EXPECT_CALL(*request->child_span_, injectContext(_)); diff --git a/test/common/http/BUILD b/test/common/http/BUILD index 17512c3753da..6b3d1ee6c837 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -385,22 +385,13 @@ envoy_cc_test( ], ) -PATH_UTILITY_TEST_DEPS = [ - "//source/common/http:header_map_lib", - "//source/common/http:path_utility_lib", -] - envoy_cc_test( name = "path_utility_test", srcs = ["path_utility_test.cc"], - deps = PATH_UTILITY_TEST_DEPS, -) - -envoy_cc_test( - name = "legacy_path_utility_test", - srcs = ["path_utility_test.cc"], - args = ["--runtime-feature-disable-for-tests=envoy.reloadable_features.remove_forked_chromium_url"], - deps = PATH_UTILITY_TEST_DEPS, + deps = [ + "//source/common/http:header_map_lib", + "//source/common/http:path_utility_lib", + ], ) envoy_cc_test( diff --git a/test/common/http/alternate_protocols_cache_impl_test.cc b/test/common/http/alternate_protocols_cache_impl_test.cc index 651e371bb89f..bc47f7ca27f2 100644 --- a/test/common/http/alternate_protocols_cache_impl_test.cc +++ b/test/common/http/alternate_protocols_cache_impl_test.cc @@ -5,6 +5,7 @@ #include "gtest/gtest.h" +using testing::Invoke; using 
testing::NiceMock; namespace Envoy { @@ -13,14 +14,17 @@ namespace Http { namespace { class AlternateProtocolsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedTime { public: - AlternateProtocolsCacheImplTest() - : store_(new NiceMock()), - protocols_(simTime(), std::unique_ptr(store_), max_entries_) {} + AlternateProtocolsCacheImplTest() : store_(new NiceMock()) {} + + void initialize() { + protocols_ = std::make_unique( + simTime(), std::unique_ptr(store_), max_entries_); + } const size_t max_entries_ = 10; MockKeyValueStore* store_; - AlternateProtocolsCacheImpl protocols_; + std::unique_ptr protocols_; const std::string hostname1_ = "hostname1"; const std::string hostname2_ = "hostname2"; @@ -47,76 +51,86 @@ class AlternateProtocolsCacheImplTest : public testing::Test, public Event::Test std::vector protocols2_ = {protocol2_}; }; -TEST_F(AlternateProtocolsCacheImplTest, Init) { EXPECT_EQ(0, protocols_.size()); } +TEST_F(AlternateProtocolsCacheImplTest, Init) { + initialize(); + EXPECT_EQ(0, protocols_->size()); +} TEST_F(AlternateProtocolsCacheImplTest, SetAlternatives) { - EXPECT_EQ(0, protocols_.size()); + initialize(); + EXPECT_EQ(0, protocols_->size()); EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5")); - protocols_.setAlternatives(origin1_, protocols1_); - EXPECT_EQ(1, protocols_.size()); + protocols_->setAlternatives(origin1_, protocols1_); + EXPECT_EQ(1, protocols_->size()); } TEST_F(AlternateProtocolsCacheImplTest, FindAlternatives) { + initialize(); EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5")); - protocols_.setAlternatives(origin1_, protocols1_); + protocols_->setAlternatives(origin1_, protocols1_); OptRef> protocols = - protocols_.findAlternatives(origin1_); + protocols_->findAlternatives(origin1_); ASSERT_TRUE(protocols.has_value()); EXPECT_EQ(protocols1_, protocols.ref()); } TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterReplacement) { + initialize(); EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5")); - protocols_.setAlternatives(origin1_, protocols1_); + protocols_->setAlternatives(origin1_, protocols1_); EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn2=\"hostname2:2\"; ma=10")); - protocols_.setAlternatives(origin1_, protocols2_); + protocols_->setAlternatives(origin1_, protocols2_); OptRef> protocols = - protocols_.findAlternatives(origin1_); + protocols_->findAlternatives(origin1_); ASSERT_TRUE(protocols.has_value()); EXPECT_EQ(protocols2_, protocols.ref()); EXPECT_NE(protocols1_, protocols.ref()); } TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesForMultipleOrigins) { + initialize(); EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5")); - protocols_.setAlternatives(origin1_, protocols1_); + protocols_->setAlternatives(origin1_, protocols1_); EXPECT_CALL(*store_, addOrUpdate("https://hostname2:2", "alpn2=\"hostname2:2\"; ma=10")); - protocols_.setAlternatives(origin2_, protocols2_); + protocols_->setAlternatives(origin2_, protocols2_); OptRef> protocols = - protocols_.findAlternatives(origin1_); + protocols_->findAlternatives(origin1_); ASSERT_TRUE(protocols.has_value()); EXPECT_EQ(protocols1_, protocols.ref()); - protocols = protocols_.findAlternatives(origin2_); + protocols = protocols_->findAlternatives(origin2_); EXPECT_EQ(protocols2_, protocols.ref()); } TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterExpiration) { + initialize(); 
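The fixture above now defers building the cache until initialize(), so a test can register expectations on the mock key-value store before the object under test touches it. A minimal sketch of that deferred-construction pattern, with hypothetical Store/Cache types standing in for MockKeyValueStore and AlternateProtocolsCacheImpl:

#include <memory>
#include "gmock/gmock.h"
#include "gtest/gtest.h"

// Hypothetical dependency interface and consumer, used only to illustrate the pattern.
class Store {
public:
  virtual ~Store() = default;
  virtual void load() = 0;
};

class MockStore : public Store {
public:
  MOCK_METHOD(void, load, (), (override));
};

class Cache {
public:
  // The cache reads from the store as soon as it is constructed.
  explicit Cache(std::unique_ptr<Store> store) : store_(std::move(store)) { store_->load(); }

private:
  std::unique_ptr<Store> store_;
};

class CacheTest : public testing::Test {
public:
  // Deferred construction: tests can set expectations on store_ first.
  void initialize() { cache_ = std::make_unique<Cache>(std::unique_ptr<Store>(store_)); }

  MockStore* store_{new testing::NiceMock<MockStore>()}; // owned by cache_ after initialize().
  std::unique_ptr<Cache> cache_;
};

TEST_F(CacheTest, LoadsOnConstruction) {
  EXPECT_CALL(*store_, load()); // Expectation is set before the object under test exists.
  initialize();
}

The raw store_ pointer stays available for expectations while ownership passes to the cache, mirroring how the fixture keeps MockKeyValueStore* store_ alongside the unique_ptr handed to the cache.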
EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5")); - protocols_.setAlternatives(origin1_, protocols1_); + protocols_->setAlternatives(origin1_, protocols1_); simTime().setMonotonicTime(expiration1_ + Seconds(1)); EXPECT_CALL(*store_, remove("https://hostname1:1")); OptRef> protocols = - protocols_.findAlternatives(origin1_); + protocols_->findAlternatives(origin1_); ASSERT_FALSE(protocols.has_value()); - EXPECT_EQ(0, protocols_.size()); + EXPECT_EQ(0, protocols_->size()); } TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterPartialExpiration) { + initialize(); EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5,alpn2=\"hostname2:2\"; ma=10")); std::vector both = {protocol1_, protocol2_}; - protocols_.setAlternatives(origin1_, both); + protocols_->setAlternatives(origin1_, both); simTime().setMonotonicTime(expiration1_ + Seconds(1)); EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn2=\"hostname2:2\"; ma=10")); OptRef> protocols = - protocols_.findAlternatives(origin1_); + protocols_->findAlternatives(origin1_); ASSERT_TRUE(protocols.has_value()); EXPECT_EQ(protocols2_.size(), protocols->size()); EXPECT_EQ(protocols2_, protocols.ref()); } TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterTruncation) { + initialize(); std::vector expected_protocols; for (size_t i = 0; i < 10; ++i) { protocol1_.port_++; @@ -127,16 +141,48 @@ TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterTruncation) { full_protocols.push_back(protocol1_); full_protocols.push_back(protocol1_); - protocols_.setAlternatives(origin1_, full_protocols); + protocols_->setAlternatives(origin1_, full_protocols); OptRef> protocols = - protocols_.findAlternatives(origin1_); + protocols_->findAlternatives(origin1_); ASSERT_TRUE(protocols.has_value()); EXPECT_EQ(10, protocols->size()); EXPECT_EQ(expected_protocols, protocols.ref()); } +TEST_F(AlternateProtocolsCacheImplTest, ToAndFromOriginString) { + initialize(); + std::string origin_str = "https://hostname1:1"; + absl::optional origin = + AlternateProtocolsCacheImpl::stringToOrigin(origin_str); + ASSERT_TRUE(origin.has_value()); + EXPECT_EQ(1, origin.value().port_); + EXPECT_EQ("https", origin.value().scheme_); + EXPECT_EQ("hostname1", origin.value().hostname_); + std::string output = AlternateProtocolsCacheImpl::originToString(origin.value()); + EXPECT_EQ(origin_str, output); + + // Test with no scheme or port. + std::string origin_str2 = "://:1"; + absl::optional origin2 = + AlternateProtocolsCacheImpl::stringToOrigin(origin_str2); + ASSERT_TRUE(origin2.has_value()); + EXPECT_EQ(1, origin2.value().port_); + EXPECT_EQ("", origin2.value().scheme_); + EXPECT_EQ("", origin2.value().hostname_); + std::string output2 = AlternateProtocolsCacheImpl::originToString(origin2.value()); + EXPECT_EQ(origin_str2, output2); + + // No port. + EXPECT_TRUE(!AlternateProtocolsCacheImpl::stringToOrigin("https://").has_value()); + // Non-numeric port. + EXPECT_TRUE(!AlternateProtocolsCacheImpl::stringToOrigin("://asd:dsa").has_value()); + // Negative port. 
+ EXPECT_TRUE(!AlternateProtocolsCacheImpl::stringToOrigin("https://:-1").has_value()); +} + TEST_F(AlternateProtocolsCacheImplTest, MaxEntries) { - EXPECT_EQ(0, protocols_.size()); + initialize(); + EXPECT_EQ(0, protocols_->size()); const std::string hostname = "hostname"; for (uint32_t i = 0; i <= max_entries_; ++i) { const AlternateProtocolsCache::Origin origin = {https_, hostname, i}; @@ -147,11 +193,12 @@ TEST_F(AlternateProtocolsCacheImplTest, MaxEntries) { if (i == max_entries_) { EXPECT_CALL(*store_, remove("https://hostname:0")); } - protocols_.setAlternatives(origin, protocols); + protocols_->setAlternatives(origin, protocols); } } TEST_F(AlternateProtocolsCacheImplTest, ToAndFromString) { + initialize(); auto testAltSvc = [&](const std::string& original_alt_svc, const std::string& expected_alt_svc) -> void { absl::optional> protocols = @@ -193,6 +240,24 @@ TEST_F(AlternateProtocolsCacheImplTest, ToAndFromString) { testAltSvc("h3-29=\":443\"; ma=86460", "h3-29=\":443\"; ma=86460"); } +TEST_F(AlternateProtocolsCacheImplTest, CacheLoad) { + EXPECT_CALL(*store_, iterate(_)).WillOnce(Invoke([&](KeyValueStore::ConstIterateCb fn) { + fn("foo", "bar"); + fn("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5"); + })); + + // When the cache is created, there should be a warning log for the bad cache + // entry. + EXPECT_LOG_CONTAINS("warn", "Unable to parse cache entry with key: foo value: bar", + { initialize(); }); + + EXPECT_CALL(*store_, addOrUpdate(_, _)).Times(0); + OptRef> protocols = + protocols_->findAlternatives(origin1_); + ASSERT_TRUE(protocols.has_value()); + EXPECT_EQ(protocols1_, protocols.ref()); +} + } // namespace } // namespace Http } // namespace Envoy diff --git a/test/common/http/alternate_protocols_cache_manager_test.cc b/test/common/http/alternate_protocols_cache_manager_test.cc index c24c78594717..7fa9543c6cc0 100644 --- a/test/common/http/alternate_protocols_cache_manager_test.cc +++ b/test/common/http/alternate_protocols_cache_manager_test.cc @@ -39,6 +39,7 @@ class AlternateProtocolsCacheManagerTest : public testing::Test, const std::string name2_ = "name2"; const int max_entries1_ = 10; const int max_entries2_ = 20; + Event::MockDispatcher dispatcher_; envoy::config::core::v3::AlternateProtocolsCacheOptions options1_; envoy::config::core::v3::AlternateProtocolsCacheOptions options2_; @@ -53,33 +54,33 @@ TEST_F(AlternateProtocolsCacheManagerTest, FactoryGet) { TEST_F(AlternateProtocolsCacheManagerTest, GetCache) { initialize(); - AlternateProtocolsCacheSharedPtr cache = manager_->getCache(options1_); + AlternateProtocolsCacheSharedPtr cache = manager_->getCache(options1_, dispatcher_); EXPECT_NE(nullptr, cache); - EXPECT_EQ(cache, manager_->getCache(options1_)); + EXPECT_EQ(cache, manager_->getCache(options1_, dispatcher_)); } TEST_F(AlternateProtocolsCacheManagerTest, GetCacheWithFlushingAndConcurrency) { EXPECT_CALL(context_.options_, concurrency()).WillOnce(Return(5)); options1_.mutable_key_value_store_config(); initialize(); - EXPECT_THROW_WITH_REGEX(manager_->getCache(options1_), EnvoyException, + EXPECT_THROW_WITH_REGEX(manager_->getCache(options1_, dispatcher_), EnvoyException, "options has key value store but Envoy has concurrency = 5"); } TEST_F(AlternateProtocolsCacheManagerTest, GetCacheForDifferentOptions) { initialize(); - AlternateProtocolsCacheSharedPtr cache1 = manager_->getCache(options1_); - AlternateProtocolsCacheSharedPtr cache2 = manager_->getCache(options2_); + AlternateProtocolsCacheSharedPtr cache1 = manager_->getCache(options1_, 
dispatcher_); + AlternateProtocolsCacheSharedPtr cache2 = manager_->getCache(options2_, dispatcher_); EXPECT_NE(nullptr, cache2); EXPECT_NE(cache1, cache2); } TEST_F(AlternateProtocolsCacheManagerTest, GetCacheForConflictingOptions) { initialize(); - AlternateProtocolsCacheSharedPtr cache1 = manager_->getCache(options1_); + AlternateProtocolsCacheSharedPtr cache1 = manager_->getCache(options1_, dispatcher_); options2_.set_name(options1_.name()); EXPECT_THROW_WITH_REGEX( - manager_->getCache(options2_), EnvoyException, + manager_->getCache(options2_, dispatcher_), EnvoyException, "options specified alternate protocols cache 'name1' with different settings.*"); } diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index 5951dced3199..1fc0b8abeac6 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -916,20 +916,26 @@ TEST(ValidateHeaders, Connect) { TEST(ValidateHeaders, ContentLength) { bool should_close_connection; - EXPECT_EQ(HeaderUtility::HeaderValidationResult::ACCEPT, - HeaderUtility::validateContentLength("1,1", true, should_close_connection)); + size_t content_length{0}; + EXPECT_EQ( + HeaderUtility::HeaderValidationResult::ACCEPT, + HeaderUtility::validateContentLength("1,1", true, should_close_connection, content_length)); EXPECT_FALSE(should_close_connection); + EXPECT_EQ(1, content_length); - EXPECT_EQ(HeaderUtility::HeaderValidationResult::REJECT, - HeaderUtility::validateContentLength("1,2", true, should_close_connection)); + EXPECT_EQ( + HeaderUtility::HeaderValidationResult::REJECT, + HeaderUtility::validateContentLength("1,2", true, should_close_connection, content_length)); EXPECT_FALSE(should_close_connection); - EXPECT_EQ(HeaderUtility::HeaderValidationResult::REJECT, - HeaderUtility::validateContentLength("1,2", false, should_close_connection)); + EXPECT_EQ( + HeaderUtility::HeaderValidationResult::REJECT, + HeaderUtility::validateContentLength("1,2", false, should_close_connection, content_length)); EXPECT_TRUE(should_close_connection); - EXPECT_EQ(HeaderUtility::HeaderValidationResult::REJECT, - HeaderUtility::validateContentLength("-1", false, should_close_connection)); + EXPECT_EQ( + HeaderUtility::HeaderValidationResult::REJECT, + HeaderUtility::validateContentLength("-1", false, should_close_connection, content_length)); EXPECT_TRUE(should_close_connection); } diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index d70c3be17a8b..86e8be81686f 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -2260,25 +2260,6 @@ TEST_F(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { EXPECT_EQ("GET / HTTP/1.1\r\nMy-Custom-Header: hey\r\n\r\n", output); } -TEST_F(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasingLegacy) { - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.dont_add_content_length_for_bodiless_requests", "false"}}); - codec_settings_.header_key_format_ = Http1Settings::HeaderKeyFormat::ProperCase; - - initialize(); - - MockResponseDecoder response_decoder; - Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); - - std::string output; - ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); - - TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {"my-custom-header", "hey"}}; - EXPECT_TRUE(request_encoder.encodeHeaders(headers, 
true).ok()); - EXPECT_EQ("GET / HTTP/1.1\r\nMy-Custom-Header: hey\r\nContent-Length: 0\r\n\r\n", output); -} - TEST_F(Http1ClientConnectionImplTest, HostHeaderTranslate) { initialize(); diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 51c460a4d2f3..a23b056b9a77 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -55,18 +55,6 @@ class Http2CodecImplTestFixture { static bool slowContainsStreamId(int id, ConnectionImpl& connection) { return connection.slowContainsStreamId(id); } - // The Http::Connection::dispatch method does not throw (any more). However unit tests in this - // file use codecs for sending test data through mock network connections to the codec under test. - // It is infeasible to plumb error codes returned by the dispatch() method of the codecs under - // test, through mock connections and sending codec. As a result error returned by the dispatch - // method of the codec under test invoked by the ConnectionWrapper is thrown as an exception. Note - // that exception goes only through the mock network connection and sending codec, i.e. it is - // thrown only through the test harness code. Specific exception types are to distinguish error - // codes returned when processing requests or responses. - // TODO(yanavlasov): modify the code to verify test expectations at the point of calling codec - // under test through the ON_CALL expectations in the - // setupDefaultConnectionMocks() method. This will make the exceptions below - // unnecessary. struct ClientCodecError : public std::runtime_error { ClientCodecError(Http::Status&& status) : std::runtime_error(std::string(status.message())), status_(std::move(status)) {} @@ -84,19 +72,19 @@ class Http2CodecImplTestFixture { struct ConnectionWrapper { Http::Status dispatch(const Buffer::Instance& data, ConnectionImpl& connection) { connection_ = &connection; - Http::Status status = Http::okStatus(); buffer_.add(data); return dispatchBufferedData(); } Http::Status dispatchBufferedData() { Http::Status status = Http::okStatus(); - if (!dispatching_) { + if (!dispatching_ && status_.ok()) { while (buffer_.length() > 0) { dispatching_ = true; status = connection_->dispatch(buffer_); if (!status.ok()) { - // Exit early if we hit an error status. + // Exit early if we hit an error status and record it for verification in the test. 
+ status_.Update(status); return status; } dispatching_ = false; @@ -108,6 +96,7 @@ class Http2CodecImplTestFixture { bool dispatching_{}; Buffer::OwnedImpl buffer_; ConnectionImpl* connection_{}; + Http::Status status_; }; enum SettingsTupleIndex { @@ -168,17 +157,11 @@ class Http2CodecImplTestFixture { if (corrupt_metadata_frame_) { corruptMetadataFramePayload(data); } - auto status = server_wrapper_.dispatch(data, *server_); - if (!status.ok()) { - throw ServerCodecError(std::move(status)); - } + server_wrapper_.dispatch(data, *server_).IgnoreError(); })); ON_CALL(server_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - auto status = client_wrapper_.dispatch(data, *client_); - if (!status.ok()) { - throw ClientCodecError(std::move(status)); - } + client_wrapper_.dispatch(data, *client_).IgnoreError(); })); } @@ -421,7 +404,9 @@ TEST_P(Http2CodecImplTest, TrailerStatus) { response_encoder_->encodeHeaders(response_headers, false); // nghttp2 doesn't allow :status in trailers - EXPECT_THROW(response_encoder_->encode100ContinueHeaders(continue_headers), ClientCodecError); + response_encoder_->encode100ContinueHeaders(continue_headers); + EXPECT_FALSE(client_wrapper_.status_.ok()); + EXPECT_TRUE(isCodecProtocolError(client_wrapper_.status_)); EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); }; @@ -471,7 +456,9 @@ TEST_P(Http2CodecImplTest, Invalid101SwitchingProtocols) { TestResponseHeaderMapImpl upgrade_headers{{":status", "101"}}; EXPECT_CALL(response_decoder_, decodeHeaders_(_, _)).Times(0); - EXPECT_THROW(response_encoder_->encodeHeaders(upgrade_headers, false), ClientCodecError); + response_encoder_->encodeHeaders(upgrade_headers, false); + EXPECT_FALSE(client_wrapper_.status_.ok()); + EXPECT_TRUE(isCodecProtocolError(client_wrapper_.status_)); EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); } @@ -484,7 +471,9 @@ TEST_P(Http2CodecImplTest, InvalidContinueWithFin) { EXPECT_TRUE(request_encoder_->encodeHeaders(request_headers, true).ok()); TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; - EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError); + response_encoder_->encodeHeaders(continue_headers, true); + EXPECT_FALSE(client_wrapper_.status_.ok()); + EXPECT_TRUE(isCodecProtocolError(client_wrapper_.status_)); EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); } @@ -553,7 +542,9 @@ TEST_P(Http2CodecImplTest, InvalidRepeatContinue) { EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); response_encoder_->encode100ContinueHeaders(continue_headers); - EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError); + response_encoder_->encodeHeaders(continue_headers, true); + EXPECT_FALSE(client_wrapper_.status_.ok()); + EXPECT_TRUE(isCodecProtocolError(client_wrapper_.status_)); EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); }; @@ -611,7 +602,9 @@ TEST_P(Http2CodecImplTest, Invalid204WithContentLength) { "debug", "Invalid HTTP header field was received: frame type: 1, stream: 1, name: [content-length], " "value: [3]", - EXPECT_THROW(response_encoder_->encodeHeaders(response_headers, false), ClientCodecError)); + response_encoder_->encodeHeaders(response_headers, false)); + EXPECT_FALSE(client_wrapper_.status_.ok()); + EXPECT_TRUE(isCodecProtocolError(client_wrapper_.status_)); EXPECT_EQ(1, 
client_stats_store_.counter("http2.rx_messaging_error").value()); }; @@ -705,8 +698,6 @@ TEST_P(Http2CodecImplTest, TrailingHeaders) { // When having empty trailers, codec submits empty buffer and end_stream instead. TEST_P(Http2CodecImplTest, IgnoreTrailingEmptyHeaders) { TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.http2_skip_encoding_empty_trailers", "true"}}); initialize(); @@ -732,35 +723,6 @@ TEST_P(Http2CodecImplTest, IgnoreTrailingEmptyHeaders) { response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{}); } -// When having empty trailers and "envoy.reloadable_features.http2_skip_encoding_empty_trailers" is -// turned off, codec submits empty trailers. -TEST_P(Http2CodecImplTest, SubmitTrailingEmptyHeaders) { - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.http2_skip_encoding_empty_trailers", "false"}}); - - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - EXPECT_TRUE(request_encoder_->encodeHeaders(request_headers, false).ok()); - EXPECT_CALL(request_decoder_, decodeData(_, false)); - Buffer::OwnedImpl hello("hello"); - request_encoder_->encodeData(hello, false); - EXPECT_CALL(request_decoder_, decodeTrailers_(_)); - request_encoder_->encodeTrailers(TestRequestTrailerMapImpl{}); - - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); - response_encoder_->encodeHeaders(response_headers, false); - EXPECT_CALL(response_decoder_, decodeData(_, false)); - Buffer::OwnedImpl world("world"); - response_encoder_->encodeData(world, false); - EXPECT_CALL(response_decoder_, decodeTrailers_(_)); - response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{}); -} - TEST_P(Http2CodecImplTest, TrailingHeadersLargeClientBody) { initialize(); @@ -872,8 +834,11 @@ TEST_P(Http2CodecImplTest, BadMetadataVecReceivedTest) { metadata_map_vector.push_back(std::move(metadata_map_ptr)); corrupt_metadata_frame_ = true; - EXPECT_THROW_WITH_MESSAGE(request_encoder_->encodeMetadata(metadata_map_vector), ServerCodecError, - "The user callback function failed"); + request_encoder_->encodeMetadata(metadata_map_vector); + // The error is detected by the server codec. 
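With ClientCodecError/ServerCodecError gone, the ConnectionWrapper latches the first failing dispatch result in status_ and the tests assert on it afterwards via isCodecProtocolError()/isBufferFloodError(). A self-contained sketch of that latching behavior, using absl::Status as a stand-in for Http::Status:

#include "absl/status/status.h"
#include "gtest/gtest.h"

// Minimal stand-in for the ConnectionWrapper change: instead of throwing when dispatch
// fails, latch the first error so the test can inspect it after the fact.
struct DispatchRecorder {
  absl::Status dispatch(const absl::Status& result) {
    // Stop dispatching once an error has been recorded, mirroring the
    // `!dispatching_ && status_.ok()` guard in the wrapper.
    if (!status_.ok()) {
      return status_;
    }
    status_.Update(result); // Keeps the first error; stays OK otherwise.
    return status_;
  }
  absl::Status status_ = absl::OkStatus();
};

TEST(DispatchRecorder, LatchesFirstError) {
  DispatchRecorder recorder;
  EXPECT_TRUE(recorder.dispatch(absl::OkStatus()).ok());
  recorder.dispatch(absl::InternalError("Too many control frames in the outbound queue."));
  // Later dispatches do not overwrite the recorded failure.
  recorder.dispatch(absl::OkStatus());
  EXPECT_FALSE(recorder.status_.ok());
  EXPECT_EQ(recorder.status_.message(), "Too many control frames in the outbound queue.");
}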
+ EXPECT_FALSE(server_wrapper_.status_.ok()); + EXPECT_TRUE(isCodecProtocolError(server_wrapper_.status_)); + EXPECT_EQ(server_wrapper_.status_.message(), "The user callback function failed"); } // Encode response metadata while dispatching request data from the client, so @@ -1040,7 +1005,7 @@ TEST_P(Http2CodecImplTest, DumpsStreamlessConnectionWithoutAllocatingMemory) { "max_headers_kb_: 60, max_headers_count_: 100, " "per_stream_buffer_limit_: 268435456, allow_metadata_: 0, " "stream_error_on_invalid_http_messaging_: 0, is_outbound_flood_monitored_control_frame_: " - "0, skip_encoding_empty_trailers_: 1, dispatching_: 0, raised_goaway_: 0, " + "0, dispatching_: 0, raised_goaway_: 0, " "pending_deferred_reset_streams_.size(): 0\n" " &protocol_constraints_: \n" " ProtocolConstraints")); @@ -1930,6 +1895,8 @@ TEST_P(Http2CodecImplStreamLimitTest, LazyDecreaseMaxConcurrentStreamsConsumeErr EXPECT_EQ(1, server_stats_store_.counter("http2.tx_reset").value()); EXPECT_EQ(1, TestUtility::findGauge(client_stats_store_, "http2.streams_active")->value()); EXPECT_EQ(1, TestUtility::findGauge(server_stats_store_, "http2.streams_active")->value()); + // The server codec should not fail since the error is "consumed". + EXPECT_TRUE(server_wrapper_.status_.ok()); } TEST_P(Http2CodecImplStreamLimitTest, LazyDecreaseMaxConcurrentStreamsIgnoreError) { @@ -1968,14 +1935,17 @@ TEST_P(Http2CodecImplStreamLimitTest, LazyDecreaseMaxConcurrentStreamsIgnoreErro request_encoder_ = &client_->newStream(response_decoder_); setupDefaultConnectionMocks(); - EXPECT_THROW_WITH_MESSAGE(request_encoder_->encodeHeaders(request_headers, true).IgnoreError(), - ServerCodecError, "The user callback function failed"); + EXPECT_TRUE(request_encoder_->encodeHeaders(request_headers, true).ok()); + // The server codec should fail since there are no available streams. + EXPECT_FALSE(server_wrapper_.status_.ok()); + EXPECT_TRUE(isCodecProtocolError(server_wrapper_.status_)); + EXPECT_EQ(server_wrapper_.status_.message(), "The user callback function failed"); EXPECT_EQ(0, server_stats_store_.counter("http2.stream_refused_errors").value()); EXPECT_EQ(0, server_stats_store_.counter("http2.tx_reset").value()); // Not verifying the http2.streams_active server/client gauges here as the - // EXPECT_THROW_WITH_MESSAGE above doesn't let us fully capture the behavior of the real system. + // test dispatch function doesn't let us fully capture the behavior of the real system. // In the real world, the status returned from dispatch would trigger a connection close which // would result in the active stream gauges to go down to 0. } @@ -2468,8 +2438,11 @@ TEST_P(Http2CodecImplTest, PingFlood) { buffer.move(frame); })); - EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, - "Too many control frames in the outbound queue."); + client_->sendPendingFrames().IgnoreError(); + // The PING flood is detected by the server codec. + EXPECT_FALSE(server_wrapper_.status_.ok()); + EXPECT_TRUE(isBufferFloodError(server_wrapper_.status_)); + EXPECT_EQ(server_wrapper_.status_.message(), "Too many control frames in the outbound queue."); EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_control_flood").value()); } @@ -2538,8 +2511,11 @@ TEST_P(Http2CodecImplTest, PingFloodCounterReset) { // 1 more ping frame should overflow the outbound frame limit. 
EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, - "Too many control frames in the outbound queue."); + client_->sendPendingFrames().IgnoreError(); + // The server codec should fail when it gets 1 PING too many. + EXPECT_FALSE(server_wrapper_.status_.ok()); + EXPECT_TRUE(isBufferFloodError(server_wrapper_.status_)); + EXPECT_EQ(server_wrapper_.status_.message(), "Too many control frames in the outbound queue."); } // Verify that codec detects flood of outbound HEADER frames @@ -2709,8 +2685,11 @@ TEST_P(Http2CodecImplTest, PingStacksWithDataFlood) { } // Send one PING frame above the outbound queue size limit EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, - "Too many frames in the outbound queue."); + client_->sendPendingFrames().IgnoreError(); + // The server codec should fail when it gets 1 frame too many. + EXPECT_FALSE(server_wrapper_.status_.ok()); + EXPECT_TRUE(isBufferFloodError(server_wrapper_.status_)); + EXPECT_EQ(server_wrapper_.status_.message(), "Too many frames in the outbound queue."); EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); } @@ -2805,8 +2784,11 @@ TEST_P(Http2CodecImplTest, MetadataFlood) { TEST_P(Http2CodecImplTest, PriorityFlood) { priorityFlood(); - EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, - "Too many PRIORITY frames"); + client_->sendPendingFrames().IgnoreError(); + // The PRIORITY flood is detected by the server codec. + EXPECT_FALSE(server_wrapper_.status_.ok()); + EXPECT_TRUE(isBufferFloodError(server_wrapper_.status_)); + EXPECT_EQ(server_wrapper_.status_.message(), "Too many PRIORITY frames"); } TEST_P(Http2CodecImplTest, PriorityFloodOverride) { @@ -2818,8 +2800,11 @@ TEST_P(Http2CodecImplTest, PriorityFloodOverride) { TEST_P(Http2CodecImplTest, WindowUpdateFlood) { windowUpdateFlood(); - EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, - "Too many WINDOW_UPDATE frames"); + client_->sendPendingFrames().IgnoreError(); + // The server codec should fail when it gets 1 WINDOW_UPDATE frame too many. 
+ EXPECT_FALSE(server_wrapper_.status_.ok()); + EXPECT_TRUE(isBufferFloodError(server_wrapper_.status_)); + EXPECT_EQ(server_wrapper_.status_.message(), "Too many WINDOW_UPDATE frames"); } TEST_P(Http2CodecImplTest, WindowUpdateFloodOverride) { diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index 7a1c272a1796..23c86c527b22 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -1488,10 +1488,6 @@ TEST_F(Http2ConnPoolImplTest, PreconnectWithMultiplexing) { } TEST_F(Http2ConnPoolImplTest, PreconnectWithSettings) { - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.improved_stream_limit_handling", "true"}}); - cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(2); ON_CALL(*cluster_, perUpstreamPreconnectRatio).WillByDefault(Return(1.5)); diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index ae7073574d57..e602f2108df0 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -91,6 +91,69 @@ TEST(HttpUtility, parseQueryString) { "bucket%7Crq_xx%7Crq_complete%7Crq_active%7Ccx_active%29%29%7C%28server.version%29")); } +TEST(HttpUtility, stripQueryString) { + EXPECT_EQ(Utility::stripQueryString(HeaderString("/")), "/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/?")), "/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/?x=1")), "/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/?x=1&y=2")), "/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo")), "/foo"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo?")), "/foo"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo?hello=there")), "/foo"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo?hello=there&good=bye")), "/foo"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/?")), "/foo/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/?x=1")), "/foo/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar")), "/foo/bar"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar?")), "/foo/bar"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar?a=b")), "/foo/bar"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar?a=b&b=c")), "/foo/bar"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar/")), "/foo/bar/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar/?")), "/foo/bar/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar/?x=1")), "/foo/bar/"); + EXPECT_EQ(Utility::stripQueryString(HeaderString("/foo/bar/?x=1&y=2")), "/foo/bar/"); +} + +TEST(HttpUtility, replaceQueryString) { + // Replace with nothing + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/"), Utility::QueryParams()), "/"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/?"), Utility::QueryParams()), "/"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/?x=0"), Utility::QueryParams()), "/"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/a"), Utility::QueryParams()), "/a"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/a/"), Utility::QueryParams()), "/a/"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/a/?y=5"), Utility::QueryParams()), "/a/"); + // Replace with x=1 + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/"), Utility::QueryParams({{"x", "1"}})), + "/?x=1"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/?"), 
Utility::QueryParams({{"x", "1"}})), + "/?x=1"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/?x=0"), Utility::QueryParams({{"x", "1"}})), + "/?x=1"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/a?x=0"), Utility::QueryParams({{"x", "1"}})), + "/a?x=1"); + EXPECT_EQ( + Utility::replaceQueryString(HeaderString("/a/?x=0"), Utility::QueryParams({{"x", "1"}})), + "/a/?x=1"); + // More replacements + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/foo"), + Utility::QueryParams({{"x", "1"}, {"z", "3"}})), + "/foo?x=1&z=3"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/foo?z=2"), + Utility::QueryParams({{"x", "1"}, {"y", "5"}})), + "/foo?x=1&y=5"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/foo?y=9"), + Utility::QueryParams({{"x", "1"}, {"y", "5"}})), + "/foo?x=1&y=5"); + // More path components + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/foo/bar?"), + Utility::QueryParams({{"x", "1"}, {"y", "5"}})), + "/foo/bar?x=1&y=5"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/foo/bar?y=9&a=b"), + Utility::QueryParams({{"x", "1"}, {"y", "5"}})), + "/foo/bar?x=1&y=5"); + EXPECT_EQ(Utility::replaceQueryString(HeaderString("/foo/bar?y=11&z=7"), + Utility::QueryParams({{"a", "b"}, {"x", "1"}, {"y", "5"}})), + "/foo/bar?a=b&x=1&y=5"); +} + TEST(HttpUtility, getResponseStatus) { EXPECT_THROW(Utility::getResponseStatus(TestResponseHeaderMapImpl{}), CodecClientException); EXPECT_EQ(200U, Utility::getResponseStatus(TestResponseHeaderMapImpl{{":status", "200"}})); diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 2d32f100eca4..228068e269aa 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -108,68 +108,6 @@ envoy_cc_test( ], ) -envoy_cc_test( - name = "apple_dns_impl_test", - srcs = select({ - "//bazel:apple": ["apple_dns_impl_test.cc"], - "//conditions:default": [], - }), - external_deps = ["abseil_synchronization"], - deps = [ - "//envoy/event:dispatcher_interface", - "//envoy/network:dns_interface", - "//source/common/event:dispatcher_includes", - "//envoy/event:file_event_interface", - "//source/common/stats:isolated_store_lib", - "//source/common/event:dispatcher_lib", - "//source/common/network:address_lib", - "//source/common/common:random_generator_lib", - "//test/test_common:environment_lib", - "//test/test_common:network_utility_lib", - "//test/test_common:utility_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - "//test/test_common:threadsafe_singleton_injector_lib", - "//test/mocks/event:event_mocks", - ] + select({ - "//bazel:apple": [ - "//source/common/network:dns_lib", - ], - "//conditions:default": [], - }), -) - -envoy_cc_test( - name = "dns_impl_test", - srcs = ["dns_impl_test.cc"], - args = [ - # Used in createDnsResolver to force creation of DnsResolverImpl when running test on macOS. - "--runtime-feature-disable-for-tests=envoy.restart_features.use_apple_api_for_dns_lookups", - ], - # TODO(envoyproxy/windows-dev): Under winsock2 this is behaving unusually for windows, even as - # 127.0.0.1 and ::1 are explicitly added to `c:\windows\system32\drivers\etc\hosts` ... 
see: - # https://gist.github.com/wrowe/24fe5b93b58bb444bce7ecc134905395 - tags = ["fails_on_clang_cl"], - deps = [ - "//envoy/event:dispatcher_interface", - "//envoy/network:address_interface", - "//envoy/network:dns_interface", - "//source/common/buffer:buffer_lib", - "//source/common/event:dispatcher_includes", - "//source/common/event:dispatcher_lib", - "//source/common/network:address_lib", - "//source/common/network:dns_lib", - "//source/common/network:filter_lib", - "//source/common/network:listen_socket_lib", - "//source/common/stats:stats_lib", - "//source/common/stream_info:stream_info_lib", - "//test/mocks/network:network_mocks", - "//test/test_common:environment_lib", - "//test/test_common:network_utility_lib", - "//test/test_common:utility_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - ], -) - envoy_cc_test( name = "filter_manager_impl_test", srcs = ["filter_manager_impl_test.cc"], diff --git a/test/common/network/dns_resolver/BUILD b/test/common/network/dns_resolver/BUILD new file mode 100644 index 000000000000..a357f6f20a83 --- /dev/null +++ b/test/common/network/dns_resolver/BUILD @@ -0,0 +1,23 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "dns_factory_test", + srcs = ["dns_factory_test.cc"], + args = [ + # Force creation of c-ares DnsResolverImpl when running test on macOS. + "--runtime-feature-disable-for-tests=envoy.restart_features.use_apple_api_for_dns_lookups", + ], + deps = [ + "//source/common/network/dns_resolver:dns_factory_util_lib", + "//source/extensions/network/dns_resolver/cares:config", + "//test/mocks/network:network_mocks", + ], +) diff --git a/test/common/network/dns_resolver/dns_factory_test.cc b/test/common/network/dns_resolver/dns_factory_test.cc new file mode 100644 index 000000000000..813b64a52971 --- /dev/null +++ b/test/common/network/dns_resolver/dns_factory_test.cc @@ -0,0 +1,278 @@ +#include "source/common/network/address_impl.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" + +#include "test/mocks/network/mocks.h" + +namespace Envoy { +namespace Network { + +class DnsFactoryTest : public testing::Test { +public: + // Verify typed config is c-ares, and unpack to c-ares object. + void verifyCaresDnsConfigAndUnpack( + envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config, + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig& cares) { + // Verify typed DNS resolver config is c-ares. + EXPECT_EQ(typed_dns_resolver_config.name(), std::string(Network::CaresDnsResolver)); + EXPECT_EQ(typed_dns_resolver_config.typed_config().type_url(), + "type.googleapis.com/" + "envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig"); + typed_dns_resolver_config.typed_config().UnpackTo(&cares); + } + + // Verify the c-ares object is default. + void verifyCaresDnsConfigDefault( + const envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig& cares) { + EXPECT_EQ(false, cares.dns_resolver_options().use_tcp_for_dns_lookups()); + EXPECT_EQ(false, cares.dns_resolver_options().no_default_search_domain()); + EXPECT_TRUE(cares.resolvers().empty()); + } +}; + +// Test default c-ares DNS resolver typed config creation is expected. 
+TEST_F(DnsFactoryTest, MakeDefaultCaresDnsResolverTest) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + makeDefaultCaresDnsResolverConfig(typed_dns_resolver_config); + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + verifyCaresDnsConfigDefault(cares); +} + +// Test default apple DNS resolver typed config creation is expected. +TEST_F(DnsFactoryTest, MakeDefaultAppleDnsResolverTest) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + makeDefaultAppleDnsResolverConfig(typed_dns_resolver_config); + EXPECT_EQ(typed_dns_resolver_config.name(), std::string(Network::AppleDnsResolver)); + EXPECT_EQ( + typed_dns_resolver_config.typed_config().type_url(), + "type.googleapis.com/envoy.extensions.network.dns_resolver.apple.v3.AppleDnsResolverConfig"); +} + +// Test default DNS resolver typed config creation based on build system and configuration is +// expected. +TEST_F(DnsFactoryTest, MakeDefaultDnsResolverTest) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + makeDefaultDnsResolverConfig(typed_dns_resolver_config); + // In this test case, makeDefaultDnsResolverConfig() creates an default c-ares DNS typed config. + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + verifyCaresDnsConfigDefault(cares); +} + +// Test handleLegacyDnsResolverData() function with DnsFilterConfig type. +TEST_F(DnsFactoryTest, LegacyDnsResolverDataDnsFilterConfig) { + envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig::ClientContextConfig + dns_filter_config; + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + handleLegacyDnsResolverData(dns_filter_config, typed_dns_resolver_config); + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + verifyCaresDnsConfigDefault(cares); +} + +// Test handleLegacyDnsResolverData() function with Cluster type, and default config. +TEST_F(DnsFactoryTest, LegacyDnsResolverDataClusterConfigDefault) { + envoy::config::cluster::v3::Cluster cluster_config; + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + handleLegacyDnsResolverData(cluster_config, typed_dns_resolver_config); + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + verifyCaresDnsConfigDefault(cares); +} + +// Test handleLegacyDnsResolverData() function with Cluster type, and non-default config. 
+TEST_F(DnsFactoryTest, LegacyDnsResolverDataClusterConfigNonDefault) { + envoy::config::cluster::v3::Cluster cluster_config; + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + cluster_config.set_use_tcp_for_dns_lookups(true); + envoy::config::core::v3::Address resolvers; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.4", 8080), + resolvers); + cluster_config.add_dns_resolvers()->MergeFrom(resolvers); + handleLegacyDnsResolverData(cluster_config, typed_dns_resolver_config); + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + EXPECT_EQ(true, cares.dns_resolver_options().use_tcp_for_dns_lookups()); + EXPECT_EQ(false, cares.dns_resolver_options().no_default_search_domain()); + EXPECT_FALSE(cares.resolvers().empty()); + EXPECT_EQ(true, TestUtility::protoEqual(cares.resolvers(0), resolvers)); +} + +// Test handleLegacyDnsResolverData() function with Bootstrap type, and non-default config. +TEST_F(DnsFactoryTest, LegacyDnsResolverDataBootstrapConfigNonDefault) { + envoy::config::bootstrap::v3::Bootstrap bootstrap_config; + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + bootstrap_config.set_use_tcp_for_dns_lookups(true); + handleLegacyDnsResolverData(bootstrap_config, typed_dns_resolver_config); + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + EXPECT_EQ(true, cares.dns_resolver_options().use_tcp_for_dns_lookups()); + EXPECT_EQ(false, cares.dns_resolver_options().no_default_search_domain()); + EXPECT_TRUE(cares.resolvers().empty()); +} + +// Test handleLegacyDnsResolverData() function with DnsCacheConfig type, and non-default config. +TEST_F(DnsFactoryTest, LegacyDnsResolverDataDnsCacheConfigNonDefault) { + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig dns_cache_config; + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + dns_cache_config.set_use_tcp_for_dns_lookups(true); + handleLegacyDnsResolverData(dns_cache_config, typed_dns_resolver_config); + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + EXPECT_EQ(true, cares.dns_resolver_options().use_tcp_for_dns_lookups()); + EXPECT_EQ(false, cares.dns_resolver_options().no_default_search_domain()); + EXPECT_TRUE(cares.resolvers().empty()); +} + +// Test checkDnsResolutionConfigExist() function with Bootstrap type, +// and dns_resolution_config exists. 
+TEST_F(DnsFactoryTest, CheckDnsResolutionConfigExistWithBoostrap) { + envoy::config::bootstrap::v3::Bootstrap bootstrap_config; + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + bootstrap_config.mutable_dns_resolution_config() + ->mutable_dns_resolver_options() + ->set_use_tcp_for_dns_lookups(true); + bootstrap_config.mutable_dns_resolution_config() + ->mutable_dns_resolver_options() + ->set_no_default_search_domain(true); + + envoy::config::core::v3::Address resolvers; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.4", 8080), + resolvers); + bootstrap_config.mutable_dns_resolution_config()->add_resolvers()->MergeFrom(resolvers); + EXPECT_TRUE(checkDnsResolutionConfigExist(bootstrap_config, typed_dns_resolver_config)); + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + EXPECT_EQ(true, cares.dns_resolver_options().use_tcp_for_dns_lookups()); + EXPECT_EQ(true, cares.dns_resolver_options().no_default_search_domain()); + EXPECT_FALSE(cares.resolvers().empty()); + EXPECT_EQ(true, TestUtility::protoEqual(cares.resolvers(0), resolvers)); +} + +// Test checkTypedDnsResolverConfigExist() function with Bootstrap type, +// and typed_dns_resolver_config exists. +TEST_F(DnsFactoryTest, CheckTypedDnsResolverConfigExistWithBoostrap) { + envoy::config::bootstrap::v3::Bootstrap bootstrap_config; + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + + cares.mutable_dns_resolver_options()->set_use_tcp_for_dns_lookups(true); + cares.mutable_dns_resolver_options()->set_no_default_search_domain(true); + envoy::config::core::v3::Address resolvers; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.4", 8080), + resolvers); + cares.add_resolvers()->MergeFrom(resolvers); + typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); + bootstrap_config.mutable_typed_dns_resolver_config()->MergeFrom(typed_dns_resolver_config); + EXPECT_TRUE(bootstrap_config.has_typed_dns_resolver_config()); + typed_dns_resolver_config.Clear(); + cares.Clear(); + + EXPECT_TRUE(checkTypedDnsResolverConfigExist(bootstrap_config, typed_dns_resolver_config)); + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + EXPECT_EQ(true, cares.dns_resolver_options().use_tcp_for_dns_lookups()); + EXPECT_EQ(true, cares.dns_resolver_options().no_default_search_domain()); + EXPECT_FALSE(cares.resolvers().empty()); + EXPECT_EQ(true, TestUtility::protoEqual(cares.resolvers(0), resolvers)); +} + +// Test checkTypedDnsResolverConfigExist() function with Bootstrap type. +// A garbage typed_dns_resolver_config type foo exists with dns_resolution_config. +// In this case, the typed_dns_resolver_config is copied over. 
+TEST_F(DnsFactoryTest, CheckBothTypedAndDnsResolutionConfigExistWithBoostrapWrongType) { + envoy::config::bootstrap::v3::Bootstrap bootstrap_config; + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + + typed_dns_resolver_config.mutable_typed_config()->set_type_url("type.googleapis.com/foo"); + typed_dns_resolver_config.mutable_typed_config()->set_value("bar"); + typed_dns_resolver_config.set_name("baz"); + bootstrap_config.mutable_typed_dns_resolver_config()->MergeFrom(typed_dns_resolver_config); + EXPECT_TRUE(bootstrap_config.has_typed_dns_resolver_config()); + typed_dns_resolver_config.Clear(); + + // setup dns_resolution_config with multiple resolver addresses + envoy::config::core::v3::Address resolvers0; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.4", 8080), + resolvers0); + bootstrap_config.mutable_dns_resolution_config()->add_resolvers()->MergeFrom(resolvers0); + envoy::config::core::v3::Address resolvers1; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("5.6.7.8", 8081), + resolvers1); + bootstrap_config.mutable_dns_resolution_config()->add_resolvers()->MergeFrom(resolvers1); + bootstrap_config.mutable_dns_resolution_config() + ->mutable_dns_resolver_options() + ->set_use_tcp_for_dns_lookups(true); + bootstrap_config.mutable_dns_resolution_config() + ->mutable_dns_resolver_options() + ->set_no_default_search_domain(true); + + // setup use_tcp_for_dns_lookups + bootstrap_config.set_use_tcp_for_dns_lookups(false); + + EXPECT_TRUE(checkTypedDnsResolverConfigExist(bootstrap_config, typed_dns_resolver_config)); + EXPECT_FALSE(checkUseAppleApiForDnsLookups(typed_dns_resolver_config)); + typed_dns_resolver_config = makeDnsResolverConfig(bootstrap_config); + + // verify the typed_dns_resolver_config data matching DNS resolution config + EXPECT_EQ(typed_dns_resolver_config.name(), "baz"); + EXPECT_EQ(typed_dns_resolver_config.typed_config().type_url(), "type.googleapis.com/foo"); + EXPECT_EQ(typed_dns_resolver_config.typed_config().value(), "bar"); +} + +// Test default DNS resolver factory creation based on build system and configuration is +// expected. +TEST_F(DnsFactoryTest, MakeDefaultDnsResolverFactoryTestInCares) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + Network::DnsResolverFactory& dns_resolver_factory = + Envoy::Network::createDefaultDnsResolverFactory(typed_dns_resolver_config); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + verifyCaresDnsConfigDefault(cares); + EXPECT_EQ(dns_resolver_factory.name(), std::string(CaresDnsResolver)); +} + +// Test DNS resolver factory creation from proto without typed config. 
+TEST_F(DnsFactoryTest, MakeDnsResolverFactoryFromProtoTestInCaresWithoutTypedConfig) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + Network::DnsResolverFactory& dns_resolver_factory = + Envoy::Network::createDnsResolverFactoryFromProto(envoy::config::bootstrap::v3::Bootstrap(), + typed_dns_resolver_config); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + verifyCaresDnsConfigDefault(cares); + EXPECT_EQ(dns_resolver_factory.name(), std::string(CaresDnsResolver)); +} + +// Test DNS resolver factory creation from proto with valid typed config +TEST_F(DnsFactoryTest, MakeDnsResolverFactoryFromProtoTestInCaresWithGoodTypedConfig) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; + + typed_dns_resolver_config.mutable_typed_config()->set_type_url( + "type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig"); + typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); + config.mutable_typed_dns_resolver_config()->MergeFrom(typed_dns_resolver_config); + Network::DnsResolverFactory& dns_resolver_factory = + Envoy::Network::createDnsResolverFactoryFromProto(config, typed_dns_resolver_config); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + verifyCaresDnsConfigDefault(cares); + EXPECT_EQ(dns_resolver_factory.name(), std::string(CaresDnsResolver)); +} + +// Test DNS resolver factory creation from proto with invalid typed config +TEST_F(DnsFactoryTest, MakeDnsResolverFactoryFromProtoTestInCaresWithInvalidTypedConfig) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; + + typed_dns_resolver_config.mutable_typed_config()->set_type_url("type.googleapis.com/foo"); + typed_dns_resolver_config.set_name("bar"); + config.mutable_typed_dns_resolver_config()->MergeFrom(typed_dns_resolver_config); + EXPECT_THROW_WITH_MESSAGE( + Envoy::Network::createDnsResolverFactoryFromProto(config, typed_dns_resolver_config), + Envoy::EnvoyException, "Didn't find a registered implementation for name: 'bar'"); +} + +} // namespace Network +} // namespace Envoy diff --git a/test/common/network/happy_eyeballs_connection_impl_test.cc b/test/common/network/happy_eyeballs_connection_impl_test.cc index 743fa110a606..faa25b383a5b 100644 --- a/test/common/network/happy_eyeballs_connection_impl_test.cc +++ b/test/common/network/happy_eyeballs_connection_impl_test.cc @@ -89,7 +89,7 @@ class HappyEyeballsConnectionImplTest : public testing::Test { } protected: - Event::MockDispatcher dispatcher_; + testing::NiceMock dispatcher_; testing::StrictMock* failover_timer_; MockTransportSocketFactory transport_socket_factory_; TransportSocketOptionsConstSharedPtr transport_socket_options_; @@ -134,6 +134,7 @@ TEST_F(HappyEyeballsConnectionImplTest, ConnectFailed) { EXPECT_CALL(*next_connections_.back(), connect()); EXPECT_CALL(*created_connections_[0], removeConnectionCallbacks(_)); EXPECT_CALL(*created_connections_[0], close(ConnectionCloseType::NoFlush)); + EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(*failover_timer_, disableTimer()); EXPECT_CALL(*failover_timer_, enableTimer(std::chrono::milliseconds(300), nullptr)); 
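The happy-eyeballs fixture switches the dispatcher to testing::NiceMock so unrelated dispatcher calls are tolerated, while the new EXPECT_CALL(dispatcher_, deferredDelete_(_)) still pins down the interaction that matters here. A short illustration of that trade-off with a hypothetical Scheduler mock (not Envoy's Event::Dispatcher):

#include "gmock/gmock.h"
#include "gtest/gtest.h"

// Hypothetical dispatcher-like interface; only the explicitly expected call is enforced.
class Scheduler {
public:
  virtual ~Scheduler() = default;
  virtual void post() = 0;
  virtual void deferredDelete() = 0;
};

class MockScheduler : public Scheduler {
public:
  MOCK_METHOD(void, post, (), (override));
  MOCK_METHOD(void, deferredDelete, (), (override));
};

TEST(NiceMockUsage, OnlyExplicitExpectationsAreEnforced) {
  testing::NiceMock<MockScheduler> scheduler;
  // This call is required; the test fails if it never happens.
  EXPECT_CALL(scheduler, deferredDelete());
  // post() has no expectation: NiceMock allows it without "uninteresting call" warnings.
  scheduler.post();
  scheduler.deferredDelete();
}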
connection_callbacks_[0]->onEvent(ConnectionEvent::RemoteClose); @@ -686,8 +687,10 @@ TEST_F(HappyEyeballsConnectionImplTest, SetConnectionStats) { next_connections_.push_back(std::make_unique>()); // setConnectionStats() should be applied to the newly created connection. + // Here, it is using stats latched by the happy eyeballs connection and so + // will be its own unique data structure. EXPECT_CALL(*next_connections_.back(), setConnectionStats(_)) - .WillOnce(Invoke([&](const Connection::ConnectionStats& s) -> void { EXPECT_EQ(&s, &cs); })); + .WillOnce(Invoke([&](const Connection::ConnectionStats& s) -> void { EXPECT_NE(&s, &cs); })); timeOutAndStartNextAttempt(); connectSecondAttempt(); diff --git a/test/common/network/listen_socket_impl_test.cc b/test/common/network/listen_socket_impl_test.cc index 8f514e993084..4ca83f960449 100644 --- a/test/common/network/listen_socket_impl_test.cc +++ b/test/common/network/listen_socket_impl_test.cc @@ -35,16 +35,15 @@ TEST(ConnectionSocketImplTest, LowerCaseRequestedServerName) { template class ListenSocketImplTest : public testing::TestWithParam { + using ListenSocketType = NetworkListenSocket>; + protected: ListenSocketImplTest() : version_(GetParam()) {} const Address::IpVersion version_; template - std::unique_ptr createListenSocketPtr(Args&&... args) { - using NetworkSocketTraitType = NetworkSocketTrait; - - return std::make_unique>( - std::forward(args)...); + std::unique_ptr createListenSocketPtr(Args&&... args) { + return std::make_unique(std::forward(args)...); } void testBindSpecificPort() { @@ -76,7 +75,7 @@ class ListenSocketImplTest : public testing::TestWithParam { EXPECT_CALL(*option, setOption(_, envoy::config::core::v3::SocketOption::STATE_PREBIND)) .WillOnce(Return(true)); options->emplace_back(std::move(option)); - std::unique_ptr socket1; + std::unique_ptr socket1; try { socket1 = createListenSocketPtr(addr, options, true); } catch (SocketBindException& e) { @@ -139,6 +138,19 @@ class ListenSocketImplTest : public testing::TestWithParam { EXPECT_GT(socket->connectionInfoProvider().localAddress()->ip()->port(), 0U); EXPECT_EQ(Type, socket->socketType()); } + + // Verify that a listen sockets that do not bind to port can be duplicated and closed. + void testNotBindToPort() { + auto local_address = version_ == Address::IpVersion::v4 ? Utility::getIpv6AnyAddress() + : Utility::getIpv4AnyAddress(); + auto socket = NetworkListenSocket>(local_address, nullptr, + /*bind_to_port=*/false); + auto dup_socket = socket.duplicate(); + EXPECT_FALSE(socket.isOpen()); + EXPECT_FALSE(dup_socket->isOpen()); + socket.close(); + dup_socket->close(); + } }; using ListenSocketImplTestTcp = ListenSocketImplTest; @@ -162,9 +174,23 @@ class TestListenSocket : public ListenSocketImpl { public: TestListenSocket(Address::InstanceConstSharedPtr address) : ListenSocketImpl(std::make_unique(), address) {} + + TestListenSocket(Address::IpVersion ip_version) + : ListenSocketImpl(/*io_handle=*/nullptr, ip_version == Address::IpVersion::v4 + ? 
Utility::getIpv4AnyAddress() + : Utility::getIpv6AnyAddress()) {} Socket::Type socketType() const override { return Socket::Type::Stream; } + + bool isOpen() const override { return ListenSocketImpl::isOpen(); } + void close() override { ListenSocketImpl::close(); } }; +TEST_P(ListenSocketImplTestTcp, NonIoHandleListenSocket) { + TestListenSocket sock(version_); + EXPECT_FALSE(sock.isOpen()); + sock.close(); +} + TEST_P(ListenSocketImplTestTcp, SetLocalAddress) { std::string address_str = "10.1.2.3"; if (version_ == Address::IpVersion::v6) { @@ -228,6 +254,10 @@ TEST_P(ListenSocketImplTestTcp, BindPortZero) { testBindPortZero(); } TEST_P(ListenSocketImplTestUdp, BindPortZero) { testBindPortZero(); } +TEST_P(ListenSocketImplTestTcp, NotBindToPortAccess) { testNotBindToPort(); } + +TEST_P(ListenSocketImplTestUdp, NotBindToPortAccess) { testNotBindToPort(); } + } // namespace } // namespace Network } // namespace Envoy diff --git a/test/common/protobuf/BUILD b/test/common/protobuf/BUILD index 1118273fdbac..1f61758acb1b 100644 --- a/test/common/protobuf/BUILD +++ b/test/common/protobuf/BUILD @@ -3,6 +3,7 @@ load( "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_package", + "envoy_proto_library", ) licenses(["notice"]) # Apache 2 @@ -20,10 +21,24 @@ envoy_cc_test( ], ) +envoy_proto_library( + name = "utility_test_protos", + srcs = [ + "utility_test_file_wip.proto", + "utility_test_file_wip_2.proto", + "utility_test_message_field_wip.proto", + ], + deps = [ + "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/annotations/v3:pkg", + ], +) + envoy_cc_test( name = "utility_test", srcs = ["utility_test.cc"], deps = [ + ":utility_test_protos_cc_proto", "//source/common/config:api_version_lib", "//source/common/protobuf:utility_lib", "//test/common/stats:stat_test_utility_lib", diff --git a/test/common/protobuf/message_validator_impl_test.cc b/test/common/protobuf/message_validator_impl_test.cc index 73dd2dbcba6b..18d6d8b81fd2 100644 --- a/test/common/protobuf/message_validator_impl_test.cc +++ b/test/common/protobuf/message_validator_impl_test.cc @@ -23,7 +23,8 @@ TEST(NullValidationVisitorImpl, UnknownField) { // The warning validation visitor logs and bumps stats on unknown fields TEST(WarningValidationVisitorImpl, UnknownField) { Stats::TestUtil::TestStore stats; - Stats::Counter& unknown_counter = stats.counter("counter"); + Stats::Counter& unknown_counter = stats.counter("unknown_counter"); + Stats::Counter& wip_counter = stats.counter("wip_counter"); WarningValidationVisitorImpl warning_validation_visitor; // we want to be executed. EXPECT_FALSE(warning_validation_visitor.skipValidation()); @@ -38,7 +39,7 @@ TEST(WarningValidationVisitorImpl, UnknownField) { warning_validation_visitor.onUnknownField("bar")); // When we set the stats counter, the above increments are transferred. EXPECT_EQ(0, unknown_counter.value()); - warning_validation_visitor.setUnknownCounter(unknown_counter); + warning_validation_visitor.setCounters(unknown_counter, wip_counter); EXPECT_EQ(2, unknown_counter.value()); // A third unknown field is tracked in stats post-initialization. 
EXPECT_LOG_CONTAINS("warn", "Unknown field: baz", diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index 045de7bb7595..c89d4f78c500 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -21,6 +21,9 @@ #include "source/common/protobuf/utility.h" #include "source/common/runtime/runtime_impl.h" +#include "test/common/protobuf/utility_test_file_wip.pb.h" +#include "test/common/protobuf/utility_test_file_wip_2.pb.h" +#include "test/common/protobuf/utility_test_message_field_wip.pb.h" #include "test/common/stats/stat_test_utility.h" #include "test/mocks/init/mocks.h" #include "test/mocks/local_info/mocks.h" @@ -36,6 +39,7 @@ #include "absl/container/node_hash_set.h" #include "gtest/gtest.h" #include "udpa/type/v1/typed_struct.pb.h" +#include "xds/type/v3/typed_struct.pb.h" using namespace std::chrono_literals; @@ -45,30 +49,17 @@ using testing::HasSubstr; class RuntimeStatsHelper : public TestScopedRuntime { public: - RuntimeStatsHelper(bool allow_deprecated_v2_api = false) + explicit RuntimeStatsHelper() : runtime_deprecated_feature_use_(store_.counter("runtime.deprecated_feature_use")), deprecated_feature_seen_since_process_start_( store_.gauge("runtime.deprecated_feature_seen_since_process_start", - Stats::Gauge::ImportMode::NeverImport)) { - if (allow_deprecated_v2_api) { - Runtime::LoaderSingleton::getExisting()->mergeValues({ - {"envoy.test_only.broken_in_production.enable_deprecated_v2_api", "true"}, - {"envoy.features.enable_all_deprecated_features", "true"}, - }); - } - } + Stats::Gauge::ImportMode::NeverImport)) {} Stats::Counter& runtime_deprecated_feature_use_; Stats::Gauge& deprecated_feature_seen_since_process_start_; }; class ProtobufUtilityTest : public testing::Test, protected RuntimeStatsHelper {}; -// TODO(htuch): During/before the v2 removal, cleanup the various examples that explicitly refer to -// v2 API protos and replace with upgrade examples not tie to the concrete API. -class ProtobufV2ApiUtilityTest : public testing::Test, protected RuntimeStatsHelper { -public: - ProtobufV2ApiUtilityTest() : RuntimeStatsHelper(true) {} -}; TEST_F(ProtobufUtilityTest, ConvertPercentNaNDouble) { envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; @@ -1009,35 +1000,40 @@ TEST_F(ProtobufUtilityTest, RedactTypedStruct) { EXPECT_TRUE(TestUtility::protoEqual(expected, actual)); } +template class TypedStructUtilityTest : public ProtobufUtilityTest {}; + +using TypedStructTypes = ::testing::Types; +TYPED_TEST_SUITE(TypedStructUtilityTest, TypedStructTypes); + // Empty `TypedStruct` can be trivially redacted. -TEST_F(ProtobufUtilityTest, RedactEmptyTypedStruct) { - udpa::type::v1::TypedStruct actual; +TYPED_TEST(TypedStructUtilityTest, RedactEmptyTypedStruct) { + TypeParam actual; TestUtility::loadFromYaml(R"EOF( type_url: type.googleapis.com/envoy.test.Sensitive )EOF", actual); - udpa::type::v1::TypedStruct expected = actual; + TypeParam expected = actual; MessageUtil::redact(actual); EXPECT_TRUE(TestUtility::protoEqual(expected, actual)); } -TEST_F(ProtobufUtilityTest, RedactTypedStructWithNoTypeUrl) { - udpa::type::v1::TypedStruct actual; +TYPED_TEST(TypedStructUtilityTest, RedactTypedStructWithNoTypeUrl) { + TypeParam actual; TestUtility::loadFromYaml(R"EOF( value: sensitive_string: This field is sensitive, but we have no way of knowing. 
)EOF", actual); - udpa::type::v1::TypedStruct expected = actual; + TypeParam expected = actual; MessageUtil::redact(actual); EXPECT_TRUE(TestUtility::protoEqual(expected, actual)); } // Messages packed into `TypedStruct` with unknown type URLs are skipped. -TEST_F(ProtobufUtilityTest, RedactTypedStructWithUnknownTypeUrl) { - udpa::type::v1::TypedStruct actual; +TYPED_TEST(TypedStructUtilityTest, RedactTypedStructWithUnknownTypeUrl) { + TypeParam actual; TestUtility::loadFromYaml(R"EOF( type_url: type.googleapis.com/envoy.unknown.Message value: @@ -1045,14 +1041,14 @@ type_url: type.googleapis.com/envoy.unknown.Message )EOF", actual); - udpa::type::v1::TypedStruct expected = actual; + TypeParam expected = actual; MessageUtil::redact(actual); EXPECT_TRUE(TestUtility::protoEqual(expected, actual)); } -TEST_F(ProtobufUtilityTest, RedactEmptyTypeUrlTypedStruct) { - udpa::type::v1::TypedStruct actual; - udpa::type::v1::TypedStruct expected = actual; +TYPED_TEST(TypedStructUtilityTest, RedactEmptyTypeUrlTypedStruct) { + TypeParam actual; + TypeParam expected = actual; MessageUtil::redact(actual); EXPECT_TRUE(TestUtility::protoEqual(expected, actual)); } @@ -1364,7 +1360,7 @@ TEST_F(ProtobufUtilityTest, AnyConvertAndValidateFailedValidation) { } // MessageUtility::unpackTo() with the wrong type throws. -TEST_F(ProtobufV2ApiUtilityTest, UnpackToWrongType) { +TEST_F(ProtobufUtilityTest, UnpackToWrongType) { ProtobufWkt::Duration source_duration; source_duration.set_seconds(42); ProtobufWkt::Any source_any; @@ -1591,6 +1587,76 @@ TEST(DurationUtilTest, OutOfRange) { } } +// Verify WIP accounting of the file based annotations. This test uses the strict validator to test +// that code path. +TEST_F(ProtobufUtilityTest, MessageInWipFile) { + Stats::TestUtil::TestStore stats; + Stats::Counter& wip_counter = stats.counter("wip_counter"); + ProtobufMessage::StrictValidationVisitorImpl validation_visitor; + + utility_test::file_wip::Foo foo; + EXPECT_LOG_CONTAINS( + "warning", + "message 'utility_test.file_wip.Foo' is contained in proto file " + "'test/common/protobuf/utility_test_file_wip.proto' marked as work-in-progress. API features " + "marked as work-in-progress are not considered stable, are not covered by the threat model, " + "are not supported by the security team, and are subject to breaking changes. Do not use " + "this feature without understanding each of the previous points.", + MessageUtil::checkForUnexpectedFields(foo, validation_visitor)); + + EXPECT_EQ(0, wip_counter.value()); + validation_visitor.setCounters(wip_counter); + EXPECT_EQ(1, wip_counter.value()); + + utility_test::file_wip_2::Foo foo2; + EXPECT_LOG_CONTAINS( + "warning", + "message 'utility_test.file_wip_2.Foo' is contained in proto file " + "'test/common/protobuf/utility_test_file_wip_2.proto' marked as work-in-progress. API " + "features marked as work-in-progress are not considered stable, are not covered by the " + "threat model, are not supported by the security team, and are subject to breaking changes. " + "Do not use this feature without understanding each of the previous points.", + MessageUtil::checkForUnexpectedFields(foo2, validation_visitor)); + + EXPECT_EQ(2, wip_counter.value()); +} + +// Verify WIP accounting for message and field annotations. This test uses the warning validator +// to test that code path. 
+TEST_F(ProtobufUtilityTest, MessageWip) { + Stats::TestUtil::TestStore stats; + Stats::Counter& unknown_counter = stats.counter("unknown_counter"); + Stats::Counter& wip_counter = stats.counter("wip_counter"); + ProtobufMessage::WarningValidationVisitorImpl validation_visitor; + + utility_test::message_field_wip::Foo foo; + EXPECT_LOG_CONTAINS( + "warning", + "message 'utility_test.message_field_wip.Foo' is marked as work-in-progress. API features " + "marked as work-in-progress are not considered stable, are not covered by the threat model, " + "are not supported by the security team, and are subject to breaking changes. Do not use " + "this feature without understanding each of the previous points.", + MessageUtil::checkForUnexpectedFields(foo, validation_visitor)); + + EXPECT_EQ(0, wip_counter.value()); + validation_visitor.setCounters(unknown_counter, wip_counter); + EXPECT_EQ(1, wip_counter.value()); + + utility_test::message_field_wip::Bar bar; + EXPECT_NO_LOGS(MessageUtil::checkForUnexpectedFields(bar, validation_visitor)); + + bar.set_test_field(true); + EXPECT_LOG_CONTAINS( + "warning", + "field 'utility_test.message_field_wip.Bar.test_field' is marked as work-in-progress. API " + "features marked as work-in-progress are not considered stable, are not covered by the " + "threat model, are not supported by the security team, and are subject to breaking changes. " + "Do not use this feature without understanding each of the previous points.", + MessageUtil::checkForUnexpectedFields(bar, validation_visitor)); + + EXPECT_EQ(2, wip_counter.value()); +} + class DeprecatedFieldsTest : public testing::Test, protected RuntimeStatsHelper { protected: void checkForDeprecation(const Protobuf::Message& message) { @@ -1890,51 +1956,6 @@ TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(FatalEnumGlobalOverride)) { checkForDeprecation(base)); } -// Verify that direct use of a hidden_envoy_deprecated field fails, but upgrade -// succeeds -TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(ManualDeprecatedFieldAddition)) { - // Create a base message and insert a deprecated field. When upgrading the - // deprecated field should be set as deprecated, and a warning should be logged - envoy::test::deprecation_test::Base base_should_warn = - TestUtility::parseYaml(R"EOF( - not_deprecated: field1 - is_deprecated: hidden_field1 - not_deprecated_message: - inner_not_deprecated: subfield1 - repeated_message: - - inner_not_deprecated: subfield2 - )EOF"); - - // Non-fatal checks for a deprecated field should log rather than throw an exception. - EXPECT_LOG_CONTAINS("warning", - "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'", - checkForDeprecation(base_should_warn)); - EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); - EXPECT_EQ(1, deprecated_feature_seen_since_process_start_.value()); - - // Create an upgraded message and insert a deprecated field. 
This is a bypass - // of the upgrading procedure validation, and should fail - envoy::test::deprecation_test::UpgradedBase base_should_fail = - TestUtility::parseYaml(R"EOF( - not_deprecated: field1 - hidden_envoy_deprecated_is_deprecated: hidden_field1 - not_deprecated_message: - inner_not_deprecated: subfield1 - repeated_message: - - inner_not_deprecated: subfield2 - )EOF"); - - EXPECT_THROW_WITH_REGEX( - MessageUtil::checkForUnexpectedFields(base_should_fail, - ProtobufMessage::getStrictValidationVisitor()), - ProtoValidationException, - "Illegal use of hidden_envoy_deprecated_ V2 field " - "'envoy.test.deprecation_test.UpgradedBase.hidden_envoy_deprecated_is_deprecated'"); - // The config will be rejected, so the feature will not be used. - EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); - EXPECT_EQ(1, deprecated_feature_seen_since_process_start_.value()); -} - class TimestampUtilTest : public testing::Test, public ::testing::WithParamInterface {}; TEST_P(TimestampUtilTest, SystemClockToTimestampTest) { diff --git a/test/common/protobuf/utility_test_file_wip.proto b/test/common/protobuf/utility_test_file_wip.proto new file mode 100644 index 000000000000..164f1b20d1aa --- /dev/null +++ b/test/common/protobuf/utility_test_file_wip.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package utility_test.file_wip; + +import "udpa/annotations/status.proto"; + +option (udpa.annotations.file_status).work_in_progress = true; + +message Foo { +} diff --git a/test/common/protobuf/utility_test_file_wip_2.proto b/test/common/protobuf/utility_test_file_wip_2.proto new file mode 100644 index 000000000000..bf3149099091 --- /dev/null +++ b/test/common/protobuf/utility_test_file_wip_2.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package utility_test.file_wip_2; + +import "xds/annotations/v3/status.proto"; + +option (xds.annotations.v3.file_status).work_in_progress = true; + +message Foo { +} diff --git a/test/common/protobuf/utility_test_message_field_wip.proto b/test/common/protobuf/utility_test_message_field_wip.proto new file mode 100644 index 000000000000..f1a2ce1c6479 --- /dev/null +++ b/test/common/protobuf/utility_test_message_field_wip.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package utility_test.message_field_wip; + +import "xds/annotations/v3/status.proto"; + +message Foo { + option (xds.annotations.v3.message_status).work_in_progress = true; +} + +message Bar { + bool test_field = 1 [(xds.annotations.v3.field_status).work_in_progress = true]; +} diff --git a/test/common/quic/active_quic_listener_test.cc b/test/common/quic/active_quic_listener_test.cc index 9f92852df876..c1632e55cec7 100644 --- a/test/common/quic/active_quic_listener_test.cc +++ b/test/common/quic/active_quic_listener_test.cc @@ -217,7 +217,7 @@ class ActiveQuicListenerTest : public testing::TestWithParam { protected: void initialize() { - test_address_ = Network::Utility::resolveUrl(absl::StrCat( - "tcp://", - Network::Test::getLoopbackAddressString(TestEnvironment::getIpVersionsForTest()[0]), - ":30")); + test_address_ = Network::Utility::resolveUrl( + absl::StrCat("tcp://", Network::Test::getLoopbackAddressUrlString(GetParam()), ":30")); Ssl::ClientContextSharedPtr context{new Ssl::MockClientContext()}; EXPECT_CALL(context_.context_manager_, createSslClientContext(_, _, _)) .WillOnce(Return(context)); @@ -49,7 +48,7 @@ class QuicNetworkConnectionTest : public Event::TestUsingSimulatedTime, public t QuicStatNames quic_stat_names_{store_.symbolTable()}; }; -TEST_F(QuicNetworkConnectionTest, BufferLimits) { 
+TEST_P(QuicNetworkConnectionTest, BufferLimits) { initialize(); quic::QuicConfig config; @@ -68,5 +67,7 @@ TEST_F(QuicNetworkConnectionTest, BufferLimits) { client_connection->close(Network::ConnectionCloseType::NoFlush); } +INSTANTIATE_TEST_SUITE_P(IpVersions, QuicNetworkConnectionTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest())); } // namespace Quic } // namespace Envoy diff --git a/test/common/quic/envoy_quic_client_session_test.cc b/test/common/quic/envoy_quic_client_session_test.cc index 56ee37b1a29c..0900cc4d9990 100644 --- a/test/common/quic/envoy_quic_client_session_test.cc +++ b/test/common/quic/envoy_quic_client_session_test.cc @@ -65,13 +65,12 @@ class TestEnvoyQuicClientConnection : public EnvoyQuicClientConnection { using EnvoyQuicClientConnection::connectionStats; }; -class EnvoyQuicClientSessionTest : public testing::Test { +class EnvoyQuicClientSessionTest : public testing::TestWithParam { public: EnvoyQuicClientSessionTest() : api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), - alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), - quic_version_([]() { return quic::CurrentSupportedHttp3Versions(); }()), + alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_({GetParam()}), peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), 12345)), self_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), @@ -161,7 +160,10 @@ class EnvoyQuicClientSessionTest : public testing::Test { QuicHttpClientConnectionImpl http_connection_; }; -TEST_F(EnvoyQuicClientSessionTest, NewStream) { +INSTANTIATE_TEST_SUITE_P(EnvoyQuicClientSessionTests, EnvoyQuicClientSessionTest, + testing::ValuesIn(quic::CurrentSupportedHttp3Versions())); + +TEST_P(EnvoyQuicClientSessionTest, NewStream) { Http::MockResponseDecoder response_decoder; Http::MockStreamCallbacks stream_callbacks; EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks); @@ -178,7 +180,7 @@ TEST_F(EnvoyQuicClientSessionTest, NewStream) { stream.OnStreamHeaderList(/*fin=*/true, headers.uncompressed_header_bytes(), headers); } -TEST_F(EnvoyQuicClientSessionTest, PacketLimits) { +TEST_P(EnvoyQuicClientSessionTest, PacketLimits) { // We always allow for reading packets, even if there's no stream. 
EXPECT_EQ(0, envoy_quic_session_.GetNumActiveStreams()); EXPECT_EQ(16, envoy_quic_session_.numPacketsExpectedPerEventLoop()); @@ -217,7 +219,7 @@ TEST_F(EnvoyQuicClientSessionTest, PacketLimits) { envoy_quic_session_.close(Network::ConnectionCloseType::NoFlush); } -TEST_F(EnvoyQuicClientSessionTest, OnResetFrame) { +TEST_P(EnvoyQuicClientSessionTest, OnResetFrame) { Http::MockResponseDecoder response_decoder; Http::MockStreamCallbacks stream_callbacks; EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks); @@ -235,7 +237,7 @@ TEST_F(EnvoyQuicClientSessionTest, OnResetFrame) { ->value()); } -TEST_F(EnvoyQuicClientSessionTest, SendResetFrame) { +TEST_P(EnvoyQuicClientSessionTest, SendResetFrame) { Http::MockResponseDecoder response_decoder; Http::MockStreamCallbacks stream_callbacks; EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks); @@ -252,7 +254,7 @@ TEST_F(EnvoyQuicClientSessionTest, SendResetFrame) { ->value()); } -TEST_F(EnvoyQuicClientSessionTest, OnGoAwayFrame) { +TEST_P(EnvoyQuicClientSessionTest, OnGoAwayFrame) { Http::MockResponseDecoder response_decoder; Http::MockStreamCallbacks stream_callbacks; @@ -260,7 +262,7 @@ TEST_F(EnvoyQuicClientSessionTest, OnGoAwayFrame) { envoy_quic_session_.OnHttp3GoAway(4u); } -TEST_F(EnvoyQuicClientSessionTest, ConnectionClose) { +TEST_P(EnvoyQuicClientSessionTest, ConnectionClose) { std::string error_details("dummy details"); quic::QuicErrorCode error(quic::QUIC_INVALID_FRAME_DATA); quic::QuicConnectionCloseFrame frame(quic_version_[0].transport_version, error, @@ -278,7 +280,7 @@ TEST_F(EnvoyQuicClientSessionTest, ConnectionClose) { ->value()); } -TEST_F(EnvoyQuicClientSessionTest, ConnectionCloseWithActiveStream) { +TEST_P(EnvoyQuicClientSessionTest, ConnectionCloseWithActiveStream) { Http::MockResponseDecoder response_decoder; Http::MockStreamCallbacks stream_callbacks; EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks); @@ -294,7 +296,7 @@ TEST_F(EnvoyQuicClientSessionTest, ConnectionCloseWithActiveStream) { ->value()); } -TEST_F(EnvoyQuicClientSessionTest, HandshakeTimesOutWithActiveStream) { +TEST_P(EnvoyQuicClientSessionTest, HandshakeTimesOutWithActiveStream) { Http::MockResponseDecoder response_decoder; Http::MockStreamCallbacks stream_callbacks; EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks); @@ -311,7 +313,7 @@ TEST_F(EnvoyQuicClientSessionTest, HandshakeTimesOutWithActiveStream) { ->value()); } -TEST_F(EnvoyQuicClientSessionTest, ConnectionClosePopulatesQuicVersionStats) { +TEST_P(EnvoyQuicClientSessionTest, ConnectionClosePopulatesQuicVersionStats) { std::string error_details("dummy details"); quic::QuicErrorCode error(quic::QUIC_INVALID_FRAME_DATA); quic::QuicConnectionCloseFrame frame(quic_version_[0].transport_version, error, @@ -322,10 +324,23 @@ TEST_F(EnvoyQuicClientSessionTest, ConnectionClosePopulatesQuicVersionStats) { EXPECT_EQ(absl::StrCat(quic::QuicErrorCodeToString(error), " with details: ", error_details), envoy_quic_session_.transportFailureReason()); EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state()); - EXPECT_EQ(1U, TestUtility::findCounter(store_, "http3.quic_version_rfc_v1")->value()); + std::string quic_version_stat_name; + switch (GetParam().transport_version) { + case quic::QUIC_VERSION_IETF_DRAFT_29: + quic_version_stat_name = "h3_29"; + break; + case quic::QUIC_VERSION_IETF_RFC_V1: + quic_version_stat_name = "rfc_v1"; + break; + default: + break; + } + 
EXPECT_EQ(1U, TestUtility::findCounter( + store_, absl::StrCat("http3.quic_version_", quic_version_stat_name)) + ->value()); } -TEST_F(EnvoyQuicClientSessionTest, IncomingUnidirectionalReadStream) { +TEST_P(EnvoyQuicClientSessionTest, IncomingUnidirectionalReadStream) { quic::QuicStreamId stream_id = 1u; quic::QuicStreamFrame stream_frame(stream_id, false, 0, "aaa"); envoy_quic_session_.OnStreamFrame(stream_frame); diff --git a/test/common/quic/envoy_quic_client_stream_test.cc b/test/common/quic/envoy_quic_client_stream_test.cc index 3433def5dd11..854e72166cee 100644 --- a/test/common/quic/envoy_quic_client_stream_test.cc +++ b/test/common/quic/envoy_quic_client_stream_test.cc @@ -55,8 +55,8 @@ class EnvoyQuicClientStreamTest : public testing::Test { std::unique_ptr(quic_connection_), *dispatcher_, quic_config_.GetInitialStreamFlowControlWindowToSend() * 2, crypto_stream_factory_), - stream_id_(4u), stats_({ALL_HTTP3_CODEC_STATS(POOL_COUNTER_PREFIX(scope_, "http3."), - POOL_GAUGE_PREFIX(scope_, "http3."))}), + stats_({ALL_HTTP3_CODEC_STATS(POOL_COUNTER_PREFIX(scope_, "http3."), + POOL_GAUGE_PREFIX(scope_, "http3."))}), quic_stream_(new EnvoyQuicClientStream(stream_id_, &quic_session_, quic::BIDIRECTIONAL, stats_, http3_options_)), request_headers_{{":authority", host_}, {":method", "POST"}, {":path", "/"}}, @@ -89,7 +89,7 @@ class EnvoyQuicClientStreamTest : public testing::Test { setQuicConfigWithDefaultValues(quic_session_.config()); quic_session_.OnConfigNegotiated(); - quic_connection_->setUpConnectionSocket(delegate_); + quic_connection_->setUpConnectionSocket(*quic_connection_->connectionSocket(), delegate_); spdy_response_headers_[":status"] = "200"; spdy_trailers_["key1"] = "value1"; @@ -136,7 +136,7 @@ class EnvoyQuicClientStreamTest : public testing::Test { EnvoyQuicClientConnection* quic_connection_; TestQuicCryptoClientStreamFactory crypto_stream_factory_; MockEnvoyQuicClientSession quic_session_; - quic::QuicStreamId stream_id_; + quic::QuicStreamId stream_id_{4u}; Stats::IsolatedStoreImpl scope_; Http::Http3::CodecStats stats_; envoy::config::core::v3::Http3ProtocolOptions http3_options_; diff --git a/test/common/quic/envoy_quic_proof_source_test.cc b/test/common/quic/envoy_quic_proof_source_test.cc index 9ef2e3fe5470..b481768eb91f 100644 --- a/test/common/quic/envoy_quic_proof_source_test.cc +++ b/test/common/quic/envoy_quic_proof_source_test.cc @@ -85,11 +85,12 @@ class SignatureVerifier { EXPECT_TRUE(cert_view->VerifySignature(payload, signature, sign_alg)); std::string error; + std::unique_ptr verify_details; EXPECT_EQ(quic::QUIC_SUCCESS, verifier_->VerifyCertChain("www.example.org", 54321, chain->certs, /*ocsp_response=*/"", /*cert_sct=*/"Fake SCT", - /*context=*/nullptr, &error, - /*details=*/nullptr, /*out_alert=*/nullptr, + /*context=*/nullptr, &error, &verify_details, + /*out_alert=*/nullptr, /*callback=*/nullptr)) << error; } diff --git a/test/common/quic/envoy_quic_proof_verifier_test.cc b/test/common/quic/envoy_quic_proof_verifier_test.cc index 21b7be3136a7..dea9daa038f5 100644 --- a/test/common/quic/envoy_quic_proof_verifier_test.cc +++ b/test/common/quic/envoy_quic_proof_verifier_test.cc @@ -7,6 +7,7 @@ #include "test/mocks/ssl/mocks.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/test_time.h" +#include "test/test_common/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -78,7 +79,7 @@ class EnvoyQuicProofVerifierTest : public testing::Test { const std::string empty_string_; const std::vector empty_string_list_; const std::string 
cert_chain_{quic::test::kTestCertificateChainPem}; - const std::string root_ca_cert_; + std::string root_ca_cert_; const std::string leaf_cert_; const absl::optional custom_validator_config_{ absl::nullopt}; @@ -96,11 +97,16 @@ TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainSuccess) { const std::string ocsp_response; const std::string cert_sct; std::string error_details; + std::unique_ptr verify_details; EXPECT_EQ(quic::QUIC_SUCCESS, verifier_->VerifyCertChain(std::string(cert_view->subject_alt_name_domains()[0]), 54321, {leaf_cert_}, ocsp_response, cert_sct, nullptr, - &error_details, nullptr, nullptr, nullptr)) + &error_details, &verify_details, nullptr, nullptr)) << error_details; + EXPECT_NE(verify_details, nullptr); + EXPECT_TRUE(static_cast(*verify_details).isValid()); + std::unique_ptr cloned(static_cast(verify_details->Clone())); + EXPECT_TRUE(cloned->isValid()); } TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureFromSsl) { @@ -110,13 +116,22 @@ TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureFromSsl) { const std::string ocsp_response; const std::string cert_sct; std::string error_details; + std::unique_ptr verify_details; EXPECT_EQ(quic::QUIC_FAILURE, verifier_->VerifyCertChain(std::string(cert_view->subject_alt_name_domains()[0]), 54321, {leaf_cert_}, ocsp_response, cert_sct, nullptr, - &error_details, nullptr, nullptr, nullptr)) + &error_details, &verify_details, nullptr, nullptr)) << error_details; EXPECT_EQ("X509_verify_cert: certificate verification error at depth 1: certificate has expired", error_details); + EXPECT_NE(verify_details, nullptr); + EXPECT_FALSE(static_cast(*verify_details).isValid()); +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureInvalidCA) { + root_ca_cert_ = "invalid root CA"; + EXPECT_THROW_WITH_REGEX(configCertVerificationDetails(true), EnvoyException, + "Failed to load trusted CA certificates from"); } TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureInvalidLeafCert) { @@ -125,9 +140,10 @@ TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureInvalidLeafCert) { const std::string cert_sct; std::string error_details; const std::vector certs{"invalid leaf cert"}; + std::unique_ptr verify_details; EXPECT_EQ(quic::QUIC_FAILURE, verifier_->VerifyCertChain("www.google.com", 54321, certs, ocsp_response, cert_sct, - nullptr, &error_details, nullptr, nullptr, nullptr)); + nullptr, &error_details, &verify_details, nullptr, nullptr)); EXPECT_EQ("d2i_X509: fail to parse DER", error_details); } @@ -139,10 +155,11 @@ TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureLeafCertWithGarbage) { const std::string cert_sct; std::string cert_with_trailing_garbage = absl::StrCat(leaf_cert_, "AAAAAA"); std::string error_details; + std::unique_ptr verify_details; EXPECT_EQ(quic::QUIC_FAILURE, verifier_->VerifyCertChain(std::string(cert_view->subject_alt_name_domains()[0]), 54321, {cert_with_trailing_garbage}, ocsp_response, cert_sct, - nullptr, &error_details, nullptr, nullptr, nullptr)) + nullptr, &error_details, &verify_details, nullptr, nullptr)) << error_details; EXPECT_EQ("There is trailing garbage in DER.", error_details); } @@ -152,9 +169,10 @@ TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureInvalidHost) { const std::string ocsp_response; const std::string cert_sct; std::string error_details; + std::unique_ptr verify_details; EXPECT_EQ(quic::QUIC_FAILURE, verifier_->VerifyCertChain("unknown.org", 54321, {leaf_cert_}, ocsp_response, cert_sct, - nullptr, &error_details, nullptr, nullptr, nullptr)) + nullptr, 
&error_details, &verify_details, nullptr, nullptr)) << error_details; EXPECT_EQ("Leaf certificate doesn't match hostname: unknown.org", error_details); } @@ -186,11 +204,84 @@ VdGXMAjeXhnOnPvmDi5hUz/uvI+Pg6cNmUoCRwSCnK/DazhA std::unique_ptr cert_view = quic::CertificateView::ParseSingleCertificate(chain[0]); ASSERT(cert_view); + std::unique_ptr verify_details; EXPECT_EQ(quic::QUIC_FAILURE, verifier_->VerifyCertChain("www.google.com", 54321, chain, ocsp_response, cert_sct, - nullptr, &error_details, nullptr, nullptr, nullptr)); + nullptr, &error_details, &verify_details, nullptr, nullptr)); EXPECT_EQ("Invalid leaf cert, only P-256 ECDSA certificates are supported", error_details); } +TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureNonServerAuthEKU) { + // Override the CA cert with cert copied from test/config/integration/certs/cacert.pem. + root_ca_cert_ = R"(-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIUdCu/mLip3X/We37vh3BA9u/nxakwDQYJKoZIhvcNAQEL +BQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM +DVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n +aW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjAwODA1MTkxNjAwWhcNMjIw +ODA1MTkxNjAwWjB2MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEW +MBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwETHlmdDEZMBcGA1UECwwQ +THlmdCBFbmdpbmVlcmluZzEQMA4GA1UEAwwHVGVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBALu2Ihi4DmaQG7zySZlWyM9SjxOXCI5840V7Hn0C +XoiI8sQQmKSC2YCzsaphQoJ0lXCi6Y47o5FkooYyLeNDQTGS0nh+IWm5RCyochtO +fnaKPv/hYxhpyFQEwkJkbF1Zt1s6j2rq5MzmbWZx090uXZEE82DNZ9QJaMPu6VWt +iwGoGoS5HF5HNlUVxLNUsklNH0ZfDafR7/LC2ty1vO1c6EJ6yCGiyJZZ7Ilbz27Q +HPAUd8CcDNKCHZDoMWkLSLN3Nj1MvPVZ5HDsHiNHXthP+zV8FQtloAuZ8Srsmlyg +rJREkc7gF3f6HrH5ShNhsRFFc53NUjDbYZuha1u4hiOE8lcCAwEAAaNjMGEwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJZL2ixTtL6V +xpNz4qekny4NchiHMB8GA1UdIwQYMBaAFJZL2ixTtL6VxpNz4qekny4NchiHMA0G +CSqGSIb3DQEBCwUAA4IBAQAcgG+AaCdrUFEVJDn9UsO7zqzQ3c1VOp+WAtAU8OQK +Oc4vJYVVKpDs8OZFxmukCeqm1gz2zDeH7TfgCs5UnLtkplx1YO1bd9qvserJVHiD +LAK+Yl24ZEbrHPaq0zI1RLchqYUOGWmi51pcXi1gsfc8DQ3GqIXoai6kYJeV3jFJ +jxpQSR32nx6oNN/6kVKlgmBjlWrOy7JyDXGim6Z97TzmS6Clctewmw/5gZ9g+M8e +g0ZdFbFkNUjzSNm44hiDX8nR6yJRn+gLaARaJvp1dnT+MlvofZuER17WYKH4OyMs +ie3qKR3an4KC20CtFbpZfv540BVuTTOCtQ5xqZ/LTE78 +-----END CERTIFICATE-----)"; + configCertVerificationDetails(true); + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + // This is a cert generated with the test/config/integration/certs/certs.sh. And the config that + // used to generate this cert is same as test/config/integration/certs/servercert.cfg but with + // 'extKeyUsage: clientAuth'. 
+ const std::string certs{R"(-----BEGIN CERTIFICATE----- +MIIEYjCCA0qgAwIBAgIUWzmfQSTX8xfzUzdByjCjCJN8E/wwDQYJKoZIhvcNAQEL +BQAwdjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcM +DVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsMEEx5ZnQgRW5n +aW5lZXJpbmcxEDAOBgNVBAMMB1Rlc3QgQ0EwHhcNMjEwOTI5MTY0NTM3WhcNMjMw +OTI5MTY0NTM3WjCBpjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx +FjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBEx5ZnQxGTAXBgNVBAsM +EEx5ZnQgRW5naW5lZXJpbmcxGjAYBgNVBAMMEVRlc3QgQmFja2VuZCBUZWFtMSQw +IgYJKoZIhvcNAQkBFhViYWNrZW5kLXRlYW1AbHlmdC5jb20wggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9JgaI7hxjPM0tsUna/QmivBdKbCrLnLW9Teak +RH/Ebg68ovyvrRIlybDT6XhKi+iVpzVY9kqxhGHgrFDgGLBakVMiYJ5EjIgHfoo4 +UUAHwIYbunJluYCgANzpprBsvTC/yFYDVMqUrjvwHsoYYVm36io994k9+t813b70 +o0l7/PraBsKkz8NcY2V2mrd/yHn/0HAhv3hl6iiJme9yURuDYQrae2ACSrQtsbel +KwdZ/Re71Z1awz0OQmAjMa2HuCop+Q/1QLnqBekT5+DH1qKUzJ3Jkq6NRkERXOpi +87j04rtCBteCogrO67qnuBZ2lH3jYEMb+lQdLkyNMLltBSdLAgMBAAGjgbYwgbMw +DAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBeAwEwYDVR0lBAwwCgYIKwYBBQUHAwIw +QQYDVR0RBDowOIYec3BpZmZlOi8vbHlmdC5jb20vYmFja2VuZC10ZWFtgghseWZ0 +LmNvbYIMd3d3Lmx5ZnQuY29tMB0GA1UdDgQWBBTZdxNltzTEpl+A1UpK8BsxkkIG +hjAfBgNVHSMEGDAWgBSWS9osU7S+lcaTc+KnpJ8uDXIYhzANBgkqhkiG9w0BAQsF +AAOCAQEAhiXkQJZ53L3uoQMX6xNhAFThomirnLm2RT10kPIbr5mmf3wcR8+EKrWX +dWCj56bk1tSDbQZqx33DSGbhvNaydggbo69Pkie5b7J9O7AWzT21NME6Jis9hHED +VUI63L+7SgJ2oZs0o8xccUaLFeknuNdQL4qUEwhMwCC8kYLz+c6g0qwDwZi1MtdL +YR4qm2S6KveVPGzBHpUjfWf/whSCM3JN5Fm8gWfC6d6XEYz6z1dZrj3lpwmhRgF6 +Wb72f68jzCQ3BFqKRFsJI2xz3EP6PoQ+e6EQjMpjQLomxIhIN/aTsgrKwA5wf6vQ +ZCFbredVxDBZuoVsfrKPSQa407Jj1Q== +-----END CERTIFICATE-----)"}; + std::stringstream pem_stream(certs); + std::vector chain = quic::CertificateView::LoadPemFromStream(&pem_stream); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(chain[0]); + ASSERT(cert_view); + std::unique_ptr verify_details; + EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyCertChain("lyft.com", 54321, chain, ocsp_response, cert_sct, nullptr, + &error_details, &verify_details, nullptr, nullptr)); + EXPECT_EQ("X509_verify_cert: certificate verification error at depth 0: unsupported certificate " + "purpose", + error_details); +} + } // namespace Quic } // namespace Envoy diff --git a/test/common/quic/envoy_quic_server_session_test.cc b/test/common/quic/envoy_quic_server_session_test.cc index a8bcadbaf4b1..102461ba738f 100644 --- a/test/common/quic/envoy_quic_server_session_test.cc +++ b/test/common/quic/envoy_quic_server_session_test.cc @@ -64,6 +64,8 @@ class TestEnvoyQuicServerSession : public EnvoyQuicServerSession { // behavior. return false; } + + using EnvoyQuicServerSession::GetCryptoStream; }; class ProofSourceDetailsSetter { @@ -290,6 +292,8 @@ TEST_F(EnvoyQuicServerSessionTest, NewStreamBeforeInitializingFilter) { TEST_F(EnvoyQuicServerSessionTest, NewStream) { installReadFilter(); + EXPECT_EQ(envoy_quic_session_.GetCryptoStream()->GetSsl(), + static_cast(*envoy_quic_session_.ssl()).ssl()); Http::MockRequestDecoder request_decoder; EXPECT_CALL(http_connection_callbacks_, newStream(_, false)) .WillOnce(testing::ReturnRef(request_decoder)); diff --git a/test/common/quic/envoy_quic_utils_test.cc b/test/common/quic/envoy_quic_utils_test.cc index 63d9393e42c0..99794c49700d 100644 --- a/test/common/quic/envoy_quic_utils_test.cc +++ b/test/common/quic/envoy_quic_utils_test.cc @@ -63,18 +63,27 @@ TEST(EnvoyQuicUtilsTest, HeadersConversion) { // converting to Envoy headers. 
headers_block.AppendValueOrAddHeader("key", "value1"); headers_block.AppendValueOrAddHeader("key", "value2"); + headers_block.AppendValueOrAddHeader("key1", "value1"); + headers_block.AppendValueOrAddHeader("key1", ""); + headers_block.AppendValueOrAddHeader("key1", "value2"); NiceMock validator; absl::string_view details; quic::QuicRstStreamErrorCode rst = quic::QUIC_REFUSED_STREAM; auto envoy_headers = spdyHeaderBlockToEnvoyTrailers( headers_block, 100, validator, details, rst); - // Envoy header block is 1 header larger because QUICHE header block does coalescing. - EXPECT_EQ(headers_block.size() + 1u, envoy_headers->size()); + // Envoy header block is 3 headers larger because QUICHE header block does coalescing. + EXPECT_EQ(headers_block.size() + 3u, envoy_headers->size()); EXPECT_EQ("www.google.com", envoy_headers->getHostValue()); EXPECT_EQ("/index.hml", envoy_headers->getPathValue()); EXPECT_EQ("https", envoy_headers->getSchemeValue()); EXPECT_EQ("value1", envoy_headers->get(Http::LowerCaseString("key"))[0]->value().getStringView()); EXPECT_EQ("value2", envoy_headers->get(Http::LowerCaseString("key"))[1]->value().getStringView()); + EXPECT_EQ("value1", + envoy_headers->get(Http::LowerCaseString("key1"))[0]->value().getStringView()); + EXPECT_EQ("", envoy_headers->get(Http::LowerCaseString("key1"))[1]->value().getStringView()); + EXPECT_EQ("value2", + envoy_headers->get(Http::LowerCaseString("key1"))[2]->value().getStringView()); + EXPECT_EQ(rst, quic::QUIC_REFUSED_STREAM); // With no error it will be untouched. quic::QuicHeaderList quic_headers; @@ -84,6 +93,9 @@ TEST(EnvoyQuicUtilsTest, HeadersConversion) { quic_headers.OnHeader(":scheme", "https"); quic_headers.OnHeader("key", "value1"); quic_headers.OnHeader("key", "value2"); + quic_headers.OnHeader("key1", "value1"); + quic_headers.OnHeader("key1", ""); + quic_headers.OnHeader("key1", "value2"); quic_headers.OnHeader("key-to-drop", ""); quic_headers.OnHeaderBlockEnd(0, 0); EXPECT_CALL(validator, validateHeader(_, _)) @@ -153,5 +165,32 @@ TEST(EnvoyQuicUtilsTest, TrailerCharacters) { EXPECT_EQ(rst, quic::QUIC_BAD_APPLICATION_PAYLOAD); } +TEST(EnvoyQuicUtilsTest, deduceSignatureAlgorithmFromNullPublicKey) { + std::string error; + EXPECT_EQ(0, deduceSignatureAlgorithmFromPublicKey(nullptr, &error)); + EXPECT_EQ("Invalid leaf cert, bad public key", error); +} + +TEST(EnvoyQuicUtilsTest, ConvertQuicConfig) { + envoy::config::core::v3::QuicProtocolOptions config; + quic::QuicConfig quic_config; + + // Test defaults. + convertQuicConfig(config, quic_config); + EXPECT_EQ(100, quic_config.GetMaxBidirectionalStreamsToSend()); + EXPECT_EQ(100, quic_config.GetMaxUnidirectionalStreamsToSend()); + EXPECT_EQ(16777216, quic_config.GetInitialMaxStreamDataBytesIncomingBidirectionalToSend()); + EXPECT_EQ(25165824, quic_config.GetInitialSessionFlowControlWindowToSend()); + + // Test converting values. 
+ config.mutable_max_concurrent_streams()->set_value(2); + config.mutable_initial_stream_window_size()->set_value(3); + config.mutable_initial_connection_window_size()->set_value(50); + convertQuicConfig(config, quic_config); + EXPECT_EQ(2, quic_config.GetMaxBidirectionalStreamsToSend()); + EXPECT_EQ(2, quic_config.GetMaxUnidirectionalStreamsToSend()); + EXPECT_EQ(3, quic_config.GetInitialMaxStreamDataBytesIncomingBidirectionalToSend()); +} + } // namespace Quic } // namespace Envoy diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index c540db9e123f..30e4ec1d4fe4 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -1215,6 +1215,168 @@ TEST_F(RouteMatcherTest, TestRoutesWithInvalidVirtualCluster) { "virtual clusters must define 'headers'"); } +// Validates basic usage of the match tree to resolve route actions. +TEST_F(RouteMatcherTest, TestMatchTree) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - lyft.com + matcher: + matcher_tree: + input: + name: request-headers + typed_config: + "@type": type.googleapis.com/envoy.type.matcher.v3.HttpRequestHeaderMatchInput + header_name: :path + exact_match_map: + map: + "/new_endpoint/foo": + action: + name: route + typed_config: + "@type": type.googleapis.com/envoy.config.route.v3.Route + match: + prefix: / + route: + cluster: root_ww2 + request_headers_to_add: + - header: + key: x-route-header + value: match_tree + "/new_endpoint/bar": + action: + name: route + typed_config: + "@type": type.googleapis.com/envoy.config.route.v3.Route + match: + prefix: / + route: + cluster: root_ww2 + request_headers_to_add: + - header: + key: x-route-header + value: match_tree_2 + "/new_endpoint/baz": + action: + name: route + typed_config: + "@type": type.googleapis.com/envoy.config.route.v3.Route + match: + prefix: /something/else + route: + cluster: root_ww2 + request_headers_to_add: + - header: + key: x-route-header + value: match_tree_2 + )EOF"; + + NiceMock stream_info; + factory_context_.cluster_manager_.initializeClusters( + {"www2", "root_www2", "www2_staging", "instant-server"}, {}); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + + { + Http::TestRequestHeaderMapImpl headers = genHeaders("lyft.com", "/new_endpoint/foo", "GET"); + const RouteEntry* route = config.route(headers, 0)->routeEntry(); + route->finalizeRequestHeaders(headers, stream_info, true); + EXPECT_EQ("match_tree", headers.get_("x-route-header")); + } + + { + Http::TestRequestHeaderMapImpl headers = genHeaders("lyft.com", "/new_endpoint/bar", "GET"); + const RouteEntry* route = config.route(headers, 0)->routeEntry(); + route->finalizeRequestHeaders(headers, stream_info, true); + EXPECT_EQ("match_tree_2", headers.get_("x-route-header")); + } + Http::TestRequestHeaderMapImpl headers = genHeaders("lyft.com", "/new_endpoint/baz", "GET"); + EXPECT_EQ(nullptr, config.route(headers, 0)); +} + +// Validates that we fail creating a route config if an invalid data input is used. 
+TEST_F(RouteMatcherTest, TestMatchInvalidInput) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - lyft.com + matcher: + matcher_tree: + input: + name: request-headers + typed_config: + "@type": type.googleapis.com/envoy.type.matcher.v3.HttpResponseHeaderMatchInput + header_name: :path + exact_match_map: + map: + "/new_endpoint/foo": + action: + name: route + typed_config: + "@type": type.googleapis.com/envoy.config.route.v3.Route + match: + prefix: / + route: + cluster: root_ww2 + request_headers_to_add: + - header: + key: x-route-header + value: match_tree + )EOF"; + + NiceMock stream_info; + factory_context_.cluster_manager_.initializeClusters( + {"www2", "root_www2", "www2_staging", "instant-server"}, {}); + EXPECT_THROW_WITH_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "requirement violation while creating route match tree: INVALID_ARGUMENT: Route table can " + "only match on request headers, saw " + "type.googleapis.com/envoy.type.matcher.v3.HttpResponseHeaderMatchInput"); +} + +// Validates that we fail creating a route config if an invalid data input is used. +TEST_F(RouteMatcherTest, TestMatchInvalidInputTwoMatchers) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: www2 + domains: + - lyft.com + routes: + - match: { prefix: "/" } + route: { cluster: "regex" } + matcher: + matcher_tree: + input: + name: request-headers + typed_config: + "@type": type.googleapis.com/envoy.type.matcher.v3.HttpRequestHeaderMatchInput + header_name: :path + exact_match_map: + map: + "/new_endpoint/foo": + action: + name: route + typed_config: + "@type": type.googleapis.com/envoy.config.route.v3.Route + match: + prefix: / + route: + cluster: root_ww2 + request_headers_to_add: + - header: + key: x-route-header + value: match_tree + )EOF"; + + NiceMock stream_info; + factory_context_.cluster_manager_.initializeClusters( + {"www2", "root_www2", "www2_staging", "instant-server"}, {}); + EXPECT_THROW_WITH_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "cannot set both matcher and routes on virtual host"); +} + // Validates behavior of request_headers_to_add at router, vhost, and route levels. TEST_F(RouteMatcherTest, TestAddRemoveRequestHeaders) { const std::string yaml = R"EOF( @@ -1710,29 +1872,6 @@ TEST_F(RouteMatcherTest, TestRequestHeadersToAddNoHostOrPseudoHeader) { } } -TEST_F(RouteMatcherTest, TestRequestHeadersToAddLegacyHostHeader) { - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.treat_host_like_authority", "false"}}); - - const std::string yaml = R"EOF( -virtual_hosts: - - name: www2 - domains: ["*"] - request_headers_to_add: - - header: - key: "host" - value: vhost-www2 - append: false -)EOF"; - - NiceMock stream_info; - - envoy::config::route::v3::RouteConfiguration route_config = parseRouteConfigurationFromYaml(yaml); - - EXPECT_NO_THROW(TestConfigImpl config(route_config, factory_context_, true)); -} - // Validate that we can't remove :-prefixed request headers. 
TEST_F(RouteMatcherTest, TestRequestHeadersToRemoveNoPseudoHeader) { for (const std::string& header : @@ -7854,50 +7993,6 @@ TEST_F(PerFilterConfigsTest, UnknownFilterAny) { "Didn't find a registered implementation for name: 'unknown.filter'"); } -TEST_F(PerFilterConfigsTest, DefaultFilterImplementationAnyPerVirtualHost) { - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.check_unsupported_typed_per_filter_config", "false"}}); - const std::string yaml = R"EOF( -virtual_hosts: - - name: bar - domains: ["*"] - routes: - - match: { prefix: "/" } - route: { cluster: baz } - typed_per_filter_config: - test.default.filter: - "@type": type.googleapis.com/google.protobuf.Struct - value: - seconds: 123 -)EOF"; - - factory_context_.cluster_manager_.initializeClusters({"baz"}, {}); - checkNoPerFilterConfig(yaml); -} - -TEST_F(PerFilterConfigsTest, DefaultFilterImplementationAnyPerRoute) { - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.check_unsupported_typed_per_filter_config", "false"}}); - const std::string yaml = R"EOF( -virtual_hosts: - - name: bar - domains: ["*"] - routes: - - match: { prefix: "/" } - route: { cluster: baz } - typed_per_filter_config: - test.default.filter: - "@type": type.googleapis.com/google.protobuf.Struct - value: - seconds: 123 -)EOF"; - - factory_context_.cluster_manager_.initializeClusters({"baz"}, {}); - checkNoPerFilterConfig(yaml); -} - TEST_F(PerFilterConfigsTest, DefaultFilterImplementationAnyWithCheckPerVirtualHost) { const std::string yaml = R"EOF( virtual_hosts: diff --git a/test/common/router/router_2_test.cc b/test/common/router/router_2_test.cc index 4b6b9ddd7288..4d37b31004f2 100644 --- a/test/common/router/router_2_test.cc +++ b/test/common/router/router_2_test.cc @@ -199,7 +199,7 @@ TEST_F(WatermarkTest, UpstreamWatermarks) { .value()); Buffer::OwnedImpl data; - EXPECT_CALL(encoder_, getStream()).Times(2).WillRepeatedly(ReturnRef(stream_)); + EXPECT_CALL(encoder_, getStream()).WillOnce(ReturnRef(stream_)); response_decoder_->decodeData(data, true); } diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 1ff5661d20e0..ef956636f488 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -412,43 +412,6 @@ TEST_F(RouterTest, PoolFailureDueToConnectTimeout) { "upstream_reset_before_response_started{connection failure,connect_timeout}"); } -TEST_F(RouterTest, PoolFailureDueToConnectTimeoutLegacy) { - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.treat_upstream_connect_timeout_as_connect_failure", "false"}}); - ON_CALL(callbacks_.route_->route_entry_, priority()) - .WillByDefault(Return(Upstream::ResourcePriority::High)); - EXPECT_CALL(cm_.thread_local_cluster_, - httpConnPool(Upstream::ResourcePriority::High, _, &router_)); - EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) - .WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks) - -> Http::ConnectionPool::Cancellable* { - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Timeout, "connect_timeout", - cm_.thread_local_cluster_.conn_pool_.host_); - return nullptr; - })); - - Http::TestResponseHeaderMapImpl response_headers{ - {":status", "503"}, {"content-length", "127"}, {"content-type", "text/plain"}}; - EXPECT_CALL(callbacks_, 
encodeHeaders_(HeaderMapEqualRef(&response_headers), false)); - EXPECT_CALL(callbacks_, encodeData(_, true)); - EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::LocalReset)); - EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_)) - .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void { - EXPECT_EQ(host_address_, host->address()); - })); - - Http::TestRequestHeaderMapImpl headers; - HttpTestUtility::addDefaultHeaders(headers); - router_.decodeHeaders(headers, true); - EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); - // Pool failure, so upstream request was not initiated. - EXPECT_EQ(0U, - callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); - EXPECT_EQ(callbacks_.details(), - "upstream_reset_before_response_started{local reset,connect_timeout}"); -} - TEST_F(RouterTest, Http1Upstream) { EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, absl::optional(), _)); EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) diff --git a/test/common/router/router_upstream_log_test.cc b/test/common/router/router_upstream_log_test.cc index 612790a8ee53..8722caaa5b80 100644 --- a/test/common/router/router_upstream_log_test.cc +++ b/test/common/router/router_upstream_log_test.cc @@ -44,7 +44,7 @@ name: accesslog log_format: text_format_source: inline_string: "%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL% %RESPONSE_CODE% - %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %REQ(:AUTHORITY)% %UPSTREAM_HOST% + %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %UPSTREAM_WIRE_BYTES_RECEIVED% %UPSTREAM_WIRE_BYTES_SENT% %REQ(:AUTHORITY)% %UPSTREAM_HOST% %UPSTREAM_LOCAL_ADDRESS% %RESP(X-UPSTREAM-HEADER)% %TRAILER(X-TRAILER)%\n" path: "/dev/null" )EOF"; @@ -232,7 +232,9 @@ class RouterUpstreamLogTest : public testing::Test { new Http::TestResponseHeaderMapImpl{{":status", "200"}}); EXPECT_CALL(context_.cluster_manager_.thread_local_cluster_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200)); - response_decoder->decodeHeaders(std::move(response_headers), true); + if (response_decoder != nullptr) { + response_decoder->decodeHeaders(std::move(response_headers), true); + } } std::vector output_; @@ -268,7 +270,8 @@ TEST_F(RouterUpstreamLogTest, LogSingleTry) { run(); EXPECT_EQ(output_.size(), 1U); - EXPECT_EQ(output_.front(), "GET / HTTP/1.0 200 - 0 0 host 10.0.0.5:9211 10.0.0.5:10211 - -\n"); + EXPECT_EQ(output_.front(), + "GET / HTTP/1.0 200 - 0 0 0 0 host 10.0.0.5:9211 10.0.0.5:10211 - -\n"); } TEST_F(RouterUpstreamLogTest, LogRetries) { @@ -276,8 +279,8 @@ TEST_F(RouterUpstreamLogTest, LogRetries) { runWithRetry(); EXPECT_EQ(output_.size(), 2U); - EXPECT_EQ(output_.front(), "GET / HTTP/1.0 0 UT 0 0 host 10.0.0.5:9211 10.0.0.5:10211 - -\n"); - EXPECT_EQ(output_.back(), "GET / HTTP/1.0 200 - 0 0 host 10.0.0.5:9211 10.0.0.5:10212 - -\n"); + EXPECT_EQ(output_.front(), "GET / HTTP/1.0 0 UT 0 0 0 0 host 10.0.0.5:9211 10.0.0.5:10211 - -\n"); + EXPECT_EQ(output_.back(), "GET / HTTP/1.0 200 - 0 0 0 0 host 10.0.0.5:9211 10.0.0.5:10212 - -\n"); } TEST_F(RouterUpstreamLogTest, LogFailure) { @@ -285,7 +288,8 @@ TEST_F(RouterUpstreamLogTest, LogFailure) { run(503, {}, {}, {}); EXPECT_EQ(output_.size(), 1U); - EXPECT_EQ(output_.front(), "GET / HTTP/1.0 503 - 0 0 host 10.0.0.5:9211 10.0.0.5:10211 - -\n"); + EXPECT_EQ(output_.front(), + "GET / HTTP/1.0 503 - 0 0 0 0 host 10.0.0.5:9211 10.0.0.5:10211 - -\n"); } TEST_F(RouterUpstreamLogTest, LogHeaders) { @@ -295,7 +299,7 @@ 
TEST_F(RouterUpstreamLogTest, LogHeaders) { EXPECT_EQ(output_.size(), 1U); EXPECT_EQ(output_.front(), - "GET /foo HTTP/1.0 200 - 0 0 host 10.0.0.5:9211 10.0.0.5:10211 abcdef value\n"); + "GET /foo HTTP/1.0 200 - 0 0 0 0 host 10.0.0.5:9211 10.0.0.5:10211 abcdef value\n"); } // Test timestamps and durations are emitted. diff --git a/test/common/router/scoped_rds_test.cc b/test/common/router/scoped_rds_test.cc index f1cb48f1da7b..3b9bc7120703 100644 --- a/test/common/router/scoped_rds_test.cc +++ b/test/common/router/scoped_rds_test.cc @@ -113,6 +113,214 @@ class ScopedRoutesTestBase : public testing::Test { NiceMock event_dispatcher_; }; +using InlineScopedRoutesTest = ScopedRoutesTestBase; + +constexpr char hcm_config_base[] = R"EOF( +codec_type: auto +stat_prefix: foo +http_filters: + - name: http_dynamo_filter + typed_config: +)EOF"; + +TEST_F(InlineScopedRoutesTest, RouteConfigurationNameNotSupported) { + const std::string hcm_config = absl::StrCat(hcm_config_base, R"EOF( +scoped_routes: + name: foo-scoped-routes + scope_key_builder: + fragments: + - header_value_extractor: + name: addr + element_separator: "," + index: 0 + scoped_route_configurations_list: + scoped_route_configurations: + - name: foo-scope + route_configuration_name: foo-route + key: + fragments: { string_key: foo-key } + - name: foo2-scope + route_configuration: + name: foo2 + virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: baz } + key: + fragments: { string_key: foo-key-2 } +)EOF"); + + EXPECT_THROW_WITH_REGEX( + ScopedRoutesConfigProviderUtil::create(parseHttpConnectionManagerFromYaml(hcm_config), + server_factory_context_, context_init_manager_, "foo.", + *config_provider_manager_), + EnvoyException, "Fetching routes via RDS \\(route_configuration_name\\) is not supported"); +} + +TEST_F(InlineScopedRoutesTest, RouteConfigurationRequired) { + const std::string hcm_config = absl::StrCat(hcm_config_base, R"EOF( +scoped_routes: + name: foo-scoped-routes + scope_key_builder: + fragments: + - header_value_extractor: + name: addr + element_separator: "," + index: 0 + scoped_route_configurations_list: + scoped_route_configurations: + - name: foo-scope + key: + fragments: { string_key: foo-key } +)EOF"); + + EXPECT_THROW_WITH_MESSAGE( + Envoy::Config::ConfigProviderPtr provider = ScopedRoutesConfigProviderUtil::create( + parseHttpConnectionManagerFromYaml(hcm_config), server_factory_context_, + context_init_manager_, "foo.", *config_provider_manager_), + EnvoyException, "You must specify a route_configuration with inline scoped routes."); +} + +TEST_F(InlineScopedRoutesTest, InlineRouteConfigurations) { + server_factory_context_.cluster_manager_.initializeClusters({"baz"}, {}); + const std::string hcm_config = absl::StrCat(hcm_config_base, R"EOF( +scoped_routes: + name: $0 + scope_key_builder: + fragments: + - header_value_extractor: + name: addr + element_separator: "," + index: 0 + scoped_route_configurations_list: + scoped_route_configurations: + - name: foo-scope + route_configuration: + name: foo + virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: baz } + key: + fragments: { string_key: foo-key } + - name: foo2-scope + route_configuration: + name: foo2 + virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: baz } + key: + fragments: { string_key: foo-key-2 } +)EOF"); + Envoy::Config::ConfigProviderPtr provider = ScopedRoutesConfigProviderUtil::create( + 
parseHttpConnectionManagerFromYaml(absl::Substitute(hcm_config, "foo-scoped-routes")), + server_factory_context_, context_init_manager_, "foo.", *config_provider_manager_); + ASSERT_THAT(provider->config(), Not(IsNull())); + EXPECT_EQ(provider->config() + ->getRouteConfig(TestRequestHeaderMapImpl{{"addr", "foo-key"}}) + ->name(), + "foo"); + EXPECT_EQ(provider->config() + ->getRouteConfig(TestRequestHeaderMapImpl{{"addr", "foo-key-2"}}) + ->name(), + "foo2"); + EXPECT_EQ(provider->config() + ->getRouteConfig(TestRequestHeaderMapImpl{{"addr", "foo-key,foo-key-2"}}) + ->name(), + "foo"); +} + +TEST_F(InlineScopedRoutesTest, ConfigLoadAndDump) { + server_factory_context_.cluster_manager_.initializeClusters({"baz"}, {}); + timeSystem().setSystemTime(std::chrono::milliseconds(1234567891234)); + const std::string hcm_config = absl::StrCat(hcm_config_base, R"EOF( +scoped_routes: + name: $0 + scope_key_builder: + fragments: + - header_value_extractor: + name: Addr + index: 0 + scoped_route_configurations_list: + scoped_route_configurations: + - name: foo + route_configuration: + name: foo + virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: baz } + key: + fragments: { string_key: "172.10.10.10" } + - name: foo2 + route_configuration: + name: foo2 + virtual_hosts: + - name: bar2 + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: baz } + key: + fragments: { string_key: "172.10.10.20" } +)EOF"); + Envoy::Config::ConfigProviderPtr inline_config_provider = ScopedRoutesConfigProviderUtil::create( + parseHttpConnectionManagerFromYaml(absl::Substitute(hcm_config, "foo-scoped-routes")), + server_factory_context_, context_init_manager_, "foo.", *config_provider_manager_); + UniversalStringMatcher universal_matcher; + ProtobufTypes::MessagePtr message = + server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"]( + universal_matcher); + const auto& scoped_routes_config_dump = + TestUtility::downcastAndValidate(*message); + + envoy::admin::v3::ScopedRoutesConfigDump expected_config_dump; + TestUtility::loadFromYaml(R"EOF( +inline_scoped_route_configs: + - name: foo-scoped-routes + scoped_route_configs: + - name: foo + "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration + route_configuration: + name: foo + virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: baz } + key: + fragments: { string_key: "172.10.10.10" } + - name: foo2 + "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration + route_configuration: + name: foo2 + virtual_hosts: + - name: bar2 + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: baz } + key: + fragments: { string_key: "172.10.10.20" } + last_updated: + seconds: 1234567891 + nanos: 234000000 +dynamic_scoped_route_configs: +)EOF", + expected_config_dump); + EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump)); +} + class ScopedRdsTest : public ScopedRoutesTestBase { protected: void setup(const OptionalHttpFilters optional_http_filters = OptionalHttpFilters()) { @@ -252,6 +460,25 @@ name: foo_scoped_routes "foo.scoped_rds.foo_scoped_routes.on_demand_scopes", Stats::Gauge::ImportMode::Accumulate)}; }; +TEST_F(ScopedRdsTest, EmptyRouteConfigurationNameFailsConfigUpdate) { + setup(); + init_watcher_.expectReady().Times(0); + const std::string config_yaml = R"EOF( +name: foo_scope +key: + fragments: + - string_key: x-foo-key +)EOF"; + const 
envoy::config::route::v3::ScopedRouteConfiguration resource = + parseScopedRouteConfigurationFromYaml(config_yaml); + const Envoy::Config::DecodedResourcesWrapper decoded_resources = + TestUtility::decodeResources({resource}); + context_init_manager_.initialize(init_watcher_); + + EXPECT_THROW_WITH_MESSAGE(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1"), + EnvoyException, "route_configuration_name is empty."); +} + // Test an exception will be throw when unknown factory in the per-virtualhost typed config. TEST_F(ScopedRdsTest, UnknownFactoryForPerVirtualHostTypedConfig) { setup(); @@ -791,52 +1018,7 @@ stat_prefix: foo index: 0 $1 )EOF"; - const std::string inline_scoped_route_configs_yaml = R"EOF( - scoped_route_configurations_list: - scoped_route_configurations: - - name: foo - route_configuration_name: foo-route-config - key: - fragments: { string_key: "172.10.10.10" } - - name: foo2 - route_configuration_name: foo-route-config2 - key: - fragments: { string_key: "172.10.10.20" } -)EOF"; - // Only load the inline scopes. - Envoy::Config::ConfigProviderPtr inline_config = ScopedRoutesConfigProviderUtil::create( - parseHttpConnectionManagerFromYaml(absl::Substitute(hcm_base_config_yaml, "foo-scoped-routes", - inline_scoped_route_configs_yaml)), - server_factory_context_, context_init_manager_, "foo.", *config_provider_manager_); - message_ptr = - server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"]( - universal_matcher); - const auto& scoped_routes_config_dump2 = - TestUtility::downcastAndValidate( - *message_ptr); - TestUtility::loadFromYaml(R"EOF( -inline_scoped_route_configs: - - name: foo-scoped-routes - scoped_route_configs: - - name: foo - "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration - route_configuration_name: foo-route-config - key: - fragments: { string_key: "172.10.10.10" } - - name: foo2 - "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration - route_configuration_name: foo-route-config2 - key: - fragments: { string_key: "172.10.10.20" } - last_updated: - seconds: 1234567891 - nanos: 234000000 -dynamic_scoped_route_configs: -)EOF", - expected_config_dump); - EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump2)); - // Now SRDS kicks off. 
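// ---------------------------------------------------------------------------
// Editorial sketch (not part of the diff): the negative scoped-routes tests
// above assert complementary constraints -- an inline
// scoped_route_configurations_list entry must embed a full route_configuration
// (and may not reference RDS by name), while an SRDS-delivered resource must
// carry a non-empty route_configuration_name. A hypothetical standalone
// validator expressing the same rules; the struct and function names are
// illustrative only and do not exist in Envoy, and the error strings simply
// echo the messages asserted above.
#include <stdexcept>
#include <string>

#include "gtest/gtest.h"

struct ScopedRouteSketch {
  bool has_inline_route_configuration = false;
  std::string route_configuration_name;
};

void validateScopedRouteSketch(const ScopedRouteSketch& scope, bool inline_provider) {
  if (inline_provider) {
    if (!scope.route_configuration_name.empty()) {
      throw std::runtime_error(
          "Fetching routes via RDS (route_configuration_name) is not supported.");
    }
    if (!scope.has_inline_route_configuration) {
      throw std::runtime_error(
          "You must specify a route_configuration with inline scoped routes.");
    }
  } else if (scope.route_configuration_name.empty()) {
    throw std::runtime_error("route_configuration_name is empty.");
  }
}

TEST(ScopedRouteValidationSketch, MirrorsNegativeAndPositiveCases) {
  // Inline provider referencing RDS by name: rejected.
  EXPECT_THROW(validateScopedRouteSketch({false, "foo-route"}, /*inline_provider=*/true),
               std::runtime_error);
  // Inline provider without an embedded route_configuration: rejected.
  EXPECT_THROW(validateScopedRouteSketch({false, ""}, /*inline_provider=*/true),
               std::runtime_error);
  // Dynamic (SRDS) resource with an empty route_configuration_name: rejected.
  EXPECT_THROW(validateScopedRouteSketch({false, ""}, /*inline_provider=*/false),
               std::runtime_error);
  // Valid inline and dynamic variants pass.
  EXPECT_NO_THROW(validateScopedRouteSketch({true, ""}, /*inline_provider=*/true));
  EXPECT_NO_THROW(
      validateScopedRouteSketch({false, "dynamic-foo-route-config"}, /*inline_provider=*/false));
}
// ---------------------------------------------------------------------------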
Protobuf::RepeatedPtrField resources; const auto resource = parseScopedRouteConfigurationFromYaml(R"EOF( name: dynamic-foo @@ -844,28 +1026,12 @@ route_configuration_name: dynamic-foo-route-config key: fragments: { string_key: "172.30.30.10" } )EOF"); - timeSystem().setSystemTime(std::chrono::milliseconds(1234567891567)); const auto decoded_resources = TestUtility::decodeResources({resource}); srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1"); TestUtility::loadFromYaml(R"EOF( inline_scoped_route_configs: - - name: foo-scoped-routes - scoped_route_configs: - - name: foo - "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration - route_configuration_name: foo-route-config - key: - fragments: { string_key: "172.10.10.10" } - - name: foo2 - "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration - route_configuration_name: foo-route-config2 - key: - fragments: { string_key: "172.10.10.20" } - last_updated: - seconds: 1234567891 - nanos: 234000000 dynamic_scoped_route_configs: - name: foo_scoped_routes scoped_route_configs: @@ -889,21 +1055,9 @@ route_configuration_name: dynamic-foo-route-config EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump3)); NiceMock mock_matcher; - EXPECT_CALL(mock_matcher, match("foo")).WillOnce(Return(true)); - EXPECT_CALL(mock_matcher, match("foo2")).WillOnce(Return(false)); EXPECT_CALL(mock_matcher, match("dynamic-foo")).WillOnce(Return(false)); TestUtility::loadFromYaml(R"EOF( inline_scoped_route_configs: - - name: foo-scoped-routes - scoped_route_configs: - - name: foo - "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration - route_configuration_name: foo-route-config - key: - fragments: { string_key: "172.10.10.10" } - last_updated: - seconds: 1234567891 - nanos: 234000000 dynamic_scoped_route_configs: - name: foo_scoped_routes last_updated: @@ -920,15 +1074,9 @@ route_configuration_name: dynamic-foo-route-config *message_ptr); EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump4)); - EXPECT_CALL(mock_matcher, match("foo")).WillOnce(Return(false)); - EXPECT_CALL(mock_matcher, match("foo2")).WillOnce(Return(false)); EXPECT_CALL(mock_matcher, match("dynamic-foo")).WillOnce(Return(true)); TestUtility::loadFromYaml(R"EOF( inline_scoped_route_configs: - - name: foo-scoped-routes - last_updated: - seconds: 1234567891 - nanos: 234000000 dynamic_scoped_route_configs: - name: foo_scoped_routes scoped_route_configs: @@ -953,22 +1101,68 @@ route_configuration_name: dynamic-foo-route-config srds_subscription_->onConfigUpdate({}, "2"); TestUtility::loadFromYaml(R"EOF( +inline_scoped_route_configs: +dynamic_scoped_route_configs: + - name: foo_scoped_routes + last_updated: + seconds: 1234567891 + nanos: 567000000 + version_info: "2" +)EOF", + expected_config_dump); + message_ptr = + server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"]( + universal_matcher); + const auto& scoped_routes_config_dump6 = + TestUtility::downcastAndValidate( + *message_ptr); + EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump6)); + + const std::string inline_scoped_route_configs_yaml = R"EOF( + scoped_route_configurations_list: + scoped_route_configurations: + - name: foo + route_configuration: + name: foo + virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: baz } + key: + fragments: { string_key: "172.10.10.10" } +)EOF"; + 
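// ---------------------------------------------------------------------------
// Editorial sketch (not part of the diff): the last_updated values asserted in
// the config dumps above come straight from the simulated clock. Setting the
// system time to 1234567891567 ms splits into 1234567891 seconds plus
// 567000000 nanoseconds, which is exactly what the expected YAML encodes (the
// earlier dump uses 1234567891234 ms, hence nanos: 234000000). A self-contained
// check of that arithmetic using only <chrono> and gtest.
#include <chrono>

#include "gtest/gtest.h"

TEST(ConfigDumpTimestampSketch, MillisecondsSplitIntoSecondsAndNanos) {
  const std::chrono::milliseconds system_time{1234567891567};
  const auto seconds = std::chrono::duration_cast<std::chrono::seconds>(system_time);
  const auto nanos =
      std::chrono::duration_cast<std::chrono::nanoseconds>(system_time - seconds);
  EXPECT_EQ(1234567891, seconds.count());
  EXPECT_EQ(567000000, nanos.count());
}
// ---------------------------------------------------------------------------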
server_factory_context_.cluster_manager_.initializeClusters({"baz"}, {}); + Envoy::Config::ConfigProviderPtr inline_config = ScopedRoutesConfigProviderUtil::create( + parseHttpConnectionManagerFromYaml(absl::Substitute(hcm_base_config_yaml, "foo-scoped-routes", + inline_scoped_route_configs_yaml)), + server_factory_context_, context_init_manager_, "foo.", *config_provider_manager_); + message_ptr = + server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"]( + universal_matcher); + const auto& scoped_routes_config_dump7 = + TestUtility::downcastAndValidate( + *message_ptr); + TestUtility::loadFromYaml(R"EOF( inline_scoped_route_configs: - name: foo-scoped-routes scoped_route_configs: - name: foo "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration - route_configuration_name: foo-route-config + route_configuration: + name: foo + virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: baz } key: fragments: { string_key: "172.10.10.10" } - - name: foo2 - "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration - route_configuration_name: foo-route-config2 - key: - fragments: { string_key: "172.10.10.20" } last_updated: seconds: 1234567891 - nanos: 234000000 + nanos: 567000000 dynamic_scoped_route_configs: - name: foo_scoped_routes last_updated: @@ -977,16 +1171,9 @@ route_configuration_name: dynamic-foo-route-config version_info: "2" )EOF", expected_config_dump); - message_ptr = - server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"]( - universal_matcher); - const auto& scoped_routes_config_dump6 = - TestUtility::downcastAndValidate( - *message_ptr); - EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump6)); + EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump7)); } -// Tests that SRDS only allows creation of delta static config providers. TEST_F(ScopedRdsTest, DeltaStaticConfigProviderOnly) { // Use match all regex due to lack of distinctive matchable output for // coverage test. 
diff --git a/test/common/shared_pool/shared_pool_test.cc b/test/common/shared_pool/shared_pool_test.cc index 4eb47b953d24..c82f6d2dcfda 100644 --- a/test/common/shared_pool/shared_pool_test.cc +++ b/test/common/shared_pool/shared_pool_test.cc @@ -60,15 +60,6 @@ class SharedPoolTest : public testing::Test { go.WaitForNotification(); } - void clearPointerOnMainThread(std::shared_ptr& intptr) { - absl::Notification go; - dispatcher_->post([&intptr, &go]() { - intptr.reset(); - go.Notify(); - }); - go.WaitForNotification(); - } - Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; Thread::ThreadPtr dispatcher_thread_; @@ -155,7 +146,6 @@ TEST_F(SharedPoolTest, GetObjectAndDeleteObjectRaceForSameHashValue) { go_.Notify(); }); go_.WaitForNotification(); - clearPointerOnMainThread(o2); deferredDeleteSharedPoolOnMainThread(pool); } @@ -181,7 +171,6 @@ TEST_F(SharedPoolTest, RaceCondtionForGetObjectWithObjectDeleter) { pool->sync().signal(ObjectSharedPool::ObjectDeleterEntry); thread->join(); EXPECT_EQ(4, *o2); - clearPointerOnMainThread(o2); deferredDeleteSharedPoolOnMainThread(pool); } diff --git a/test/common/singleton/manager_impl_test.cc b/test/common/singleton/manager_impl_test.cc index d3184bae8f58..64e08cdb9384 100644 --- a/test/common/singleton/manager_impl_test.cc +++ b/test/common/singleton/manager_impl_test.cc @@ -42,6 +42,23 @@ TEST(SingletonManagerImplTest, Basic) { singleton.reset(); } +TEST(SingletonManagerImplTest, NonConstructingGetTyped) { + ManagerImpl manager(Thread::threadFactoryForTest()); + + // Access without first constructing should be null. + EXPECT_EQ(nullptr, manager.getTyped("test_singleton")); + + std::shared_ptr singleton = std::make_shared(); + // Use a construct on first use getter. + EXPECT_EQ(singleton, manager.get("test_singleton", [singleton] { return singleton; })); + // Now access should return the constructed singleton. 
+ EXPECT_EQ(singleton, manager.getTyped("test_singleton")); + EXPECT_EQ(1UL, singleton.use_count()); + + EXPECT_CALL(*singleton, onDestroy()); + singleton.reset(); +} + } // namespace } // namespace Singleton } // namespace Envoy diff --git a/test/common/stats/histogram_impl_test.cc b/test/common/stats/histogram_impl_test.cc index b55f25c83497..8681b8dc8945 100644 --- a/test/common/stats/histogram_impl_test.cc +++ b/test/common/stats/histogram_impl_test.cc @@ -95,5 +95,17 @@ TEST_F(HistogramSettingsImplTest, Priority) { EXPECT_EQ(settings_->buckets("abcd"), ConstSupportedBuckets({1, 2})); } +TEST_F(HistogramSettingsImplTest, ScaledPercent) { + envoy::config::metrics::v3::HistogramBucketSettings setting; + setting.mutable_match()->set_prefix("a"); + setting.mutable_buckets()->Add(0.1); + setting.mutable_buckets()->Add(2); + buckets_configs_.push_back(setting); + + initialize(); + EXPECT_EQ(settings_->buckets("test"), settings_->defaultBuckets()); + EXPECT_EQ(settings_->buckets("abcd"), ConstSupportedBuckets({0.1, 2})); +} + } // namespace Stats } // namespace Envoy diff --git a/test/common/stats/stat_test_utility.h b/test/common/stats/stat_test_utility.h index ea9e5b15c377..875dcf3b5a2f 100644 --- a/test/common/stats/stat_test_utility.h +++ b/test/common/stats/stat_test_utility.h @@ -155,10 +155,25 @@ class TestStore : public SymbolTableProvider, public IsolatedStoreImpl { GaugeOptConstRef findGaugeByString(const std::string& name) const; HistogramOptConstRef findHistogramByString(const std::string& name) const; + void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) override { + histogram_values_map_[histogram.name()].push_back(value); + } + + std::vector histogramValues(const std::string& name, bool clear) { + auto it = histogram_values_map_.find(name); + ASSERT(it != histogram_values_map_.end(), absl::StrCat("Couldn't find histogram ", name)); + std::vector copy = it->second; + if (clear) { + it->second.clear(); + } + return copy; + } + private: absl::flat_hash_map counter_map_; absl::flat_hash_map gauge_map_; absl::flat_hash_map histogram_map_; + absl::flat_hash_map> histogram_values_map_; }; // Compares the memory consumed against an exact expected value, but only on diff --git a/test/common/stream_info/test_util.h b/test/common/stream_info/test_util.h index 8026bde7de90..2cebca68cd1a 100644 --- a/test/common/stream_info/test_util.h +++ b/test/common/stream_info/test_util.h @@ -219,6 +219,24 @@ class TestStreamInfo : public StreamInfo::StreamInfo { absl::optional attemptCount() const override { return attempt_count_; } + const Envoy::StreamInfo::BytesMeterSharedPtr& getUpstreamBytesMeter() const override { + return upstream_bytes_meter_; + } + + const Envoy::StreamInfo::BytesMeterSharedPtr& getDownstreamBytesMeter() const override { + return downstream_bytes_meter_; + } + + void setUpstreamBytesMeter( + const Envoy::StreamInfo::BytesMeterSharedPtr& upstream_bytes_meter) override { + upstream_bytes_meter_ = upstream_bytes_meter; + } + + void setDownstreamBytesMeter( + const Envoy::StreamInfo::BytesMeterSharedPtr& downstream_bytes_meter) override { + downstream_bytes_meter_ = downstream_bytes_meter; + } + Random::RandomGeneratorImpl random_; SystemTime start_time_; MonotonicTime start_time_monotonic_; @@ -262,6 +280,10 @@ class TestStreamInfo : public StreamInfo::StreamInfo { Tracing::Reason trace_reason_{Tracing::Reason::NotTraceable}; absl::optional upstream_connection_id_; absl::optional attempt_count_; + Envoy::StreamInfo::BytesMeterSharedPtr upstream_bytes_meter_{ 
+ std::make_shared()}; + Envoy::StreamInfo::BytesMeterSharedPtr downstream_bytes_meter_{ + std::make_shared()}; }; } // namespace Envoy diff --git a/test/common/tcp/conn_pool_test.cc b/test/common/tcp/conn_pool_test.cc index 04178b7207b3..59766ef3c4e1 100644 --- a/test/common/tcp/conn_pool_test.cc +++ b/test/common/tcp/conn_pool_test.cc @@ -61,10 +61,11 @@ struct ConnPoolCallbacks : public Tcp::ConnectionPool::Callbacks { pool_ready_.ready(); } - void onPoolFailure(ConnectionPool::PoolFailureReason reason, absl::string_view, + void onPoolFailure(ConnectionPool::PoolFailureReason reason, absl::string_view failure_reason, Upstream::HostDescriptionConstSharedPtr host) override { reason_ = reason; host_ = host; + failure_reason_string_ = std::string(failure_reason); pool_failure_.ready(); } @@ -73,6 +74,7 @@ struct ConnPoolCallbacks : public Tcp::ConnectionPool::Callbacks { ReadyWatcher pool_ready_; ConnectionPool::ConnectionDataPtr conn_data_{}; absl::optional reason_; + std::string failure_reason_string_; Upstream::HostDescriptionConstSharedPtr host_; Ssl::ConnectionInfoConstSharedPtr ssl_; }; @@ -321,6 +323,7 @@ class TcpConnPoolImplDestructorTest : public Event::TestUsingSimulatedTime, void prepareConn() { connection_ = new StrictMock(); + EXPECT_CALL(*connection_, transportFailureReason()).Times(AtLeast(0)); EXPECT_CALL(*connection_, setBufferLimits(0)); EXPECT_CALL(*connection_, detectEarlyCloseWhenReadDisabled(false)); EXPECT_CALL(*connection_, addConnectionCallbacks(_)); @@ -650,10 +653,13 @@ TEST_P(TcpConnPoolImplTest, RemoteConnectFailure) { EXPECT_CALL(*conn_pool_->test_conns_[0].connect_timer_, disableTimer()); EXPECT_CALL(*conn_pool_, onConnDestroyedForTest()); + EXPECT_CALL(*conn_pool_->test_conns_[0].connection_, transportFailureReason()) + .WillOnce(Return("foo")); conn_pool_->test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(ConnectionPool::PoolFailureReason::RemoteConnectionFailure, callbacks.reason_); + EXPECT_EQ("foo", callbacks.failure_reason_string_); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value()); EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_failure_eject_.value()); diff --git a/test/common/thread_local/thread_local_impl_test.cc b/test/common/thread_local/thread_local_impl_test.cc index f6937240803d..d9a774e8bf9a 100644 --- a/test/common/thread_local/thread_local_impl_test.cc +++ b/test/common/thread_local/thread_local_impl_test.cc @@ -17,17 +17,17 @@ namespace ThreadLocal { TEST(MainThreadVerificationTest, All) { // Before threading is on, assertion on main thread should be true. - EXPECT_TRUE(Thread::MainThread::isMainThread()); + EXPECT_TRUE(Thread::MainThread::isMainOrTestThread()); { InstanceImpl tls; // Tls instance has been initialized. // Call to main thread verification should succeed in main thread. - EXPECT_TRUE(Thread::MainThread::isMainThread()); + EXPECT_TRUE(Thread::MainThread::isMainOrTestThread()); tls.shutdownGlobalThreading(); tls.shutdownThread(); } // After threading is off, assertion on main thread should be true. - EXPECT_TRUE(Thread::MainThread::isMainThread()); + EXPECT_TRUE(Thread::MainThread::isMainOrTestThread()); } class TestThreadLocalObject : public ThreadLocalObject { @@ -301,7 +301,7 @@ TEST(ThreadLocalInstanceImplDispatcherTest, Dispatcher) { // Verify we have the expected dispatcher for the new thread thread. EXPECT_EQ(thread_dispatcher.get(), &tls.dispatcher()); // Verify that it is inside the worker thread. 
- EXPECT_FALSE(Thread::MainThread::isMainThread()); + EXPECT_FALSE(Thread::MainThread::isMainOrTestThread()); }); thread->join(); diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 684e16be97e7..436ff9945cb3 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -35,6 +35,10 @@ envoy_cc_test( envoy_cc_test( name = "cluster_manager_impl_test", srcs = ["cluster_manager_impl_test.cc"], + args = [ + # Force creation of c-ares DnsResolverImpl when running test on macOS. + "--runtime-feature-disable-for-tests=envoy.restart_features.use_apple_api_for_dns_lookups", + ], external_deps = [ "abseil_optional", ], @@ -42,6 +46,7 @@ envoy_cc_test( ":test_cluster_manager", "//source/common/router:context_lib", "//source/common/upstream:load_balancer_factory_base_lib", + "//source/extensions/network/dns_resolver/cares:config", "//source/extensions/transport_sockets/tls:config", "//test/config:v2_link_hacks", "//test/integration/load_balancers:custom_lb_policy", @@ -141,6 +146,7 @@ envoy_cc_benchmark_binary( "//source/common/config:grpc_subscription_lib", "//source/common/config:protobuf_link_hacks", "//source/common/config:utility_lib", + "//source/common/config/xds_mux:grpc_mux_lib", "//source/common/upstream:eds_lib", "//source/extensions/transport_sockets/raw_buffer:config", "//source/server:transport_socket_config_lib", @@ -666,6 +672,8 @@ envoy_cc_test( "//test/mocks/upstream:cluster_manager_mocks", "//test/mocks/upstream:health_checker_mocks", "//test/mocks/upstream:priority_set_mocks", + "//test/mocks/upstream:thread_aware_load_balancer_mocks", + "//test/mocks/upstream:typed_load_balancer_factory_mocks", "//test/test_common:registry_lib", "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", @@ -827,6 +835,7 @@ envoy_proto_library( srcs = ["round_robin_load_balancer_fuzz.proto"], deps = [ "//test/common/upstream:zone_aware_load_balancer_fuzz_proto", + "@envoy_api//envoy/config/cluster/v3:pkg", ], ) diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index bfadeb85b292..054e51b9e818 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -22,6 +22,7 @@ #include "test/mocks/upstream/load_balancer_context.h" #include "test/mocks/upstream/thread_aware_load_balancer.h" #include "test/test_common/test_runtime.h" +#include "test/test_common/utility.h" namespace Envoy { namespace Upstream { @@ -69,11 +70,23 @@ std::string clustersJson(const std::vector& clusters) { return fmt::sprintf("\"clusters\": [%s]", absl::StrJoin(clusters, ",")); } +void verifyCaresDnsConfigAndUnpack( + const envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config, + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig& cares) { + // Verify typed DNS resolver config is c-ares. 
+ EXPECT_EQ(typed_dns_resolver_config.name(), std::string(Network::CaresDnsResolver)); + EXPECT_EQ( + typed_dns_resolver_config.typed_config().type_url(), + "type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig"); + typed_dns_resolver_config.typed_config().UnpackTo(&cares); +} + class ClusterManagerImplTest : public testing::Test { public: ClusterManagerImplTest() : http_context_(factory_.stats_.symbolTable()), grpc_context_(factory_.stats_.symbolTable()), - router_context_(factory_.stats_.symbolTable()) {} + router_context_(factory_.stats_.symbolTable()), + registered_dns_factory_(dns_resolver_factory_) {} void create(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) { cluster_manager_ = std::make_unique( @@ -177,12 +190,10 @@ class ClusterManagerImplTest : public testing::Test { Http::ContextImpl http_context_; Grpc::ContextImpl grpc_context_; Router::ContextImpl router_context_; + NiceMock dns_resolver_factory_; + Registry::InjectFactory registered_dns_factory_; }; -MATCHER_P(CustomDnsResolversSizeEquals, expectedResolvers, "") { - return expectedResolvers.size() == arg.size(); -} - envoy::config::bootstrap::v3::Bootstrap defaultConfig() { const std::string yaml = R"EOF( static_resources: @@ -2203,7 +2214,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemove) { )EOF"; std::shared_ptr dns_resolver(new Network::MockDnsResolver()); - EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)).WillOnce(Return(dns_resolver)); + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)).WillOnce(Return(dns_resolver)); Network::DnsResolver::ResolveCb dns_callback; Event::MockTimer* dns_timer_ = new NiceMock(&factory_.dispatcher_); @@ -2340,7 +2351,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveWithTls) { )EOF"; std::shared_ptr dns_resolver(new Network::MockDnsResolver()); - EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)).WillOnce(Return(dns_resolver)); + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)).WillOnce(Return(dns_resolver)); Network::DnsResolver::ResolveCb dns_callback; Event::MockTimer* dns_timer_ = new NiceMock(&factory_.dispatcher_); @@ -2555,7 +2566,7 @@ TEST_F(ClusterManagerImplTest, UseTcpInDefaultDnsResolver) { std::shared_ptr dns_resolver(new Network::MockDnsResolver()); // As custom resolvers are not specified in config, this method should not be called, // resolver from context should be used instead. 
- EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)).Times(0); + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)).Times(0); Network::DnsResolver::ResolveCb dns_callback; Network::MockActiveDnsQuery active_dns_query; @@ -2581,18 +2592,61 @@ TEST_F(ClusterManagerImplTest, CustomDnsResolverSpecifiedViaDeprecatedField) { )EOF"; std::shared_ptr dns_resolver(new Network::MockDnsResolver()); - auto resolvers = envoy::config::core::v3::Address(); - resolvers.mutable_socket_address()->set_address("1.2.3.4"); - resolvers.mutable_socket_address()->set_port_value(80); - std::vector expectedDnsResolvers; - expectedDnsResolvers.push_back(Network::Address::resolveProtoAddress(resolvers)); - + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + envoy::config::core::v3::Address resolvers; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.4", 80), + resolvers); + cares.add_resolvers()->MergeFrom(resolvers); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); // As custom resolver is specified via deprecated field `dns_resolvers` in clusters // config, the method `createDnsResolver` is called once. - EXPECT_CALL(factory_.dispatcher_, - createDnsResolver(CustomDnsResolversSizeEquals(expectedDnsResolvers), _)) + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, ProtoEq(typed_dns_resolver_config))) .WillOnce(Return(dns_resolver)); + Network::DnsResolver::ResolveCb dns_callback; + Network::MockActiveDnsQuery active_dns_query; + EXPECT_CALL(*dns_resolver, resolve(_, _, _)) + .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); + create(parseBootstrapFromV3Yaml(yaml)); + factory_.tls_.shutdownThread(); +} +// Test that custom DNS resolver is used, when custom resolver is configured +// per cluster and deprecated field `dns_resolvers` is specified with multiple resolvers. +TEST_F(ClusterManagerImplTest, CustomDnsResolverSpecifiedViaDeprecatedFieldMultipleResolvers) { + const std::string yaml = R"EOF( + static_resources: + clusters: + - name: cluster_1 + connect_timeout: 0.250s + type: STRICT_DNS + dns_resolvers: + - socket_address: + address: 1.2.3.4 + port_value: 80 + - socket_address: + address: 1.2.3.5 + port_value: 81 + )EOF"; + + std::shared_ptr dns_resolver(new Network::MockDnsResolver()); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + + envoy::config::core::v3::Address resolvers; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.4", 80), + resolvers); + cares.add_resolvers()->MergeFrom(resolvers); + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.5", 81), + resolvers); + cares.add_resolvers()->MergeFrom(resolvers); + typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); + // As custom resolver is specified via deprecated field `dns_resolvers` in clusters + // config, the method `createDnsResolver` is called once. 
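// ---------------------------------------------------------------------------
// Editorial sketch (not part of the diff): the DNS resolver tests above build
// the expected TypedExtensionConfig by hand -- PackFrom() wraps the c-ares
// config in a protobuf Any, and verifyCaresDnsConfigAndUnpack() later checks
// the type URL and unpacks it. The same pack / check-type-URL / unpack round
// trip, shown with a protobuf well-known type so it compiles without any Envoy
// protos.
#include "google/protobuf/any.pb.h"
#include "google/protobuf/duration.pb.h"
#include "gtest/gtest.h"

TEST(AnyRoundTripSketch, PackCheckTypeUrlThenUnpack) {
  google::protobuf::Duration original;
  original.set_seconds(30);

  google::protobuf::Any any;
  any.PackFrom(original);
  // The type URL encodes the packed message type, analogous to the
  // CaresDnsResolverConfig check in verifyCaresDnsConfigAndUnpack().
  EXPECT_EQ("type.googleapis.com/google.protobuf.Duration", any.type_url());

  google::protobuf::Duration unpacked;
  ASSERT_TRUE(any.UnpackTo(&unpacked));
  EXPECT_EQ(30, unpacked.seconds());
}
// ---------------------------------------------------------------------------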
+ EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, ProtoEq(typed_dns_resolver_config))) + .WillOnce(Return(dns_resolver)); Network::DnsResolver::ResolveCb dns_callback; Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) @@ -2617,16 +2671,106 @@ TEST_F(ClusterManagerImplTest, CustomDnsResolverSpecified) { )EOF"; std::shared_ptr dns_resolver(new Network::MockDnsResolver()); - auto resolvers = envoy::config::core::v3::Address(); - resolvers.mutable_socket_address()->set_address("1.2.3.4"); - resolvers.mutable_socket_address()->set_port_value(80); - std::vector expectedDnsResolvers; - expectedDnsResolvers.push_back(Network::Address::resolveProtoAddress(resolvers)); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + envoy::config::core::v3::Address resolvers; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.4", 80), + resolvers); + cares.add_resolvers()->MergeFrom(resolvers); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); // As custom resolver is specified via field `dns_resolution_config.resolvers` in clusters // config, the method `createDnsResolver` is called once. - EXPECT_CALL(factory_.dispatcher_, - createDnsResolver(CustomDnsResolversSizeEquals(expectedDnsResolvers), _)) + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, ProtoEq(typed_dns_resolver_config))) + .WillOnce(Return(dns_resolver)); + + Network::DnsResolver::ResolveCb dns_callback; + Network::MockActiveDnsQuery active_dns_query; + EXPECT_CALL(*dns_resolver, resolve(_, _, _)) + .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); + create(parseBootstrapFromV3Yaml(yaml)); + factory_.tls_.shutdownThread(); +} + +// Test that custom DNS resolver is used, when custom resolver is configured per cluster, +// and multiple resolvers are configured. +TEST_F(ClusterManagerImplTest, CustomDnsResolverSpecifiedMultipleResolvers) { + const std::string yaml = R"EOF( + static_resources: + clusters: + - name: cluster_1 + connect_timeout: 0.250s + type: STRICT_DNS + dns_resolution_config: + resolvers: + - socket_address: + address: 1.2.3.4 + port_value: 80 + - socket_address: + address: 1.2.3.5 + port_value: 81 + )EOF"; + + std::shared_ptr dns_resolver(new Network::MockDnsResolver()); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + envoy::config::core::v3::Address resolvers; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.4", 80), + resolvers); + cares.add_resolvers()->MergeFrom(resolvers); + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.5", 81), + resolvers); + cares.add_resolvers()->MergeFrom(resolvers); + typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); + + // As custom resolver is specified via field `dns_resolution_config.resolvers` in clusters + // config, the method `createDnsResolver` is called once. 
+ EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, ProtoEq(typed_dns_resolver_config))) + .WillOnce(Return(dns_resolver)); + + Network::DnsResolver::ResolveCb dns_callback; + Network::MockActiveDnsQuery active_dns_query; + EXPECT_CALL(*dns_resolver, resolve(_, _, _)) + .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); + create(parseBootstrapFromV3Yaml(yaml)); + factory_.tls_.shutdownThread(); +} + +// Test that custom DNS resolver is used and overriding the specified deprecated field +// `dns_resolvers`. +TEST_F(ClusterManagerImplTest, CustomDnsResolverSpecifiedOveridingDeprecatedResolver) { + const std::string yaml = R"EOF( + static_resources: + clusters: + - name: cluster_1 + connect_timeout: 0.250s + type: STRICT_DNS + dns_resolvers: + - socket_address: + address: 1.2.3.4 + port_value: 80 + dns_resolution_config: + resolvers: + - socket_address: + address: 1.2.3.5 + port_value: 81 + )EOF"; + + std::shared_ptr dns_resolver(new Network::MockDnsResolver()); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + envoy::config::core::v3::Address resolvers; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.5", 81), + resolvers); + cares.add_resolvers()->MergeFrom(resolvers); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); + + // As custom resolver is specified via field `dns_resolution_config.resolvers` in clusters + // config, the method `createDnsResolver` is called once. + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, ProtoEq(typed_dns_resolver_config))) .WillOnce(Return(dns_resolver)); Network::DnsResolver::ResolveCb dns_callback; @@ -2654,17 +2798,19 @@ TEST_F(ClusterManagerImplTest, UseUdpWithCustomDnsResolver) { )EOF"; std::shared_ptr dns_resolver(new Network::MockDnsResolver()); - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)) - .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(dns_resolver))); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(dns_resolver))); Network::DnsResolver::ResolveCb dns_callback; Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); create(parseBootstrapFromV3Yaml(yaml)); - // `false` here means use_tcp_for_dns_lookups is not being set via bootstrap config - EXPECT_EQ(false, dns_resolver_options.use_tcp_for_dns_lookups()); + + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + EXPECT_EQ(false, cares.dns_resolver_options().use_tcp_for_dns_lookups()); factory_.tls_.shutdownThread(); } @@ -2685,17 +2831,19 @@ TEST_F(ClusterManagerImplTest, UseTcpWithCustomDnsResolverViaDeprecatedField) { )EOF"; std::shared_ptr dns_resolver(new Network::MockDnsResolver()); - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)) - .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(dns_resolver))); + envoy::config::core::v3::TypedExtensionConfig 
typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(dns_resolver))); Network::DnsResolver::ResolveCb dns_callback; Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); create(parseBootstrapFromV3Yaml(yaml)); - // `true` here means use_tcp_for_dns_lookups is set to true - EXPECT_EQ(true, dns_resolver_options.use_tcp_for_dns_lookups()); + + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + EXPECT_EQ(true, cares.dns_resolver_options().use_tcp_for_dns_lookups()); factory_.tls_.shutdownThread(); } @@ -2720,17 +2868,18 @@ TEST_F(ClusterManagerImplTest, UseUdpWithCustomDnsResolverDeprecatedFieldOverrid )EOF"; std::shared_ptr dns_resolver(new Network::MockDnsResolver()); - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)) - .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(dns_resolver))); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(dns_resolver))); Network::DnsResolver::ResolveCb dns_callback; Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); create(parseBootstrapFromV3Yaml(yaml)); - // `false` here means dns_resolver_options.use_tcp_for_dns_lookups is set to false. - EXPECT_EQ(false, dns_resolver_options.use_tcp_for_dns_lookups()); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + EXPECT_EQ(false, cares.dns_resolver_options().use_tcp_for_dns_lookups()); factory_.tls_.shutdownThread(); } @@ -2755,17 +2904,19 @@ TEST_F(ClusterManagerImplTest, UseTcpWithCustomDnsResolverDeprecatedFieldOverrid )EOF"; std::shared_ptr dns_resolver(new Network::MockDnsResolver()); - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)) - .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(dns_resolver))); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(dns_resolver))); Network::DnsResolver::ResolveCb dns_callback; Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); create(parseBootstrapFromV3Yaml(yaml)); - // `true` here means dns_resolver_options.use_tcp_for_dns_lookups is set to true. 
- EXPECT_EQ(true, dns_resolver_options.use_tcp_for_dns_lookups()); + + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + EXPECT_EQ(true, cares.dns_resolver_options().use_tcp_for_dns_lookups()); factory_.tls_.shutdownThread(); } @@ -2789,17 +2940,19 @@ TEST_F(ClusterManagerImplTest, UseTcpWithCustomDnsResolver) { )EOF"; std::shared_ptr dns_resolver(new Network::MockDnsResolver()); - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)) - .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(dns_resolver))); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(dns_resolver))); Network::DnsResolver::ResolveCb dns_callback; Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); create(parseBootstrapFromV3Yaml(yaml)); - // `true` here means dns_resolver_options.use_tcp_for_dns_lookups is set to true. - EXPECT_EQ(true, dns_resolver_options.use_tcp_for_dns_lookups()); + + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + EXPECT_EQ(true, cares.dns_resolver_options().use_tcp_for_dns_lookups()); factory_.tls_.shutdownThread(); } @@ -2820,17 +2973,19 @@ TEST_F(ClusterManagerImplTest, DefaultSearchDomainWithCustomDnsResolver) { )EOF"; std::shared_ptr dns_resolver(new Network::MockDnsResolver()); - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)) - .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(dns_resolver))); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(dns_resolver))); Network::DnsResolver::ResolveCb dns_callback; Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); create(parseBootstrapFromV3Yaml(yaml)); - // `false` here means no_default_search_domain is not being set via bootstrap config - EXPECT_EQ(false, dns_resolver_options.no_default_search_domain()); + + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + EXPECT_EQ(false, cares.dns_resolver_options().no_default_search_domain()); factory_.tls_.shutdownThread(); } @@ -2853,17 +3008,19 @@ TEST_F(ClusterManagerImplTest, DefaultSearchDomainWithCustomDnsResolverWithConfi )EOF"; std::shared_ptr dns_resolver(new Network::MockDnsResolver()); - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)) - .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(dns_resolver))); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(dns_resolver))); Network::DnsResolver::ResolveCb dns_callback; Network::MockActiveDnsQuery active_dns_query; 
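// ---------------------------------------------------------------------------
// Editorial sketch (not part of the diff): these tests repeatedly use the
// gmock idiom DoAll(SaveArg<N>(&out), Return(value)) to capture the
// TypedExtensionConfig argument passed to createDnsResolver() and inspect it
// after the call. A minimal standalone example of the same capture pattern;
// the Factory/MockFactory interface here is hypothetical and exists only for
// illustration.
#include <string>

#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::_;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SaveArg;

class FactorySketch {
public:
  virtual ~FactorySketch() = default;
  virtual int create(int flags, const std::string& config) = 0;
};

class MockFactorySketch : public FactorySketch {
public:
  MOCK_METHOD(int, create, (int flags, const std::string& config), (override));
};

TEST(SaveArgSketch, CapturesConfigPassedToFactory) {
  MockFactorySketch factory;
  std::string captured;
  // Capture the second argument (index 1), much like SaveArg<2> captures the
  // third createDnsResolver() argument above, while still returning a value.
  EXPECT_CALL(factory, create(_, _)).WillOnce(DoAll(SaveArg<1>(&captured), Return(42)));
  EXPECT_EQ(42, factory.create(7, "use_tcp_for_dns_lookups: true"));
  EXPECT_EQ("use_tcp_for_dns_lookups: true", captured);
}
// ---------------------------------------------------------------------------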
EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); create(parseBootstrapFromV3Yaml(yaml)); - // `false` here means dns_resolver_options.no_default_search_domain is set to false. - EXPECT_EQ(false, dns_resolver_options.no_default_search_domain()); + + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + EXPECT_EQ(false, cares.dns_resolver_options().no_default_search_domain()); factory_.tls_.shutdownThread(); } @@ -2886,17 +3043,248 @@ TEST_F(ClusterManagerImplTest, NoDefaultSearchDomainWithCustomDnsResolver) { )EOF"; std::shared_ptr dns_resolver(new Network::MockDnsResolver()); - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)) - .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(dns_resolver))); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(dns_resolver))); + + Network::DnsResolver::ResolveCb dns_callback; + Network::MockActiveDnsQuery active_dns_query; + EXPECT_CALL(*dns_resolver, resolve(_, _, _)) + .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); + create(parseBootstrapFromV3Yaml(yaml)); + + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + EXPECT_EQ(true, cares.dns_resolver_options().no_default_search_domain()); + factory_.tls_.shutdownThread(); +} + +// Test that typed_dns_resolver_config is specified and is used. +TEST_F(ClusterManagerImplTest, TypedDnsResolverConfigSpecified) { + const std::string yaml = R"EOF( + static_resources: + clusters: + - name: cluster_1 + connect_timeout: 0.250s + type: STRICT_DNS + typed_dns_resolver_config: + name: envoy.network.dns_resolver.cares + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig + resolvers: + - socket_address: + address: "1.2.3.4" + port_value: 80 + dns_resolver_options: + use_tcp_for_dns_lookups: true + no_default_search_domain: true + )EOF"; + + std::shared_ptr dns_resolver(new Network::MockDnsResolver()); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(dns_resolver))); + + Network::DnsResolver::ResolveCb dns_callback; + Network::MockActiveDnsQuery active_dns_query; + EXPECT_CALL(*dns_resolver, resolve(_, _, _)) + .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); + create(parseBootstrapFromV3Yaml(yaml)); + + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + envoy::config::core::v3::Address resolvers; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.4", 80), + resolvers); + EXPECT_EQ(true, cares.dns_resolver_options().use_tcp_for_dns_lookups()); + EXPECT_EQ(true, cares.dns_resolver_options().no_default_search_domain()); + EXPECT_EQ(true, TestUtility::protoEqual(cares.resolvers(0), resolvers)); + factory_.tls_.shutdownThread(); +} + +// Test that resolvers in typed_dns_resolver_config is specified and is used. 
+TEST_F(ClusterManagerImplTest, TypedDnsResolverConfigResolversSpecified) { + const std::string yaml = R"EOF( + static_resources: + clusters: + - name: cluster_1 + connect_timeout: 0.250s + type: STRICT_DNS + typed_dns_resolver_config: + name: envoy.network.dns_resolver.cares + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig + resolvers: + - socket_address: + address: "1.2.3.4" + port_value: 80 + )EOF"; + + std::shared_ptr dns_resolver(new Network::MockDnsResolver()); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(dns_resolver))); + + Network::DnsResolver::ResolveCb dns_callback; + Network::MockActiveDnsQuery active_dns_query; + EXPECT_CALL(*dns_resolver, resolve(_, _, _)) + .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); + create(parseBootstrapFromV3Yaml(yaml)); + + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + envoy::config::core::v3::Address resolvers; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.4", 80), + resolvers); + EXPECT_EQ(false, cares.dns_resolver_options().use_tcp_for_dns_lookups()); + EXPECT_EQ(false, cares.dns_resolver_options().no_default_search_domain()); + EXPECT_EQ(true, TestUtility::protoEqual(cares.resolvers(0), resolvers)); + factory_.tls_.shutdownThread(); +} + +// Test that multiple resolvers in typed_dns_resolver_config is specified and is used. +TEST_F(ClusterManagerImplTest, TypedDnsResolverConfigMultipleResolversSpecified) { + const std::string yaml = R"EOF( + static_resources: + clusters: + - name: cluster_1 + connect_timeout: 0.250s + type: STRICT_DNS + typed_dns_resolver_config: + name: envoy.network.dns_resolver.cares + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig + resolvers: + - socket_address: + address: "1.2.3.4" + port_value: 80 + - socket_address: + address: "1.2.3.5" + port_value: 81 + )EOF"; + + std::shared_ptr dns_resolver(new Network::MockDnsResolver()); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(dns_resolver))); Network::DnsResolver::ResolveCb dns_callback; Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); create(parseBootstrapFromV3Yaml(yaml)); - // `true` here means dns_resolver_options.no_default_search_domain is set to true. 
- EXPECT_EQ(true, dns_resolver_options.no_default_search_domain()); + + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + envoy::config::core::v3::Address resolvers; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.4", 80), + resolvers); + EXPECT_EQ(false, cares.dns_resolver_options().use_tcp_for_dns_lookups()); + EXPECT_EQ(false, cares.dns_resolver_options().no_default_search_domain()); + EXPECT_EQ(true, TestUtility::protoEqual(cares.resolvers(0), resolvers)); + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.5", 81), + resolvers); + EXPECT_EQ(true, TestUtility::protoEqual(cares.resolvers(1), resolvers)); + factory_.tls_.shutdownThread(); +} + +// Test that dns_resolver_options in typed_dns_resolver_config is specified and is used. +TEST_F(ClusterManagerImplTest, TypedDnsResolverConfigResolverOptionsSpecified) { + const std::string yaml = R"EOF( + static_resources: + clusters: + - name: cluster_1 + connect_timeout: 0.250s + type: STRICT_DNS + typed_dns_resolver_config: + name: envoy.network.dns_resolver.cares + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig + dns_resolver_options: + use_tcp_for_dns_lookups: true + no_default_search_domain: true + )EOF"; + + std::shared_ptr dns_resolver(new Network::MockDnsResolver()); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(dns_resolver))); + + Network::DnsResolver::ResolveCb dns_callback; + Network::MockActiveDnsQuery active_dns_query; + EXPECT_CALL(*dns_resolver, resolve(_, _, _)) + .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); + create(parseBootstrapFromV3Yaml(yaml)); + + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + EXPECT_EQ(true, cares.dns_resolver_options().use_tcp_for_dns_lookups()); + EXPECT_EQ(true, cares.dns_resolver_options().no_default_search_domain()); + EXPECT_EQ(0, cares.resolvers().size()); + factory_.tls_.shutdownThread(); +} + +// Test that when typed_dns_resolver_config is specified, it is used. All other deprecated +// configurations are ignored, which includes dns_resolvers, use_tcp_for_dns_lookups, and +// dns_resolution_config. 
+TEST_F(ClusterManagerImplTest, TypedDnsResolverConfigSpecifiedOveridingDeprecatedConfig) { + const std::string yaml = R"EOF( + static_resources: + clusters: + - name: cluster_1 + connect_timeout: 0.250s + type: STRICT_DNS + dns_resolvers: + - socket_address: + address: 1.2.3.4 + port_value: 80 + use_tcp_for_dns_lookups: false + dns_resolution_config: + resolvers: + - socket_address: + address: 1.2.3.5 + port_value: 81 + dns_resolver_options: + use_tcp_for_dns_lookups: false + no_default_search_domain: false + typed_dns_resolver_config: + name: envoy.network.dns_resolver.cares + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig + resolvers: + - socket_address: + address: "9.10.11.12" + port_value: 100 + - socket_address: + address: "5.6.7.8" + port_value: 200 + dns_resolver_options: + use_tcp_for_dns_lookups: true + no_default_search_domain: true + )EOF"; + + std::shared_ptr dns_resolver(new Network::MockDnsResolver()); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(dns_resolver))); + + Network::DnsResolver::ResolveCb dns_callback; + Network::MockActiveDnsQuery active_dns_query; + EXPECT_CALL(*dns_resolver, resolve(_, _, _)) + .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); + create(parseBootstrapFromV3Yaml(yaml)); + + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); + envoy::config::core::v3::Address resolvers; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("9.10.11.12", 100), + resolvers); + EXPECT_EQ(true, cares.dns_resolver_options().use_tcp_for_dns_lookups()); + EXPECT_EQ(true, cares.dns_resolver_options().no_default_search_domain()); + EXPECT_EQ(true, TestUtility::protoEqual(cares.resolvers(0), resolvers)); + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("5.6.7.8", 200), + resolvers); + EXPECT_EQ(true, TestUtility::protoEqual(cares.resolvers(1), resolvers)); factory_.tls_.shutdownThread(); } @@ -2929,7 +3317,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveDefaultPriority) { )EOF"; std::shared_ptr dns_resolver(new Network::MockDnsResolver()); - EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)).WillOnce(Return(dns_resolver)); + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)).WillOnce(Return(dns_resolver)); Network::DnsResolver::ResolveCb dns_callback; Event::MockTimer* dns_timer_ = new NiceMock(&factory_.dispatcher_); @@ -3021,7 +3409,7 @@ TEST_F(ClusterManagerImplTest, ConnPoolDestroyWithDraining) { )EOF"; std::shared_ptr dns_resolver(new Network::MockDnsResolver()); - EXPECT_CALL(factory_.dispatcher_, createDnsResolver(_, _)).WillOnce(Return(dns_resolver)); + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)).WillOnce(Return(dns_resolver)); Network::DnsResolver::ResolveCb dns_callback; Event::MockTimer* dns_timer_ = new NiceMock(&factory_.dispatcher_); diff --git a/test/common/upstream/eds_speed_test.cc b/test/common/upstream/eds_speed_test.cc index 0eb5fae8bd60..c3d6a43e27ef 100644 --- a/test/common/upstream/eds_speed_test.cc +++ b/test/common/upstream/eds_speed_test.cc @@ -12,6 +12,7 @@ #include "source/common/config/grpc_subscription_impl.h" #include "source/common/config/protobuf_link_hacks.h" #include "source/common/config/utility.h" 
+#include "source/common/config/xds_mux/grpc_mux_impl.h" #include "source/common/singleton/manager_impl.h" #include "source/common/upstream/eds.h" #include "source/server/transport_socket_config_impl.h" @@ -39,16 +40,24 @@ namespace Upstream { class EdsSpeedTest { public: - EdsSpeedTest(State& state) - : state_(state), + EdsSpeedTest(State& state, bool use_unified_mux) + : state_(state), use_unified_mux_(use_unified_mux), type_url_("type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment"), subscription_stats_(Config::Utility::generateStats(stats_)), - api_(Api::createApiForTest(stats_)), async_client_(new Grpc::MockAsyncClient()), - grpc_mux_(new Config::GrpcMuxImpl( - local_info_, std::unique_ptr(async_client_), dispatcher_, - *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints"), - random_, stats_, {}, true)) { + api_(Api::createApiForTest(stats_)), async_client_(new Grpc::MockAsyncClient()) { + if (use_unified_mux_) { + grpc_mux_.reset(new Config::XdsMux::GrpcMuxSotw( + std::unique_ptr(async_client_), dispatcher_, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints"), + random_, stats_, {}, local_info_, true)); + } else { + grpc_mux_.reset(new Config::GrpcMuxImpl( + local_info_, std::unique_ptr(async_client_), dispatcher_, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints"), + random_, stats_, {}, true)); + } resetCluster(R"EOF( name: name connect_timeout: 0.25s @@ -128,13 +137,22 @@ class EdsSpeedTest { auto* resource = response->mutable_resources()->Add(); resource->PackFrom(cluster_load_assignment); state_.ResumeTiming(); - grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); + if (use_unified_mux_) { + dynamic_cast(*grpc_mux_) + .grpcStreamForTest() + .onReceiveMessage(std::move(response)); + } else { + dynamic_cast(*grpc_mux_) + .grpcStreamForTest() + .onReceiveMessage(std::move(response)); + } ASSERT(cluster_->prioritySet().hostSetsPerPriority()[1]->hostsPerLocality().get()[0].size() == num_hosts); } TestDeprecatedV2Api _deprecated_v2_api_; State& state_; + bool use_unified_mux_; const std::string type_url_; uint64_t version_{}; bool initialized_{}; @@ -159,7 +177,7 @@ class EdsSpeedTest { Server::MockOptions options_; Grpc::MockAsyncClient* async_client_; NiceMock async_stream_; - Config::GrpcMuxImplSharedPtr grpc_mux_; + Config::GrpcMuxSharedPtr grpc_mux_; Config::GrpcSubscriptionImplPtr subscription_; }; @@ -170,8 +188,8 @@ static void priorityAndLocalityWeighted(State& state) { Envoy::Thread::MutexBasicLockable lock; Envoy::Logger::Context logging_state(spdlog::level::warn, Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); - for (auto _ : state) { - Envoy::Upstream::EdsSpeedTest speed_test(state); + for (auto _ : state) { // NOLINT: Silences warning about dead store + Envoy::Upstream::EdsSpeedTest speed_test(state, state.range(2)); // if we've been instructed to skip tests, only run once no matter the argument: uint32_t endpoints = skipExpensiveBenchmarks() ? 
1 : state.range(1); @@ -180,7 +198,7 @@ static void priorityAndLocalityWeighted(State& state) { } BENCHMARK(priorityAndLocalityWeighted) - ->Ranges({{false, true}, {1, 100000}}) + ->Ranges({{false, true}, {1, 100000}, {false, true}}) ->Unit(benchmark::kMillisecond); static void duplicateUpdate(State& state) { @@ -188,8 +206,8 @@ static void duplicateUpdate(State& state) { Envoy::Logger::Context logging_state(spdlog::level::warn, Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); - for (auto _ : state) { - Envoy::Upstream::EdsSpeedTest speed_test(state); + for (auto _ : state) { // NOLINT: Silences warning about dead store + Envoy::Upstream::EdsSpeedTest speed_test(state, state.range(1)); uint32_t endpoints = skipExpensiveBenchmarks() ? 1 : state.range(0); speed_test.priorityAndLocalityWeightedHelper(true, endpoints, true); @@ -197,14 +215,14 @@ static void duplicateUpdate(State& state) { } } -BENCHMARK(duplicateUpdate)->Range(1, 100000)->Unit(benchmark::kMillisecond); +BENCHMARK(duplicateUpdate)->Ranges({{1, 100000}, {false, true}})->Unit(benchmark::kMillisecond); static void healthOnlyUpdate(State& state) { Envoy::Thread::MutexBasicLockable lock; Envoy::Logger::Context logging_state(spdlog::level::warn, Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); - for (auto _ : state) { - Envoy::Upstream::EdsSpeedTest speed_test(state); + for (auto _ : state) { // NOLINT: Silences warning about dead store + Envoy::Upstream::EdsSpeedTest speed_test(state, state.range(1)); uint32_t endpoints = skipExpensiveBenchmarks() ? 1 : state.range(0); speed_test.priorityAndLocalityWeightedHelper(true, endpoints, true); @@ -212,4 +230,4 @@ static void healthOnlyUpdate(State& state) { } } -BENCHMARK(healthOnlyUpdate)->Range(1, 100000)->Unit(benchmark::kMillisecond); +BENCHMARK(healthOnlyUpdate)->Ranges({{1, 100000}, {false, true}})->Unit(benchmark::kMillisecond); diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index a3d987500aeb..b4eb1164f15f 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -355,9 +355,8 @@ TEST_F(EdsTest, EdsClusterFromFileIsPrimaryCluster) { EXPECT_TRUE(initialized_); } -namespace { - -void endpointWeightChangeCausesRebuildTest(EdsTest& test, bool expect_rebuild) { +// Verify that host weight changes cause a full rebuild. 
+TEST_F(EdsTest, EndpointWeightChangeCausesRebuild) { envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; cluster_load_assignment.set_cluster_name("fare"); auto* endpoints = cluster_load_assignment.add_endpoints(); @@ -366,45 +365,28 @@ void endpointWeightChangeCausesRebuildTest(EdsTest& test, bool expect_rebuild) { endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_port_value(80); endpoint->mutable_load_balancing_weight()->set_value(30); - test.initialize(); - test.doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); - EXPECT_TRUE(test.initialized_); - EXPECT_EQ(0UL, test.stats_.counter("cluster.name.update_no_rebuild").value()); - EXPECT_EQ(30UL, - test.stats_.gauge("cluster.name.max_host_weight", Stats::Gauge::ImportMode::Accumulate) - .value()); - auto& hosts = test.cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); + initialize(); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); + EXPECT_TRUE(initialized_); + EXPECT_EQ(0UL, stats_.counter("cluster.name.update_no_rebuild").value()); + EXPECT_EQ( + 30UL, + stats_.gauge("cluster.name.max_host_weight", Stats::Gauge::ImportMode::Accumulate).value()); + auto& hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); EXPECT_EQ(hosts.size(), 1); EXPECT_EQ(hosts[0]->weight(), 30); endpoint->mutable_load_balancing_weight()->set_value(31); - test.doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); - EXPECT_EQ(expect_rebuild ? 0UL : 1UL, - test.stats_.counter("cluster.name.update_no_rebuild").value()); - EXPECT_EQ(31UL, - test.stats_.gauge("cluster.name.max_host_weight", Stats::Gauge::ImportMode::Accumulate) - .value()); - auto& new_hosts = test.cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); + EXPECT_EQ(0UL, stats_.counter("cluster.name.update_no_rebuild").value()); + EXPECT_EQ( + 31UL, + stats_.gauge("cluster.name.max_host_weight", Stats::Gauge::ImportMode::Accumulate).value()); + auto& new_hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); EXPECT_EQ(new_hosts.size(), 1); EXPECT_EQ(new_hosts[0]->weight(), 31); } -} // namespace - -// Verify that host weight changes cause a full rebuild. -TEST_F(EdsTest, EndpointWeightChangeCausesRebuild) { - endpointWeightChangeCausesRebuildTest(*this, true); -} - -// Verify that host weight changes do not cause a full rebuild when the feature flag is disabled. -TEST_F(EdsTest, EndpointWeightChangeCausesRebuildDisabled) { - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.upstream_host_weight_change_causes_rebuild", "false"}}); - - endpointWeightChangeCausesRebuildTest(*this, false); -} - // Validate that onConfigUpdate() updates the endpoint metadata. TEST_F(EdsTest, EndpointMetadata) { envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; @@ -2308,6 +2290,29 @@ TEST_F(EdsAssignmentTimeoutTest, AssignmentLeaseExpired) { } } +// Validate that onConfigUpdate() with a config that contains both LEDS config +// source and explicit list of endpoints is rejected. +TEST_F(EdsTest, OnConfigUpdateLedsAndEndpoints) { + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + // Add an endpoint. 
+ auto* endpoints = cluster_load_assignment.add_endpoints(); + auto* endpoint = endpoints->add_lb_endpoints(); + endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_address("1.2.3.4"); + endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_port_value(80); + // Configure an LEDS data source. + auto* leds_conf = endpoints->mutable_leds_cluster_locality_config(); + leds_conf->set_leds_collection_name("xdstp://foo/leds/collection"); + initialize(); + + const auto decoded_resources = + TestUtility::decodeResources({cluster_load_assignment}, "cluster_name"); + EXPECT_THROW_WITH_MESSAGE(eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, + "A ClusterLoadAssignment for cluster fare cannot include both LEDS " + "(resource: xdstp://foo/leds/collection) and a list of endpoints."); +} + } // namespace } // namespace Upstream } // namespace Envoy diff --git a/test/common/upstream/hds_test.cc b/test/common/upstream/hds_test.cc index 48c673784d2b..23da61c810cb 100644 --- a/test/common/upstream/hds_test.cc +++ b/test/common/upstream/hds_test.cc @@ -313,6 +313,36 @@ TEST_F(HdsTest, TestProcessMessageEndpoints) { } } +TEST_F(HdsTest, TestHdsCluster) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + message = std::make_unique(); + message->mutable_interval()->set_seconds(1); + + auto* health_check = message->add_cluster_health_checks(); + health_check->set_cluster_name("test_cluster"); + auto* address = health_check->add_locality_endpoints()->add_endpoints()->mutable_address(); + address->mutable_socket_address()->set_address("127.0.0.2"); + address->mutable_socket_address()->set_port_value(1234); + + // Process message + EXPECT_CALL(test_factory_, createClusterInfo(_)).WillOnce(Return(cluster_info_)); + hds_delegate_friend_.processPrivateMessage(*hds_delegate_, std::move(message)); + + EXPECT_EQ(hds_delegate_->hdsClusters()[0]->initializePhase(), + Upstream::Cluster::InitializePhase::Primary); + + // HdsCluster uses health_checkers_ instead. + EXPECT_TRUE(hds_delegate_->hdsClusters()[0]->healthChecker() == nullptr); + + // outlier detector is always null for HdsCluster. 
+ EXPECT_TRUE(hds_delegate_->hdsClusters()[0]->outlierDetector() == nullptr); + const auto* hds_cluster = hds_delegate_->hdsClusters()[0].get(); + EXPECT_TRUE(hds_cluster->outlierDetector() == nullptr); +} + // Test if processMessage processes health checks from a HealthCheckSpecifier // message correctly TEST_F(HdsTest, TestProcessMessageHealthChecks) { diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index cc5fa210e69b..ba2dfe80fce2 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -3135,14 +3135,17 @@ TEST(HttpStatusChecker, Default) { path: /healthcheck )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); + conf.http_health_check().expected_statuses(), conf.http_health_check().retriable_statuses(), + 200); - EXPECT_TRUE(http_status_checker.inRange(200)); - EXPECT_FALSE(http_status_checker.inRange(204)); + EXPECT_TRUE(http_status_checker.inExpectedRanges(200)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(204)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(200)); } -TEST(HttpStatusChecker, Single100) { +TEST(HttpStatusChecker, SingleExpected100) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3157,17 +3160,44 @@ TEST(HttpStatusChecker, Single100) { end: 101 )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); + conf.http_health_check().expected_statuses(), conf.http_health_check().retriable_statuses(), + 200); - EXPECT_FALSE(http_status_checker.inRange(200)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(200)); - EXPECT_FALSE(http_status_checker.inRange(99)); - EXPECT_TRUE(http_status_checker.inRange(100)); - EXPECT_FALSE(http_status_checker.inRange(101)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(99)); + EXPECT_TRUE(http_status_checker.inExpectedRanges(100)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(101)); } -TEST(HttpStatusChecker, Single599) { +TEST(HttpStatusChecker, SingleRetriable100) { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + retriable_statuses: + - start: 100 + end: 101 + )EOF"; + + auto conf = parseHealthCheckFromV3Yaml(yaml); + HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( + conf.http_health_check().expected_statuses(), conf.http_health_check().retriable_statuses(), + 200); + + EXPECT_FALSE(http_status_checker.inRetriableRanges(99)); + EXPECT_TRUE(http_status_checker.inRetriableRanges(100)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(101)); +} + +TEST(HttpStatusChecker, SingleExpected599) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3182,17 +3212,44 @@ TEST(HttpStatusChecker, Single599) { end: 600 )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); + conf.http_health_check().expected_statuses(), conf.http_health_check().retriable_statuses(), + 200); + + EXPECT_FALSE(http_status_checker.inExpectedRanges(200)); + + 
EXPECT_FALSE(http_status_checker.inExpectedRanges(598)); + EXPECT_TRUE(http_status_checker.inExpectedRanges(599)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(600)); +} + +TEST(HttpStatusChecker, SingleRetriable599) { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + retriable_statuses: + - start: 599 + end: 600 + )EOF"; - EXPECT_FALSE(http_status_checker.inRange(200)); + auto conf = parseHealthCheckFromV3Yaml(yaml); + HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( + conf.http_health_check().expected_statuses(), conf.http_health_check().retriable_statuses(), + 200); - EXPECT_FALSE(http_status_checker.inRange(598)); - EXPECT_TRUE(http_status_checker.inRange(599)); - EXPECT_FALSE(http_status_checker.inRange(600)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(598)); + EXPECT_TRUE(http_status_checker.inRetriableRanges(599)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(600)); } -TEST(HttpStatusChecker, Ranges_204_304) { +TEST(HttpStatusChecker, ExpectedRanges_204_304) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3209,20 +3266,52 @@ TEST(HttpStatusChecker, Ranges_204_304) { end: 305 )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); + conf.http_health_check().expected_statuses(), conf.http_health_check().retriable_statuses(), + 200); + + EXPECT_FALSE(http_status_checker.inExpectedRanges(200)); + + EXPECT_FALSE(http_status_checker.inExpectedRanges(203)); + EXPECT_TRUE(http_status_checker.inExpectedRanges(204)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(205)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(303)); + EXPECT_TRUE(http_status_checker.inExpectedRanges(304)); + EXPECT_FALSE(http_status_checker.inExpectedRanges(305)); +} + +TEST(HttpStatusChecker, RetriableRanges_304_404) { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + retriable_statuses: + - start: 304 + end: 305 + - start: 404 + end: 405 + )EOF"; - EXPECT_FALSE(http_status_checker.inRange(200)); + auto conf = parseHealthCheckFromV3Yaml(yaml); + HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( + conf.http_health_check().expected_statuses(), conf.http_health_check().retriable_statuses(), + 200); - EXPECT_FALSE(http_status_checker.inRange(203)); - EXPECT_TRUE(http_status_checker.inRange(204)); - EXPECT_FALSE(http_status_checker.inRange(205)); - EXPECT_FALSE(http_status_checker.inRange(303)); - EXPECT_TRUE(http_status_checker.inRange(304)); - EXPECT_FALSE(http_status_checker.inRange(305)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(303)); + EXPECT_TRUE(http_status_checker.inRetriableRanges(304)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(305)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(403)); + EXPECT_TRUE(http_status_checker.inRetriableRanges(404)); + EXPECT_FALSE(http_status_checker.inRetriableRanges(405)); } -TEST(HttpStatusChecker, Below100) { +TEST(HttpStatusChecker, ExpectedBelow100) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3237,13 +3326,40 @@ TEST(HttpStatusChecker, Below100) { end: 100 )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); + 
EXPECT_THROW_WITH_MESSAGE( + HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( + conf.http_health_check().expected_statuses(), + conf.http_health_check().retriable_statuses(), 200), + EnvoyException, + "Invalid http expected status range: expecting start >= 100, but found start=99"); +} + +TEST(HttpStatusChecker, RetriableBelow100) { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + retriable_statuses: + - start: 99 + end: 100 + )EOF"; + + auto conf = parseHealthCheckFromV3Yaml(yaml); EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), - EnvoyException, "Invalid http status range: expecting start >= 100, but found start=99"); + conf.http_health_check().expected_statuses(), + conf.http_health_check().retriable_statuses(), 200), + EnvoyException, + "Invalid http retriable status range: expecting start >= 100, but found start=99"); } -TEST(HttpStatusChecker, Above599) { +TEST(HttpStatusChecker, ExpectedAbove599) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3258,13 +3374,16 @@ TEST(HttpStatusChecker, Above599) { end: 601 )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), - EnvoyException, "Invalid http status range: expecting end <= 600, but found end=601"); + conf.http_health_check().expected_statuses(), + conf.http_health_check().retriable_statuses(), 200), + EnvoyException, + "Invalid http expected status range: expecting end <= 600, but found end=601"); } -TEST(HttpStatusChecker, InvalidRange) { +TEST(HttpStatusChecker, RetriableAbove599) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3274,19 +3393,21 @@ TEST(HttpStatusChecker, InvalidRange) { service_name_matcher: prefix: locations path: /healthchecka - expected_statuses: - - start: 200 - end: 200 + retriable_statuses: + - start: 600 + end: 601 )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), + conf.http_health_check().expected_statuses(), + conf.http_health_check().retriable_statuses(), 200), EnvoyException, - "Invalid http status range: expecting start < end, but found start=200 and end=200"); + "Invalid http retriable status range: expecting end <= 600, but found end=601"); } -TEST(HttpStatusChecker, InvalidRange2) { +TEST(HttpStatusChecker, InvalidExpectedRange) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3297,15 +3418,41 @@ TEST(HttpStatusChecker, InvalidRange2) { prefix: locations path: /healthchecka expected_statuses: - - start: 201 + - start: 200 end: 200 )EOF"; + auto conf = parseHealthCheckFromV3Yaml(yaml); EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), + conf.http_health_check().expected_statuses(), + conf.http_health_check().retriable_statuses(), 200), EnvoyException, - "Invalid http status range: expecting start < end, but found start=201 and end=200"); + "Invalid http expected status range: expecting start < end, but found 
start=200 and end=200"); +} + +TEST(HttpStatusChecker, InvalidRetriableRange) { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthchecka + retriable_statuses: + - start: 200 + end: 200 + )EOF"; + + auto conf = parseHealthCheckFromV3Yaml(yaml); + EXPECT_THROW_WITH_MESSAGE(HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( + conf.http_health_check().expected_statuses(), + conf.http_health_check().retriable_statuses(), 200), + EnvoyException, + "Invalid http retriable status range: expecting start < end, but found " + "start=200 and end=200"); } TEST(TcpHealthCheckMatcher, loadJsonBytes) { diff --git a/test/common/upstream/least_request_load_balancer_fuzz_test.cc b/test/common/upstream/least_request_load_balancer_fuzz_test.cc index 85b0689f4d1e..2bc4958d44e4 100644 --- a/test/common/upstream/least_request_load_balancer_fuzz_test.cc +++ b/test/common/upstream/least_request_load_balancer_fuzz_test.cc @@ -65,7 +65,7 @@ DEFINE_PROTO_FUZZER(const test::common::upstream::LeastRequestLoadBalancerTestCa zone_aware_load_balancer_fuzz.stats_, zone_aware_load_balancer_fuzz.runtime_, zone_aware_load_balancer_fuzz.random_, zone_aware_load_balancer_test_case.load_balancer_test_case().common_lb_config(), - input.least_request_lb_config()); + input.least_request_lb_config(), zone_aware_load_balancer_fuzz.simTime()); } catch (EnvoyException& e) { ENVOY_LOG_MISC(debug, "EnvoyException; {}", e.what()); removeRequestsActiveForStaticHosts(zone_aware_load_balancer_fuzz.priority_set_); diff --git a/test/common/upstream/load_balancer_benchmark.cc b/test/common/upstream/load_balancer_benchmark.cc index 22f58f0f9337..6a855bae9aac 100644 --- a/test/common/upstream/load_balancer_benchmark.cc +++ b/test/common/upstream/load_balancer_benchmark.cc @@ -71,6 +71,7 @@ class BaseTester : public Event::TestUsingSimulatedTime { NiceMock runtime_; Random::RandomGeneratorImpl random_; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; + envoy::config::cluster::v3::Cluster::RoundRobinLbConfig round_robin_lb_config_; std::shared_ptr info_{new NiceMock()}; }; @@ -81,7 +82,8 @@ class RoundRobinTester : public BaseTester { void initialize() { lb_ = std::make_unique(priority_set_, &local_priority_set_, stats_, - runtime_, random_, common_config_); + runtime_, random_, common_config_, + round_robin_lb_config_, simTime()); } std::unique_ptr lb_; @@ -92,9 +94,9 @@ class LeastRequestTester : public BaseTester { LeastRequestTester(uint64_t num_hosts, uint32_t choice_count) : BaseTester(num_hosts) { envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; lr_lb_config.mutable_choice_count()->set_value(choice_count); - lb_ = - std::make_unique(priority_set_, &local_priority_set_, stats_, - runtime_, random_, common_config_, lr_lb_config); + lb_ = std::make_unique(priority_set_, &local_priority_set_, stats_, + runtime_, random_, common_config_, + lr_lb_config, simTime()); } std::unique_ptr lb_; @@ -541,10 +543,10 @@ class SubsetLbTester : public BaseTester { *selector->mutable_keys()->Add() = std::string(metadata_key); subset_info_ = std::make_unique(subset_config); - lb_ = std::make_unique(LoadBalancerType::Random, priority_set_, - &local_priority_set_, stats_, stats_store_, runtime_, - random_, *subset_info_, absl::nullopt, absl::nullopt, - absl::nullopt, common_config_); + lb_ = std::make_unique( + LoadBalancerType::Random, priority_set_, &local_priority_set_, 
stats_, stats_store_, + runtime_, random_, *subset_info_, absl::nullopt, absl::nullopt, absl::nullopt, + absl::nullopt, common_config_, simTime()); const HostVector& hosts = priority_set_.getOrCreateHostSet(0).hosts(); ASSERT(hosts.size() == num_hosts); diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index dea39058ef38..a21c09e641e2 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -32,6 +32,19 @@ using testing::ReturnRef; namespace Envoy { namespace Upstream { + +class EdfLoadBalancerBasePeer { +public: + static const std::chrono::milliseconds& slowStartWindow(EdfLoadBalancerBase& edf_lb) { + return edf_lb.slow_start_window_; + } + static double aggression(EdfLoadBalancerBase& edf_lb) { return edf_lb.aggression_; } + static const std::chrono::milliseconds latestHostAddedTime(EdfLoadBalancerBase& edf_lb) { + return std::chrono::time_point_cast(edf_lb.latest_host_added_time_) + .time_since_epoch(); + } +}; + namespace { static constexpr uint32_t UnhealthyStatus = 1u << static_cast(Host::Health::Unhealthy); @@ -62,6 +75,7 @@ class LoadBalancerTestBase : public Event::TestUsingSimulatedTime, std::shared_ptr info_{new NiceMock()}; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; envoy::config::cluster::v3::Cluster::LeastRequestLbConfig least_request_lb_config_; + envoy::config::cluster::v3::Cluster::RoundRobinLbConfig round_robin_lb_config_; }; class TestLb : public LoadBalancerBase { @@ -232,8 +246,8 @@ TEST_P(LoadBalancerBaseTest, PrioritySelectionFuzz) { const auto hs = lb_.chooseHostSet(&context, 0); switch (hs.second) { case LoadBalancerBase::HostAvailability::Healthy: - // Either we selected one of the healthy hosts or we failed to select anything and defaulted - // to healthy. + // Either we selected one of the healthy hosts or we failed to select anything and + // defaulted to healthy. EXPECT_TRUE(!hs.first.healthyHosts().empty() || (hs.first.healthyHosts().empty() && hs.first.degradedHosts().empty())); break; @@ -319,7 +333,9 @@ TEST_P(LoadBalancerBaseTest, GentleFailover) { // Health P=0 == 100*1.4 == 35 P=1 == 35 // Since 3 hosts are excluded, P=0 should be considered fully healthy. // Total health = 100% + 35% is greater than 100%. Panic should not trigger. - updateHostSet(host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */, 0 /* num_degraded_hosts */, + updateHostSet(host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */, 0 /* num_degraded_hosts + */ + , 3 /* num_excluded_hosts */); updateHostSet(failover_host_set_, 5 /* num_hosts */, 1 /* num_healthy_hosts */); ASSERT_THAT(getLoadPercentage(), ElementsAre(100, 0)); @@ -330,7 +346,9 @@ TEST_P(LoadBalancerBaseTest, GentleFailover) { // All priorities are in panic mode (situation called TotalPanic) // Load is distributed based on number of hosts regardless of their health status. // P=0 and P=1 have 4 hosts each so each priority will receive 50% of the traffic. - updateHostSet(host_set_, 4 /* num_hosts */, 0 /* num_healthy_hosts */, 0 /* num_degraded_hosts */, + updateHostSet(host_set_, 4 /* num_hosts */, 0 /* num_healthy_hosts */, 0 /* num_degraded_hosts + */ + , 4 /* num_excluded_hosts */); updateHostSet(failover_host_set_, 4 /* num_hosts */, 1 /* num_healthy_hosts */); ASSERT_THAT(getLoadPercentage(), ElementsAre(50, 50)); @@ -342,7 +360,9 @@ TEST_P(LoadBalancerBaseTest, GentleFailover) { // P=0 has 4 hosts with 1 excluded, P=1 has 6 hosts with 2 excluded. 
// P=0 should receive 4/(4+6)=40% of traffic // P=1 should receive 6/(4+6)=60% of traffic - updateHostSet(host_set_, 4 /* num_hosts */, 0 /* num_healthy_hosts */, 0 /* num_degraded_hosts */, + updateHostSet(host_set_, 4 /* num_hosts */, 0 /* num_healthy_hosts */, 0 /* num_degraded_hosts + */ + , 1 /* num_excluded_hosts */); updateHostSet(failover_host_set_, 6 /* num_hosts */, 1 /* num_healthy_hosts */, 0 /* num_degraded_hosts */, 2 /* num_excluded_hosts */); @@ -572,6 +592,13 @@ class ZoneAwareLoadBalancerBaseTest : public LoadBalancerTestBase { TestZoneAwareLb lb_{priority_set_, stats_, runtime_, random_, common_config_}; }; +TEST_F(ZoneAwareLoadBalancerBaseTest, BaseMethods) { + EXPECT_FALSE(lb_.lifetimeCallbacks().has_value()); + std::vector hash_key; + auto mock_host = std::make_shared>(); + EXPECT_FALSE(lb_.selectExistingConnection(nullptr, *mock_host, hash_key).has_value()); +} + TEST_F(ZoneAwareLoadBalancerBaseTest, CrossPriorityHostMapUpdate) { // Fake cross priority host map. auto host_map = std::make_shared(); @@ -646,7 +673,8 @@ class RoundRobinLoadBalancerTest : public LoadBalancerTestBase { local_priority_set_->getOrCreateHostSet(0); } lb_ = std::make_shared(priority_set_, local_priority_set_.get(), stats_, - runtime_, random_, common_config_); + runtime_, random_, common_config_, + round_robin_lb_config_, simTime()); } // Updates priority 0 with the given hosts and hosts_per_locality. @@ -1375,8 +1403,8 @@ TEST_P(RoundRobinLoadBalancerTest, LowPrecisionForDistribution) { // The following host distribution with current precision should lead to the no_capacity_left // situation. - // Reuse the same host in all of the structures below to reduce time test takes and this does not - // impact load balancing logic. + // Reuse the same host in all of the structures below to reduce time test takes and this does + // not impact load balancing logic. HostSharedPtr host = makeTestHost(info_, "tcp://127.0.0.1:80", simTime()); HostVector current(45000); @@ -1555,10 +1583,302 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingNoLocalLocality) { INSTANTIATE_TEST_SUITE_P(PrimaryOrFailover, RoundRobinLoadBalancerTest, ::testing::Values(true, false)); +TEST_P(RoundRobinLoadBalancerTest, SlowStartWithDefaultParams) { + init(false); + const auto slow_start_window = + EdfLoadBalancerBasePeer::slowStartWindow(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(0), slow_start_window); + const auto aggression = + EdfLoadBalancerBasePeer::aggression(static_cast(*lb_)); + EXPECT_EQ(1.0, aggression); + const auto latest_host_added_time = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(0), latest_host_added_time); +} + +TEST_P(RoundRobinLoadBalancerTest, SlowStartNoWait) { + round_robin_lb_config_.mutable_slow_start_config()->mutable_slow_start_window()->set_seconds(60); + simTime().advanceTimeWait(std::chrono::seconds(1)); + auto host1 = makeTestHost(info_, "tcp://127.0.0.1:80", simTime()); + host_set_.hosts_ = {host1}; + + init(true); + + // As no healthcheck is configured, hosts would enter slow start immediately. + HostVector empty; + HostVector hosts_added; + hosts_added.push_back(host1); + simTime().advanceTimeWait(std::chrono::seconds(5)); + hostSet().runCallbacks(hosts_added, empty); + auto latest_host_added_time_ms = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(1000), latest_host_added_time_ms); + + // Advance time, so that host is no longer in slow start. 
+ simTime().advanceTimeWait(std::chrono::seconds(56)); + + hosts_added.clear(); + auto host2 = makeTestHost(info_, "tcp://127.0.0.1:90", simTime()); + + hosts_added.push_back(host2); + + hostSet().healthy_hosts_ = {host1, host2}; + hostSet().hosts_ = hostSet().healthy_hosts_; + hostSet().runCallbacks(hosts_added, empty); + + latest_host_added_time_ms = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(62000), latest_host_added_time_ms); + + // host2 is 12 secs in slow start, the weight is scaled with time factor 12 / 60 == 0.2. + simTime().advanceTimeWait(std::chrono::seconds(12)); + + // Recalculate weights. + hostSet().runCallbacks(empty, empty); + + // We expect 4:1 ratio, as host2 is in slow start mode and it's weight is scaled with + // 0.2 factor. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + + // host2 is 20 secs in slow start, the weight is scaled with time factor 20 / 60 == 0.33. + simTime().advanceTimeWait(std::chrono::seconds(8)); + + // Recalculate weights. + hostSet().runCallbacks(empty, empty); + + // We expect 2:1 ratio, as host2 is in slow start mode and it's weight is scaled with + // 0.33 factor. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + + // Advance time, so that there are no hosts in slow start. + simTime().advanceTimeWait(std::chrono::seconds(45)); + + // Recalculate weights. + hostSet().runCallbacks(empty, empty); + + // Now expect 1:1 ratio. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); +} + +TEST_P(RoundRobinLoadBalancerTest, SlowStartWaitForPassingHC) { + round_robin_lb_config_.mutable_slow_start_config()->mutable_slow_start_window()->set_seconds(10); + simTime().advanceTimeWait(std::chrono::seconds(1)); + auto host1 = makeTestHost(info_, "tcp://127.0.0.1:80", simTime()); + host1->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + + host_set_.hosts_ = {host1}; + + init(true); + + HostVector empty; + HostVector hosts_added; + hosts_added.push_back(host1); + simTime().advanceTimeWait(std::chrono::seconds(1)); + hostSet().runCallbacks(hosts_added, empty); + auto latest_host_added_time_ms = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(1000), latest_host_added_time_ms); + + simTime().advanceTimeWait(std::chrono::seconds(5)); + + hosts_added.clear(); + auto host2 = makeTestHost(info_, "tcp://127.0.0.1:90", simTime()); + hosts_added.push_back(host2); + + hostSet().hosts_ = {host1, host2}; + hostSet().runCallbacks(hosts_added, empty); + + // As host1 has not passed first HC, it should not enter slow start mode. 
+ latest_host_added_time_ms = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(7000), latest_host_added_time_ms); + + simTime().advanceTimeWait(std::chrono::seconds(1)); + host1->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC); + hostSet().healthy_hosts_ = {host1, host2}; + // Trigger callbacks to add host1 to slow start mode. + hostSet().runCallbacks({}, {}); + + simTime().advanceTimeWait(std::chrono::seconds(1)); + host1->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + // Trigger callbacks to remove host1 from slow start mode. + hostSet().runCallbacks({}, {}); + simTime().advanceTimeWait(std::chrono::seconds(4)); + // Trigger callbacks to remove host1 from slow start mode. + hostSet().runCallbacks({}, {}); + + // We expect 3:1 ratio, as host2 is in slow start mode, its weight is scaled with time factor + // 5 / 10 == 0.5. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + + // Advance time, so there are no hosts in slow start. + simTime().advanceTimeWait(std::chrono::seconds(20)); + hostSet().runCallbacks({}, {}); + + // We expect 1:1 ratio, as there are no hosts in slow start mode. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); +} + +TEST_P(RoundRobinLoadBalancerTest, SlowStartWithRuntimeAggression) { + round_robin_lb_config_.mutable_slow_start_config()->mutable_slow_start_window()->set_seconds(10); + round_robin_lb_config_.mutable_slow_start_config()->mutable_aggression()->set_runtime_key( + "aggression"); + round_robin_lb_config_.mutable_slow_start_config()->mutable_aggression()->set_default_value(1.0); + + init(true); + EXPECT_CALL(runtime_.snapshot_, getDouble("aggression", 1.0)).WillRepeatedly(Return(1.0)); + + simTime().advanceTimeWait(std::chrono::seconds(1)); + + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:90", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:100", simTime(), 1)}; + + hostSet().hosts_ = hostSet().healthy_hosts_; + hostSet().runCallbacks({}, {}); + + simTime().advanceTimeWait(std::chrono::seconds(5)); + hostSet().healthy_hosts_[0]->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + hostSet().runCallbacks({}, {}); + + auto latest_host_added_time_ms = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(1000), latest_host_added_time_ms); + + // We should see 2:1:1 ratio, as hosts 2 and 3 are in slow start, their weights are scaled with + // 0.5 factor. 
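+  // (Hosts 2 and 3 were added at t=1s and it is now t=6s: 5s into a 10s window with
+  // aggression 1.0 gives a linear scaling factor of 5 / 10 = 0.5.)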
+ EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + + simTime().advanceTimeWait(std::chrono::seconds(4)); + HostVector hosts_added; + auto host4 = makeTestHost(info_, "tcp://127.0.0.1:110", simTime()); + hostSet().hosts_.push_back(host4); + hostSet().healthy_hosts_.push_back(host4); + EXPECT_CALL(runtime_.snapshot_, getDouble("aggression", 1.0)).WillRepeatedly(Return(1.5)); + // Recompute edf schedulers. + hostSet().runCallbacks(hosts_added, {}); + + latest_host_added_time_ms = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(*lb_)); + EXPECT_EQ(std::chrono::milliseconds(10000), latest_host_added_time_ms); + + // We should see 1:1:1:0 ratio, as host 2 and 3 weight is scaled with (9/10)^(1/1.5)=0.93 factor, + // host4 weight is 0.002. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr)); + + // host4 is 9 seconds in slow start, it's weight is scaled with (9/10)^(1/1.5)=0.93 factor. + simTime().advanceTimeWait(std::chrono::seconds(9)); + hostSet().runCallbacks({}, {}); + + // We should see 1:1:1:1 ratio, only host4 is in slow start with weight 0.93, and the rest of + // hosts are outside of slow start with weight 1. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_->chooseHost(nullptr)); +} + +TEST_P(RoundRobinLoadBalancerTest, SlowStartNoWaitNonLinearAggression) { + round_robin_lb_config_.mutable_slow_start_config()->mutable_slow_start_window()->set_seconds(60); + round_robin_lb_config_.mutable_slow_start_config()->mutable_aggression()->set_runtime_key( + "aggression"); + round_robin_lb_config_.mutable_slow_start_config()->mutable_aggression()->set_default_value(2.0); + simTime().advanceTimeWait(std::chrono::seconds(1)); + + init(true); + + // As no healthcheck is configured, hosts would enter slow start immediately. + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}; + hostSet().hosts_ = hostSet().healthy_hosts_; + simTime().advanceTimeWait(std::chrono::seconds(5)); + // Host1 is 5 secs in slow start, its weight is scaled with (0.5/60)^(1/2)=0.28 factor. + hostSet().runCallbacks({}, {}); + + // Advance time, so that host1 is no longer in slow start. + simTime().advanceTimeWait(std::chrono::seconds(56)); + + HostVector hosts_added; + auto host2 = makeTestHost(info_, "tcp://127.0.0.1:90", simTime()); + + hosts_added.push_back(host2); + + hostSet().healthy_hosts_.push_back(host2); + hostSet().hosts_ = hostSet().healthy_hosts_; + // host2 weight is scaled with 0.004 factor. + hostSet().runCallbacks(hosts_added, {}); + + // host2 is 6 secs in slow start. + simTime().advanceTimeWait(std::chrono::seconds(6)); + + // Recalculate weights. 
+ hostSet().runCallbacks({}, {}); + + // We expect 3:1 ratio, as host2 is 6 secs in slow start mode and it's weight is scaled with + // pow(0.1, 0.5)==0.31 factor. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + + // host2 is 26 secs in slow start. + simTime().advanceTimeWait(std::chrono::seconds(20)); + + // Recalculate weights. + hostSet().runCallbacks({}, {}); + + // We still expect 5:3 ratio, as host2 is in slow start mode and it's weight is scaled with + // pow(0.43, 0.5)==0.65 factor. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + + // Advance time, so that there are no hosts in slow start. + simTime().advanceTimeWait(std::chrono::seconds(41)); + + // Recalculate weights. + hostSet().runCallbacks({}, {}); + + // Now expect 1:1 ratio. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); +} + class LeastRequestLoadBalancerTest : public LoadBalancerTestBase { public: LeastRequestLoadBalancer lb_{ - priority_set_, nullptr, stats_, runtime_, random_, common_config_, least_request_lb_config_}; + priority_set_, nullptr, stats_, runtime_, random_, common_config_, least_request_lb_config_, + simTime()}; }; TEST_P(LeastRequestLoadBalancerTest, NoHosts) { EXPECT_EQ(nullptr, lb_.chooseHost(nullptr)); } @@ -1635,11 +1955,11 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { // Creating various load balancer objects with different choice configs. envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; lr_lb_config.mutable_choice_count()->set_value(2); - LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, - random_, common_config_, lr_lb_config}; + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; lr_lb_config.mutable_choice_count()->set_value(5); - LeastRequestLoadBalancer lb_5{priority_set_, nullptr, stats_, runtime_, - random_, common_config_, lr_lb_config}; + LeastRequestLoadBalancer lb_5{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; // Verify correct number of choices. 
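The slow start expectations asserted in the round robin tests above all reduce to one scaling rule. The sketch below is an editor's illustration only; the function name, the 1 ms floor on the elapsed time, and the exact clamping are assumptions rather than the patch's implementation, but it reproduces the ratios checked above.

#include <algorithm>
#include <chrono>
#include <cmath>

// Effective weight multiplier for a host that entered slow start `in_slow_start` ago, given the
// configured window and aggression. With aggression 1.0 the ramp is linear (12s / 60s -> 0.2);
// larger aggression front-loads it ((9s / 10s) ^ (1 / 1.5) ~= 0.93). The 1 ms lower bound is an
// assumption that reproduces the tiny ~0.002 weight asserted for a just-added host.
double slowStartFactor(std::chrono::milliseconds in_slow_start, std::chrono::milliseconds window,
                       double aggression) {
  const double time_factor =
      std::max<double>(static_cast<double>(in_slow_start.count()), 1.0) /
      static_cast<double>(window.count());
  return std::pow(std::min(time_factor, 1.0), 1.0 / aggression);
}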
@@ -1715,8 +2035,8 @@ TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceWithInvalidActiveRequestBias envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; lr_lb_config.mutable_active_request_bias()->set_runtime_key("ar_bias"); lr_lb_config.mutable_active_request_bias()->set_default_value(1.0); - LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, - random_, common_config_, lr_lb_config}; + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; EXPECT_CALL(runtime_.snapshot_, getDouble("ar_bias", 1.0)).WillRepeatedly(Return(-1.0)); @@ -1769,8 +2089,8 @@ TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceWithCustomActiveRequestBias) envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; lr_lb_config.mutable_active_request_bias()->set_runtime_key("ar_bias"); lr_lb_config.mutable_active_request_bias()->set_default_value(1.0); - LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, - random_, common_config_, lr_lb_config}; + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; EXPECT_CALL(runtime_.snapshot_, getDouble("ar_bias", 1.0)).WillRepeatedly(Return(0.0)); @@ -1815,6 +2135,197 @@ TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceCallbacks) { EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr)); } +TEST_P(LeastRequestLoadBalancerTest, SlowStartWithDefaultParams) { + envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; + const auto slow_start_window = + EdfLoadBalancerBasePeer::slowStartWindow(static_cast(lb_2)); + EXPECT_EQ(std::chrono::milliseconds(0), slow_start_window); + const auto aggression = + EdfLoadBalancerBasePeer::aggression(static_cast(lb_2)); + EXPECT_EQ(1.0, aggression); + const auto latest_host_added_time = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(lb_2)); + EXPECT_EQ(std::chrono::milliseconds(0), latest_host_added_time); +} + +TEST_P(LeastRequestLoadBalancerTest, SlowStartNoWait) { + envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; + lr_lb_config.mutable_slow_start_config()->mutable_slow_start_window()->set_seconds(60); + lr_lb_config.mutable_active_request_bias()->set_runtime_key("ar_bias"); + lr_lb_config.mutable_active_request_bias()->set_default_value(1.0); + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; + simTime().advanceTimeWait(std::chrono::seconds(1)); + + // As no healthcheck is configured, hosts would enter slow start immediately. + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}; + hostSet().hosts_ = hostSet().healthy_hosts_; + simTime().advanceTimeWait(std::chrono::seconds(5)); + // Host1 is 5 secs in slow start, its weight is scaled with (5/60)^1=0.08 factor. + hostSet().runCallbacks({}, {}); + + auto latest_host_added_time = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(lb_2)); + EXPECT_EQ(std::chrono::milliseconds(1000), latest_host_added_time); + + // Advance time, so that host is no longer in slow start. 
+ simTime().advanceTimeWait(std::chrono::seconds(56)); + + auto host2 = makeTestHost(info_, "tcp://127.0.0.1:90", simTime()); + hostSet().healthy_hosts_.push_back(host2); + hostSet().hosts_ = hostSet().healthy_hosts_; + HostVector hosts_added; + hosts_added.push_back(host2); + + hostSet().runCallbacks(hosts_added, {}); + + latest_host_added_time = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(lb_2)); + EXPECT_EQ(std::chrono::milliseconds(62000), latest_host_added_time); + + // host2 is 20 secs in slow start, the weight is scaled with time factor 20 / 60 == 0.16. + simTime().advanceTimeWait(std::chrono::seconds(10)); + + // Recalculate weights. + hostSet().runCallbacks({}, {}); + + hostSet().healthy_hosts_[0]->stats().rq_active_.set(1); + hostSet().healthy_hosts_[1]->stats().rq_active_.set(0); + + // We expect 3:1 ratio, as host2 is in slow start mode and it's weight is scaled with + // 0.16 factor and host1 weight with 0.5 factor (due to active request bias). + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + + // host2 is 50 secs in slow start, the weight is scaled with time factor 40 / 60 == 0.66. + simTime().advanceTimeWait(std::chrono::seconds(30)); + + // Recalculate weights. + hostSet().runCallbacks({}, {}); + + // We expect 4:3 ratio, as host2 is in slow start mode and it's weight is scaled with + // 0.66 factor and host1 weight with 0.5 factor. + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); +} + +TEST_P(LeastRequestLoadBalancerTest, SlowStartWaitForPassingHC) { + envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; + lr_lb_config.mutable_slow_start_config()->mutable_slow_start_window()->set_seconds(10); + lr_lb_config.mutable_slow_start_config()->mutable_aggression()->set_runtime_key("aggression"); + lr_lb_config.mutable_slow_start_config()->mutable_aggression()->set_default_value(0.9); + lr_lb_config.mutable_active_request_bias()->set_runtime_key("ar_bias"); + lr_lb_config.mutable_active_request_bias()->set_default_value(0.9); + + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config, simTime()}; + + simTime().advanceTimeWait(std::chrono::seconds(1)); + auto host1 = makeTestHost(info_, "tcp://127.0.0.1:80", simTime()); + host1->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + + host_set_.hosts_ = {host1}; + + HostVector hosts_added; + hosts_added.push_back(host1); + simTime().advanceTimeWait(std::chrono::seconds(1)); + hostSet().runCallbacks(hosts_added, {}); + + auto latest_host_added_time = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(lb_2)); + EXPECT_EQ(std::chrono::milliseconds(0), latest_host_added_time); + + simTime().advanceTimeWait(std::chrono::seconds(5)); + + hosts_added.clear(); + auto host2 = makeTestHost(info_, "tcp://127.0.0.1:90", simTime()); + hosts_added.push_back(host2); + + hostSet().healthy_hosts_ = {host1, host2}; 
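+  // Mirror the healthy set into the full host list before the membership-update callbacks run.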
+ hostSet().hosts_ = hostSet().healthyHosts(); + hostSet().runCallbacks(hosts_added, {}); + + latest_host_added_time = + EdfLoadBalancerBasePeer::latestHostAddedTime(static_cast(lb_2)); + EXPECT_EQ(std::chrono::milliseconds(7000), latest_host_added_time); + + simTime().advanceTimeWait(std::chrono::seconds(1)); + host1->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC); + hostSet().healthy_hosts_ = {host1, host2}; + + hostSet().healthy_hosts_[0]->stats().rq_active_.set(1); + hostSet().healthy_hosts_[1]->stats().rq_active_.set(0); + + hostSet().healthy_hosts_ = {host1, host2}; + hostSet().hosts_ = hostSet().healthyHosts(); + + // Trigger callbacks to add host1 to slow start mode. + hostSet().runCallbacks({}, {}); + + // We expect 11:2 ratio, as host2 is in slow start mode, its weight is scaled with factor + // pow(0.1, 1.11)=0.07. Host1 is 7 seconds in slow start and its weight is scaled with active + // request and time bias 0.53 * pow(0.7, 1.11) = 0.36. + + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + + simTime().advanceTimeWait(std::chrono::seconds(3)); + host1->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + // Trigger callbacks to remove host1 from slow start mode. + hostSet().runCallbacks({}, {}); + + // We expect 3:5 ratio, as host2 is 4 seconds in slow start, its weight is scaled with factor + // pow(0.4, 1.11)=0.36. Host1 is not in slow start and its weight is scaled with active + // request bias = 0.53. + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + + // Host2 is 7 seconds in slow start, the weight is scaled with time factor 7 / 10 == 0.6. + simTime().advanceTimeWait(std::chrono::seconds(3)); + + hostSet().runCallbacks({}, {}); + + // We expect 6:5 ratio, as host2 is in slow start mode, its weight is scaled with time factor + // pow(0.7, 1.11)=0.67. Host1 weight is scaled with active request bias = 0.53. 
+ EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); +} + INSTANTIATE_TEST_SUITE_P(PrimaryOrFailover, LeastRequestLoadBalancerTest, ::testing::Values(true, false)); diff --git a/test/common/upstream/load_balancer_simulation_test.cc b/test/common/upstream/load_balancer_simulation_test.cc index 28ded32dd029..22d081562f1a 100644 --- a/test/common/upstream/load_balancer_simulation_test.cc +++ b/test/common/upstream/load_balancer_simulation_test.cc @@ -74,11 +74,13 @@ TEST(DISABLED_LeastRequestLoadBalancerWeightTest, Weight) { ClusterStats stats{ClusterInfoImpl::generateStats(stats_store, stat_names)}; stats.max_host_weight_.set(weight); NiceMock runtime; + auto time_source = std::make_unique>(); Random::RandomGeneratorImpl random; envoy::config::cluster::v3::Cluster::LeastRequestLbConfig least_request_lb_config; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config; LeastRequestLoadBalancer lb_{ - priority_set, nullptr, stats, runtime, random, common_config, least_request_lb_config}; + priority_set, nullptr, stats, runtime, random, common_config, least_request_lb_config, + *time_source}; absl::node_hash_map host_hits; const uint64_t total_requests = 100; diff --git a/test/common/upstream/original_dst_cluster_test.cc b/test/common/upstream/original_dst_cluster_test.cc index bd170104bb95..6fc86b2e056a 100644 --- a/test/common/upstream/original_dst_cluster_test.cc +++ b/test/common/upstream/original_dst_cluster_test.cc @@ -188,6 +188,12 @@ TEST_F(OriginalDstClusterTest, NoContext) { EXPECT_CALL(dispatcher_, post(_)).Times(0); HostConstSharedPtr host = lb.chooseHost(&lb_context); EXPECT_EQ(host, nullptr); + + EXPECT_EQ(nullptr, lb.peekAnotherHost(nullptr)); + EXPECT_FALSE(lb.lifetimeCallbacks().has_value()); + std::vector hash_key; + auto mock_host = std::make_shared>(); + EXPECT_FALSE(lb.selectExistingConnection(nullptr, *mock_host, hash_key).has_value()); } // Downstream connection is not using original dst => no host. 
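For the least request slow start tests further up, the expected pick ratios combine the active request bias with the slow start ramp. A back-of-the-envelope sketch follows (editor's illustration; the formula shape and names are assumptions, not part of the patch), matching the factors quoted in the test comments: 1 / (1 + 1)^0.9 ~= 0.53 for one active request, (7/10)^(1/0.9) ~= 0.67 for a host 7 s into a 10 s window, and their product ~= 0.36.

#include <cmath>
#include <cstdint>

// Effective weight of a host under least-request load balancing with an active_request_bias,
// further scaled by its slow start factor (1.0 once the host is out of slow start).
double effectiveWeight(double weight, uint64_t active_requests, double active_request_bias,
                       double slow_start_factor) {
  return weight * slow_start_factor /
         std::pow(static_cast<double>(active_requests) + 1.0, active_request_bias);
}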
diff --git a/test/common/upstream/ring_hash_lb_test.cc b/test/common/upstream/ring_hash_lb_test.cc index 9d5b2c4141ef..99b9d402e6b4 100644 --- a/test/common/upstream/ring_hash_lb_test.cc +++ b/test/common/upstream/ring_hash_lb_test.cc @@ -96,6 +96,24 @@ INSTANTIATE_TEST_SUITE_P(RingHashPrimaryOrFailover, RingHashFailoverTest, ::test TEST_P(RingHashLoadBalancerTest, NoHost) { init(); EXPECT_EQ(nullptr, lb_->factory()->create()->chooseHost(nullptr)); + + EXPECT_EQ(nullptr, lb_->factory()->create()->peekAnotherHost(nullptr)); + EXPECT_FALSE(lb_->factory()->create()->lifetimeCallbacks().has_value()); + std::vector hash_key; + auto mock_host = std::make_shared>(); + EXPECT_FALSE(lb_->factory() + ->create() + ->selectExistingConnection(nullptr, *mock_host, hash_key) + .has_value()); +} + +TEST_P(RingHashLoadBalancerTest, BaseMethods) { + init(); + EXPECT_EQ(nullptr, lb_->peekAnotherHost(nullptr)); + EXPECT_FALSE(lb_->lifetimeCallbacks().has_value()); + std::vector hash_key; + auto mock_host = std::make_shared>(); + EXPECT_FALSE(lb_->selectExistingConnection(nullptr, *mock_host, hash_key).has_value()); }; TEST_P(RingHashLoadBalancerTest, SelectOverrideHost) { diff --git a/test/common/upstream/round_robin_load_balancer_fuzz.proto b/test/common/upstream/round_robin_load_balancer_fuzz.proto index a5ecf67ccc1c..60da8d643768 100644 --- a/test/common/upstream/round_robin_load_balancer_fuzz.proto +++ b/test/common/upstream/round_robin_load_balancer_fuzz.proto @@ -4,9 +4,11 @@ syntax = "proto3"; package test.common.upstream; import "validate/validate.proto"; +import "envoy/config/cluster/v3/cluster.proto"; import "test/common/upstream/zone_aware_load_balancer_fuzz.proto"; message RoundRobinLoadBalancerTestCase { test.common.upstream.ZoneAwareLoadBalancerTestCase zone_aware_load_balancer_test_case = 1 [(validate.rules).message.required = true]; + envoy.config.cluster.v3.Cluster.RoundRobinLbConfig round_robin_lb_config = 2; } diff --git a/test/common/upstream/round_robin_load_balancer_fuzz_test.cc b/test/common/upstream/round_robin_load_balancer_fuzz_test.cc index 4c1809a9a223..75a456f44c87 100644 --- a/test/common/upstream/round_robin_load_balancer_fuzz_test.cc +++ b/test/common/upstream/round_robin_load_balancer_fuzz_test.cc @@ -31,7 +31,8 @@ DEFINE_PROTO_FUZZER(const test::common::upstream::RoundRobinLoadBalancerTestCase zone_aware_load_balancer_fuzz.local_priority_set_.get(), zone_aware_load_balancer_fuzz.stats_, zone_aware_load_balancer_fuzz.runtime_, zone_aware_load_balancer_fuzz.random_, - zone_aware_load_balancer_test_case.load_balancer_test_case().common_lb_config()); + zone_aware_load_balancer_test_case.load_balancer_test_case().common_lb_config(), + input.round_robin_lb_config(), zone_aware_load_balancer_fuzz.simTime()); } catch (EnvoyException& e) { ENVOY_LOG_MISC(debug, "EnvoyException; {}", e.what()); return; diff --git a/test/common/upstream/subset_lb_test.cc b/test/common/upstream/subset_lb_test.cc index 169202a44803..133d15cacba9 100644 --- a/test/common/upstream/subset_lb_test.cc +++ b/test/common/upstream/subset_lb_test.cc @@ -201,7 +201,8 @@ class SubsetLoadBalancerTest : public Event::TestUsingSimulatedTime, lb_ = std::make_shared( lb_type_, priority_set_, nullptr, stats_, *scope_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + ring_hash_lb_config_, maglev_lb_config_, round_robin_lb_config_, least_request_lb_config_, + common_config_, simTime()); } void zoneAwareInit(const std::vector& host_metadata_per_locality, 
@@ -248,10 +249,10 @@ class SubsetLoadBalancerTest : public Event::TestUsingSimulatedTime, std::make_shared(), HostsPerLocalityImpl::empty()), {}, {}, {}, absl::nullopt); - lb_ = std::make_shared(lb_type_, priority_set_, &local_priority_set_, - stats_, *scope_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, - least_request_lb_config_, common_config_); + lb_ = std::make_shared( + lb_type_, priority_set_, &local_priority_set_, stats_, *scope_, runtime_, random_, + subset_info_, ring_hash_lb_config_, maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); } HostSharedPtr makeHost(const std::string& url, const HostMetadata& metadata) { @@ -475,6 +476,7 @@ class SubsetLoadBalancerTest : public Event::TestUsingSimulatedTime, envoy::config::cluster::v3::Cluster::RingHashLbConfig ring_hash_lb_config_; envoy::config::cluster::v3::Cluster::MaglevLbConfig maglev_lb_config_; envoy::config::cluster::v3::Cluster::LeastRequestLbConfig least_request_lb_config_; + envoy::config::cluster::v3::Cluster::RoundRobinLbConfig round_robin_lb_config_; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; NiceMock runtime_; NiceMock random_; @@ -497,6 +499,12 @@ TEST_F(SubsetLoadBalancerTest, NoFallback) { EXPECT_EQ(nullptr, lb_->chooseHost(nullptr)); EXPECT_EQ(0U, stats_.lb_subsets_fallback_.value()); EXPECT_EQ(0U, stats_.lb_subsets_selected_.value()); + + EXPECT_EQ(nullptr, lb_->peekAnotherHost(nullptr)); + EXPECT_FALSE(lb_->lifetimeCallbacks().has_value()); + std::vector hash_key; + auto mock_host = std::make_shared>(); + EXPECT_FALSE(lb_->selectExistingConnection(nullptr, *mock_host, hash_key).has_value()); } TEST_F(SubsetLoadBalancerTest, SelectOverrideHost) { @@ -1458,9 +1466,10 @@ TEST_F(SubsetLoadBalancerTest, IgnoresHostsWithoutMetadata) { host_set_.healthy_hosts_ = host_set_.hosts_; host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_; - lb_ = std::make_shared( - lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); TestLoadBalancerContext context_version({{"version", "1.0"}}); @@ -1877,9 +1886,10 @@ TEST_F(SubsetLoadBalancerTest, DisabledLocalityWeightAwareness) { }, host_set_, {1, 100}); - lb_ = std::make_shared( - lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); TestLoadBalancerContext context({{"version", "1.1"}}); @@ -1900,9 +1910,10 @@ TEST_F(SubsetLoadBalancerTest, DoesNotCheckHostHealth) { EXPECT_CALL(*mock_host, weight()).WillRepeatedly(Return(1)); - lb_ = std::make_shared( - lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + 
maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); } TEST_F(SubsetLoadBalancerTest, EnabledLocalityWeightAwareness) { @@ -1923,9 +1934,10 @@ TEST_F(SubsetLoadBalancerTest, EnabledLocalityWeightAwareness) { }, host_set_, {1, 100}); - lb_ = std::make_shared( - lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); TestLoadBalancerContext context({{"version", "1.1"}}); @@ -1958,9 +1970,10 @@ TEST_F(SubsetLoadBalancerTest, EnabledScaleLocalityWeights) { }, host_set_, {50, 50}); - lb_ = std::make_shared( - lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); TestLoadBalancerContext context({{"version", "1.1"}}); // Since we scale the locality weights by number of hosts removed, we expect to see the second @@ -2003,9 +2016,10 @@ TEST_F(SubsetLoadBalancerTest, EnabledScaleLocalityWeightsRounding) { }, host_set_, {2, 2}); - lb_ = std::make_shared( - lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); TestLoadBalancerContext context({{"version", "1.0"}}); // We expect to see a 33/66 split because 2 * 1 / 2 = 1 and 2 * 3 / 4 = 1.5 -> 2 @@ -2035,9 +2049,10 @@ TEST_F(SubsetLoadBalancerTest, ScaleLocalityWeightsWithNoLocalityWeights) { }, host_set_); - lb_ = std::make_shared( - lb_type_, priority_set_, nullptr, stats_, stats_store_, runtime_, random_, subset_info_, - ring_hash_lb_config_, maglev_lb_config_, least_request_lb_config_, common_config_); + lb_ = std::make_shared(lb_type_, priority_set_, nullptr, stats_, stats_store_, + runtime_, random_, subset_info_, ring_hash_lb_config_, + maglev_lb_config_, round_robin_lb_config_, + least_request_lb_config_, common_config_, simTime()); } TEST_P(SubsetLoadBalancerTest, GaugesUpdatedOnDestroy) { diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 98b36b891f1d..b177712ca4cc 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -39,6 +39,8 @@ #include "test/mocks/upstream/cluster_manager.h" #include "test/mocks/upstream/health_checker.h" #include "test/mocks/upstream/priority_set.h" +#include "test/mocks/upstream/thread_aware_load_balancer.h" +#include "test/mocks/upstream/typed_load_balancer_factory.h" #include "test/test_common/environment.h" #include "test/test_common/registry.h" #include "test/test_common/test_runtime.h" @@ -2079,6 +2081,138 @@ TEST_F(StaticClusterImplTest, UnsupportedLBType) { EnvoyException, "invalid value 
\"fakelbtype\""); } +// load_balancing_policy should be used when lb_policy is set to LOAD_BALANCING_POLICY_CONFIG. +TEST_F(StaticClusterImplTest, LoadBalancingPolicyWithLbPolicy) { + const std::string yaml = R"EOF( + name: staticcluster + connect_timeout: 0.25s + type: static + lb_policy: LOAD_BALANCING_POLICY_CONFIG + load_balancing_policy: + policies: + - typed_extension_config: + name: custom_lb + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + value: + foo: "bar" + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11001 + )EOF"; + + NiceMock factory; + EXPECT_CALL(factory, name()).WillRepeatedly(Return("custom_lb")); + Registry::InjectFactory registered_factory(factory); + + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); + Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( + "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() + : cluster_config.alt_stat_name())); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, + singleton_manager_, tls_, validation_visitor_, *api_, options_); + StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), true); + cluster.initialize([] {}); + + EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size()); + EXPECT_EQ(LoadBalancerType::LoadBalancingPolicyConfig, cluster.info()->lbType()); + EXPECT_TRUE(cluster.info()->addedViaApi()); +} + +// load_balancing_policy should also be used when lb_policy is set to something else besides +// LOAD_BALANCING_POLICY_CONFIG. +TEST_F(StaticClusterImplTest, LoadBalancingPolicyWithOtherLbPolicy) { + const std::string yaml = R"EOF( + name: staticcluster + connect_timeout: 0.25s + type: static + lb_policy: ROUND_ROBIN + load_balancing_policy: + policies: + - typed_extension_config: + name: custom_lb + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + value: + foo: "bar" + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11001 + )EOF"; + + NiceMock factory; + EXPECT_CALL(factory, name()).WillRepeatedly(Return("custom_lb")); + Registry::InjectFactory registered_factory(factory); + + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); + Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( + "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() + : cluster_config.alt_stat_name())); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, + singleton_manager_, tls_, validation_visitor_, *api_, options_); + StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), true); + cluster.initialize([] {}); + + EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size()); + EXPECT_EQ(LoadBalancerType::LoadBalancingPolicyConfig, cluster.info()->lbType()); + EXPECT_TRUE(cluster.info()->addedViaApi()); +} + +// load_balancing_policy should also be used when lb_policy is omitted. 
+TEST_F(StaticClusterImplTest, LoadBalancingPolicyWithoutLbPolicy) { + const std::string yaml = R"EOF( + name: staticcluster + connect_timeout: 0.25s + type: static + load_balancing_policy: + policies: + - typed_extension_config: + name: custom_lb + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + value: + foo: "bar" + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11001 + )EOF"; + + NiceMock factory; + EXPECT_CALL(factory, name()).WillRepeatedly(Return("custom_lb")); + Registry::InjectFactory registered_factory(factory); + + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); + Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( + "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() + : cluster_config.alt_stat_name())); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, + singleton_manager_, tls_, validation_visitor_, *api_, options_); + StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), true); + cluster.initialize([] {}); + + EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size()); + EXPECT_EQ(LoadBalancerType::LoadBalancingPolicyConfig, cluster.info()->lbType()); + EXPECT_TRUE(cluster.info()->addedViaApi()); +} + TEST_F(StaticClusterImplTest, MalformedHostIP) { const std::string yaml = R"EOF( name: name @@ -2177,6 +2311,32 @@ TEST_F(StaticClusterImplTest, SourceAddressPriority) { } } +// LEDS is not supported with a static cluster at the moment. +TEST_F(StaticClusterImplTest, LedsUnsupported) { + const std::string yaml = R"EOF( + name: staticcluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + endpoints: + leds_cluster_locality_config: + leds_collection_name: xdstp://foo/leds_collection_name + )EOF"; + + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); + Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( + "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() + : cluster_config.alt_stat_name())); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, + singleton_manager_, tls_, validation_visitor_, *api_, options_); + EXPECT_THROW_WITH_MESSAGE( + StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), false), + EnvoyException, + "LEDS is only supported when EDS is used. 
Static cluster staticcluster cannot use LEDS."); +} + class ClusterImplTest : public testing::Test, public UpstreamImplTestBase {}; // Test that the correct feature() is set when close_connections_on_host_health_failure is @@ -2813,6 +2973,33 @@ TEST_F(ClusterInfoImplTest, DefaultConnectTimeout) { EXPECT_EQ(std::chrono::seconds(5), cluster->info()->connectTimeout()); } +TEST_F(ClusterInfoImplTest, MaxConnectionDurationTest) { + const std::string yaml_base = R"EOF( + name: {} + type: STRICT_DNS + lb_policy: ROUND_ROBIN + )EOF"; + + const std::string yaml_set_max_connection_duration = yaml_base + R"EOF( + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http_protocol_options: {{}} + common_http_protocol_options: + max_connection_duration: {} + )EOF"; + + auto cluster1 = makeCluster(fmt::format(yaml_base, "cluster1")); + EXPECT_EQ(absl::nullopt, cluster1->info()->maxConnectionDuration()); + + auto cluster2 = makeCluster(fmt::format(yaml_set_max_connection_duration, "cluster2", "9s")); + EXPECT_EQ(std::chrono::seconds(9), cluster2->info()->maxConnectionDuration()); + + auto cluster3 = makeCluster(fmt::format(yaml_set_max_connection_duration, "cluster3", "0s")); + EXPECT_EQ(absl::nullopt, cluster3->info()->maxConnectionDuration()); +} + TEST_F(ClusterInfoImplTest, Timeouts) { const std::string yaml = R"EOF( name: name @@ -3943,58 +4130,6 @@ TEST(HostPartitionTest, PartitionHosts) { EXPECT_EQ(hosts[4], update_hosts_params.excluded_hosts_per_locality->get()[1][1]); } -// Verifies that partitionHosts correctly splits hosts based on their health flags when -// "envoy.reloadable_features.health_check.immediate_failure_exclude_from_cluster" is disabled. 
-TEST(HostPartitionTest, PartitionHostsImmediateFailureExcludeDisabled) { - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.health_check.immediate_failure_exclude_from_cluster", "false"}}); - - std::shared_ptr info{new NiceMock()}; - auto time_source = std::make_unique>(); - HostVector hosts{makeTestHost(info, "tcp://127.0.0.1:80", *time_source), - makeTestHost(info, "tcp://127.0.0.1:81", *time_source), - makeTestHost(info, "tcp://127.0.0.1:82", *time_source), - makeTestHost(info, "tcp://127.0.0.1:83", *time_source), - makeTestHost(info, "tcp://127.0.0.1:84", *time_source)}; - - hosts[0]->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); - hosts[1]->healthFlagSet(Host::HealthFlag::DEGRADED_ACTIVE_HC); - hosts[2]->healthFlagSet(Host::HealthFlag::PENDING_ACTIVE_HC); - hosts[2]->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); - hosts[4]->healthFlagSet(Host::HealthFlag::EXCLUDED_VIA_IMMEDIATE_HC_FAIL); - hosts[4]->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); - - auto hosts_per_locality = - makeHostsPerLocality({{hosts[0], hosts[1]}, {hosts[2], hosts[3], hosts[4]}}); - - auto update_hosts_params = - HostSetImpl::partitionHosts(std::make_shared(hosts), hosts_per_locality); - - EXPECT_EQ(5, update_hosts_params.hosts->size()); - EXPECT_EQ(1, update_hosts_params.healthy_hosts->get().size()); - EXPECT_EQ(hosts[3], update_hosts_params.healthy_hosts->get()[0]); - EXPECT_EQ(1, update_hosts_params.degraded_hosts->get().size()); - EXPECT_EQ(hosts[1], update_hosts_params.degraded_hosts->get()[0]); - EXPECT_EQ(1, update_hosts_params.excluded_hosts->get().size()); - EXPECT_EQ(hosts[2], update_hosts_params.excluded_hosts->get()[0]); - - EXPECT_EQ(2, update_hosts_params.hosts_per_locality->get()[0].size()); - EXPECT_EQ(3, update_hosts_params.hosts_per_locality->get()[1].size()); - - EXPECT_EQ(0, update_hosts_params.healthy_hosts_per_locality->get()[0].size()); - EXPECT_EQ(1, update_hosts_params.healthy_hosts_per_locality->get()[1].size()); - EXPECT_EQ(hosts[3], update_hosts_params.healthy_hosts_per_locality->get()[1][0]); - - EXPECT_EQ(1, update_hosts_params.degraded_hosts_per_locality->get()[0].size()); - EXPECT_EQ(0, update_hosts_params.degraded_hosts_per_locality->get()[1].size()); - EXPECT_EQ(hosts[1], update_hosts_params.degraded_hosts_per_locality->get()[0][0]); - - EXPECT_EQ(0, update_hosts_params.excluded_hosts_per_locality->get()[0].size()); - EXPECT_EQ(1, update_hosts_params.excluded_hosts_per_locality->get()[1].size()); - EXPECT_EQ(hosts[2], update_hosts_params.excluded_hosts_per_locality->get()[1][0]); -} - TEST_F(ClusterInfoImplTest, MaxRequestsPerConnectionValidation) { const std::string yaml = R"EOF( name: cluster1 diff --git a/test/common/upstream/zone_aware_load_balancer_fuzz_base.h b/test/common/upstream/zone_aware_load_balancer_fuzz_base.h index be4a9ecb9a05..9e455027f312 100644 --- a/test/common/upstream/zone_aware_load_balancer_fuzz_base.h +++ b/test/common/upstream/zone_aware_load_balancer_fuzz_base.h @@ -1,12 +1,14 @@ #pragma once #include "test/mocks/upstream/priority_set.h" +#include "test/test_common/simulated_time_system.h" #include "load_balancer_fuzz_base.h" namespace Envoy { namespace Upstream { -class ZoneAwareLoadBalancerFuzzBase : public LoadBalancerFuzzBase { +class ZoneAwareLoadBalancerFuzzBase : public Event::TestUsingSimulatedTime, + public LoadBalancerFuzzBase { public: ZoneAwareLoadBalancerFuzzBase(bool need_local_cluster, const std::string& random_bytestring) : 
random_bytestring_(random_bytestring) { diff --git a/test/common/watchdog/BUILD b/test/common/watchdog/BUILD index e1539697f667..e5c085ccb660 100644 --- a/test/common/watchdog/BUILD +++ b/test/common/watchdog/BUILD @@ -23,7 +23,7 @@ envoy_cc_test( "//test/common/stats:stat_test_utility_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", - "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3:pkg_cc_proto", ], ) @@ -38,6 +38,6 @@ envoy_cc_test( "//test/common/stats:stat_test_utility_lib", "//test/mocks/event:event_mocks", "//test/test_common:utility_lib", - "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3:pkg_cc_proto", ], ) diff --git a/test/common/watchdog/abort_action_config_test.cc b/test/common/watchdog/abort_action_config_test.cc index f456687abc02..de534f8b3312 100644 --- a/test/common/watchdog/abort_action_config_test.cc +++ b/test/common/watchdog/abort_action_config_test.cc @@ -1,6 +1,6 @@ #include "envoy/registry/registry.h" #include "envoy/server/guarddog_config.h" -#include "envoy/watchdog/v3alpha/abort_action.pb.h" +#include "envoy/watchdog/v3/abort_action.pb.h" #include "source/common/watchdog/abort_action_config.h" @@ -28,8 +28,8 @@ TEST(AbortActionFactoryTest, CanCreateAction) { "config": { "name": "envoy.watchdog.abort_action", "typed_config": { - "@type": "type.googleapis.com/udpa.type.v1.TypedStruct", - "type_url": "type.googleapis.com/envoy.watchdog.abort_action.v3alpha.AbortActionConfig", + "@type": "type.googleapis.com/xds.type.v3.TypedStruct", + "type_url": "type.googleapis.com/envoy.watchdog.abort_action.v3.AbortActionConfig", "value": { "wait_duration": "2s", } diff --git a/test/common/watchdog/abort_action_test.cc b/test/common/watchdog/abort_action_test.cc index ebdbb8c87031..bfb83a6846c6 100644 --- a/test/common/watchdog/abort_action_test.cc +++ b/test/common/watchdog/abort_action_test.cc @@ -6,7 +6,7 @@ #include "envoy/event/dispatcher.h" #include "envoy/server/guarddog_config.h" #include "envoy/thread/thread.h" -#include "envoy/watchdog/v3alpha/abort_action.pb.h" +#include "envoy/watchdog/v3/abort_action.pb.h" #include "source/common/watchdog/abort_action.h" #include "source/common/watchdog/abort_action_config.h" @@ -21,7 +21,7 @@ namespace Envoy { namespace Watchdog { namespace { -using AbortActionConfig = envoy::watchdog::v3alpha::AbortActionConfig; +using AbortActionConfig = envoy::watchdog::v3::AbortActionConfig; class AbortActionTest : public testing::Test { protected: diff --git a/test/config/integration/server_xds.lds.typed_struct.yaml b/test/config/integration/server_xds.lds.typed_struct.yaml index 27e29f620979..577ad3f70f8c 100644 --- a/test/config/integration/server_xds.lds.typed_struct.yaml +++ b/test/config/integration/server_xds.lds.typed_struct.yaml @@ -10,7 +10,7 @@ resources: - filters: - name: http typed_config: - "@type": type.googleapis.com/udpa.type.v1.TypedStruct + "@type": type.googleapis.com/xds.type.v3.TypedStruct type_url: "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" value: codec_type: HTTP2 diff --git a/test/config/integration/server_xds.lds.with_unknown_field.typed_struct.yaml b/test/config/integration/server_xds.lds.with_unknown_field.typed_struct.yaml index 5da3e5cb9439..f6a7eb52bbdf 100644 --- a/test/config/integration/server_xds.lds.with_unknown_field.typed_struct.yaml +++ b/test/config/integration/server_xds.lds.with_unknown_field.typed_struct.yaml @@ -10,7 +10,7 @@ 
resources: - filters: - name: http typed_config: - "@type": type.googleapis.com/udpa.type.v1.TypedStruct + "@type": type.googleapis.com/xds.type.v3.TypedStruct type_url: "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" value: codec_type: HTTP2 diff --git a/test/config/utility.cc b/test/config/utility.cc index 208e27a70c1c..46f50346201b 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -508,6 +508,39 @@ ConfigHelper::buildClusterLoadAssignment(const std::string& name, const std::str return cluster_load_assignment; } +envoy::config::endpoint::v3::ClusterLoadAssignment +ConfigHelper::buildClusterLoadAssignmentWithLeds(const std::string& name, + const std::string& leds_collection_name) { + API_NO_BOOST(envoy::config::endpoint::v3::ClusterLoadAssignment) cluster_load_assignment; + TestUtility::loadFromYaml(fmt::format(R"EOF( + cluster_name: {} + endpoints: + leds_cluster_locality_config: + leds_config: + resource_api_version: V3 + ads: {{}} + leds_collection_name: {} + )EOF", + name, leds_collection_name), + cluster_load_assignment); + return cluster_load_assignment; +} + +envoy::config::endpoint::v3::LbEndpoint ConfigHelper::buildLbEndpoint(const std::string& address, + uint32_t port) { + API_NO_BOOST(envoy::config::endpoint::v3::LbEndpoint) lb_endpoint; + TestUtility::loadFromYaml(fmt::format(R"EOF( + endpoint: + address: + socket_address: + address: {} + port_value: {} + )EOF", + address, port), + lb_endpoint); + return lb_endpoint; +} + envoy::config::listener::v3::Listener ConfigHelper::buildBaseListener(const std::string& name, const std::string& address, const std::string& filter_chains) { @@ -620,6 +653,15 @@ ConfigHelper::ConfigHelper(const Network::Address::IpVersion version, Api::Api& } } +void ConfigHelper::addListenerTypedMetadata(absl::string_view key, ProtobufWkt::Any& packed_value) { + RELEASE_ASSERT(!finalized_, ""); + auto* static_resources = bootstrap_.mutable_static_resources(); + ASSERT_TRUE(static_resources->listeners_size() > 0); + auto* listener = static_resources->mutable_listeners(0); + auto* map = listener->mutable_metadata()->mutable_typed_filter_metadata(); + (*map)[std::string(key)] = packed_value; +}; + void ConfigHelper::addClusterFilterMetadata(absl::string_view metadata_yaml, absl::string_view cluster_name) { RELEASE_ASSERT(!finalized_, ""); @@ -683,9 +725,11 @@ void ConfigHelper::applyConfigModifiers() { config_modifiers_.clear(); } -void ConfigHelper::configureUpstreamTls(bool use_alpn, bool http3, - bool use_alternate_protocols_cache) { - addConfigModifier([use_alpn, http3, use_alternate_protocols_cache]( +void ConfigHelper::configureUpstreamTls( + bool use_alpn, bool http3, + absl::optional + alternate_protocol_cache_config) { + addConfigModifier([use_alpn, http3, alternate_protocol_cache_config]( envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0); @@ -716,10 +760,13 @@ void ConfigHelper::configureUpstreamTls(bool use_alpn, bool http3, new_protocol_options.mutable_auto_config()->mutable_http3_protocol_options()->MergeFrom( old_protocol_options.explicit_http_config().http3_protocol_options()); } - if (use_alternate_protocols_cache) { + if (alternate_protocol_cache_config.has_value()) { new_protocol_options.mutable_auto_config() ->mutable_alternate_protocols_cache_options() ->set_name("default_alternate_protocols_cache"); + new_protocol_options.mutable_auto_config() + 
->mutable_alternate_protocols_cache_options() + ->CopyFrom(alternate_protocol_cache_config.value()); } (*cluster->mutable_typed_extension_protocol_options()) ["envoy.extensions.upstreams.http.v3.HttpProtocolOptions"] @@ -750,11 +797,6 @@ void ConfigHelper::addRuntimeOverride(const std::string& key, const std::string& (*static_layer->mutable_fields())[std::string(key)] = ValueUtil::stringValue(std::string(value)); } -void ConfigHelper::enableDeprecatedV2Api() { - addRuntimeOverride("envoy.test_only.broken_in_production.enable_deprecated_v2_api", "true"); - addRuntimeOverride("envoy.features.enable_all_deprecated_features", "true"); -} - void ConfigHelper::setProtocolOptions(envoy::config::cluster::v3::Cluster& cluster, HttpProtocolOptions& protocol_options) { if (cluster.typed_extension_protocol_options().contains( diff --git a/test/config/utility.h b/test/config/utility.h index 061726d8aff0..bc60c0cc8154 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -160,6 +160,13 @@ class ConfigHelper { static envoy::config::endpoint::v3::ClusterLoadAssignment buildClusterLoadAssignment(const std::string& name, const std::string& ip_version, uint32_t port); + static envoy::config::endpoint::v3::ClusterLoadAssignment + buildClusterLoadAssignmentWithLeds(const std::string& name, + const std::string& leds_collection_name); + + static envoy::config::endpoint::v3::LbEndpoint buildLbEndpoint(const std::string& address, + uint32_t port); + static envoy::config::listener::v3::Listener buildBaseListener(const std::string& name, const std::string& address, const std::string& filter_chains = ""); @@ -296,7 +303,8 @@ class ConfigHelper { // Configure Envoy to do TLS to upstream. void configureUpstreamTls(bool use_alpn = false, bool http3 = false, - bool use_alternate_protocols_cache = false); + absl::optional + alternate_protocol_cache_config = {}); // Skip validation that ensures that all upstream ports are referenced by the // configuration generated in ConfigHelper::finalize. @@ -305,8 +313,8 @@ class ConfigHelper { // Add this key value pair to the static runtime. void addRuntimeOverride(const std::string& key, const std::string& value); - // Enable deprecated v2 API resources via the runtime. - void enableDeprecatedV2Api(); + // Add typed_filter_metadata to the first listener. + void addListenerTypedMetadata(absl::string_view key, ProtobufWkt::Any& packed_value); // Add filter_metadata to a cluster with the given name void addClusterFilterMetadata(absl::string_view metadata_yaml, diff --git a/test/config_test/config_test.cc b/test/config_test/config_test.cc index 876a501bd56a..d6f8d800f9b6 100644 --- a/test/config_test/config_test.cc +++ b/test/config_test/config_test.cc @@ -27,6 +27,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::AtLeast; using testing::Invoke; using testing::NiceMock; using testing::Return; @@ -96,6 +97,12 @@ class ConfigTest { return snapshot_; })); + // For configuration/example tests we don't fail if WIP APIs are used. 
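// Aside on the .Times(AtLeast(0)) idiom used in the two expectations just below: it does
// not require the call, it merely whitelists it, so configuration/example tests keep
// passing whether or not a work-in-progress API warning fires. A minimal, self-contained
// illustration (MockValidationVisitor is the existing Envoy mock; the test name and the
// string argument are made up for the example):
#include "test/mocks/protobuf/mocks.h"

#include "gmock/gmock.h"
#include "gtest/gtest.h"

TEST(WorkInProgressExpectationExample, AllowsZeroOrMoreCalls) {
  Envoy::ProtobufMessage::MockValidationVisitor visitor;
  // Without an expectation, a strict mock would flag any onWorkInProgress() call; with
  // AtLeast(0) the call is permitted but not required.
  EXPECT_CALL(visitor, onWorkInProgress(testing::_)).Times(testing::AtLeast(0));
  visitor.onWorkInProgress("work-in-progress warning text");
}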
+ EXPECT_CALL(server_.validation_context_.static_validation_visitor_, onWorkInProgress(_)) + .Times(AtLeast(0)); + EXPECT_CALL(server_.validation_context_.dynamic_validation_visitor_, onWorkInProgress(_)) + .Times(AtLeast(0)); + envoy::config::bootstrap::v3::Bootstrap bootstrap; Server::InstanceUtil::loadBootstrapConfig( bootstrap, options_, server_.messageValidationContext().staticValidationVisitor(), *api_); diff --git a/test/extensions/access_loggers/common/grpc_access_logger_test.cc b/test/extensions/access_loggers/common/grpc_access_logger_test.cc index 168a74905216..ec6e35ab635e 100644 --- a/test/extensions/access_loggers/common/grpc_access_logger_test.cc +++ b/test/extensions/access_loggers/common/grpc_access_logger_test.cc @@ -322,9 +322,9 @@ class MockGrpcAccessLoggerCache createLogger(const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig&, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, - Event::Dispatcher& dispatcher, Stats::Scope& scope) override { + Event::Dispatcher& dispatcher) override { return std::make_shared( - std::move(client), buffer_flush_interval_msec, max_buffer_size_bytes, dispatcher, scope, + std::move(client), buffer_flush_interval_msec, max_buffer_size_bytes, dispatcher, scope_, "mock_access_log_prefix.", mockMethodDescriptor()); } }; @@ -354,38 +354,31 @@ class GrpcAccessLoggerCacheTest : public testing::Test { }; TEST_F(GrpcAccessLoggerCacheTest, Deduplication) { - Stats::IsolatedStoreImpl scope; - envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig config; config.set_log_name("log-1"); config.mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name("cluster-1"); expectClientCreation(); MockGrpcAccessLoggerImpl::SharedPtr logger1 = - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope); - EXPECT_EQ(logger1, - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope)); + logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP); + EXPECT_EQ(logger1, logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP)); // Do not deduplicate different types of logger expectClientCreation(); - EXPECT_NE(logger1, - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::TCP, scope)); + EXPECT_NE(logger1, logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::TCP)); // Changing log name leads to another logger. config.set_log_name("log-2"); expectClientCreation(); - EXPECT_NE(logger1, - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope)); + EXPECT_NE(logger1, logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP)); config.set_log_name("log-1"); - EXPECT_EQ(logger1, - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope)); + EXPECT_EQ(logger1, logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP)); // Changing cluster name leads to another logger. 
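// Aside on the deduplication behaviour asserted in this test: the logger cache keys on the
// hash of the whole CommonGrpcAccessLogConfig plus the logger type, which is why changing
// the log name, the target cluster, or the HTTP/TCP type each yields a new logger while an
// identical config returns the cached one. A sketch of that keying follows; the exact key
// type the real cache uses is an assumption, MessageUtil::hash is the real helper.
#include <cstddef>
#include <utility>

#include "envoy/extensions/access_loggers/grpc/v3/als.pb.h"

#include "source/common/protobuf/utility.h"

namespace Envoy {

// Any field change in the config changes the hash; the integer keeps HTTP and TCP loggers
// apart even when their configs are identical.
std::pair<std::size_t, int> exampleLoggerCacheKey(
    const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config,
    int logger_type) {
  return {MessageUtil::hash(config), logger_type};
}

} // namespace Envoy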
config.mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name("cluster-2"); expectClientCreation(); - EXPECT_NE(logger1, - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope)); + EXPECT_NE(logger1, logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP)); } } // namespace diff --git a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc index 737bbf3982f6..3ea77be37f64 100644 --- a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc @@ -156,7 +156,7 @@ TEST_F(GrpcAccessLoggerCacheImplTest, LoggerCreation) { config.mutable_buffer_size_bytes()->set_value(BUFFER_SIZE_BYTES); GrpcAccessLoggerSharedPtr logger = - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope_); + logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP); // Note that the local info node() method is mocked, so the node is not really configurable. grpc_access_logger_impl_test_helper_.expectStreamMessage(R"EOF( identifier: diff --git a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc index 61946a9ed4b6..481c249c779c 100644 --- a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc @@ -45,7 +45,7 @@ class MockGrpcAccessLoggerCache : public GrpcCommon::GrpcAccessLoggerCache { // GrpcAccessLoggerCache MOCK_METHOD(GrpcCommon::GrpcAccessLoggerSharedPtr, getOrCreateLogger, (const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - Common::GrpcAccessLoggerType logger_type, Stats::Scope& scope)); + Common::GrpcAccessLoggerType logger_type)); }; // Test for the issue described in https://github.com/envoyproxy/envoy/pull/18081 @@ -59,10 +59,10 @@ TEST(HttpGrpcAccessLog, TlsLifetimeCheck) { envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config; config.mutable_common_config()->set_transport_api_version( envoy::config::core::v3::ApiVersion::V3); - EXPECT_CALL(*logger_cache, getOrCreateLogger(_, _, _)) + EXPECT_CALL(*logger_cache, getOrCreateLogger(_, _)) .WillOnce([](const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& common_config, - Common::GrpcAccessLoggerType type, Stats::Scope&) { + Common::GrpcAccessLoggerType type) { // This is a part of the actual getOrCreateLogger code path and shouldn't crash. std::make_pair(MessageUtil::hash(common_config), type); return nullptr; @@ -70,7 +70,7 @@ TEST(HttpGrpcAccessLog, TlsLifetimeCheck) { // Set tls callback in the HttpGrpcAccessLog constructor, // but it is not called yet since we have defer_data_ = true. const auto access_log = std::make_unique(AccessLog::FilterPtr{filter}, - config, tls, logger_cache, scope); + config, tls, logger_cache); // Intentionally make access_log die earlier in this scope to simulate the situation where the // creator has been deleted yet the tls callback is not called yet. 
} @@ -88,17 +88,17 @@ class HttpGrpcAccessLogTest : public testing::Test { config_.mutable_common_config()->add_filter_state_objects_to_log("serialized"); config_.mutable_common_config()->set_transport_api_version( envoy::config::core::v3::ApiVersion::V3); - EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _, _)) + EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _)) .WillOnce( [this](const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - Common::GrpcAccessLoggerType logger_type, Stats::Scope&) { + Common::GrpcAccessLoggerType logger_type) { EXPECT_EQ(config.DebugString(), config_.common_config().DebugString()); EXPECT_EQ(Common::GrpcAccessLoggerType::HTTP, logger_type); return logger_; }); access_log_ = std::make_unique(AccessLog::FilterPtr{filter_}, config_, tls_, - logger_cache_, scope_); + logger_cache_); } void expectLog(const std::string& expected_log_entry_yaml) { diff --git a/test/extensions/access_loggers/grpc/tcp_config_test.cc b/test/extensions/access_loggers/grpc/tcp_config_test.cc index 7a2c5f50b200..b88f752d8609 100644 --- a/test/extensions/access_loggers/grpc/tcp_config_test.cc +++ b/test/extensions/access_loggers/grpc/tcp_config_test.cc @@ -81,7 +81,7 @@ class MockGrpcAccessLoggerCache : public GrpcCommon::GrpcAccessLoggerCache { // GrpcAccessLoggerCache MOCK_METHOD(GrpcCommon::GrpcAccessLoggerSharedPtr, getOrCreateLogger, (const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - Common::GrpcAccessLoggerType logger_type, Stats::Scope& scope)); + Common::GrpcAccessLoggerType logger_type)); }; // Test for the issue described in https://github.com/envoyproxy/envoy/pull/18081 @@ -95,18 +95,18 @@ TEST(TcpGrpcAccessLog, TlsLifetimeCheck) { envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config; config.mutable_common_config()->set_transport_api_version( envoy::config::core::v3::ApiVersion::V3); - EXPECT_CALL(*logger_cache, getOrCreateLogger(_, _, _)) + EXPECT_CALL(*logger_cache, getOrCreateLogger(_, _)) .WillOnce([](const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& common_config, - Common::GrpcAccessLoggerType type, Stats::Scope&) { + Common::GrpcAccessLoggerType type) { // This is a part of the actual getOrCreateLogger code path and shouldn't crash. std::make_pair(MessageUtil::hash(common_config), type); return nullptr; }); // Set tls callback in the TcpGrpcAccessLog constructor, // but it is not called yet since we have defer_data_ = true. - const auto access_log = std::make_unique(AccessLog::FilterPtr{filter}, config, - tls, logger_cache, scope); + const auto access_log = + std::make_unique(AccessLog::FilterPtr{filter}, config, tls, logger_cache); // Intentionally make access_log die earlier in this scope to simulate the situation where the // creator has been deleted yet the tls callback is not called yet. 
} diff --git a/test/extensions/access_loggers/open_telemetry/BUILD b/test/extensions/access_loggers/open_telemetry/BUILD index e51a9458ef86..d77fbfb8e09c 100644 --- a/test/extensions/access_loggers/open_telemetry/BUILD +++ b/test/extensions/access_loggers/open_telemetry/BUILD @@ -58,7 +58,7 @@ envoy_extension_cc_test( "//source/extensions/access_loggers/open_telemetry:config", "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3:pkg_cc_proto", ], ) @@ -76,7 +76,7 @@ envoy_extension_cc_test( "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/access_loggers/open_telemetry/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", "@opentelemetry_proto//:logs_cc_proto", ], diff --git a/test/extensions/access_loggers/open_telemetry/access_log_impl_test.cc b/test/extensions/access_loggers/open_telemetry/access_log_impl_test.cc index f815e3a3bd98..8ddbe5b1d91b 100644 --- a/test/extensions/access_loggers/open_telemetry/access_log_impl_test.cc +++ b/test/extensions/access_loggers/open_telemetry/access_log_impl_test.cc @@ -52,7 +52,7 @@ class MockGrpcAccessLoggerCache : public GrpcAccessLoggerCache { // GrpcAccessLoggerCache MOCK_METHOD(GrpcAccessLoggerSharedPtr, getOrCreateLogger, (const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - Common::GrpcAccessLoggerType logger_type, Stats::Scope& scope)); + Common::GrpcAccessLoggerType logger_type)); }; class AccessLogTest : public testing::Test { @@ -82,17 +82,16 @@ string_value: "x-request-header: %REQ(x-request-header)%, protocol: %PROTOCOL%" config_.mutable_common_config()->set_log_name("test_log"); config_.mutable_common_config()->set_transport_api_version( envoy::config::core::v3::ApiVersion::V3); - EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _, _)) + EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _)) .WillOnce( [this](const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - Common::GrpcAccessLoggerType logger_type, Stats::Scope&) { + Common::GrpcAccessLoggerType logger_type) { EXPECT_EQ(config.DebugString(), config_.common_config().DebugString()); EXPECT_EQ(Common::GrpcAccessLoggerType::HTTP, logger_type); return logger_; }); - access_log_ = - std::make_unique(FilterPtr{filter_}, config_, tls_, logger_cache_, scope_); + access_log_ = std::make_unique(FilterPtr{filter_}, config_, tls_, logger_cache_); } void expectLog(const std::string& expected_log_entry_yaml) { @@ -111,7 +110,7 @@ string_value: "x-request-header: %REQ(x-request-header)%, protocol: %PROTOCOL%" Stats::IsolatedStoreImpl scope_; MockFilter* filter_{new NiceMock()}; NiceMock tls_; - envoy::extensions::access_loggers::open_telemetry::v3alpha::OpenTelemetryAccessLogConfig config_; + envoy::extensions::access_loggers::open_telemetry::v3::OpenTelemetryAccessLogConfig config_; std::shared_ptr logger_{new MockGrpcAccessLogger()}; std::shared_ptr logger_cache_{new MockGrpcAccessLoggerCache()}; AccessLogPtr access_log_; diff --git a/test/extensions/access_loggers/open_telemetry/access_log_integration_test.cc 
b/test/extensions/access_loggers/open_telemetry/access_log_integration_test.cc index ce52155a6dc9..4d0a16548516 100644 --- a/test/extensions/access_loggers/open_telemetry/access_log_integration_test.cc +++ b/test/extensions/access_loggers/open_telemetry/access_log_integration_test.cc @@ -1,6 +1,6 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" -#include "envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.pb.h" +#include "envoy/extensions/access_loggers/open_telemetry/v3/logs_service.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "source/common/buffer/zero_copy_input_stream_impl.h" @@ -71,7 +71,7 @@ class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, auto* access_log = hcm.add_access_log(); access_log->set_name("grpc_accesslog"); - envoy::extensions::access_loggers::open_telemetry::v3alpha::OpenTelemetryAccessLogConfig + envoy::extensions::access_loggers::open_telemetry::v3::OpenTelemetryAccessLogConfig config; auto* common_config = config.mutable_common_config(); common_config->set_log_name("foo"); diff --git a/test/extensions/access_loggers/open_telemetry/config_test.cc b/test/extensions/access_loggers/open_telemetry/config_test.cc index 35017d283154..30db7a23d947 100644 --- a/test/extensions/access_loggers/open_telemetry/config_test.cc +++ b/test/extensions/access_loggers/open_telemetry/config_test.cc @@ -1,5 +1,5 @@ #include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" -#include "envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.pb.h" +#include "envoy/extensions/access_loggers/open_telemetry/v3/logs_service.pb.h" #include "envoy/registry/registry.h" #include "envoy/server/access_log_config.h" #include "envoy/stats/scope.h" @@ -46,7 +46,7 @@ class OpenTelemetryAccessLogConfigTest : public testing::Test { ::Envoy::AccessLog::FilterPtr filter_; NiceMock context_; - envoy::extensions::access_loggers::open_telemetry::v3alpha::OpenTelemetryAccessLogConfig + envoy::extensions::access_loggers::open_telemetry::v3::OpenTelemetryAccessLogConfig access_log_config_; ProtobufTypes::MessagePtr message_; Server::Configuration::AccessLogInstanceFactory* factory_{}; diff --git a/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc b/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc index 5736c94515d9..b40e82c47236 100644 --- a/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc @@ -179,7 +179,7 @@ TEST_F(GrpcAccessLoggerCacheImplTest, LoggerCreation) { config.mutable_buffer_size_bytes()->set_value(BUFFER_SIZE_BYTES); GrpcAccessLoggerSharedPtr logger = - logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope_); + logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP); grpc_access_logger_impl_test_helper_.expectStreamMessage(R"EOF( resource_logs: resource: diff --git a/test/extensions/bootstrap/wasm/wasm_speed_test.cc b/test/extensions/bootstrap/wasm/wasm_speed_test.cc index 075e150f0e95..aaadd74ba6e2 100644 --- a/test/extensions/bootstrap/wasm/wasm_speed_test.cc +++ b/test/extensions/bootstrap/wasm/wasm_speed_test.cc @@ -55,8 +55,8 @@ static void bmWasmSimpleCallSpeedTest(benchmark::State& state, std::string test, plugin_config.mutable_vm_config()->set_runtime(absl::StrCat("envoy.wasm.runtime.", runtime)); auto 
plugin = std::make_shared( plugin_config, envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); - auto wasm = std::make_unique(plugin->wasmConfig(), "vm_key", - scope, cluster_manager, *dispatcher); + auto wasm = std::make_unique( + plugin->wasmConfig(), "vm_key", scope, *api, cluster_manager, *dispatcher); std::string code; if (runtime == "null") { code = "WasmSpeedCpp"; diff --git a/test/extensions/bootstrap/wasm/wasm_test.cc b/test/extensions/bootstrap/wasm/wasm_test.cc index 9286aec3d8b7..3055d0c3dd92 100644 --- a/test/extensions/bootstrap/wasm/wasm_test.cc +++ b/test/extensions/bootstrap/wasm/wasm_test.cc @@ -56,7 +56,7 @@ class WasmTestBase { auto config = plugin_->wasmConfig(); config.allowedCapabilities() = allowed_capabilities_; config.environmentVariables() = envs_; - wasm_ = std::make_shared(config, vm_key_, scope_, + wasm_ = std::make_shared(config, vm_key_, scope_, *api_, cluster_manager, *dispatcher_); EXPECT_NE(wasm_, nullptr); wasm_->setCreateContextForTesting( diff --git a/test/extensions/clusters/aggregate/cluster_test.cc b/test/extensions/clusters/aggregate/cluster_test.cc index 12d7ab43ae10..97f2ecb6983d 100644 --- a/test/extensions/clusters/aggregate/cluster_test.cc +++ b/test/extensions/clusters/aggregate/cluster_test.cc @@ -182,6 +182,13 @@ TEST_F(AggregateClusterTest, LoadBalancerTest) { EXPECT_CALL(random_, random()).WillOnce(Return(i)); EXPECT_TRUE(lb_->peekAnotherHost(nullptr) == nullptr); Upstream::HostConstSharedPtr target = lb_->chooseHost(nullptr); + OptRef lifetime_callbacks = + lb_->lifetimeCallbacks(); + EXPECT_FALSE(lifetime_callbacks.has_value()); + std::vector hash_key = {1, 2, 3}; + absl::optional selection = + lb_->selectExistingConnection(nullptr, *host, hash_key); + EXPECT_FALSE(selection.has_value()); EXPECT_EQ(host.get(), target.get()); } diff --git a/test/extensions/clusters/dynamic_forward_proxy/BUILD b/test/extensions/clusters/dynamic_forward_proxy/BUILD index 115da27b7713..9114b74a00f0 100644 --- a/test/extensions/clusters/dynamic_forward_proxy/BUILD +++ b/test/extensions/clusters/dynamic_forward_proxy/BUILD @@ -14,10 +14,15 @@ envoy_package() envoy_extension_cc_test( name = "cluster_test", srcs = ["cluster_test.cc"], + args = [ + # Force creation of c-ares DnsResolverImpl when running test on macOS. 
+ "--runtime-feature-disable-for-tests=envoy.restart_features.use_apple_api_for_dns_lookups", + ], data = ["//test/extensions/transport_sockets/tls/test_data:certs"], extension_names = ["envoy.filters.http.dynamic_forward_proxy"], deps = [ "//source/extensions/clusters/dynamic_forward_proxy:cluster", + "//source/extensions/network/dns_resolver/cares:config", "//source/extensions/transport_sockets/raw_buffer:config", "//source/extensions/transport_sockets/tls:config", "//test/common/upstream:utility_lib", diff --git a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc index 097dcf993cd4..4160758acafe 100644 --- a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc +++ b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc @@ -8,6 +8,8 @@ #include "test/common/upstream/utility.h" #include "test/extensions/common/dynamic_forward_proxy/mocks.h" +#include "test/mocks/http/conn_pool.h" +#include "test/mocks/network/connection.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/server/admin.h" #include "test/mocks/server/instance.h" @@ -91,6 +93,7 @@ class ClusterTest : public testing::Test, // Allow touch() to still be strict. EXPECT_CALL(*host_map_[host], address()).Times(AtLeast(0)); + EXPECT_CALL(*host_map_[host], addressList()).Times(AtLeast(0)); EXPECT_CALL(*host_map_[host], isIpAddress()).Times(AtLeast(0)); EXPECT_CALL(*host_map_[host], resolvedHost()).Times(AtLeast(0)); } @@ -148,6 +151,19 @@ connect_timeout: 0.25s name: foo dns_lookup_family: AUTO )EOF"; + + const std::string coalesce_connection_config_ = R"EOF( +name: name +connect_timeout: 0.25s +cluster_type: + name: dynamic_forward_proxy + typed_config: + "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig + allow_coalesced_connections: true + dns_cache_config: + name: foo + dns_lookup_family: AUTO +)EOF"; }; // Basic flow of the cluster including adding hosts and removing them. @@ -158,6 +174,7 @@ TEST_F(ClusterTest, BasicFlow) { // Verify no host LB cases. EXPECT_EQ(nullptr, lb_->chooseHost(setHostAndReturnContext("foo"))); + EXPECT_EQ(nullptr, lb_->peekAnotherHost(setHostAndReturnContext("foo"))); // LB will immediately resolve host1. 
EXPECT_CALL(*this, onMemberUpdateCb(SizeIs(1), SizeIs(0))); @@ -200,6 +217,317 @@ TEST_F(ClusterTest, PopulatedCache) { EXPECT_EQ(2UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size()); } +TEST_F(ClusterTest, LoadBalancer_LifetimeCallbacksWithoutCoalescing) { + initialize(default_yaml_config_, false); + + OptRef lifetime_callbacks = + lb_->lifetimeCallbacks(); + ASSERT_FALSE(lifetime_callbacks.has_value()); +} + +TEST_F(ClusterTest, LoadBalancer_LifetimeCallbacksWithCoalescing) { + initialize(coalesce_connection_config_, false); + + OptRef lifetime_callbacks = + lb_->lifetimeCallbacks(); + ASSERT_TRUE(lifetime_callbacks.has_value()); +} + +TEST_F(ClusterTest, LoadBalancer_SelectPoolNoConnections) { + initialize(coalesce_connection_config_, false); + + const std::string hostname = "mail.example.org"; + Upstream::MockHost host; + EXPECT_CALL(host, hostname()).WillRepeatedly(testing::ReturnRef(hostname)); + Network::Address::InstanceConstSharedPtr address = + Network::Utility::resolveUrl("tcp://10.0.0.3:50000"); + EXPECT_CALL(host, address()).WillRepeatedly(testing::Return(address)); + std::vector hash_key = {1, 2, 3}; + + absl::optional selection = + lb_->selectExistingConnection(&lb_context_, host, hash_key); + + EXPECT_FALSE(selection.has_value()); +} + +TEST_F(ClusterTest, LoadBalancer_SelectPoolMatchingConnection) { + initialize(coalesce_connection_config_, false); + + const std::string hostname = "mail.example.org"; + Upstream::MockHost host; + EXPECT_CALL(host, hostname()).WillRepeatedly(testing::ReturnRef(hostname)); + Network::Address::InstanceConstSharedPtr address = + Network::Utility::resolveUrl("tcp://10.0.0.3:50000"); + EXPECT_CALL(host, address()).WillRepeatedly(testing::Return(address)); + std::vector hash_key = {1, 2, 3}; + + Envoy::Http::ConnectionPool::MockInstance pool; + Envoy::Network::MockConnection connection; + OptRef lifetime_callbacks = + lb_->lifetimeCallbacks(); + ASSERT_TRUE(lifetime_callbacks.has_value()); + + EXPECT_CALL(connection, connectionInfoProvider()).Times(testing::AnyNumber()); + EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return("h2")); + auto ssl_info = std::make_shared(); + EXPECT_CALL(connection, ssl()).WillRepeatedly(Return(ssl_info)); + lifetime_callbacks->onConnectionOpen(pool, hash_key, connection); + std::vector dns_sans = {"www.example.org", "mail.example.org"}; + EXPECT_CALL(*ssl_info, dnsSansPeerCertificate()).WillOnce(Return(dns_sans)); + + absl::optional selection = + lb_->selectExistingConnection(&lb_context_, host, hash_key); + + ASSERT_TRUE(selection.has_value()); + EXPECT_EQ(&pool, &selection->pool_); + EXPECT_EQ(&connection, &selection->connection_); +} + +TEST_F(ClusterTest, LoadBalancer_SelectPoolMatchingConnectionHttp3) { + initialize(coalesce_connection_config_, false); + + const std::string hostname = "mail.example.org"; + Upstream::MockHost host; + EXPECT_CALL(host, hostname()).WillRepeatedly(testing::ReturnRef(hostname)); + Network::Address::InstanceConstSharedPtr address = + Network::Utility::resolveUrl("tcp://10.0.0.3:50000"); + EXPECT_CALL(host, address()).WillRepeatedly(testing::Return(address)); + std::vector hash_key = {1, 2, 3}; + + Envoy::Http::ConnectionPool::MockInstance pool; + Envoy::Network::MockConnection connection; + OptRef lifetime_callbacks = + lb_->lifetimeCallbacks(); + ASSERT_TRUE(lifetime_callbacks.has_value()); + + EXPECT_CALL(connection, connectionInfoProvider()).Times(testing::AnyNumber()); + EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return("h3")); + auto 
ssl_info = std::make_shared(); + EXPECT_CALL(connection, ssl()).WillRepeatedly(Return(ssl_info)); + lifetime_callbacks->onConnectionOpen(pool, hash_key, connection); + std::vector dns_sans = {"www.example.org", "mail.example.org"}; + EXPECT_CALL(*ssl_info, dnsSansPeerCertificate()).WillOnce(Return(dns_sans)); + + absl::optional selection = + lb_->selectExistingConnection(&lb_context_, host, hash_key); + + ASSERT_TRUE(selection.has_value()); + EXPECT_EQ(&pool, &selection->pool_); + EXPECT_EQ(&connection, &selection->connection_); +} + +TEST_F(ClusterTest, LoadBalancer_SelectPoolNoMatchingConnectionAfterDraining) { + initialize(coalesce_connection_config_, false); + + const std::string hostname = "mail.example.org"; + Upstream::MockHost host; + EXPECT_CALL(host, hostname()).WillRepeatedly(testing::ReturnRef(hostname)); + Network::Address::InstanceConstSharedPtr address = + Network::Utility::resolveUrl("tcp://10.0.0.3:50000"); + EXPECT_CALL(host, address()).WillRepeatedly(testing::Return(address)); + std::vector hash_key = {1, 2, 3}; + + Envoy::Http::ConnectionPool::MockInstance pool; + Envoy::Network::MockConnection connection; + OptRef lifetime_callbacks = + lb_->lifetimeCallbacks(); + ASSERT_TRUE(lifetime_callbacks.has_value()); + + EXPECT_CALL(connection, connectionInfoProvider()).Times(testing::AnyNumber()); + EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return("h2")); + auto ssl_info = std::make_shared(); + EXPECT_CALL(connection, ssl()).WillRepeatedly(Return(ssl_info)); + lifetime_callbacks->onConnectionOpen(pool, hash_key, connection); + + // Drain the connection then no verify that no connection is subsequently selected. + lifetime_callbacks->onConnectionDraining(pool, hash_key, connection); + + absl::optional selection = + lb_->selectExistingConnection(&lb_context_, host, hash_key); + + ASSERT_FALSE(selection.has_value()); +} + +TEST_F(ClusterTest, LoadBalancer_SelectPoolInvalidAlpn) { + initialize(coalesce_connection_config_, false); + + const std::string hostname = "mail.example.org"; + Upstream::MockHost host; + EXPECT_CALL(host, hostname()).WillRepeatedly(testing::ReturnRef(hostname)); + Network::Address::InstanceConstSharedPtr address = + Network::Utility::resolveUrl("tcp://10.0.0.3:50000"); + EXPECT_CALL(host, address()).WillRepeatedly(testing::Return(address)); + std::vector hash_key = {1, 2, 3}; + + Envoy::Http::ConnectionPool::MockInstance pool; + Envoy::Network::MockConnection connection; + OptRef lifetime_callbacks = + lb_->lifetimeCallbacks(); + ASSERT_TRUE(lifetime_callbacks.has_value()); + + EXPECT_CALL(connection, connectionInfoProvider()).Times(testing::AnyNumber()); + EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return("hello")); + auto ssl_info = std::make_shared(); + EXPECT_CALL(connection, ssl()).WillRepeatedly(Return(ssl_info)); + lifetime_callbacks->onConnectionOpen(pool, hash_key, connection); + + absl::optional selection = + lb_->selectExistingConnection(&lb_context_, host, hash_key); + + ASSERT_FALSE(selection.has_value()); +} + +TEST_F(ClusterTest, LoadBalancer_SelectPoolSanMismatch) { + initialize(coalesce_connection_config_, false); + + const std::string hostname = "mail.example.org"; + Upstream::MockHost host; + EXPECT_CALL(host, hostname()).WillRepeatedly(testing::ReturnRef(hostname)); + Network::Address::InstanceConstSharedPtr address = + Network::Utility::resolveUrl("tcp://10.0.0.3:50000"); + EXPECT_CALL(host, address()).WillRepeatedly(testing::Return(address)); + std::vector hash_key = {1, 2, 3}; + + 
Envoy::Http::ConnectionPool::MockInstance pool; + Envoy::Network::MockConnection connection; + OptRef lifetime_callbacks = + lb_->lifetimeCallbacks(); + ASSERT_TRUE(lifetime_callbacks.has_value()); + EXPECT_CALL(connection, connectionInfoProvider()).Times(testing::AnyNumber()); + EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return("h2")); + auto ssl_info = std::make_shared(); + EXPECT_CALL(connection, ssl()).WillRepeatedly(Return(ssl_info)); + lifetime_callbacks->onConnectionOpen(pool, hash_key, connection); + std::vector dns_sans = {"www.example.org"}; + EXPECT_CALL(*ssl_info, dnsSansPeerCertificate()).WillOnce(Return(dns_sans)); + + absl::optional selection = + lb_->selectExistingConnection(&lb_context_, host, hash_key); + + ASSERT_FALSE(selection.has_value()); +} + +TEST_F(ClusterTest, LoadBalancer_SelectPoolHashMismatch) { + initialize(coalesce_connection_config_, false); + + const std::string hostname = "mail.example.org"; + Upstream::MockHost host; + EXPECT_CALL(host, hostname()).WillRepeatedly(testing::ReturnRef(hostname)); + Network::Address::InstanceConstSharedPtr address = + Network::Utility::resolveUrl("tcp://10.0.0.3:50000"); + EXPECT_CALL(host, address()).WillRepeatedly(testing::Return(address)); + std::vector hash_key = {1, 2, 3}; + + Envoy::Http::ConnectionPool::MockInstance pool; + Envoy::Network::MockConnection connection; + OptRef lifetime_callbacks = + lb_->lifetimeCallbacks(); + ASSERT_TRUE(lifetime_callbacks.has_value()); + EXPECT_CALL(connection, connectionInfoProvider()).Times(testing::AnyNumber()); + EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return("h2")); + auto ssl_info = std::make_shared(); + EXPECT_CALL(connection, ssl()).WillRepeatedly(Return(ssl_info)); + lifetime_callbacks->onConnectionOpen(pool, hash_key, connection); + + hash_key[0]++; + absl::optional selection = + lb_->selectExistingConnection(&lb_context_, host, hash_key); + + ASSERT_FALSE(selection.has_value()); +} + +TEST_F(ClusterTest, LoadBalancer_SelectPoolIpMismatch) { + initialize(coalesce_connection_config_, false); + + const std::string hostname = "mail.example.org"; + Upstream::MockHost host; + EXPECT_CALL(host, hostname()).WillRepeatedly(testing::ReturnRef(hostname)); + Network::Address::InstanceConstSharedPtr address = + Network::Utility::resolveUrl("tcp://10.0.0.4:50000"); + EXPECT_CALL(host, address()).WillRepeatedly(testing::Return(address)); + std::vector hash_key = {1, 2, 3}; + + Envoy::Http::ConnectionPool::MockInstance pool; + Envoy::Network::MockConnection connection; + OptRef lifetime_callbacks = + lb_->lifetimeCallbacks(); + ASSERT_TRUE(lifetime_callbacks.has_value()); + EXPECT_CALL(connection, connectionInfoProvider()).Times(testing::AnyNumber()); + EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return("h2")); + auto ssl_info = std::make_shared(); + EXPECT_CALL(connection, ssl()).WillRepeatedly(Return(ssl_info)); + lifetime_callbacks->onConnectionOpen(pool, hash_key, connection); + std::vector dns_sans = {"www.example.org", "mail.example.org"}; + EXPECT_CALL(*ssl_info, dnsSansPeerCertificate()).WillRepeatedly(Return(dns_sans)); + + absl::optional selection = + lb_->selectExistingConnection(&lb_context_, host, hash_key); + + ASSERT_FALSE(selection.has_value()); +} + +TEST_F(ClusterTest, LoadBalancer_SelectPoolEmptyHostname) { + initialize(coalesce_connection_config_, false); + + const std::string hostname = "mail.example.org"; + Upstream::MockHost host; + EXPECT_CALL(host, hostname()).WillRepeatedly(testing::ReturnRef(hostname)); + 
Network::Address::InstanceConstSharedPtr address = + Network::Utility::resolveUrl("tcp://10.0.0.4:50000"); + EXPECT_CALL(host, address()).WillRepeatedly(testing::Return(address)); + std::vector hash_key = {1, 2, 3}; + + Envoy::Http::ConnectionPool::MockInstance pool; + Envoy::Network::MockConnection connection; + OptRef lifetime_callbacks = + lb_->lifetimeCallbacks(); + ASSERT_TRUE(lifetime_callbacks.has_value()); + EXPECT_CALL(connection, connectionInfoProvider()).Times(testing::AnyNumber()); + EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return("h2")); + auto ssl_info = std::make_shared(); + std::vector dns_sans = {"www.example.org", "mail.example.org"}; + EXPECT_CALL(connection, ssl()).WillRepeatedly(Return(ssl_info)); + lifetime_callbacks->onConnectionOpen(pool, hash_key, connection); + EXPECT_CALL(*ssl_info, dnsSansPeerCertificate()).WillRepeatedly(Return(dns_sans)); + + const std::string empty_hostname = ""; + Upstream::MockHost empty_host; + EXPECT_CALL(empty_host, hostname()).WillRepeatedly(testing::ReturnRef(empty_hostname)); + + absl::optional selection = + lb_->selectExistingConnection(&lb_context_, empty_host, hash_key); + + ASSERT_FALSE(selection.has_value()); +} + +TEST_F(ClusterTest, LoadBalancer_SelectPoolNoSsl) { + initialize(coalesce_connection_config_, false); + + const std::string hostname = "mail.example.org"; + Upstream::MockHost host; + EXPECT_CALL(host, hostname()).WillRepeatedly(testing::ReturnRef(hostname)); + Network::Address::InstanceConstSharedPtr address = + Network::Utility::resolveUrl("tcp://10.0.0.4:50000"); + EXPECT_CALL(host, address()).WillRepeatedly(testing::Return(address)); + std::vector hash_key = {1, 2, 3}; + + Envoy::Http::ConnectionPool::MockInstance pool; + Envoy::Network::MockConnection connection; + OptRef lifetime_callbacks = + lb_->lifetimeCallbacks(); + ASSERT_TRUE(lifetime_callbacks.has_value()); + EXPECT_CALL(connection, connectionInfoProvider()).Times(testing::AnyNumber()); + EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return("h2")); + auto ssl_info = nullptr; + EXPECT_CALL(connection, ssl()).WillRepeatedly(Return(ssl_info)); + lifetime_callbacks->onConnectionOpen(pool, hash_key, connection); + + absl::optional selection = + lb_->selectExistingConnection(&lb_context_, host, hash_key); + + ASSERT_FALSE(selection.has_value()); +} + class ClusterFactoryTest : public testing::Test { protected: void createCluster(const std::string& yaml_config) { diff --git a/test/extensions/clusters/redis/BUILD b/test/extensions/clusters/redis/BUILD index bf155a412e8f..5481a6ac3a6e 100644 --- a/test/extensions/clusters/redis/BUILD +++ b/test/extensions/clusters/redis/BUILD @@ -93,10 +93,14 @@ envoy_extension_cc_test( size = "small", srcs = ["redis_cluster_integration_test.cc"], extension_names = ["envoy.clusters.redis"], + # This test takes a while to run, especially under tsan. + # Shard it to avoid test timeout.
+ shard_count = 2, deps = [ "//source/extensions/clusters/redis:redis_cluster", "//source/extensions/clusters/redis:redis_cluster_lb", "//source/extensions/filters/network/redis_proxy:config", + "//test/integration:ads_integration_lib", "//test/integration:integration_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", ], diff --git a/test/extensions/clusters/redis/redis_cluster_integration_test.cc b/test/extensions/clusters/redis/redis_cluster_integration_test.cc index 346d47b0c344..7a0128d09f00 100644 --- a/test/extensions/clusters/redis/redis_cluster_integration_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_integration_test.cc @@ -6,6 +6,7 @@ #include "source/common/common/macros.h" #include "source/extensions/filters/network/redis_proxy/command_splitter_impl.h" +#include "test/integration/ads_integration.h" #include "test/integration/integration.h" using testing::Return; @@ -624,5 +625,53 @@ TEST_P(RedisClusterWithRefreshIntegrationTest, ClusterSlotRequestAfterFailure) { EXPECT_TRUE(fake_upstream_connection_2->close()); redis_client->close(); } + +// Reuse the code in AdsIntegrationTest but have a new test name so +// INSTANTIATE_TEST_SUITE_P works. +using RedisAdsIntegrationTest = AdsIntegrationTest; + +// Validates that removing a redis cluster does not crash Envoy. +// Regression test for issue https://github.com/envoyproxy/envoy/issues/7990. +TEST_P(RedisAdsIntegrationTest, RedisClusterRemoval) { + initialize(); + + // Send initial configuration with a redis cluster and a redis proxy listener. + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, true)); + sendDiscoveryResponse( + Config::TypeUrl::get().Cluster, {buildRedisCluster("redis_cluster")}, + {buildRedisCluster("redis_cluster")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", + {"redis_cluster"}, {"redis_cluster"}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment("redis_cluster")}, + {buildClusterLoadAssignment("redis_cluster")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, {}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().Listener, {buildRedisListener("listener_0", "redis_cluster")}, + {buildRedisListener("listener_0", "redis_cluster")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", + {"redis_cluster"}, {}, {})); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "1", {}, {}, {})); + + // Validate that redis listener is successfully created. + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); + + // Now send a CDS update, removing redis cluster added above. + sendDiscoveryResponse( + Config::TypeUrl::get().Cluster, {buildCluster("cluster_2")}, {buildCluster("cluster_2")}, + {"redis_cluster"}, "2"); + + // Validate that the cluster is removed successfully. 
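// Illustrative sketch, not part of the change above: in State-of-the-World CDS this
// test removes "redis_cluster" simply by omitting it from the version "2" response, so
// removals are derived by diffing resource names against the previously accepted state.
// Hypothetical helper; Envoy's real ClusterManager update path is more involved
// (warming, EDS cleanup, stats).
#include <set>
#include <string>
#include <vector>

inline std::vector<std::string> removedClusters(const std::set<std::string>& previous,
                                                const std::set<std::string>& current) {
  std::vector<std::string> removed;
  for (const std::string& name : previous) {
    if (current.find(name) == current.end()) {
      removed.push_back(name); // e.g. {"redis_cluster"} once only "cluster_2" is sent
    }
  }
  return removed;
}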
+ test_server_->waitForCounterGe("cluster_manager.cluster_removed", 1); +} + +INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDeltaWildcard, RedisAdsIntegrationTest, + ADS_INTEGRATION_PARAMS); + } // namespace } // namespace Envoy diff --git a/test/extensions/common/dynamic_forward_proxy/BUILD b/test/extensions/common/dynamic_forward_proxy/BUILD index d9101e6f94a5..1e1f39afe27f 100644 --- a/test/extensions/common/dynamic_forward_proxy/BUILD +++ b/test/extensions/common/dynamic_forward_proxy/BUILD @@ -12,13 +12,19 @@ envoy_package() envoy_cc_test( name = "dns_cache_impl_test", srcs = ["dns_cache_impl_test.cc"], + args = [ + # Force creation of c-ares DnsResolverImpl when running test on macOS. + "--runtime-feature-disable-for-tests=envoy.restart_features.use_apple_api_for_dns_lookups", + ], deps = [ ":mocks", "//source/common/config:utility_lib", "//source/extensions/common/dynamic_forward_proxy:dns_cache_impl", "//source/extensions/common/dynamic_forward_proxy:dns_cache_manager_impl", + "//source/extensions/network/dns_resolver/cares:config", "//test/mocks/network:network_mocks", "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:registry_lib", "//test/test_common:simulated_time_system_lib", diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc index d29bb933d01c..c53b7a8342ce 100644 --- a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc +++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc @@ -7,6 +7,7 @@ #include "source/common/network/resolver_impl.h" #include "source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h" #include "source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h" +#include "source/server/factory_context_base_impl.h" #include "test/extensions/common/dynamic_forward_proxy/mocks.h" #include "test/mocks/filesystem/mocks.h" @@ -34,6 +35,7 @@ namespace { class DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedTime { public: + DnsCacheImplTest() : registered_dns_factory_(dns_resolver_factory_) {} void initialize(std::vector preresolve_hostnames = {}, uint32_t max_hosts = 1024) { config_.set_name("foo"); config_.set_dns_lookup_family(envoy::config::cluster::v3::Cluster::V4_ONLY); @@ -48,7 +50,7 @@ class DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedT EXPECT_CALL(context_.dispatcher_, isThreadSafe).WillRepeatedly(Return(true)); - EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)).WillOnce(Return(resolver_)); + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)).WillOnce(Return(resolver_)); dns_cache_ = std::make_unique(context_, config_); update_callbacks_handle_ = dns_cache_->addUpdateCallbacks(update_callbacks_); } @@ -80,6 +82,10 @@ class DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedT std::unique_ptr dns_cache_; MockUpdateCallbacks update_callbacks_; DnsCache::AddUpdateCallbacksHandlePtr update_callbacks_handle_; + NiceMock dns_resolver_factory_; + Registry::InjectFactory registered_dns_factory_; + std::chrono::milliseconds configured_ttl_ = std::chrono::milliseconds(60000); + std::chrono::milliseconds dns_ttl_ = std::chrono::milliseconds(6000); }; MATCHER_P3(DnsHostInfoEquals, address, resolved_host, is_ip_address, "") { @@ -104,8 +110,15 @@ MATCHER_P3(DnsHostInfoEquals, address, resolved_host, is_ip_address, "") { 
MATCHER(DnsHostInfoAddressIsNull, "") { return arg->address() == nullptr; } -MATCHER_P(CustomDnsResolversSizeEquals, expected_resolvers, "") { - return expected_resolvers.size() == arg.size(); +void verifyCaresDnsConfigAndUnpack( + const envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config, + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig& cares) { + // Verify typed DNS resolver config is c-ares. + EXPECT_EQ(typed_dns_resolver_config.name(), std::string(Network::CaresDnsResolver)); + EXPECT_EQ( + typed_dns_resolver_config.typed_config().type_url(), + "type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig"); + typed_dns_resolver_config.typed_config().UnpackTo(&cares); } TEST_F(DnsCacheImplTest, PreresolveSuccess) { @@ -162,7 +175,7 @@ TEST_F(DnsCacheImplTest, ResolveSuccess) { onDnsHostAddOrUpdate("foo.com", DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(dns_ttl_), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({"10.0.0.1"})); @@ -180,7 +193,7 @@ TEST_F(DnsCacheImplTest, ResolveSuccess) { // Address does not change. EXPECT_CALL(*timeout_timer, disableTimer()); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(dns_ttl_), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({"10.0.0.1"})); @@ -200,7 +213,7 @@ TEST_F(DnsCacheImplTest, ResolveSuccess) { EXPECT_CALL(*timeout_timer, disableTimer()); EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate("foo.com", DnsHostInfoEquals("10.0.0.2:80", "foo.com", false))); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(dns_ttl_), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({"10.0.0.2"})); @@ -243,7 +256,7 @@ TEST_F(DnsCacheImplTest, ForceRefresh) { onDnsHostAddOrUpdate("foo.com", DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(6000), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({"10.0.0.1"})); @@ -279,7 +292,7 @@ TEST_F(DnsCacheImplTest, Ipv4Address) { onDnsHostAddOrUpdate("127.0.0.1", DnsHostInfoEquals("127.0.0.1:80", "127.0.0.1", true))); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoEquals("127.0.0.1:80", "127.0.0.1", true))); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(dns_ttl_), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({"127.0.0.1"})); } @@ -307,7 +320,7 @@ TEST_F(DnsCacheImplTest, Ipv4AddressWithPort) { DnsHostInfoEquals("127.0.0.1:10000", "127.0.0.1", true))); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoEquals("127.0.0.1:10000", "127.0.0.1", true))); - EXPECT_CALL(*resolve_timer, 
enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(dns_ttl_), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({"127.0.0.1"})); } @@ -333,7 +346,7 @@ TEST_F(DnsCacheImplTest, Ipv6Address) { EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate("[::1]", DnsHostInfoEquals("[::1]:80", "::1", true))); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoEquals("[::1]:80", "::1", true))); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(dns_ttl_), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({"::1"})); } @@ -359,7 +372,7 @@ TEST_F(DnsCacheImplTest, Ipv6AddressWithPort) { EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate("[::1]:10000", DnsHostInfoEquals("[::1]:10000", "::1", true))); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoEquals("[::1]:10000", "::1", true))); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(dns_ttl_), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({"::1"})); } @@ -389,15 +402,15 @@ TEST_F(DnsCacheImplTest, TTL) { onDnsHostAddOrUpdate("foo.com", DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(6000), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, - TestUtility::makeDnsResponse({"10.0.0.1"}, std::chrono::seconds(0))); + TestUtility::makeDnsResponse({"10.0.0.1"})); checkStats(1 /* attempt */, 1 /* success */, 0 /* failure */, 1 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); - // Re-resolve with ~60s passed. TTL should still be OK at default of 5 minutes. - simTime().advanceTimeWait(std::chrono::milliseconds(60001)); + // Re-resolve with ~6s passed. The resolved entry TTL is 6s. + simTime().advanceTimeWait(std::chrono::milliseconds(6001)); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -406,15 +419,15 @@ TEST_F(DnsCacheImplTest, TTL) { 1 /* added */, 0 /* removed */, 1 /* num hosts */); EXPECT_CALL(*timeout_timer, disableTimer()); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(6000), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({"10.0.0.1"})); checkStats(2 /* attempt */, 2 /* success */, 0 /* failure */, 1 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); - // Re-resolve with ~5m passed. This is not realistic as we would have re-resolved many times + // Re-resolve with ~1m passed. This is not realistic as we would have re-resolved many times // during this period but it's good enough for the test. 
- simTime().advanceTimeWait(std::chrono::milliseconds(300000)); + simTime().advanceTimeWait(std::chrono::seconds(60000)); EXPECT_CALL(update_callbacks_, onDnsHostRemove("foo.com")); resolve_timer->invokeCallback(); checkStats(2 /* attempt */, 2 /* success */, 0 /* failure */, 1 /* address changed */, @@ -459,9 +472,9 @@ TEST_F(DnsCacheImplTest, TTLWithCustomParameters) { onDnsHostAddOrUpdate("foo.com", DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(30000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(dns_ttl_), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, - TestUtility::makeDnsResponse({"10.0.0.1"}, std::chrono::seconds(0))); + TestUtility::makeDnsResponse({"10.0.0.1"})); // Re-resolve with ~30s passed. TTL should still be OK at 60s. simTime().advanceTimeWait(std::chrono::milliseconds(30001)); @@ -470,7 +483,7 @@ TEST_F(DnsCacheImplTest, TTLWithCustomParameters) { .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); resolve_timer->invokeCallback(); EXPECT_CALL(*timeout_timer, disableTimer()); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(30000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(dns_ttl_), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({"10.0.0.1"})); @@ -509,7 +522,7 @@ TEST_F(DnsCacheImplTest, InlineResolve) { onDnsHostAddOrUpdate("localhost", DnsHostInfoEquals("127.0.0.1:80", "localhost", false))); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoEquals("127.0.0.1:80", "localhost", false))); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(dns_ttl_), _)); post_cb(); } @@ -536,7 +549,9 @@ TEST_F(DnsCacheImplTest, ResolveTimeout) { EXPECT_CALL(*timeout_timer, disableTimer()); EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate(_, _)).Times(0); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoAddressIsNull())); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + // The resolve timeout will be the default TTL as there was no specific TTL + // overriding. + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(configured_ttl_), _)); timeout_timer->invokeCallback(); checkStats(1 /* attempt */, 0 /* success */, 1 /* failure */, 0 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); @@ -566,7 +581,7 @@ TEST_F(DnsCacheImplTest, ResolveFailure) { EXPECT_CALL(*timeout_timer, disableTimer()); EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate(_, _)).Times(0); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoAddressIsNull())); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(configured_ttl_), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Failure, TestUtility::makeDnsResponse({})); checkStats(1 /* attempt */, 0 /* success */, 1 /* failure */, 0 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); @@ -580,7 +595,7 @@ TEST_F(DnsCacheImplTest, ResolveFailure) { // Re-resolve with ~5m passed. This is not realistic as we would have re-resolved many times // during this period but it's good enough for the test. 
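// Illustrative sketch, not part of the change above: what the enableTimer expectations
// in these tests encode after this change. A successful resolution schedules the next
// resolve using the TTL carried in the DNS response (dns_ttl_, 6s in this fixture),
// while a failure, timeout, or empty result falls back to the configured refresh
// interval (configured_ttl_, 60s here). Hypothetical helper, not DnsCacheImpl's code.
#include <chrono>

inline std::chrono::milliseconds
nextResolveDelay(bool resolved_with_addresses, std::chrono::milliseconds dns_response_ttl,
                 std::chrono::milliseconds configured_refresh_interval) {
  return (resolved_with_addresses && dns_response_ttl.count() > 0) ? dns_response_ttl
                                                                   : configured_refresh_interval;
}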
- simTime().advanceTimeWait(std::chrono::milliseconds(300001)); + simTime().advanceTimeWait(std::chrono::milliseconds(600001)); // Because resolution failed for the host, onDnsHostAddOrUpdate was not called. // Therefore, onDnsHostRemove should not be called either. EXPECT_CALL(update_callbacks_, onDnsHostRemove(_)).Times(0); @@ -630,7 +645,7 @@ TEST_F(DnsCacheImplTest, ResolveFailureWithFailureRefreshRate) { // Re-resolve with ~5m passed. This is not realistic as we would have re-resolved many times // during this period but it's good enough for the test. - simTime().advanceTimeWait(std::chrono::milliseconds(300001)); + simTime().advanceTimeWait(std::chrono::milliseconds(600001)); // Because resolution failed for the host, onDnsHostAddOrUpdate was not called. // Therefore, onDnsHostRemove should not be called either. EXPECT_CALL(update_callbacks_, onDnsHostRemove(_)).Times(0); @@ -662,7 +677,7 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithEmptyResult) { EXPECT_CALL(*timeout_timer, disableTimer()); EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate(_, _)).Times(0); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoAddressIsNull())); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(configured_ttl_), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({})); checkStats(1 /* attempt */, 1 /* success */, 0 /* failure */, 0 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); @@ -676,7 +691,7 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithEmptyResult) { // Re-resolve with ~5m passed. This is not realistic as we would have re-resolved many times // during this period but it's good enough for the test. - simTime().advanceTimeWait(std::chrono::milliseconds(300001)); + simTime().advanceTimeWait(std::chrono::milliseconds(600001)); // Because resolution failed for the host, onDnsHostAddOrUpdate was not called. // Therefore, onDnsHostRemove should not be called either. EXPECT_CALL(update_callbacks_, onDnsHostRemove(_)).Times(0); @@ -893,12 +908,14 @@ TEST_F(DnsCacheImplTest, DnsCacheCircuitBreakersOverflow) { TEST_F(DnsCacheImplTest, UseTcpForDnsLookupsOptionSetDeprecatedField) { initialize(); config_.set_use_tcp_for_dns_lookups(true); - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)) - .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(resolver_))); DnsCacheImpl dns_cache_(context_, config_); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); // `true` here means dns_resolver_options.use_tcp_for_dns_lookups is set to true. 
- EXPECT_EQ(true, dns_resolver_options.use_tcp_for_dns_lookups()); + EXPECT_EQ(true, cares.dns_resolver_options().use_tcp_for_dns_lookups()); } TEST_F(DnsCacheImplTest, UseTcpForDnsLookupsOptionSet) { @@ -906,12 +923,14 @@ TEST_F(DnsCacheImplTest, UseTcpForDnsLookupsOptionSet) { config_.mutable_dns_resolution_config() ->mutable_dns_resolver_options() ->set_use_tcp_for_dns_lookups(true); - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)) - .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(resolver_))); DnsCacheImpl dns_cache_(context_, config_); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); // `true` here means dns_resolver_options.use_tcp_for_dns_lookups is set to true. - EXPECT_EQ(true, dns_resolver_options.use_tcp_for_dns_lookups()); + EXPECT_EQ(true, cares.dns_resolver_options().use_tcp_for_dns_lookups()); } TEST_F(DnsCacheImplTest, NoDefaultSearchDomainOptionSet) { @@ -919,32 +938,38 @@ TEST_F(DnsCacheImplTest, NoDefaultSearchDomainOptionSet) { config_.mutable_dns_resolution_config() ->mutable_dns_resolver_options() ->set_no_default_search_domain(true); - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)) - .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(resolver_))); DnsCacheImpl dns_cache_(context_, config_); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); // `true` here means dns_resolver_options.no_default_search_domain is set to true. - EXPECT_EQ(true, dns_resolver_options.no_default_search_domain()); + EXPECT_EQ(true, cares.dns_resolver_options().no_default_search_domain()); } TEST_F(DnsCacheImplTest, UseTcpForDnsLookupsOptionUnSet) { initialize(); - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)) - .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(resolver_))); DnsCacheImpl dns_cache_(context_, config_); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); // `false` here means dns_resolver_options.use_tcp_for_dns_lookups is set to false. 
- EXPECT_EQ(false, dns_resolver_options.use_tcp_for_dns_lookups()); + EXPECT_EQ(false, cares.dns_resolver_options().use_tcp_for_dns_lookups()); } TEST_F(DnsCacheImplTest, NoDefaultSearchDomainOptionUnSet) { initialize(); - envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)) - .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + EXPECT_CALL(dns_resolver_factory_, createDnsResolver(_, _, _)) + .WillOnce(DoAll(SaveArg<2>(&typed_dns_resolver_config), Return(resolver_))); DnsCacheImpl dns_cache_(context_, config_); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + verifyCaresDnsConfigAndUnpack(typed_dns_resolver_config, cares); // `false` here means dns_resolver_options.no_default_search_domain is set to false. - EXPECT_EQ(false, dns_resolver_options.no_default_search_domain()); + EXPECT_EQ(false, cares.dns_resolver_options().no_default_search_domain()); } // DNS cache manager config tests. @@ -996,27 +1021,117 @@ TEST(DnsCacheConfigOptionsTest, EmtpyDnsResolutionConfig) { NiceMock context; envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; std::shared_ptr resolver{std::make_shared()}; - - std::vector expected_empty_dns_resolvers; - EXPECT_CALL(context.dispatcher_, createDnsResolver(expected_empty_dns_resolvers, _)) + envoy::config::core::v3::TypedExtensionConfig empty_typed_dns_resolver_config; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + empty_typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + empty_typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); + NiceMock dns_resolver_factory; + Registry::InjectFactory registered_dns_factory(dns_resolver_factory); + EXPECT_CALL(dns_resolver_factory, + createDnsResolver(_, _, ProtoEq(empty_typed_dns_resolver_config))) .WillOnce(Return(resolver)); DnsCacheImpl dns_cache_(context, config); } +// Test dns_resolution_config is in place, use it. TEST(DnsCacheConfigOptionsTest, NonEmptyDnsResolutionConfig) { NiceMock context; envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; std::shared_ptr resolver{std::make_shared()}; + envoy::config::core::v3::Address resolvers; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.4", 80), + resolvers); + config.mutable_dns_resolution_config()->add_resolvers()->MergeFrom(resolvers); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + cares.add_resolvers()->MergeFrom(resolvers); + typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); + + NiceMock dns_resolver_factory; + Registry::InjectFactory registered_dns_factory(dns_resolver_factory); + EXPECT_CALL(dns_resolver_factory, createDnsResolver(_, _, ProtoEq(typed_dns_resolver_config))) + .WillOnce(Return(resolver)); + DnsCacheImpl dns_cache_(context, config); +} + +// Test dns_resolution_config is in place, use it and overriding use_tcp_for_dns_lookups. 
+TEST(DnsCacheConfigOptionsTest, NonEmptyDnsResolutionConfigOverridingUseTcp) { + NiceMock context; + std::shared_ptr resolver{std::make_shared()}; + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; + + // setup use_tcp + config.set_use_tcp_for_dns_lookups(false); + + // setup dns_resolution_config + envoy::config::core::v3::Address resolvers; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.4", 8080), + resolvers); + config.mutable_dns_resolution_config()->add_resolvers()->MergeFrom(resolvers); + config.mutable_dns_resolution_config() + ->mutable_dns_resolver_options() + ->set_use_tcp_for_dns_lookups(true); + config.mutable_dns_resolution_config() + ->mutable_dns_resolver_options() + ->set_no_default_search_domain(true); - envoy::config::core::v3::Address* dns_resolvers = - config.mutable_dns_resolution_config()->add_resolvers(); - dns_resolvers->mutable_socket_address()->set_address("1.2.3.4"); - dns_resolvers->mutable_socket_address()->set_port_value(8080); + // setup expected typed config parameter + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + cares.add_resolvers()->MergeFrom(resolvers); + cares.mutable_dns_resolver_options()->set_use_tcp_for_dns_lookups(true); + cares.mutable_dns_resolver_options()->set_no_default_search_domain(true); + typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); + + NiceMock dns_resolver_factory; + Registry::InjectFactory registered_dns_factory(dns_resolver_factory); + EXPECT_CALL(dns_resolver_factory, createDnsResolver(_, _, ProtoEq(typed_dns_resolver_config))) + .WillOnce(Return(resolver)); + DnsCacheImpl dns_cache_(context, config); +} - std::vector expected_dns_resolvers; - expected_dns_resolvers.push_back(Network::Address::resolveProtoAddress(*dns_resolvers)); - EXPECT_CALL(context.dispatcher_, - createDnsResolver(CustomDnsResolversSizeEquals(expected_dns_resolvers), _)) +// Test the case that the typed_dns_resolver_config is specified, and it overrides all +// other configuration, like config.dns_resolution_config, and config.use_tcp_for_dns_lookups. 
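// Illustrative sketch, not part of the change above: the precedence these three
// DnsCacheConfigOptionsTest cases pin down, written with plain stand-in structs rather
// than the real protobuf messages and factory plumbing. Selection order:
// typed_dns_resolver_config wins over dns_resolution_config, which wins over the
// deprecated use_tcp_for_dns_lookups boolean.
#include <optional>
#include <string>

struct StandInResolverConfig {
  std::string name; // e.g. the c-ares resolver extension used in these tests
  bool use_tcp = false;
  bool no_default_search_domain = false;
};

struct StandInDnsCacheConfig {
  std::optional<StandInResolverConfig> typed_dns_resolver_config; // highest precedence
  std::optional<StandInResolverConfig> dns_resolution_config;     // next
  bool use_tcp_for_dns_lookups = false;                           // deprecated fallback
};

inline StandInResolverConfig selectResolverConfig(const StandInDnsCacheConfig& config) {
  if (config.typed_dns_resolver_config) {
    return *config.typed_dns_resolver_config;
  }
  if (config.dns_resolution_config) {
    return *config.dns_resolution_config;
  }
  StandInResolverConfig fallback;
  fallback.name = "envoy.network.dns_resolver.cares"; // default resolver in these tests
  fallback.use_tcp = config.use_tcp_for_dns_lookups;
  return fallback;
}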
+TEST(DnsCacheConfigOptionsTest, NonEmptyTypedDnsResolverConfig) { + NiceMock context; + std::shared_ptr resolver{std::make_shared()}; + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; + + // setup dns_resolution_config + envoy::config::core::v3::Address resolvers; + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("1.2.3.4", 8080), + resolvers); + config.mutable_dns_resolution_config()->add_resolvers()->MergeFrom(resolvers); + config.mutable_dns_resolution_config() + ->mutable_dns_resolver_options() + ->set_use_tcp_for_dns_lookups(false); + config.mutable_dns_resolution_config() + ->mutable_dns_resolver_options() + ->set_no_default_search_domain(false); + + // setup use_tcp_for_dns_lookups + config.set_use_tcp_for_dns_lookups(false); + + // setup typed_dns_resolver_config + Network::Utility::addressToProtobufAddress(Network::Address::Ipv4Instance("5.6.7.8", 9090), + resolvers); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + cares.add_resolvers()->MergeFrom(resolvers); + cares.mutable_dns_resolver_options()->set_use_tcp_for_dns_lookups(true); + cares.mutable_dns_resolver_options()->set_no_default_search_domain(true); + config.mutable_typed_dns_resolver_config()->mutable_typed_config()->PackFrom(cares); + config.mutable_typed_dns_resolver_config()->set_name(std::string(Network::CaresDnsResolver)); + + // setup the expected function call parameter. + envoy::config::core::v3::TypedExtensionConfig expected_typed_dns_resolver_config; + expected_typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + expected_typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); + NiceMock dns_resolver_factory; + Registry::InjectFactory registered_dns_factory(dns_resolver_factory); + EXPECT_CALL(dns_resolver_factory, + createDnsResolver(_, _, ProtoEq(expected_typed_dns_resolver_config))) .WillOnce(Return(resolver)); DnsCacheImpl dns_cache_(context, config); } @@ -1066,6 +1181,10 @@ TEST(UtilityTest, PrepareDnsRefreshStrategy) { } TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.allow_multiple_dns_addresses", "true"}}); + auto* time_source = new NiceMock(); context_.dispatcher_.time_system_.reset(time_source); @@ -1076,11 +1195,13 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { envoy::extensions::key_value::file_based::v3::FileBasedKeyValueStoreConfig>(); })); MockKeyValueStore* store{}; - EXPECT_CALL(factory, createStore(_, _, _, _)).WillOnce(Invoke([&store]() { + EXPECT_CALL(factory, createStore(_, _, _, _)).WillOnce(Invoke([this, &store]() { auto ret = std::make_unique>(); store = ret.get(); // Make sure there's an attempt to load from the key value store. - EXPECT_CALL(*store, iterate); + EXPECT_CALL(*store, iterate(_)); + // Make sure the result is sent to the worker threads. + EXPECT_CALL(context_.thread_local_, runOnAllThreads(_)).Times(2); return ret; })); @@ -1108,14 +1229,14 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { EXPECT_CALL(*timeout_timer, disableTimer()); // Make sure the store gets the first insert. 
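// Illustrative sketch, not part of the change above: the key-value store format the
// addOrUpdate() expectations here and the CacheLoad test below imply, inferred as
// "address:port|ttl_seconds|resolution_time", one entry per line when multiple
// addresses are cached. A minimal parser with hypothetical names; the real
// serialization code in DnsCacheImpl may differ in details.
#include <cstdint>
#include <optional>
#include <sstream>
#include <string>
#include <vector>

struct CachedAddress {
  std::string address; // e.g. "10.0.0.1:80"
  uint64_t ttl_seconds = 0;
  uint64_t resolution_time = 0;
};

inline std::optional<std::vector<CachedAddress>> parseCacheValue(const std::string& value) {
  std::vector<CachedAddress> out;
  std::istringstream lines(value);
  std::string line;
  while (std::getline(lines, line)) {
    const size_t first = line.find('|');
    const size_t second = line.find('|', first + 1);
    if (first == std::string::npos || second == std::string::npos ||
        line.rfind(':', first) == std::string::npos) {
      // Mirrors the "Unable to parse cache line" warning exercised by CacheLoad below
      // for an entry without a port.
      return std::nullopt;
    }
    CachedAddress entry;
    entry.address = line.substr(0, first);
    // Minimal sketch: assumes the numeric fields are well formed.
    entry.ttl_seconds = std::stoull(line.substr(first + 1, second - first - 1));
    entry.resolution_time = std::stoull(line.substr(second + 1));
    out.push_back(entry);
  }
  return out;
}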
+ EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.1:80|6|0")); EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate("foo.com", DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); - EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.1:80|30|0")); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(6000), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, - TestUtility::makeDnsResponse({"10.0.0.1"}, std::chrono::seconds(30))); + TestUtility::makeDnsResponse({"10.0.0.1"})); checkStats(1 /* attempt */, 1 /* success */, 0 /* failure */, 1 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); @@ -1131,10 +1252,10 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { // Address does not change. EXPECT_CALL(*timeout_timer, disableTimer()); - EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.1:80|30|0")); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.1:80|6|0")); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(dns_ttl_), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, - TestUtility::makeDnsResponse({"10.0.0.1"}, std::chrono::seconds(30))); + TestUtility::makeDnsResponse({"10.0.0.1"})); checkStats(2 /* attempt */, 2 /* success */, 0 /* failure */, 1 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); @@ -1149,20 +1270,19 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { 1 /* added */, 0 /* removed */, 1 /* num hosts */); EXPECT_CALL(*timeout_timer, disableTimer()); - // Make sure the store gets the updated address. + // Make sure the store gets the updated addresses + EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.2:80|6|0\n10.0.0.1:80|6|0")); EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate("foo.com", DnsHostInfoEquals("10.0.0.2:80", "foo.com", false))); - EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.2:80|30|0")); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(dns_ttl_), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, - TestUtility::makeDnsResponse({"10.0.0.2"}, std::chrono::seconds(30))); + TestUtility::makeDnsResponse({"10.0.0.2", "10.0.0.1"})); checkStats(3 /* attempt */, 3 /* success */, 0 /* failure */, 2 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); // Now do one more resolve, where the address does not change but the time // does. - // Re-resolve timer. EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) @@ -1171,10 +1291,82 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { // Address does not change. 
EXPECT_CALL(*timeout_timer, disableTimer()); - EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.2:80|40|0")); - EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.2:80|40|0\n10.0.0.1:80|40|0")); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(40000), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, - TestUtility::makeDnsResponse({"10.0.0.2"}, std::chrono::seconds(40))); + TestUtility::makeDnsResponse({"10.0.0.2", "10.0.0.1"}, std::chrono::seconds(40))); +} + +TEST_F(DnsCacheImplTest, CacheLoad) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.allow_multiple_dns_addresses", "true"}}); + + auto* time_source = new NiceMock(); + context_.dispatcher_.time_system_.reset(time_source); + + // Configure the cache. + MockKeyValueStoreFactory factory; + EXPECT_CALL(factory, createEmptyConfigProto()).WillRepeatedly(Invoke([]() { + return std::make_unique< + envoy::extensions::key_value::file_based::v3::FileBasedKeyValueStoreConfig>(); + })); + MockKeyValueStore* store{}; + EXPECT_CALL(factory, createStore(_, _, _, _)).WillOnce(Invoke([&store]() { + auto ret = std::make_unique>(); + store = ret.get(); + // Make sure there's an attempt to load from the key value store. + EXPECT_CALL(*store, iterate).WillOnce(Invoke([&](KeyValueStore::ConstIterateCb fn) { + fn("foo.com", "10.0.0.2:80|40|0"); + fn("bar.com", "1.1.1.1:1|20|1\n2.2.2.2:2|30|2"); + // No port. + EXPECT_LOG_CONTAINS("warning", "Unable to parse cache line '1.1.1.1|20|1'", + fn("eep.com", "1.1.1.1|20|1")); + // Won't be loaded because of prior error. + fn("eep.com", "1.1.1.1|20|1:1"); + })); + + return ret; + })); + Registry::InjectFactory injector(factory); + config_.mutable_key_value_config()->mutable_config()->set_name("mock_key_value_store_factory"); + + initialize(); + ASSERT(store != nullptr); + EXPECT_EQ(2, TestUtility::findCounter(context_.scope_, "dns_cache.foo.cache_load")->value()); + + { + MockLoadDnsCacheEntryCallbacks callbacks; + auto result = dns_cache_->loadDnsCacheEntry("foo.com", 80, callbacks); + EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::InCache, result.status_); + EXPECT_EQ(result.handle_, nullptr); + EXPECT_NE(absl::nullopt, result.host_info_); + EXPECT_EQ(1, result.host_info_.value()->addressList().size()); + } + + { + MockLoadDnsCacheEntryCallbacks callbacks; + auto result = dns_cache_->loadDnsCacheEntry("bar.com", 80, callbacks); + EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::InCache, result.status_); + EXPECT_EQ(result.handle_, nullptr); + ASSERT_NE(absl::nullopt, result.host_info_); + EXPECT_EQ(2, result.host_info_.value()->addressList().size()); + } +} + +// Make sure the cache manager can handle the context going out of scope. 
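// Illustrative sketch, not part of the change above: the ownership pattern the
// TestLifetime case below pins down. If the manager only kept a reference to the scoped
// context it was built from, it would dangle once that context is destroyed; retaining
// its own copies (or shared ownership) of what it needs at construction keeps it
// usable. Generic illustration with hypothetical types, not the actual Envoy classes.
#include <memory>
#include <string>

struct ScopedContext {
  std::shared_ptr<std::string> store = std::make_shared<std::string>("state");
};

class Manager {
public:
  // Takes shared ownership instead of holding `const ScopedContext&`, so the manager
  // stays valid after the context goes out of scope.
  explicit Manager(const ScopedContext& context) : store_(context.store) {}
  const std::string& state() const { return *store_; }

private:
  std::shared_ptr<std::string> store_;
};

inline std::unique_ptr<Manager> makeManagerFromTemporaryContext() {
  ScopedContext context;
  return std::make_unique<Manager>(context); // safe: ownership was shared, not referenced
}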
+TEST(DnsCacheManagerImplTest, TestLifetime) { + NiceMock context; + std::unique_ptr cache_manager; + + { + Server::FactoryContextBaseImpl scoped_context(context); + cache_manager = std::make_unique(scoped_context); + } + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config1; + config1.set_name("foo"); + + EXPECT_TRUE(cache_manager->getCache(config1) != nullptr); } } // namespace diff --git a/test/extensions/common/dynamic_forward_proxy/mocks.cc b/test/extensions/common/dynamic_forward_proxy/mocks.cc index 6b99e00bf460..148e6d456754 100644 --- a/test/extensions/common/dynamic_forward_proxy/mocks.cc +++ b/test/extensions/common/dynamic_forward_proxy/mocks.cc @@ -30,6 +30,7 @@ MockDnsCacheManager::~MockDnsCacheManager() = default; MockDnsHostInfo::MockDnsHostInfo() { ON_CALL(*this, address()).WillByDefault(ReturnPointee(&address_)); + ON_CALL(*this, addressList()).WillByDefault(ReturnPointee(&address_list_)); ON_CALL(*this, resolvedHost()).WillByDefault(ReturnRef(resolved_host_)); } MockDnsHostInfo::~MockDnsHostInfo() = default; diff --git a/test/extensions/common/dynamic_forward_proxy/mocks.h b/test/extensions/common/dynamic_forward_proxy/mocks.h index 3765190ab29f..16182bb11d8f 100644 --- a/test/extensions/common/dynamic_forward_proxy/mocks.h +++ b/test/extensions/common/dynamic_forward_proxy/mocks.h @@ -88,11 +88,13 @@ class MockDnsHostInfo : public DnsHostInfo { ~MockDnsHostInfo() override; MOCK_METHOD(Network::Address::InstanceConstSharedPtr, address, (), (const)); + MOCK_METHOD(std::vector, addressList, (), (const)); MOCK_METHOD(const std::string&, resolvedHost, (), (const)); MOCK_METHOD(bool, isIpAddress, (), (const)); MOCK_METHOD(void, touch, ()); Network::Address::InstanceConstSharedPtr address_; + std::vector address_list_; std::string resolved_host_; }; diff --git a/test/extensions/common/wasm/BUILD b/test/extensions/common/wasm/BUILD index cca41d4cf939..a3f50da9f85e 100644 --- a/test/extensions/common/wasm/BUILD +++ b/test/extensions/common/wasm/BUILD @@ -33,6 +33,10 @@ envoy_cc_test( envoy_cc_test( name = "wasm_test", srcs = ["wasm_test.cc"], + args = [ + # Force creation of c-ares DnsResolverImpl when running test on macOS. 
+ "--runtime-feature-disable-for-tests=envoy.restart_features.use_apple_api_for_dns_lookups", + ], data = envoy_select_wasm_cpp_tests([ "//test/extensions/common/wasm/test_data:bad_signature_cpp.wasm", "//test/extensions/common/wasm/test_data:test_context_cpp.wasm", @@ -47,12 +51,14 @@ envoy_cc_test( "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", "//source/extensions/common/wasm:wasm_lib", + "//source/extensions/network/dns_resolver/cares:config", "//test/extensions/common/wasm:wasm_runtime", "//test/extensions/common/wasm/test_data:test_context_cpp_plugin", "//test/extensions/common/wasm/test_data:test_cpp_plugin", "//test/extensions/common/wasm/test_data:test_restriction_cpp_plugin", "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", + "//test/test_common:registry_lib", "//test/test_common:simulated_time_system_lib", "//test/test_common:wasm_lib", ], diff --git a/test/extensions/common/wasm/wasm_speed_test.cc b/test/extensions/common/wasm/wasm_speed_test.cc index d9e65579547d..f2e9371267e0 100644 --- a/test/extensions/common/wasm/wasm_speed_test.cc +++ b/test/extensions/common/wasm/wasm_speed_test.cc @@ -34,7 +34,7 @@ void bmWasmSpeedTest(benchmark::State& state) { envoy::extensions::wasm::v3::PluginConfig plugin_config; *plugin_config.mutable_vm_config()->mutable_runtime() = "envoy.wasm.runtime.null"; auto config = Envoy::Extensions::Common::Wasm::WasmConfig(plugin_config); - auto wasm = std::make_unique(config, "", scope, + auto wasm = std::make_unique(config, "", scope, *api, cluster_manager, *dispatcher); auto context = std::make_shared(wasm.get()); diff --git a/test/extensions/common/wasm/wasm_test.cc b/test/extensions/common/wasm/wasm_test.cc index 2e947bc33e81..8eb9655e1f43 100644 --- a/test/extensions/common/wasm/wasm_test.cc +++ b/test/extensions/common/wasm/wasm_test.cc @@ -10,6 +10,7 @@ #include "test/mocks/stats/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/environment.h" +#include "test/test_common/registry.h" #include "test/test_common/utility.h" #include "test/test_common/wasm_base.h" @@ -102,7 +103,7 @@ TEST_P(WasmCommonTest, WasmFailState) { plugin_config, envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); auto wasm = std::make_shared( - std::make_unique(plugin->wasmConfig(), "", scope, cluster_manager, *dispatcher)); + std::make_unique(plugin->wasmConfig(), "", scope, *api, cluster_manager, *dispatcher)); auto wasm_base = std::dynamic_pointer_cast(wasm); wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::UnableToCreateVm); EXPECT_EQ(toWasmEvent(wasm_base), WasmEvent::UnableToCreateVm); @@ -187,7 +188,7 @@ TEST_P(WasmCommonTest, Logging) { auto vm_key = proxy_wasm::makeVmKey("", vm_configuration, code); auto wasm = std::make_shared(plugin->wasmConfig(), vm_key, scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); EXPECT_NE(wasm, nullptr); EXPECT_NE(wasm->buildVersion(), ""); EXPECT_NE(std::unique_ptr(wasm->createContext(plugin)), nullptr); @@ -262,7 +263,7 @@ TEST_P(WasmCommonTest, BadSignature) { plugin_config, envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); auto vm_key = proxy_wasm::makeVmKey("", "", code); auto wasm = std::make_unique(plugin->wasmConfig(), vm_key, scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); EXPECT_TRUE(wasm->load(code, false)); EXPECT_FALSE(wasm->initialize()); @@ -298,7 +299,7 @@ TEST_P(WasmCommonTest, Segv) { plugin_config, 
envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); auto vm_key = proxy_wasm::makeVmKey("", vm_configuration, code); auto wasm = std::make_unique(plugin->wasmConfig(), vm_key, scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); EXPECT_TRUE(wasm->load(code, false)); EXPECT_TRUE(wasm->initialize()); TestContext* root_context = nullptr; @@ -348,7 +349,7 @@ TEST_P(WasmCommonTest, DivByZero) { plugin_config, envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); auto vm_key = proxy_wasm::makeVmKey("", vm_configuration, code); auto wasm = std::make_unique(plugin->wasmConfig(), vm_key, scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); EXPECT_NE(wasm, nullptr); auto context = std::make_unique(wasm.get()); EXPECT_TRUE(wasm->load(code, false)); @@ -393,7 +394,7 @@ TEST_P(WasmCommonTest, IntrinsicGlobals) { plugin_config, envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); auto vm_key = proxy_wasm::makeVmKey("", vm_configuration, code); auto wasm = std::make_unique(plugin->wasmConfig(), vm_key, scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); EXPECT_NE(wasm, nullptr); EXPECT_TRUE(wasm->load(code, false)); @@ -439,7 +440,7 @@ TEST_P(WasmCommonTest, Utilities) { plugin_config, envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); auto vm_key = proxy_wasm::makeVmKey("", vm_configuration, code); auto wasm = std::make_unique(plugin->wasmConfig(), vm_key, scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); EXPECT_NE(wasm, nullptr); EXPECT_TRUE(wasm->load(code, false)); @@ -512,7 +513,7 @@ TEST_P(WasmCommonTest, Stats) { plugin_config, envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); auto vm_key = proxy_wasm::makeVmKey("", vm_configuration, code); auto wasm = std::make_unique(plugin->wasmConfig(), vm_key, scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); EXPECT_NE(wasm, nullptr); EXPECT_TRUE(wasm->load(code, false)); @@ -549,7 +550,7 @@ TEST_P(WasmCommonTest, Foreign) { auto plugin = std::make_shared( plugin_config, envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); auto wasm = std::make_unique(plugin->wasmConfig(), "", scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); EXPECT_NE(wasm, nullptr); std::string code; if (GetParam() != "null") { @@ -569,13 +570,8 @@ TEST_P(WasmCommonTest, Foreign) { wasm->setCreateContextForTesting( nullptr, [](Wasm* wasm, const std::shared_ptr& plugin) -> ContextBase* { auto root_context = new TestContext(wasm, plugin); -#ifdef ZLIBNG_VERSION - EXPECT_CALL(*root_context, log_(spdlog::level::trace, Eq("compress 2000 -> 22"))); - EXPECT_CALL(*root_context, log_(spdlog::level::debug, Eq("uncompress 22 -> 2000"))); -#else EXPECT_CALL(*root_context, log_(spdlog::level::trace, Eq("compress 2000 -> 23"))); EXPECT_CALL(*root_context, log_(spdlog::level::debug, Eq("uncompress 23 -> 2000"))); -#endif return root_context; }); wasm->start(plugin); @@ -599,7 +595,7 @@ TEST_P(WasmCommonTest, OnForeign) { plugin_config, envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); proxy_wasm::AllowedCapabilitiesMap allowed_capabilities; auto wasm = std::make_unique(plugin->wasmConfig(), "", scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); EXPECT_NE(wasm, nullptr); std::string code; if (GetParam() != "null") { @@ -650,7 +646,7 @@ 
TEST_P(WasmCommonTest, WASI) { auto plugin = std::make_shared( plugin_config, envoy::config::core::v3::TrafficDirection::UNSPECIFIED, local_info, nullptr); auto wasm = std::make_unique(plugin->wasmConfig(), "", scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); EXPECT_NE(wasm, nullptr); std::string code; @@ -1061,7 +1057,7 @@ TEST_P(WasmCommonTest, RestrictCapabilities) { {"foo", proxy_wasm::SanitizationConfig()}}; plugin->wasmConfig().allowedCapabilities() = allowed_capabilities; auto wasm = std::make_unique(plugin->wasmConfig(), vm_key, scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); EXPECT_FALSE(wasm->capabilityAllowed("proxy_on_vm_start")); EXPECT_FALSE(wasm->capabilityAllowed("proxy_log")); @@ -1119,7 +1115,7 @@ TEST_P(WasmCommonTest, AllowOnVmStart) { {"proxy_on_vm_start", proxy_wasm::SanitizationConfig()}}; plugin->wasmConfig().allowedCapabilities() = allowed_capabilities; auto wasm = std::make_unique(plugin->wasmConfig(), vm_key, scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); EXPECT_TRUE(wasm->capabilityAllowed("proxy_on_vm_start")); EXPECT_FALSE(wasm->capabilityAllowed("proxy_log")); @@ -1182,7 +1178,7 @@ TEST_P(WasmCommonTest, AllowLog) { {"proxy_log", proxy_wasm::SanitizationConfig()}}; plugin->wasmConfig().allowedCapabilities() = allowed_capabilities; auto wasm = std::make_unique(plugin->wasmConfig(), vm_key, scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); // Restrict capabilities, but allow proxy_log EXPECT_TRUE(wasm->capabilityAllowed("proxy_on_vm_start")); @@ -1241,7 +1237,7 @@ TEST_P(WasmCommonTest, AllowWASI) { {"fd_write", proxy_wasm::SanitizationConfig()}}; plugin->wasmConfig().allowedCapabilities() = allowed_capabilities; auto wasm = std::make_unique(plugin->wasmConfig(), vm_key, scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); // Restrict capabilities, but allow fd_write EXPECT_TRUE(wasm->capabilityAllowed("proxy_on_vm_start")); @@ -1301,7 +1297,7 @@ TEST_P(WasmCommonTest, AllowOnContextCreate) { {"proxy_log", proxy_wasm::SanitizationConfig()}}; plugin->wasmConfig().allowedCapabilities() = allowed_capabilities; auto wasm = std::make_unique(plugin->wasmConfig(), vm_key, scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); // Restrict capabilities, but allow proxy_log EXPECT_TRUE(wasm->capabilityAllowed("proxy_on_vm_start")); @@ -1361,7 +1357,7 @@ TEST_P(WasmCommonTest, ThreadLocalCopyRetainsEnforcement) { {"fd_write", proxy_wasm::SanitizationConfig()}}; plugin->wasmConfig().allowedCapabilities() = allowed_capabilities; auto wasm = std::make_unique(plugin->wasmConfig(), vm_key, scope, - cluster_manager, *dispatcher); + *api, cluster_manager, *dispatcher); // Restrict capabilities EXPECT_TRUE(wasm->capabilityAllowed("proxy_on_vm_start")); @@ -1442,7 +1438,10 @@ TEST_P(WasmCommonContextTest, OnDnsResolve) { EXPECT_FALSE(code.empty()); std::shared_ptr dns_resolver(new Network::MockDnsResolver()); - EXPECT_CALL(dispatcher_, createDnsResolver(_, _)).WillRepeatedly(Return(dns_resolver)); + NiceMock dns_resolver_factory; + Registry::InjectFactory registered_dns_factory(dns_resolver_factory); + EXPECT_CALL(dns_resolver_factory, createDnsResolver(_, _, _)) + .WillRepeatedly(Return(dns_resolver)); Network::DnsResolver::ResolveCb dns_callback; Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) diff --git 
a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc index ca45b70ca26b..57a9fd690675 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc @@ -315,6 +315,46 @@ TEST_F(ExtAuthzGrpcClientTest, AuthorizationOkWithDynamicMetadata) { client_->onSuccess(std::move(check_response), span_); } +// Test the client when an OK response is received with additional query string parameters. +TEST_F(ExtAuthzGrpcClientTest, AuthorizationOkWithQueryParameters) { + initialize(); + + auto check_response = std::make_unique(); + auto status = check_response->mutable_status(); + + status->set_code(Grpc::Status::WellKnownGrpcStatus::Ok); + + const Http::Utility::QueryParamsVector query_parameters_to_set{{"add-me", "yes"}}; + for (const auto& [key, value] : query_parameters_to_set) { + auto* query_parameter = check_response->mutable_ok_response()->add_query_parameters_to_set(); + query_parameter->set_key(key); + query_parameter->set_value(value); + } + + const std::vector query_parameters_to_remove{"remove-me"}; + for (const auto& key : query_parameters_to_remove) { + check_response->mutable_ok_response()->add_query_parameters_to_remove(key); + } + + // This is the expected authz response. + auto authz_response = Response{}; + authz_response.status = CheckStatus::OK; + authz_response.query_parameters_to_set = {{"add-me", "yes"}}; + authz_response.query_parameters_to_remove = {"remove-me"}; + + envoy::service::auth::v3::CheckRequest request; + expectCallSend(request); + client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); + + Http::TestRequestHeaderMapImpl headers; + client_->onCreateInitialMetadata(headers); + + EXPECT_CALL(span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); + EXPECT_CALL(request_callbacks_, + onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); + client_->onSuccess(std::move(check_response), span_); +} + } // namespace ExtAuthz } // namespace Common } // namespace Filters diff --git a/test/extensions/filters/common/ext_authz/test_common.cc b/test/extensions/filters/common/ext_authz/test_common.cc index 66be0140ea7c..e7c943660aab 100644 --- a/test/extensions/filters/common/ext_authz/test_common.cc +++ b/test/extensions/filters/common/ext_authz/test_common.cc @@ -14,6 +14,30 @@ namespace Filters { namespace Common { namespace ExtAuthz { +// NOLINTNEXTLINE(readability-identifier-naming) +void PrintTo(const ResponsePtr& ptr, std::ostream* os) { + if (ptr != nullptr) { + PrintTo(*ptr, os); + } else { + (*os) << "null"; + } +} + +// NOLINTNEXTLINE(readability-identifier-naming) +void PrintTo(const Response& response, std::ostream* os) { + (*os) << "\n{\n check_status: " << int(response.status) + << "\n headers_to_append: " << response.headers_to_append + << "\n headers_to_set: " << response.headers_to_set + << "\n headers_to_add: " << response.headers_to_add + << "\n response_headers_to_add: " << response.response_headers_to_add + << "\n response_headers_to_set: " << response.response_headers_to_set + << "\n headers_to_remove: " << response.headers_to_remove + << "\n query_parameters_to_set: " << response.query_parameters_to_set + << "\n query_parameters_to_remove: " << response.query_parameters_to_remove + << "\n body: " << response.body << "\n status_code: " << int(response.status_code) + << "\n dynamic_metadata: " << 
response.dynamic_metadata.DebugString() << "\n}\n"; +} + CheckResponsePtr TestCommon::makeCheckResponse(Grpc::Status::GrpcStatus response_status, envoy::type::v3::StatusCode http_status_code, const std::string& body, @@ -130,6 +154,19 @@ bool TestCommon::compareVectorOfHeaderName(const std::vector(rhs.begin(), rhs.end()); } +bool TestCommon::compareVectorOfUnorderedStrings(const std::vector& lhs, + const std::vector& rhs) { + return std::set(lhs.begin(), lhs.end()) == + std::set(rhs.begin(), rhs.end()); +} + +// TODO(esmet): This belongs in a QueryParams class +bool TestCommon::compareQueryParamsVector(const Http::Utility::QueryParamsVector& lhs, + const Http::Utility::QueryParamsVector& rhs) { + return std::set>(lhs.begin(), lhs.end()) == + std::set>(rhs.begin(), rhs.end()); +} + } // namespace ExtAuthz } // namespace Common } // namespace Filters diff --git a/test/extensions/filters/common/ext_authz/test_common.h b/test/extensions/filters/common/ext_authz/test_common.h index 5d1e72222713..5f88c3eb857e 100644 --- a/test/extensions/filters/common/ext_authz/test_common.h +++ b/test/extensions/filters/common/ext_authz/test_common.h @@ -16,6 +16,12 @@ namespace Filters { namespace Common { namespace ExtAuthz { +// NOLINTNEXTLINE(readability-identifier-naming) +void PrintTo(const ResponsePtr& ptr, std::ostream* os); + +// NOLINTNEXTLINE(readability-identifier-naming) +void PrintTo(const Response& response, std::ostream* os); + struct KeyValueOption { std::string key; std::string value; @@ -46,8 +52,12 @@ class TestCommon { static HeaderValueOptionVector makeHeaderValueOption(KeyValueOptionVector&& headers); static bool compareHeaderVector(const Http::HeaderVector& lhs, const Http::HeaderVector& rhs); + static bool compareQueryParamsVector(const Http::Utility::QueryParamsVector& lhs, + const Http::Utility::QueryParamsVector& rhs); static bool compareVectorOfHeaderName(const std::vector& lhs, const std::vector& rhs); + static bool compareVectorOfUnorderedStrings(const std::vector& lhs, + const std::vector& rhs); }; MATCHER_P(AuthzErrorResponse, status, "") { @@ -95,23 +105,44 @@ MATCHER_P(AuthzOkResponse, response, "") { if (arg->status != response.status) { return false; } - // Compare headers_to_append. + if (!TestCommon::compareHeaderVector(response.headers_to_append, arg->headers_to_append)) { return false; } - // Compare headers_to_add. if (!TestCommon::compareHeaderVector(response.headers_to_add, arg->headers_to_add)) { return false; } - // Compare response_headers_to_add. 
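The TestCommon::compareQueryParamsVector and compareVectorOfUnorderedStrings helpers added above make the matcher order-insensitive by round-tripping both sides through std::set. A minimal, dependency-free sketch of that idea, using plain std::pair/std::string stand-ins for the Envoy typedefs; note that std::set also collapses duplicates, so repeated entries compare equal to a single one:

#include <cassert>
#include <set>
#include <string>
#include <utility>
#include <vector>

// Order-insensitive equality for (key, value) pairs, e.g. query parameters to set.
bool sameQueryParams(const std::vector<std::pair<std::string, std::string>>& lhs,
                     const std::vector<std::pair<std::string, std::string>>& rhs) {
  return std::set<std::pair<std::string, std::string>>(lhs.begin(), lhs.end()) ==
         std::set<std::pair<std::string, std::string>>(rhs.begin(), rhs.end());
}

// Order-insensitive equality for plain strings, e.g. names of query parameters to remove.
bool sameStrings(const std::vector<std::string>& lhs, const std::vector<std::string>& rhs) {
  return std::set<std::string>(lhs.begin(), lhs.end()) ==
         std::set<std::string>(rhs.begin(), rhs.end());
}

int main() {
  assert(sameQueryParams({{"add-me", "yes"}, {"x", "1"}}, {{"x", "1"}, {"add-me", "yes"}}));
  assert(!sameQueryParams({{"add-me", "yes"}}, {{"add-me", "no"}}));
  assert(sameStrings({"remove-me", "other"}, {"other", "remove-me"}));
  return 0;
}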
if (!TestCommon::compareHeaderVector(response.response_headers_to_add, arg->response_headers_to_add)) { return false; } - return TestCommon::compareVectorOfHeaderName(response.headers_to_remove, arg->headers_to_remove); + if (!TestCommon::compareHeaderVector(response.response_headers_to_set, + arg->response_headers_to_set)) { + return false; + } + + if (!TestCommon::compareQueryParamsVector(response.query_parameters_to_set, + arg->query_parameters_to_set)) { + return false; + } + + if (!TestCommon::compareVectorOfUnorderedStrings(response.query_parameters_to_remove, + arg->query_parameters_to_remove)) { + return false; + } + + if (!TestCommon::compareVectorOfHeaderName(response.headers_to_remove, arg->headers_to_remove)) { + return false; + } + + if (!TestUtility::protoEqual(arg->dynamic_metadata, response.dynamic_metadata)) { + return false; + } + + return true; } MATCHER_P(ContainsPairAsHeader, pair, "") { diff --git a/test/extensions/filters/http/admission_control/BUILD b/test/extensions/filters/http/admission_control/BUILD index c8ddc35c8a90..e6dbc2a9b2b0 100644 --- a/test/extensions/filters/http/admission_control/BUILD +++ b/test/extensions/filters/http/admission_control/BUILD @@ -25,7 +25,7 @@ envoy_extension_cc_test( "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3:pkg_cc_proto", ], ) @@ -43,7 +43,7 @@ envoy_extension_cc_test( "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3:pkg_cc_proto", ], ) @@ -53,7 +53,7 @@ envoy_extension_cc_test( extension_names = ["envoy.filters.http.admission_control"], deps = [ "//source/extensions/filters/http/admission_control:admission_control_filter_lib", - "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3:pkg_cc_proto", ], ) @@ -77,6 +77,6 @@ envoy_extension_cc_test( "//source/extensions/filters/http/admission_control:admission_control_filter_lib", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc index eaea6c0c5cbd..62b99ed67450 100644 --- a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc +++ b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc @@ -1,7 +1,7 @@ #include -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.validate.h" #include "envoy/grpc/status.h" #include "source/common/common/enum_to_int.h" diff --git 
a/test/extensions/filters/http/admission_control/admission_control_integration_test.cc b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc index e59dba7611f6..319fb8ffc935 100644 --- a/test/extensions/filters/http/admission_control/admission_control_integration_test.cc +++ b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc @@ -15,7 +15,7 @@ const std::string ADMISSION_CONTROL_CONFIG = R"EOF( name: envoy.filters.http.admission_control typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl + "@type": type.googleapis.com/envoy.extensions.filters.http.admission_control.v3.AdmissionControl success_criteria: http_criteria: grpc_criteria: diff --git a/test/extensions/filters/http/admission_control/config_test.cc b/test/extensions/filters/http/admission_control/config_test.cc index 80e1d9ce81ae..6bbeb0e4192f 100644 --- a/test/extensions/filters/http/admission_control/config_test.cc +++ b/test/extensions/filters/http/admission_control/config_test.cc @@ -1,7 +1,7 @@ #include -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.validate.h" #include "source/common/stats/isolated_store_impl.h" #include "source/extensions/filters/http/admission_control/admission_control.h" diff --git a/test/extensions/filters/http/admission_control/controller_test.cc b/test/extensions/filters/http/admission_control/controller_test.cc index 2b7428251903..5457f9fc65aa 100644 --- a/test/extensions/filters/http/admission_control/controller_test.cc +++ b/test/extensions/filters/http/admission_control/controller_test.cc @@ -1,7 +1,7 @@ #include -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.validate.h" #include "source/extensions/filters/http/admission_control/thread_local_controller.h" diff --git a/test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc b/test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc index 7e3725daed47..99c421710d7d 100644 --- a/test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc +++ b/test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc @@ -1,7 +1,7 @@ #include -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" -#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3/admission_control.pb.validate.h" #include "source/common/common/enum_to_int.h" #include "source/extensions/filters/http/admission_control/admission_control.h" diff --git a/test/extensions/filters/http/alternate_protocols_cache/BUILD b/test/extensions/filters/http/alternate_protocols_cache/BUILD index b04a9403c840..e30f2549c64f 100644 --- 
a/test/extensions/filters/http/alternate_protocols_cache/BUILD +++ b/test/extensions/filters/http/alternate_protocols_cache/BUILD @@ -33,11 +33,14 @@ envoy_extension_cc_test( extension_names = ["envoy.filters.http.alternate_protocols_cache"], deps = [ "//source/extensions/filters/http/alternate_protocols_cache:config", + "//source/extensions/key_value/file_based:config_lib", "//test/integration:http_integration_lib", "//test/integration:http_protocol_integration_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/config/common/key_value/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/key_value/file_based/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc b/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc index 0bfc84ac19fa..b09210bc3779 100644 --- a/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc +++ b/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc @@ -1,6 +1,8 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/common/key_value/v3/config.pb.validate.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "envoy/extensions/key_value/file_based/v3/config.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" #include "source/extensions/transport_sockets/tls/context_config_impl.h" @@ -18,18 +20,39 @@ namespace { class FilterIntegrationTest : public HttpProtocolIntegrationTest { protected: void initialize() override { - const std::string filter = R"EOF( + const std::string filename = TestEnvironment::temporaryPath("alt_svc_cache.txt"); + envoy::config::core::v3::AlternateProtocolsCacheOptions alt_cache; + alt_cache.set_name("default_alternate_protocols_cache"); + envoy::extensions::key_value::file_based::v3::FileBasedKeyValueStoreConfig config; + config.set_filename(filename); + envoy::config::common::key_value::v3::KeyValueStoreConfig kv_config; + kv_config.mutable_config()->set_name("envoy.key_value.file_based"); + kv_config.mutable_config()->mutable_typed_config()->PackFrom(config); + alt_cache.mutable_key_value_store_config()->set_name("envoy.common.key_value"); + alt_cache.mutable_key_value_store_config()->mutable_typed_config()->PackFrom(kv_config); + + const std::string filter = fmt::format(R"EOF( name: alternate_protocols_cache typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.alternate_protocols_cache.v3.FilterConfig alternate_protocols_cache_options: name: default_alternate_protocols_cache -)EOF"; + key_value_store_config: + name: "envoy.common.key_value" + typed_config: + "@type": type.googleapis.com/envoy.config.common.key_value.v3.KeyValueStoreConfig + config: + name: envoy.key_value.file_based + typed_config: + "@type": type.googleapis.com/envoy.extensions.key_value.file_based.v3.FileBasedKeyValueStoreConfig + filename: {} + +)EOF", + filename); config_helper_.prependFilter(filter); upstream_tls_ = true; - config_helper_.configureUpstreamTls(/*use_alpn=*/true, /*http3=*/true, - /*use_alternate_protocols_cache=*/true); + config_helper_.configureUpstreamTls(/*use_alpn=*/true, /*http3=*/true, alt_cache); 
HttpProtocolIntegrationTest::initialize(); } @@ -81,6 +104,77 @@ INSTANTIATE_TEST_SUITE_P(Protocols, FilterIntegrationTest, {Http::CodecType::HTTP2}, {Http::CodecType::HTTP3})), HttpProtocolIntegrationTest::protocolTestParamsToString); +class MixedUpstreamIntegrationTest : public FilterIntegrationTest { +protected: + void writeFile() { + const std::string filename = TestEnvironment::temporaryPath("alt_svc_cache.txt"); + // There's no hostname here because we're not doing dynamic forward proxying so we infer the + // hostname from the config (which does not set it) + uint32_t port = fake_upstreams_[0]->localAddress()->ip()->port(); + std::string key = absl::StrCat("https://:", port); + + size_t seconds = std::chrono::duration_cast( + timeSystem().monotonicTime().time_since_epoch()) + .count(); + std::string value = absl::StrCat("h3=\":", port, "\"; ma=", 86400 + seconds); + TestEnvironment::writeStringToFileForTest( + "alt_svc_cache.txt", absl::StrCat(key.length(), "\n", key, value.length(), "\n", value)); + } + + void createUpstreams() override { + ASSERT_EQ(upstreamProtocol(), Http::CodecType::HTTP3); + ASSERT_EQ(fake_upstreams_count_, 1); + ASSERT_FALSE(autonomous_upstream_); + + if (use_http2_) { + auto config = configWithType(Http::CodecType::HTTP2); + Network::TransportSocketFactoryPtr factory = createUpstreamTlsContext(config); + addFakeUpstream(std::move(factory), Http::CodecType::HTTP2); + } else { + auto config = configWithType(Http::CodecType::HTTP3); + Network::TransportSocketFactoryPtr factory = createUpstreamTlsContext(config); + addFakeUpstream(std::move(factory), Http::CodecType::HTTP3); + writeFile(); + } + } + + bool use_http2_{false}; +}; + +TEST_P(MixedUpstreamIntegrationTest, BasicRequestAutoWithHttp3) { + testRouterRequestAndResponseWithBody(0, 0, false); +} + +TEST_P(MixedUpstreamIntegrationTest, SimultaneousRequestsAutoWithHttp3) { + simultaneousRequest(1024, 512, 1023, 513); +} + +TEST_P(MixedUpstreamIntegrationTest, SimultaneousLargeRequestsAutoWithHttp3) { + config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream. + simultaneousRequest(1024 * 20, 1024 * 14 + 2, 1024 * 10 + 5, 1024 * 16); +} + +TEST_P(MixedUpstreamIntegrationTest, BasicRequestAutoWithHttp2) { + use_http2_ = true; + testRouterRequestAndResponseWithBody(0, 0, false); +} + +TEST_P(MixedUpstreamIntegrationTest, SimultaneousRequestsAutoWithHttp2) { + use_http2_ = true; + simultaneousRequest(1024, 512, 1023, 513); +} + +TEST_P(MixedUpstreamIntegrationTest, SimultaneousLargeRequestsAutoWithHttp2) { + use_http2_ = true; + config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream. 
+ simultaneousRequest(1024 * 20, 1024 * 14 + 2, 1024 * 10 + 5, 1024 * 16); +} + +INSTANTIATE_TEST_SUITE_P(Protocols, MixedUpstreamIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecType::HTTP2}, {Http::CodecType::HTTP3})), + HttpProtocolIntegrationTest::protocolTestParamsToString); + #endif } // namespace diff --git a/test/extensions/filters/http/alternate_protocols_cache/filter_test.cc b/test/extensions/filters/http/alternate_protocols_cache/filter_test.cc index 951978299620..51e0c8516150 100644 --- a/test/extensions/filters/http/alternate_protocols_cache/filter_test.cc +++ b/test/extensions/filters/http/alternate_protocols_cache/filter_test.cc @@ -32,15 +32,16 @@ class FilterTest : public testing::Test, public Event::TestUsingSimulatedTime { envoy::extensions::filters::http::alternate_protocols_cache::v3::FilterConfig proto_config; if (populate_config) { proto_config.mutable_alternate_protocols_cache_options()->set_name("foo"); - EXPECT_CALL(*alternate_protocols_cache_manager_, getCache(_)) + EXPECT_CALL(*alternate_protocols_cache_manager_, getCache(_, _)) .WillOnce(Return(alternate_protocols_cache_)); } filter_config_ = std::make_shared( proto_config, alternate_protocols_cache_manager_factory_, simTime()); - filter_ = std::make_unique(filter_config_); + filter_ = std::make_unique(filter_config_, dispatcher_); filter_->setEncoderFilterCallbacks(callbacks_); } + Event::MockDispatcher dispatcher_; Http::MockAlternateProtocolsCacheManagerFactory alternate_protocols_cache_manager_factory_; std::shared_ptr alternate_protocols_cache_manager_; std::shared_ptr alternate_protocols_cache_; diff --git a/test/extensions/filters/http/bandwidth_limit/BUILD b/test/extensions/filters/http/bandwidth_limit/BUILD index b4e4ac3a87c9..c41fe5ca0fe9 100644 --- a/test/extensions/filters/http/bandwidth_limit/BUILD +++ b/test/extensions/filters/http/bandwidth_limit/BUILD @@ -23,7 +23,7 @@ envoy_extension_cc_test( "//source/common/runtime:runtime_lib", "//source/extensions/filters/http/bandwidth_limit:bandwidth_limit_lib", "//test/mocks/server:server_mocks", - "@envoy_api//envoy/extensions/filters/http/bandwidth_limit/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/bandwidth_limit/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/bandwidth_limit/config_test.cc b/test/extensions/filters/http/bandwidth_limit/config_test.cc index b98ea7ca4d67..853a3ca8896a 100644 --- a/test/extensions/filters/http/bandwidth_limit/config_test.cc +++ b/test/extensions/filters/http/bandwidth_limit/config_test.cc @@ -11,8 +11,7 @@ namespace Extensions { namespace HttpFilters { namespace BandwidthLimitFilter { -using EnableMode = - envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit_EnableMode; +using EnableMode = envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit_EnableMode; TEST(Factory, GlobalEmptyConfig) { const std::string yaml = R"( diff --git a/test/extensions/filters/http/bandwidth_limit/filter_test.cc b/test/extensions/filters/http/bandwidth_limit/filter_test.cc index daffe9076ea3..5d052318111c 100644 --- a/test/extensions/filters/http/bandwidth_limit/filter_test.cc +++ b/test/extensions/filters/http/bandwidth_limit/filter_test.cc @@ -1,4 +1,4 @@ -#include "envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.pb.h" +#include "envoy/extensions/filters/http/bandwidth_limit/v3/bandwidth_limit.pb.h" #include "source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h" @@ -22,7 +22,7 
@@ class FilterTest : public testing::Test { FilterTest() = default; void setup(const std::string& yaml) { - envoy::extensions::filters::http::bandwidth_limit::v3alpha::BandwidthLimit config; + envoy::extensions::filters::http::bandwidth_limit::v3::BandwidthLimit config; TestUtility::loadFromYaml(yaml, config); config_ = std::make_shared(config, stats_, runtime_, time_system_, true); filter_ = std::make_shared(config_); diff --git a/test/extensions/filters/http/cache/BUILD b/test/extensions/filters/http/cache/BUILD index 725e66c71fb8..c8409772591a 100644 --- a/test/extensions/filters/http/cache/BUILD +++ b/test/extensions/filters/http/cache/BUILD @@ -80,7 +80,7 @@ envoy_extension_cc_test( "//test/mocks/http:http_mocks", "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/cache/simple_http_cache/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/cache/simple_http_cache/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/cache/cache_filter_integration_test.cc b/test/extensions/filters/http/cache/cache_filter_integration_test.cc index 5f09027b7390..5b813b8a368b 100644 --- a/test/extensions/filters/http/cache/cache_filter_integration_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_integration_test.cc @@ -86,9 +86,9 @@ class CacheIntegrationTest : public Event::TestUsingSimulatedTime, const std::string default_config{R"EOF( name: "envoy.filters.http.cache" typed_config: - "@type": "type.googleapis.com/envoy.extensions.filters.http.cache.v3alpha.CacheConfig" + "@type": "type.googleapis.com/envoy.extensions.filters.http.cache.v3.CacheConfig" typed_config: - "@type": "type.googleapis.com/envoy.extensions.cache.simple_http_cache.v3alpha.SimpleHttpCacheConfig" + "@type": "type.googleapis.com/envoy.extensions.cache.simple_http_cache.v3.SimpleHttpCacheConfig" )EOF"}; DateFormatter formatter_{"%a, %d %b %Y %H:%M:%S GMT"}; }; diff --git a/test/extensions/filters/http/cache/cache_filter_test.cc b/test/extensions/filters/http/cache/cache_filter_test.cc index 3fecae678b98..16c2e17641a1 100644 --- a/test/extensions/filters/http/cache/cache_filter_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_test.cc @@ -118,7 +118,7 @@ class CacheFilterTest : public ::testing::Test { void waitBeforeSecondRequest() { time_source_.advanceTimeWait(delay_); } SimpleHttpCache simple_cache_; - envoy::extensions::filters::http::cache::v3alpha::CacheConfig config_; + envoy::extensions::filters::http::cache::v3::CacheConfig config_; NiceMock context_; Event::SimulatedTimeSystem time_source_; DateFormatter formatter_{"%a, %d %b %Y %H:%M:%S GMT"}; diff --git a/test/extensions/filters/http/cache/cache_headers_utils_test.cc b/test/extensions/filters/http/cache/cache_headers_utils_test.cc index fe1fa7098bf6..35a9a7dd60d2 100644 --- a/test/extensions/filters/http/cache/cache_headers_utils_test.cc +++ b/test/extensions/filters/http/cache/cache_headers_utils_test.cc @@ -802,9 +802,9 @@ TEST(CreateVaryIdentifier, DisallowedHeaderWithAllowedHeader) { absl::nullopt); } -envoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() { +envoy::extensions::filters::http::cache::v3::CacheConfig getConfig() { // Allows {accept, accept-language, width} to be varied in the tests. 
- envoy::extensions::filters::http::cache::v3alpha::CacheConfig config; + envoy::extensions::filters::http::cache::v3::CacheConfig config; const auto& add_accept = config.mutable_allowed_vary_headers()->Add(); add_accept->set_exact("accept"); diff --git a/test/extensions/filters/http/cache/cacheability_utils_test.cc b/test/extensions/filters/http/cache/cacheability_utils_test.cc index 18ad3ebba33e..e5bbf9061b8d 100644 --- a/test/extensions/filters/http/cache/cacheability_utils_test.cc +++ b/test/extensions/filters/http/cache/cacheability_utils_test.cc @@ -25,9 +25,9 @@ class RequestConditionalHeadersTest : public testing::TestWithParam std::string conditionalHeader() const { return GetParam(); } }; -envoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() { +envoy::extensions::filters::http::cache::v3::CacheConfig getConfig() { // Allows 'accept' to be varied in the tests. - envoy::extensions::filters::http::cache::v3alpha::CacheConfig config; + envoy::extensions::filters::http::cache::v3::CacheConfig config; const auto& add_accept = config.mutable_allowed_vary_headers()->Add(); add_accept->set_exact("accept"); return config; diff --git a/test/extensions/filters/http/cache/config_test.cc b/test/extensions/filters/http/cache/config_test.cc index 0991f842a2f7..3583e95cb8f1 100644 --- a/test/extensions/filters/http/cache/config_test.cc +++ b/test/extensions/filters/http/cache/config_test.cc @@ -1,4 +1,4 @@ -#include "envoy/extensions/cache/simple_http_cache/v3alpha/config.pb.h" +#include "envoy/extensions/cache/simple_http_cache/v3/config.pb.h" #include "source/extensions/filters/http/cache/cache_filter.h" #include "source/extensions/filters/http/cache/config.h" @@ -16,7 +16,7 @@ namespace { class CacheFilterFactoryTest : public ::testing::Test { protected: - envoy::extensions::filters::http::cache::v3alpha::CacheConfig config_; + envoy::extensions::filters::http::cache::v3::CacheConfig config_; NiceMock context_; CacheFilterFactory factory_; Http::MockFilterChainFactoryCallbacks filter_callback_; @@ -24,7 +24,7 @@ class CacheFilterFactoryTest : public ::testing::Test { TEST_F(CacheFilterFactoryTest, Basic) { config_.mutable_typed_config()->PackFrom( - envoy::extensions::cache::simple_http_cache::v3alpha::SimpleHttpCacheConfig()); + envoy::extensions::cache::simple_http_cache::v3::SimpleHttpCacheConfig()); Http::FilterFactoryCb cb = factory_.createFilterFactoryFromProto(config_, "stats", context_); Http::StreamFilterSharedPtr filter; EXPECT_CALL(filter_callback_, addStreamFilter(_)).WillOnce(::testing::SaveArg<0>(&filter)); @@ -39,7 +39,7 @@ TEST_F(CacheFilterFactoryTest, NoTypedConfig) { TEST_F(CacheFilterFactoryTest, UnregisteredTypedConfig) { config_.mutable_typed_config()->PackFrom( - envoy::extensions::filters::http::cache::v3alpha::CacheConfig()); + envoy::extensions::filters::http::cache::v3::CacheConfig()); EXPECT_THROW(factory_.createFilterFactoryFromProto(config_, "stats", context_), EnvoyException); } diff --git a/test/extensions/filters/http/cache/http_cache_test.cc b/test/extensions/filters/http/cache/http_cache_test.cc index 5cf24b047d12..f9887661e1b6 100644 --- a/test/extensions/filters/http/cache/http_cache_test.cc +++ b/test/extensions/filters/http/cache/http_cache_test.cc @@ -30,9 +30,9 @@ struct LookupRequestTestCase { using Seconds = std::chrono::seconds; -envoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() { +envoy::extensions::filters::http::cache::v3::CacheConfig getConfig() { // Allows 'accept' to be varied in the tests. 
- envoy::extensions::filters::http::cache::v3alpha::CacheConfig config; + envoy::extensions::filters::http::cache::v3::CacheConfig config; const auto& add_accept = config.mutable_allowed_vary_headers()->Add(); add_accept->set_exact("accept"); return config; diff --git a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc index 7aad83b1d293..f2eb5ac181af 100644 --- a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc +++ b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc @@ -19,9 +19,9 @@ namespace { const std::string EpochDate = "Thu, 01 Jan 1970 00:00:00 GMT"; -envoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() { +envoy::extensions::filters::http::cache::v3::CacheConfig getConfig() { // Allows 'accept' to be varied in the tests. - envoy::extensions::filters::http::cache::v3alpha::CacheConfig config; + envoy::extensions::filters::http::cache::v3::CacheConfig config; const auto& add_accept = config.mutable_allowed_vary_headers()->Add(); add_accept->set_exact("accept"); return config; @@ -272,9 +272,9 @@ TEST_F(SimpleHttpCacheTest, StreamingPut) { TEST(Registration, GetFactory) { HttpCacheFactory* factory = Registry::FactoryRegistry::getFactoryByType( - "envoy.extensions.cache.simple_http_cache.v3alpha.SimpleHttpCacheConfig"); + "envoy.extensions.cache.simple_http_cache.v3.SimpleHttpCacheConfig"); ASSERT_NE(factory, nullptr); - envoy::extensions::filters::http::cache::v3alpha::CacheConfig config; + envoy::extensions::filters::http::cache::v3::CacheConfig config; config.mutable_typed_config()->PackFrom(*factory->createEmptyConfigProto()); EXPECT_EQ(factory->getCache(config).cacheInfo().name_, "envoy.extensions.http.cache.simple"); } diff --git a/test/extensions/filters/http/cdn_loop/BUILD b/test/extensions/filters/http/cdn_loop/BUILD index e08b4eb33172..747f31fcabb7 100644 --- a/test/extensions/filters/http/cdn_loop/BUILD +++ b/test/extensions/filters/http/cdn_loop/BUILD @@ -22,7 +22,7 @@ envoy_extension_cc_test( "//test/mocks/http:http_mocks", "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/cdn_loop/v3:pkg_cc_proto", ], ) @@ -34,7 +34,7 @@ envoy_extension_cc_test( "//source/extensions/filters/http/cdn_loop:config", "//test/integration:http_protocol_integration_lib", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/cdn_loop/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/cdn_loop/config_test.cc b/test/extensions/filters/http/cdn_loop/config_test.cc index ffcc776c9004..5e9b89d3c636 100644 --- a/test/extensions/filters/http/cdn_loop/config_test.cc +++ b/test/extensions/filters/http/cdn_loop/config_test.cc @@ -1,6 +1,6 @@ #include -#include "envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h" +#include "envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.pb.h" #include "source/extensions/filters/http/cdn_loop/config.h" #include "source/extensions/filters/http/cdn_loop/filter.h" @@ -23,7 +23,7 @@ TEST(CdnLoopFilterFactoryTest, ValidValuesWork) { Http::MockFilterChainFactoryCallbacks filter_callbacks; EXPECT_CALL(filter_callbacks, addStreamDecoderFilter(_)).WillOnce(::testing::SaveArg<0>(&filter)); - 
envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config; + envoy::extensions::filters::http::cdn_loop::v3::CdnLoopConfig config; config.set_cdn_id("cdn"); CdnLoopFilterFactory factory; @@ -36,7 +36,7 @@ TEST(CdnLoopFilterFactoryTest, ValidValuesWork) { TEST(CdnLoopFilterFactoryTest, BlankCdnIdThrows) { NiceMock context; - envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config; + envoy::extensions::filters::http::cdn_loop::v3::CdnLoopConfig config; CdnLoopFilterFactory factory; EXPECT_THAT_THROWS_MESSAGE(factory.createFilterFactoryFromProto(config, "stats", context), @@ -46,7 +46,7 @@ TEST(CdnLoopFilterFactoryTest, BlankCdnIdThrows) { TEST(CdnLoopFilterFactoryTest, InvalidCdnId) { NiceMock context; - envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config; + envoy::extensions::filters::http::cdn_loop::v3::CdnLoopConfig config; config.set_cdn_id("[not-token-or-ip"); CdnLoopFilterFactory factory; @@ -57,7 +57,7 @@ TEST(CdnLoopFilterFactoryTest, InvalidCdnId) { TEST(CdnLoopFilterFactoryTest, InvalidCdnIdNonHeaderWhitespace) { NiceMock context; - envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config; + envoy::extensions::filters::http::cdn_loop::v3::CdnLoopConfig config; config.set_cdn_id("\r\n"); CdnLoopFilterFactory factory; @@ -68,7 +68,7 @@ TEST(CdnLoopFilterFactoryTest, InvalidCdnIdNonHeaderWhitespace) { TEST(CdnLoopFilterFactoryTest, InvalidParsedCdnIdNotInput) { NiceMock context; - envoy::extensions::filters::http::cdn_loop::v3alpha::CdnLoopConfig config; + envoy::extensions::filters::http::cdn_loop::v3::CdnLoopConfig config; config.set_cdn_id("cdn,cdn"); CdnLoopFilterFactory factory; diff --git a/test/extensions/filters/http/cdn_loop/filter_integration_test.cc b/test/extensions/filters/http/cdn_loop/filter_integration_test.cc index 1403d84aa5ad..b6858b096157 100644 --- a/test/extensions/filters/http/cdn_loop/filter_integration_test.cc +++ b/test/extensions/filters/http/cdn_loop/filter_integration_test.cc @@ -1,6 +1,6 @@ #include -#include "envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.pb.h" +#include "envoy/extensions/filters/http/cdn_loop/v3/cdn_loop.pb.h" #include "test/integration/http_protocol_integration.h" #include "test/test_common/utility.h" @@ -16,14 +16,14 @@ namespace { const std::string MaxDefaultConfig = R"EOF( name: envoy.filters.http.cdn_loop typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.cdn_loop.v3alpha.CdnLoopConfig + "@type": type.googleapis.com/envoy.extensions.filters.http.cdn_loop.v3.CdnLoopConfig cdn_id: cdn )EOF"; const std::string MaxOf2Config = R"EOF( name: envoy.filters.http.cdn_loop typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.cdn_loop.v3alpha.CdnLoopConfig + "@type": type.googleapis.com/envoy.extensions.filters.http.cdn_loop.v3.CdnLoopConfig cdn_id: cdn max_allowed_occurrences: 2 )EOF"; diff --git a/test/extensions/filters/http/common/fuzz/BUILD b/test/extensions/filters/http/common/fuzz/BUILD index 04bc4579103f..339951337bc6 100644 --- a/test/extensions/filters/http/common/fuzz/BUILD +++ b/test/extensions/filters/http/common/fuzz/BUILD @@ -54,6 +54,7 @@ envoy_cc_test_library( "//test/mocks/http:http_mocks", "//test/mocks/server:factory_context_mocks", "//test/proto:bookstore_proto_cc_proto", + "//test/test_common:registry_lib", "//test/test_common:test_runtime_lib", "@envoy_api//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", 
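The v3alpha-to-v3 package renames above, and the fuzz corpus type_url edits that follow, are all driven by the same mechanism: the type URL inside a typed_config Any is derived from the message's full proto package name when it is packed. A short protobuf-only illustration, using google.protobuf.Duration purely as a stand-in message:

#include <cassert>
#include <iostream>

#include <google/protobuf/any.pb.h>
#include <google/protobuf/duration.pb.h>

int main() {
  google::protobuf::Duration duration;
  duration.set_seconds(5);

  google::protobuf::Any any;
  any.PackFrom(duration);

  // PackFrom writes "type.googleapis.com/" plus the message's full name, which is why
  // renaming a proto package (e.g. admission_control.v3alpha -> admission_control.v3)
  // changes every embedded type_url, including the ones in fuzz corpus entries.
  std::cout << any.type_url() << "\n"; // prints: type.googleapis.com/google.protobuf.Duration
  assert(any.Is<google::protobuf::Duration>());
  return 0;
}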
diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5681522444861440 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5681522444861440 index 60ffb84c5ac3..48300675a1c1 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5681522444861440 +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5681522444861440 @@ -1,7 +1,7 @@ config { name: "envoy.filters.http.oauth" typed_config { - type_url: "type.googleapis.com/envoy.extensions.filters.http.oauth2.v3alpha.OAuth2" + type_url: "type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2" value: "\n\306\t\022\006\022\001(\032\001r\032<\n\035envoy.filters.\360\222\213\217Qgrpc_stats\022\r\022\013\022\002\010\006\"\005\010\200\200\200\001\032\014\022\n\n\001t\"\005\010\200\200\200\001\"\006\022\001(\032\001r*\005\n\003:\001=2\351\010\n\346\010*\343\010\n\010\n\006\010\200\200\200\200\004\022\326\010^^^^^j!^^.*..............................................*............................config {\n name: \"envoy.filters.http.jwt_authn\"\n typed....._config {\n type_url: \"type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAu........[thentication\"\n value: \"\\n=\\n\\022not_health_check_f\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\\n1\\n\\0A_]^06\\000\\000\\000\\000\\000\\002\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matche!^^.*..............................................*............................config {\n name: \"envoy.filters.http.jwt_authn\"\n typed....._config {\n type_url: \"type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAu........[thentication\"\n value: \"\\n=\\n\\022not_health_check_f\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\\n1\\n\\0A_]^06\\000\\000\\000\\000\\000\\002\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\\n+\\n\\000\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\r/v3/number.\\n+\\n\\000\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\"\n }\n}\nB\003\n\001A" } } diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5914972389113856 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5914972389113856 index 5bb334c90502..2122b9078716 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5914972389113856 +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5914972389113856 @@ -1,7 +1,7 @@ config { name: "envoy.filters.http.admission_control" typed_config { - type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl" + type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3.AdmissionControl" value: "\022\000\032\000*\003\022\001$" } } diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-4784906297278464 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-4784906297278464 index c91cc6a64987..8f8c418d77a1 100644 --- 
a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-4784906297278464 +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-4784906297278464 @@ -1,7 +1,7 @@ config { name: "envoy.filters.http.admission_control" typed_config { - type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl" + type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3.AdmissionControl" value: "\022\000\032\002\020\002*\016\n\t\t+\000\000\000\000\000\000\000\022\001$" } } @@ -20,4 +20,4 @@ upstream_data { data: "=" data: "?" } -} \ No newline at end of file +} diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 index 1d3dd81ed0ec..e5d44679530e 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 @@ -1,7 +1,7 @@ config { name: "envoy.filters.http.admission_control" typed_config { - type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl" + type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3.AdmissionControl" value: "\022\000" } } diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc index 5a0265132bb2..c75446f2266f 100644 --- a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -7,6 +7,7 @@ #include "test/extensions/filters/http/common/fuzz/uber_filter.h" #include "test/proto/bookstore.pb.h" +#include "test/test_common/registry.h" // This file contains any filter-specific setup and input clean-up needed in the generic filter fuzz // target. @@ -125,7 +126,9 @@ void UberFilterFuzzer::perFilterSetup() { encoder_callbacks_.stream_info_.protocol_ = Envoy::Http::Protocol::Http2; // Prepare expectations for dynamic forward proxy. - ON_CALL(factory_context_.dispatcher_, createDnsResolver(_, _)) + NiceMock dns_resolver_factory; + Registry::InjectFactory registered_dns_factory(dns_resolver_factory); + ON_CALL(dns_resolver_factory, createDnsResolver(_, _, _)) .WillByDefault(testing::Return(resolver_)); // Prepare expectations for TAP config. 
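The uber fuzzer above now injects a mock DNS resolver factory through Registry::InjectFactory (from test/test_common/registry.h) instead of stubbing Dispatcher::createDnsResolver directly. Below is a rough, dependency-free sketch of the scoped-injection idea only; it is not Envoy's Registry implementation, and the factory name string is made up for illustration:

#include <cassert>
#include <map>
#include <string>

// Toy registry keyed by factory name. Envoy's real FactoryRegistry is far richer; this
// only illustrates the scoped-override pattern that Registry::InjectFactory provides.
struct DnsResolverFactory {
  virtual ~DnsResolverFactory() = default;
  virtual std::string name() const = 0;
};

std::map<std::string, DnsResolverFactory*>& registry() {
  static std::map<std::string, DnsResolverFactory*> factories;
  return factories;
}

// RAII helper: swaps a (mock) factory in on construction and restores the previous entry
// on destruction, so a test cannot leak its override into other tests.
class ScopedInjectFactory {
public:
  explicit ScopedInjectFactory(DnsResolverFactory& factory)
      : name_(factory.name()), previous_(registry()[name_]) {
    registry()[name_] = &factory;
  }
  ~ScopedInjectFactory() {
    if (previous_ == nullptr) {
      registry().erase(name_);
    } else {
      registry()[name_] = previous_;
    }
  }

private:
  const std::string name_;
  DnsResolverFactory* const previous_;
};

struct RealFactory : DnsResolverFactory {
  std::string name() const override { return "example.dns_resolver"; }
};
struct MockFactory : DnsResolverFactory {
  std::string name() const override { return "example.dns_resolver"; }
};

int main() {
  RealFactory real;
  ScopedInjectFactory keep_real(real);
  {
    MockFactory mock;
    ScopedInjectFactory inject(mock); // test body would resolve DNS through `mock`
    assert(registry()["example.dns_resolver"] == &mock);
  }
  // The override is undone when the injector goes out of scope.
  assert(registry()["example.dns_resolver"] == &real);
  return 0;
}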
diff --git a/test/extensions/filters/http/composite/filter_test.cc b/test/extensions/filters/http/composite/filter_test.cc index f6591d75f964..93f65c5bd397 100644 --- a/test/extensions/filters/http/composite/filter_test.cc +++ b/test/extensions/filters/http/composite/filter_test.cc @@ -17,7 +17,7 @@ namespace { class FilterTest : public ::testing::Test { public: - FilterTest() : filter_(stats_) { + FilterTest() : filter_(stats_, decoder_callbacks_.dispatcher()) { filter_.setDecoderFilterCallbacks(decoder_callbacks_); filter_.setEncoderFilterCallbacks(encoder_callbacks_); } @@ -72,7 +72,7 @@ class FilterTest : public ::testing::Test { filter_.encodeComplete(); } - Http::MockStreamDecoderFilterCallbacks decoder_callbacks_; + testing::NiceMock decoder_callbacks_; Http::MockStreamEncoderFilterCallbacks encoder_callbacks_; Stats::MockCounter error_counter_; Stats::MockCounter success_counter_; diff --git a/test/extensions/filters/http/compressor/compressor_filter_test.cc b/test/extensions/filters/http/compressor/compressor_filter_test.cc index cefb918c0f21..c95efec61682 100644 --- a/test/extensions/filters/http/compressor/compressor_filter_test.cc +++ b/test/extensions/filters/http/compressor/compressor_filter_test.cc @@ -260,28 +260,6 @@ TEST_F(CompressorFilterTest, CompressRequestAndResponseNoContentLength) { doResponseCompression(headers, false); } -TEST_F(CompressorFilterTest, CompressRequestAndResponseNoContentLengthRuntimeDisabled) { - setUpFilter(R"EOF( -{ - "request_direction_config": {}, - "response_direction_config": {}, - "compressor_library": { - "name": "test", - "typed_config": { - "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" - } - } -} -)EOF"); - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.enable_compression_without_content_length_header", "false"}}); - response_stats_prefix_ = "response."; - doRequestNoCompression({{":method", "get"}, {"accept-encoding", "deflate, test"}}); - Http::TestResponseHeaderMapImpl headers{{":status", "200"}}; - doResponseNoCompression(headers); -} - TEST_F(CompressorFilterTest, CompressRequestWithTrailers) { setUpFilter(R"EOF( { diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc index 570fc3061359..c138d28fbb59 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -13,7 +13,6 @@ namespace Envoy { namespace { class ProxyFilterIntegrationTest : public testing::TestWithParam, - public Event::TestUsingSimulatedTime, public HttpIntegrationTest { public: ProxyFilterIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP1, GetParam()) {} @@ -62,7 +61,7 @@ name: dynamic_forward_proxy // Setup the initial CDS cluster. 
cluster_.mutable_connect_timeout()->CopyFrom( - Protobuf::util::TimeUtil::MillisecondsToDuration(100)); + Protobuf::util::TimeUtil::MillisecondsToDuration(5000)); cluster_.set_name("cluster_0"); cluster_.set_lb_policy(envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED); @@ -127,14 +126,16 @@ name: envoy.clusters.dynamic_forward_proxy } else { HttpIntegrationTest::createUpstreams(); } - if (write_cache_file_) { - std::string host = - fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port()); - std::string value = + if (use_cache_file_) { + cache_file_value_contents_ += absl::StrCat(Network::Test::getLoopbackAddressUrlString(version_), ":", fake_upstreams_[0]->localAddress()->ip()->port(), "|1000000|0"); - TestEnvironment::writeStringToFileForTest( - "dns_cache.txt", absl::StrCat(host.length(), "\n", host, value.length(), "\n", value)); + std::string host = + fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port()); + TestEnvironment::writeStringToFileForTest("dns_cache.txt", + absl::StrCat(host.length(), "\n", host, + cache_file_value_contents_.length(), + "\n", cache_file_value_contents_)); } } @@ -142,9 +143,16 @@ name: envoy.clusters.dynamic_forward_proxy std::string upstream_cert_name_{"upstreamlocalhost"}; CdsHelper cds_helper_; envoy::config::cluster::v3::Cluster cluster_; - bool write_cache_file_{}; + std::string cache_file_value_contents_; + bool use_cache_file_{}; }; +class ProxyFilterWithSimtimeIntegrationTest : public Event::TestUsingSimulatedTime, + public ProxyFilterIntegrationTest {}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, ProxyFilterWithSimtimeIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); INSTANTIATE_TEST_SUITE_P(IpVersions, ProxyFilterIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); @@ -227,7 +235,7 @@ TEST_P(ProxyFilterIntegrationTest, ReloadClusterAndAttachToCache) { } // Verify that we expire hosts. 
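The dns_cache.txt contents assembled above, like the alt_svc_cache.txt written by the alternate-protocols-cache integration test earlier, use the same ad hoc length-prefixed layout: key length, newline, key, value length, newline, value. A standalone encode/decode sketch of that layout; the helper names are invented for illustration:

#include <cassert>
#include <cstddef>
#include <string>
#include <utility>

// "<key length>\n<key><value length>\n<value>" -- the layout produced by the
// TestEnvironment::writeStringToFileForTest() calls in these integration tests.
std::string encodeRecord(const std::string& key, const std::string& value) {
  return std::to_string(key.size()) + "\n" + key + std::to_string(value.size()) + "\n" + value;
}

// Decode one record starting at `pos`; returns the key/value pair and advances `pos`.
std::pair<std::string, std::string> decodeRecord(const std::string& data, size_t& pos) {
  size_t eol = data.find('\n', pos);
  const size_t key_len = std::stoul(data.substr(pos, eol - pos));
  const std::string key = data.substr(eol + 1, key_len);
  pos = eol + 1 + key_len;
  eol = data.find('\n', pos);
  const size_t value_len = std::stoul(data.substr(pos, eol - pos));
  const std::string value = data.substr(eol + 1, value_len);
  pos = eol + 1 + value_len;
  return {key, value};
}

int main() {
  const std::string encoded = encodeRecord("foo.com:80", "1.2.3.4:80|1000000|0");
  size_t pos = 0;
  const auto [key, value] = decodeRecord(encoded, pos);
  assert(key == "foo.com:80");
  assert(value == "1.2.3.4:80|1000000|0");
  assert(pos == encoded.size());
  return 0;
}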
-TEST_P(ProxyFilterIntegrationTest, RemoveHostViaTTL) { +TEST_P(ProxyFilterWithSimtimeIntegrationTest, RemoveHostViaTTL) { initializeWithArgs(); codec_client_ = makeHttpConnection(lookupPort("http")); const Http::TestRequestHeaderMapImpl request_headers{ @@ -398,7 +406,7 @@ TEST_P(ProxyFilterIntegrationTest, DnsCacheCircuitBreakersInvoked) { } TEST_P(ProxyFilterIntegrationTest, UseCacheFile) { - write_cache_file_ = true; + use_cache_file_ = true; initializeWithArgs(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -411,6 +419,93 @@ TEST_P(ProxyFilterIntegrationTest, UseCacheFile) { checkSimpleRequestSuccess(1024, 1024, response.get()); EXPECT_EQ(1, test_server_->counter("dns_cache.foo.cache_load")->value()); EXPECT_EQ(1, test_server_->counter("dns_cache.foo.host_added")->value()); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_cx_http1_total")->value()); +} + +TEST_P(ProxyFilterIntegrationTest, UseCacheFileAndTestHappyEyeballs) { + autonomous_upstream_ = true; + + config_helper_.addRuntimeOverride("envoy.reloadable_features.allow_multiple_dns_addresses", + "true"); + use_cache_file_ = true; + // Prepend a bad address + cache_file_value_contents_ = "99.99.99.99:1|1000000|0\n"; + + initializeWithArgs(); + codec_client_ = makeHttpConnection(lookupPort("http")); + std::string host = fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port()); + const Http::TestRequestHeaderMapImpl request_headers{ + {":method", "POST"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", host}}; + + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + + // Wait for the request to be received. + test_server_->waitForCounterEq("cluster.cluster_0.upstream_rq_total", 1); + EXPECT_TRUE(response->waitForEndStream()); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.cache_load")->value()); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.host_added")->value()); +} + +TEST_P(ProxyFilterIntegrationTest, MultipleRequestsLowStreamLimit) { + setDownstreamProtocol(Http::CodecType::HTTP2); + setUpstreamProtocol(Http::CodecType::HTTP2); + + // Ensure we only have one connection upstream, one request active at a time. + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + envoy::config::bootstrap::v3::Bootstrap::StaticResources* static_resources = + bootstrap.mutable_static_resources(); + envoy::config::cluster::v3::Cluster* cluster = static_resources->mutable_clusters(0); + envoy::config::cluster::v3::CircuitBreakers* circuit_breakers = + cluster->mutable_circuit_breakers(); + circuit_breakers->add_thresholds()->mutable_max_connections()->set_value(1); + ConfigHelper::HttpProtocolOptions protocol_options; + protocol_options.mutable_explicit_http_config() + ->mutable_http2_protocol_options() + ->mutable_max_concurrent_streams() + ->set_value(1); + ConfigHelper::setProtocolOptions(*bootstrap.mutable_static_resources()->mutable_clusters(0), + protocol_options); + }); + + // Start sending the request, but ensure no end stream will be sent, so the + // stream will stay in use. + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Start sending the request, but ensure no end stream will be sent, so the + // stream will stay in use. + std::pair encoder_decoder = + codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder.first; + IntegrationStreamDecoderPtr response = std::move(encoder_decoder.second); + + // Make sure the headers are received. 
+ ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + + // Start another request. + IntegrationStreamDecoderPtr response2 = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + test_server_->waitForCounterEq("http.config_test.downstream_rq_total", 2); + // Make sure the stream is not received. + ASSERT_FALSE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_, + std::chrono::milliseconds(100))); + + // Finish the first stream. + codec_client_->sendData(*request_encoder_, 0, true); + upstream_request_->encodeHeaders(default_response_headers_, true); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + + // This should allow the second stream to complete + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + upstream_request_->encodeHeaders(default_response_headers_, true); + ASSERT_TRUE(response2->waitForEndStream()); + EXPECT_TRUE(response2->complete()); + EXPECT_EQ("200", response2->headers().getStatusValue()); } } // namespace diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index bd8affbc0589..8f3033bd225a 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -70,6 +70,49 @@ template class HttpFilterTestBase : public T { connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); } + void queryParameterTest(const std::string& original_path, const std::string& expected_path, + const Http::Utility::QueryParamsVector& add_me, + const std::vector& remove_me) { + InSequence s; + + // Set up all the typical headers plus a path with a query string that we'll remove later. 
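The expected_path values in the query-parameter tests added further below come out with their keys alphabetized, consistent with parsing the query string into a key-ordered map, applying query_parameters_to_set and then query_parameters_to_remove, and re-serializing. Here is a plain-C++ sketch of that expected behaviour using std::map; it mirrors the test expectations, not the filter's actual Http::Utility code:

#include <cassert>
#include <map>
#include <string>
#include <utility>
#include <vector>

using QueryParams = std::map<std::string, std::string>; // key-ordered, like a sorted dictionary

QueryParams parseQuery(const std::string& path) {
  QueryParams params;
  const size_t qs = path.find('?');
  if (qs == std::string::npos) {
    return params;
  }
  size_t start = qs + 1;
  while (start < path.size()) {
    size_t end = path.find('&', start);
    if (end == std::string::npos) {
      end = path.size();
    }
    const std::string pair = path.substr(start, end - start);
    const size_t eq = pair.find('=');
    params[pair.substr(0, eq)] = (eq == std::string::npos) ? "" : pair.substr(eq + 1);
    start = end + 1;
  }
  return params;
}

std::string rebuildPath(const std::string& path, const QueryParams& params) {
  const std::string base = path.substr(0, path.find('?'));
  if (params.empty()) {
    return base;
  }
  std::string out = base + "?";
  bool first = true;
  for (const auto& [key, value] : params) { // map iteration order == alphabetical keys
    out += (first ? "" : "&") + key + "=" + value;
    first = false;
  }
  return out;
}

int main() {
  QueryParams params = parseQuery("/users?remove-me=1&overwrite-me=2&leave-me=3");
  const std::vector<std::pair<std::string, std::string>> to_set{{"add-me", "9"},
                                                                {"overwrite-me", "new"}};
  const std::vector<std::string> to_remove{"remove-me"};
  for (const auto& [key, value] : to_set) {
    params[key] = value; // adds new keys, overwrites existing ones
  }
  for (const std::string& key : to_remove) {
    params.erase(key);
  }
  assert(rebuildPath("/users?remove-me=1&overwrite-me=2&leave-me=3", params) ==
         "/users?add-me=9&leave-me=3&overwrite-me=new");
  return 0;
}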
+ request_headers_.addCopy(Http::Headers::get().Host, "example.com"); + request_headers_.addCopy(Http::Headers::get().Method, "GET"); + request_headers_.addCopy(Http::Headers::get().Path, original_path); + request_headers_.addCopy(Http::Headers::get().Scheme, "https"); + + prepareCheck(); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + response.query_parameters_to_set = add_me; + response.query_parameters_to_remove = remove_me; + + auto response_ptr = std::make_unique(response); + + EXPECT_CALL(*client_, check(_, _, _, _)) + .WillOnce(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks, + const envoy::service::auth::v3::CheckRequest&, Tracing::Span&, + const StreamInfo::StreamInfo&) -> void { + callbacks.onComplete(std::move(response_ptr)); + })); + EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + EXPECT_EQ(request_headers_.getPathValue(), expected_path); + + Buffer::OwnedImpl response_data{}; + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + Http::TestResponseTrailerMapImpl response_trailers{}; + Http::MetadataMap response_metadata{}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers)); + EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(response_metadata)); + } + NiceMock stats_store_; envoy::config::bootstrap::v3::Bootstrap bootstrap_; FilterConfigSharedPtr config_; @@ -1789,6 +1832,54 @@ TEST_P(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { EXPECT_EQ(response_headers.get_("should-be-overridden"), "finally-set-by-auth-server"); } +TEST_P(HttpFilterTestParam, ImmediateOkResponseWithUnmodifiedQueryParameters) { + const std::string original_path{"/users?leave-me=alone"}; + const std::string expected_path{"/users?leave-me=alone"}; + const Http::Utility::QueryParamsVector add_me{}; + const std::vector remove_me{"remove-me"}; + queryParameterTest(original_path, expected_path, add_me, remove_me); +} + +TEST_P(HttpFilterTestParam, ImmediateOkResponseWithAddedQueryParameters) { + const std::string original_path{"/users"}; + const std::string expected_path{"/users?add-me=123"}; + const Http::Utility::QueryParamsVector add_me{{"add-me", "123"}}; + const std::vector remove_me{}; + queryParameterTest(original_path, expected_path, add_me, remove_me); +} + +TEST_P(HttpFilterTestParam, ImmediateOkResponseWithAddedAndRemovedQueryParameters) { + const std::string original_path{"/users?remove-me=123"}; + const std::string expected_path{"/users?add-me=456"}; + const Http::Utility::QueryParamsVector add_me{{"add-me", "456"}}; + const std::vector remove_me{{"remove-me"}}; + queryParameterTest(original_path, expected_path, add_me, remove_me); +} + +TEST_P(HttpFilterTestParam, ImmediateOkResponseWithRemovedQueryParameters) { + const std::string original_path{"/users?remove-me=definitely"}; + const std::string expected_path{"/users"}; + const 
Http::Utility::QueryParamsVector add_me{}; + const std::vector remove_me{{"remove-me"}}; + queryParameterTest(original_path, expected_path, add_me, remove_me); +} + +TEST_P(HttpFilterTestParam, ImmediateOkResponseWithOverwrittenQueryParameters) { + const std::string original_path{"/users?overwrite-me=original"}; + const std::string expected_path{"/users?overwrite-me=new"}; + const Http::Utility::QueryParamsVector add_me{{"overwrite-me", "new"}}; + const std::vector remove_me{}; + queryParameterTest(original_path, expected_path, add_me, remove_me); +} + +TEST_P(HttpFilterTestParam, ImmediateOkResponseWithManyModifiedQueryParameters) { + const std::string original_path{"/users?remove-me=1&overwrite-me=2&leave-me=3"}; + const std::string expected_path{"/users?add-me=9&leave-me=3&overwrite-me=new"}; + const Http::Utility::QueryParamsVector add_me{{"add-me", "9"}, {"overwrite-me", "new"}}; + const std::vector remove_me{{"remove-me"}}; + queryParameterTest(original_path, expected_path, add_me, remove_me); +} + // Test that an synchronous denied response from the authorization service, on the call stack, // results in request not continuing. TEST_P(HttpFilterTestParam, ImmediateDeniedResponse) { diff --git a/test/extensions/filters/http/ext_proc/BUILD b/test/extensions/filters/http/ext_proc/BUILD index 94a5abbb88ff..cbe1ca689ac8 100644 --- a/test/extensions/filters/http/ext_proc/BUILD +++ b/test/extensions/filters/http/ext_proc/BUILD @@ -38,7 +38,7 @@ envoy_extension_cc_test( "//test/mocks/event:event_mocks", "//test/mocks/server:factory_context_mocks", "//test/test_common:test_runtime_lib", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) @@ -105,8 +105,8 @@ envoy_extension_cc_test( "//test/common/http:common_lib", "//test/integration:http_integration_lib", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/http/ext_proc/v3alpha:pkg_cc_proto", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ext_proc/v3:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) @@ -124,8 +124,8 @@ envoy_extension_cc_test( "//test/integration:http_integration_lib", "//test/test_common:utility_lib", "@com_google_absl//absl/strings:str_format", - "@envoy_api//envoy/extensions/filters/http/ext_proc/v3alpha:pkg_cc_proto", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ext_proc/v3:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) @@ -139,8 +139,8 @@ envoy_extension_cc_test_library( "//test/test_common:network_utility_lib", "@com_github_grpc_grpc//:grpc++", "@com_google_absl//absl/strings:str_format", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_grpc", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_grpc", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", ], ) @@ -182,8 +182,8 @@ envoy_extension_cc_test_library( "//test/test_common:utility_lib", "@com_github_grpc_grpc//:grpc++", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/filters/http/ext_proc/v3alpha:pkg_cc_proto", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ext_proc/v3:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) @@ -202,8 +202,8 @@ envoy_cc_fuzz_test( 
"//test/integration:http_integration_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/filters/http/ext_proc/v3alpha:pkg_cc_proto", - "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ext_proc/v3:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/ext_proc/client_test.cc b/test/extensions/filters/http/ext_proc/client_test.cc index 4f58f7e8aedd..a95573d1dea5 100644 --- a/test/extensions/filters/http/ext_proc/client_test.cc +++ b/test/extensions/filters/http/ext_proc/client_test.cc @@ -11,8 +11,8 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; using testing::Invoke; using testing::Unused; @@ -42,7 +42,7 @@ class ExtProcStreamTest : public testing::Test, public ExternalProcessorCallback Grpc::RawAsyncClientSharedPtr doFactory(Unused, Unused, Unused, Unused) { auto async_client = std::make_shared(); EXPECT_CALL(*async_client, - startRaw("envoy.service.ext_proc.v3alpha.ExternalProcessor", "Process", _, _)) + startRaw("envoy.service.ext_proc.v3.ExternalProcessor", "Process", _, _)) .WillOnce(Invoke(this, &ExtProcStreamTest::doStartRaw)); return async_client; } diff --git a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz.cc b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz.cc index c03cafa52ada..3e8f1af52e33 100644 --- a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz.cc +++ b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz.cc @@ -18,8 +18,8 @@ // 7. 
Remove locks after crash is addressed by separate issue #include "envoy/config/core/v3/base.pb.h" -#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "envoy/type/v3/http_status.pb.h" #include "source/common/network/address_impl.h" @@ -36,9 +36,9 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; // The buffer size for the listeners static const uint32_t BufferSize = 100000; @@ -216,7 +216,7 @@ class ExtProcIntegrationFuzz : public HttpIntegrationTest, } } - envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor proto_config_{}; + envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor proto_config_{}; TestProcessor test_processor_; Network::Address::IpVersion ip_version_; Grpc::ClientType client_type_; diff --git a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.cc b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.cc index b5d5e112dffe..336f0a4364f4 100644 --- a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.cc +++ b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.cc @@ -1,8 +1,8 @@ #include "test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.h" #include "envoy/config/core/v3/base.pb.h" -#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "envoy/type/v3/http_status.pb.h" #include "source/common/common/thread.h" @@ -16,12 +16,12 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; -using envoy::service::ext_proc::v3alpha::CommonResponse; -using envoy::service::ext_proc::v3alpha::HeaderMutation; -using envoy::service::ext_proc::v3alpha::ImmediateResponse; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; +using envoy::service::ext_proc::v3::CommonResponse; +using envoy::service::ext_proc::v3::HeaderMutation; +using envoy::service::ext_proc::v3::ImmediateResponse; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; using envoy::type::v3::StatusCode; const StatusCode HttpStatusCodes[] = { diff --git a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.h b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.h index 1b5a6359dea4..30db02579306 100644 --- a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.h +++ b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.h @@ -1,8 +1,8 @@ #pragma once #include "envoy/config/core/v3/base.pb.h" -#include 
"envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "envoy/type/v3/http_status.pb.h" #include "source/common/common/thread.h" @@ -19,12 +19,12 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; -using envoy::service::ext_proc::v3alpha::CommonResponse; -using envoy::service::ext_proc::v3alpha::HeaderMutation; -using envoy::service::ext_proc::v3alpha::ImmediateResponse; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; +using envoy::service::ext_proc::v3::CommonResponse; +using envoy::service::ext_proc::v3::HeaderMutation; +using envoy::service::ext_proc::v3::ImmediateResponse; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; using envoy::type::v3::StatusCode; const uint32_t ExtProcFuzzMaxDataSize = 1024; diff --git a/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc b/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc index 7dc977d92640..aeffd710edc2 100644 --- a/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc +++ b/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc @@ -1,8 +1,8 @@ #include -#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.h" #include "envoy/network/address.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "source/extensions/filters/http/ext_proc/config.h" @@ -17,21 +17,21 @@ namespace Envoy { using envoy::config::route::v3::Route; using envoy::config::route::v3::VirtualHost; -using envoy::extensions::filters::http::ext_proc::v3alpha::ExtProcPerRoute; -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; +using envoy::extensions::filters::http::ext_proc::v3::ExtProcPerRoute; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; using envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager; using Envoy::Protobuf::MapPair; using Envoy::ProtobufWkt::Any; -using envoy::service::ext_proc::v3alpha::BodyResponse; -using envoy::service::ext_proc::v3alpha::CommonResponse; -using envoy::service::ext_proc::v3alpha::HeadersResponse; -using envoy::service::ext_proc::v3alpha::HttpBody; -using envoy::service::ext_proc::v3alpha::HttpHeaders; -using envoy::service::ext_proc::v3alpha::HttpTrailers; -using envoy::service::ext_proc::v3alpha::ImmediateResponse; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; -using envoy::service::ext_proc::v3alpha::TrailersResponse; +using envoy::service::ext_proc::v3::BodyResponse; +using envoy::service::ext_proc::v3::CommonResponse; +using envoy::service::ext_proc::v3::HeadersResponse; +using envoy::service::ext_proc::v3::HttpBody; +using envoy::service::ext_proc::v3::HttpHeaders; +using envoy::service::ext_proc::v3::HttpTrailers; +using envoy::service::ext_proc::v3::ImmediateResponse; +using envoy::service::ext_proc::v3::ProcessingRequest; +using 
envoy::service::ext_proc::v3::ProcessingResponse; +using envoy::service::ext_proc::v3::TrailersResponse; using Extensions::HttpFilters::ExternalProcessing::HasNoHeader; using Extensions::HttpFilters::ExternalProcessing::HeaderProtosEqual; using Extensions::HttpFilters::ExternalProcessing::SingleHeaderValueIs; @@ -306,7 +306,7 @@ class ExtProcIntegrationTest : public HttpIntegrationTest, processor_stream_->sendGrpcMessage(response); } - envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor proto_config_{}; + envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor proto_config_{}; FakeHttpConnectionPtr processor_connection_; FakeStreamPtr processor_stream_; }; diff --git a/test/extensions/filters/http/ext_proc/filter_test.cc b/test/extensions/filters/http/ext_proc/filter_test.cc index c4207736d6b7..386f7401d48f 100644 --- a/test/extensions/filters/http/ext_proc/filter_test.cc +++ b/test/extensions/filters/http/ext_proc/filter_test.cc @@ -1,4 +1,4 @@ -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "source/extensions/filters/http/ext_proc/ext_proc.h" @@ -25,15 +25,15 @@ namespace HttpFilters { namespace ExternalProcessing { namespace { -using envoy::extensions::filters::http::ext_proc::v3alpha::ExtProcPerRoute; -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; -using envoy::service::ext_proc::v3alpha::BodyResponse; -using envoy::service::ext_proc::v3alpha::CommonResponse; -using envoy::service::ext_proc::v3alpha::HeadersResponse; -using envoy::service::ext_proc::v3alpha::HttpBody; -using envoy::service::ext_proc::v3alpha::HttpHeaders; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::extensions::filters::http::ext_proc::v3::ExtProcPerRoute; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; +using envoy::service::ext_proc::v3::BodyResponse; +using envoy::service::ext_proc::v3::CommonResponse; +using envoy::service::ext_proc::v3::HeadersResponse; +using envoy::service::ext_proc::v3::HttpBody; +using envoy::service::ext_proc::v3::HttpHeaders; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; using Http::FilterDataStatus; using Http::FilterHeadersStatus; @@ -77,7 +77,7 @@ class HttpFilterTest : public testing::Test { return timer; })); - envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor proto_config{}; + envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor proto_config{}; if (!yaml.empty()) { TestUtility::loadFromYaml(yaml, proto_config); } diff --git a/test/extensions/filters/http/ext_proc/mock_server.h b/test/extensions/filters/http/ext_proc/mock_server.h index 232590fc5773..ea0875b01a2e 100644 --- a/test/extensions/filters/http/ext_proc/mock_server.h +++ b/test/extensions/filters/http/ext_proc/mock_server.h @@ -21,7 +21,7 @@ class MockStream : public ExternalProcessorStream { public: MockStream(); ~MockStream() override; - MOCK_METHOD(void, send, (envoy::service::ext_proc::v3alpha::ProcessingRequest&&, bool)); + MOCK_METHOD(void, send, (envoy::service::ext_proc::v3::ProcessingRequest&&, bool)); MOCK_METHOD(bool, close, ()); }; diff --git a/test/extensions/filters/http/ext_proc/mutation_utils_test.cc b/test/extensions/filters/http/ext_proc/mutation_utils_test.cc index c617caa299d5..6b370811ac1e 100644 --- 
a/test/extensions/filters/http/ext_proc/mutation_utils_test.cc +++ b/test/extensions/filters/http/ext_proc/mutation_utils_test.cc @@ -11,7 +11,7 @@ namespace HttpFilters { namespace ExternalProcessing { namespace { -using envoy::service::ext_proc::v3alpha::BodyMutation; +using envoy::service::ext_proc::v3::BodyMutation; using Http::LowerCaseString; @@ -53,7 +53,7 @@ TEST(MutationUtils, TestApplyMutations) { {"x-envoy-strange-thing", "No"}, }; - envoy::service::ext_proc::v3alpha::HeaderMutation mutation; + envoy::service::ext_proc::v3::HeaderMutation mutation; auto* s = mutation.add_set_headers(); s->mutable_append()->set_value(true); s->mutable_header()->set_key("x-append-this"); @@ -135,7 +135,7 @@ TEST(MutationUtils, TestApplyMutations) { TEST(MutationUtils, TestNonAppendableHeaders) { Http::TestRequestHeaderMapImpl headers; - envoy::service::ext_proc::v3alpha::HeaderMutation mutation; + envoy::service::ext_proc::v3::HeaderMutation mutation; auto* s = mutation.add_set_headers(); s->mutable_append()->set_value(true); s->mutable_header()->set_key(":path"); diff --git a/test/extensions/filters/http/ext_proc/ordering_test.cc b/test/extensions/filters/http/ext_proc/ordering_test.cc index f35bdda128c8..776184bb8828 100644 --- a/test/extensions/filters/http/ext_proc/ordering_test.cc +++ b/test/extensions/filters/http/ext_proc/ordering_test.cc @@ -22,10 +22,10 @@ namespace HttpFilters { namespace ExternalProcessing { namespace { -using envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor; -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; using Event::MockTimer; using Http::FilterDataStatus; diff --git a/test/extensions/filters/http/ext_proc/streaming_integration_test.cc b/test/extensions/filters/http/ext_proc/streaming_integration_test.cc index 14025f3a3ccc..5e1d925068b4 100644 --- a/test/extensions/filters/http/ext_proc/streaming_integration_test.cc +++ b/test/extensions/filters/http/ext_proc/streaming_integration_test.cc @@ -1,5 +1,5 @@ -#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3/ext_proc.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "source/common/common/hash.h" #include "source/common/network/address_impl.h" @@ -18,9 +18,9 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { -using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; -using envoy::service::ext_proc::v3alpha::ProcessingRequest; -using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::extensions::filters::http::ext_proc::v3::ProcessingMode; +using envoy::service::ext_proc::v3::ProcessingRequest; +using envoy::service::ext_proc::v3::ProcessingResponse; using Http::LowerCaseString; @@ -129,7 +129,7 @@ class StreamingIntegrationTest : public HttpIntegrationTest, } TestProcessor test_processor_; - envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor proto_config_{}; + envoy::extensions::filters::http::ext_proc::v3::ExternalProcessor proto_config_{}; 
IntegrationStreamDecoderPtr client_response_; std::atomic processor_request_hash_; std::atomic processor_response_hash_; @@ -472,6 +472,7 @@ TEST_P(StreamingIntegrationTest, GetAndProcessStreamedResponseBody) { EXPECT_TRUE(client_response_->complete()); EXPECT_THAT(client_response_->headers(), Http::HttpStatusIs("200")); EXPECT_EQ(client_response_->body().size(), response_size); + test_processor_.shutdown(); EXPECT_EQ(processor_response_hash_, HashUtil::xxHash64(client_response_->body())); } diff --git a/test/extensions/filters/http/ext_proc/test_processor.cc b/test/extensions/filters/http/ext_proc/test_processor.cc index b62807cab6bd..26094afbd131 100644 --- a/test/extensions/filters/http/ext_proc/test_processor.cc +++ b/test/extensions/filters/http/ext_proc/test_processor.cc @@ -1,6 +1,6 @@ #include "test/extensions/filters/http/ext_proc/test_processor.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "test/test_common/network_utility.h" @@ -14,8 +14,8 @@ namespace ExternalProcessing { grpc::Status ProcessorWrapper::Process( grpc::ServerContext* ctx, - grpc::ServerReaderWriter* stream) { + grpc::ServerReaderWriter* stream) { if (context_callback_) { (*context_callback_)(ctx); } diff --git a/test/extensions/filters/http/ext_proc/test_processor.h b/test/extensions/filters/http/ext_proc/test_processor.h index 1b875669b74a..bd0d8518b7bd 100644 --- a/test/extensions/filters/http/ext_proc/test_processor.h +++ b/test/extensions/filters/http/ext_proc/test_processor.h @@ -4,8 +4,8 @@ #include #include "envoy/network/address.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.grpc.pb.h" -#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.grpc.pb.h" +#include "envoy/service/ext_proc/v3/external_processor.pb.h" #include "grpc++/server.h" #include "gtest/gtest.h" @@ -17,9 +17,9 @@ namespace ExternalProcessing { // Implementations of this function are called for each gRPC stream sent // to the external processing server. -using ProcessingFunc = std::function*)>; +using ProcessingFunc = + std::function*)>; // An implementation of this function may be called so that a test may verify // the gRPC context. @@ -27,16 +27,15 @@ using ContextProcessingFunc = std::function; // An implementation of the ExternalProcessor service that may be included // in integration tests. 
-class ProcessorWrapper : public envoy::service::ext_proc::v3alpha::ExternalProcessor::Service { +class ProcessorWrapper : public envoy::service::ext_proc::v3::ExternalProcessor::Service { public: ProcessorWrapper(ProcessingFunc& cb, absl::optional context_cb) : callback_(cb), context_callback_(context_cb) {} - grpc::Status - Process(grpc::ServerContext*, - grpc::ServerReaderWriter* stream) - override; + grpc::Status Process( + grpc::ServerContext*, + grpc::ServerReaderWriter* stream) override; private: ProcessingFunc callback_; diff --git a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc index 99baf6b1789b..474d74e96aa8 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc @@ -218,18 +218,47 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, UnaryPost) { R"({"id":"20","theme":"Children"})"); } +TEST_P(GrpcJsonTranscoderIntegrationTest, TestParamUnescapePlus) { + const std::string filter = + R"EOF( + name: grpc_json_transcoder + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + proto_descriptor : "{}" + services : "bookstore.Bookstore" + query_param_unescape_plus: true + )EOF"; + config_helper_.prependFilter( + fmt::format(filter, TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"))); + HttpIntegrationTest::initialize(); + // Test '+', 'query_param_unescape_plus' is true, '+' is converted to space. + testTranscoding( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/shelf?shelf.theme=Children+Books"}, + {":authority", "host"}, + {"content-type", "application/json"}}, + "", {R"(shelf { theme: "Children Books" })"}, {R"(id: 20 theme: "Children" )"}, Status(), + Http::TestResponseHeaderMapImpl{ + {":status", "200"}, + {"content-type", "application/json"}, + }, + R"({"id":"20","theme":"Children"})"); +} + TEST_P(GrpcJsonTranscoderIntegrationTest, QueryParams) { HttpIntegrationTest::initialize(); // 1. Binding theme='Children' in CreateShelfRequest // Using the following HTTP template: // POST /shelves // body: shelf + + // Test '+', 'query_param_unescape_plus' is false by default, '+' is not converted to space. testTranscoding( Http::TestRequestHeaderMapImpl{{":method", "POST"}, - {":path", "/shelf?shelf.theme=Children"}, + {":path", "/shelf?shelf.theme=Children+Books"}, {":authority", "host"}, {"content-type", "application/json"}}, - "", {R"(shelf { theme: "Children" })"}, {R"(id: 20 theme: "Children" )"}, Status(), + "", {R"(shelf { theme: "Children+Books" })"}, {R"(id: 20 theme: "Children" )"}, Status(), Http::TestResponseHeaderMapImpl{ {":status", "200"}, {"content-type", "application/json"}, @@ -1292,72 +1321,5 @@ TEST_P(OverrideConfigGrpcJsonTranscoderIntegrationTest, RouteOverride) { R"({"shelves":[{"id":"20","theme":"Children"},{"id":"1","theme":"Foo"}]})"); }; -// Tests to ensure transcoding buffer limits do not apply when the runtime feature is disabled. 
-class BufferLimitsDisabledGrpcJsonTranscoderIntegrationTest - : public GrpcJsonTranscoderIntegrationTest { -public: - void SetUp() override { - setUpstreamProtocol(Http::CodecType::HTTP2); - const std::string filter = - R"EOF( - name: grpc_json_transcoder - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder - proto_descriptor : "{}" - services : "bookstore.Bookstore" - )EOF"; - config_helper_.prependFilter( - fmt::format(filter, TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"))); - - // Disable runtime feature. - config_helper_.addRuntimeOverride( - "envoy.reloadable_features.grpc_json_transcoder_adhere_to_buffer_limits", "false"); - } -}; -INSTANTIATE_TEST_SUITE_P(IpVersions, BufferLimitsDisabledGrpcJsonTranscoderIntegrationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); - -TEST_P(BufferLimitsDisabledGrpcJsonTranscoderIntegrationTest, UnaryPostRequestExceedsBufferLimit) { - // Request body is more than 20 bytes. - config_helper_.setBufferLimits(2 << 20, 20); - HttpIntegrationTest::initialize(); - - // Transcoding succeeds. - testTranscoding( - Http::TestRequestHeaderMapImpl{{":method", "POST"}, - {":path", "/shelf"}, - {":authority", "host"}, - {"content-type", "application/json"}}, - R"({"theme": "Children 0123456789 0123456789 0123456789 0123456789"})", - {R"(shelf { theme: "Children 0123456789 0123456789 0123456789 0123456789" })"}, {R"(id: 1)"}, - Status(), - Http::TestResponseHeaderMapImpl{{":status", "200"}, - {"content-type", "application/json"}, - {"content-length", "10"}, - {"grpc-status", "0"}}, - R"({"id":"1"})"); -} - -TEST_P(BufferLimitsDisabledGrpcJsonTranscoderIntegrationTest, UnaryPostResponseExceedsBufferLimit) { - // Request body is less than 35 bytes. - // Response body is more than 35 bytes. - config_helper_.setBufferLimits(2 << 20, 35); - HttpIntegrationTest::initialize(); - - // Transcoding succeeds. However, the downstream client is unable to buffer the full response. - // We can tell these errors are NOT from the transcoder because the response body is too generic. 
- testTranscoding( - Http::TestRequestHeaderMapImpl{{":method", "POST"}, - {":path", "/shelf"}, - {":authority", "host"}, - {"content-type", "application/json"}}, - R"({"theme": "Children"})", {R"(shelf { theme: "Children" })"}, - {R"(id: 20 theme: "Children 0123456789 0123456789 0123456789 0123456789" )"}, Status(), - Http::TestResponseHeaderMapImpl{ - {":status", "500"}, {"content-type", "text/plain"}, {"content-length", "21"}}, - R"(Internal Server Error)"); -} - } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc b/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc index 235168207001..2d8bdf6214c8 100644 --- a/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc +++ b/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc @@ -13,11 +13,9 @@ constexpr absl::string_view text{"application/grpc-web-text"}; constexpr absl::string_view binary{"application/grpc-web"}; constexpr uint64_t MAX_BUFFERED_PLAINTEXT_LENGTH = 16384; -using SkipEncodingEmptyTrailers = bool; using ContentType = std::string; using Accept = std::string; -using TestParams = std::tuple; +using TestParams = std::tuple; class GrpcWebFilterIntegrationTest : public testing::TestWithParam, public HttpIntegrationTest { @@ -33,21 +31,12 @@ class GrpcWebFilterIntegrationTest : public testing::TestWithParam, void initialize() override { if (downstream_protocol_ == Http::CodecType::HTTP1) { config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1()); - } else { - skipEncodingEmptyTrailers(http2_skip_encoding_empty_trailers_); } - setUpstreamProtocol(Http::CodecType::HTTP2); HttpIntegrationTest::initialize(); } - void skipEncodingEmptyTrailers(SkipEncodingEmptyTrailers http2_skip_encoding_empty_trailers) { - config_helper_.addRuntimeOverride( - "envoy.reloadable_features.http2_skip_encoding_empty_trailers", - http2_skip_encoding_empty_trailers ? "true" : "false"); - } - void setLocalReplyConfig(const std::string& yaml) { envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig local_reply_config; @@ -134,27 +123,23 @@ class GrpcWebFilterIntegrationTest : public testing::TestWithParam, static std::string testParamsToString(const testing::TestParamInfo params) { return fmt::format( - "{}_{}_{}_{}_{}", + "{}_{}_{}_{}", TestUtility::ipTestParamsToString(testing::TestParamInfo( std::get<0>(params.param), params.index)), std::get<1>(params.param) == Http::CodecType::HTTP2 ? "Http2" : "Http", - std::get<2>(params.param) ? "SkipEncodingEmptyTrailers" : "SubmitEncodingEmptyTrailers", - std::get<3>(params.param) == text ? "SendText" : "SendBinary", - std::get<4>(params.param) == text ? "AcceptText" : "AcceptBinary"); + std::get<2>(params.param) == text ? "SendText" : "SendBinary", + std::get<3>(params.param) == text ? 
"AcceptText" : "AcceptBinary"); } const Envoy::Http::CodecType downstream_protocol_{std::get<1>(GetParam())}; - const bool http2_skip_encoding_empty_trailers_{std::get<2>(GetParam())}; - const ContentType content_type_{std::get<3>(GetParam())}; - const Accept accept_{std::get<4>(GetParam())}; + const ContentType content_type_{std::get<2>(GetParam())}; + const Accept accept_{std::get<3>(GetParam())}; }; INSTANTIATE_TEST_SUITE_P( Params, GrpcWebFilterIntegrationTest, testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), testing::Values(Http::CodecType::HTTP1, Http::CodecType::HTTP2), - testing::Values(SkipEncodingEmptyTrailers{true}, - SkipEncodingEmptyTrailers{false}), testing::Values(ContentType{text}, ContentType{binary}), testing::Values(Accept{text}, Accept{binary})), GrpcWebFilterIntegrationTest::testParamsToString); @@ -206,15 +191,9 @@ TEST_P(GrpcWebFilterIntegrationTest, GrpcWebTrailersNotDuplicated) { } if (downstream_protocol_ == Http::CodecType::HTTP2) { - if (http2_skip_encoding_empty_trailers_) { - // When the downstream protocol is HTTP/2 and the feature-flag to skip encoding empty trailers - // is turned on, expect that the trailers are included in the response-body. - EXPECT_EQ(nullptr, response->trailers()); - } else { - // Otherwise, we send empty trailers. - ASSERT_NE(nullptr, response->trailers()); - EXPECT_TRUE(response->trailers()->empty()); - } + // When the downstream protocol is HTTP/2 expect that the trailers are included in the + // response-body. + EXPECT_EQ(nullptr, response->trailers()); } } diff --git a/test/extensions/filters/http/health_check/BUILD b/test/extensions/filters/http/health_check/BUILD index c72e32c8adba..74a5d771c601 100644 --- a/test/extensions/filters/http/health_check/BUILD +++ b/test/extensions/filters/http/health_check/BUILD @@ -38,3 +38,17 @@ envoy_extension_cc_test( "@envoy_api//envoy/extensions/filters/http/health_check/v3:pkg_cc_proto", ], ) + +envoy_extension_cc_test( + name = "health_check_integration_test", + srcs = [ + "health_check_integration_test.cc", + ], + extension_names = ["envoy.filters.http.health_check"], + deps = [ + "//source/extensions/filters/http/buffer:config", + "//source/extensions/filters/http/health_check:config", + "//test/integration:http_protocol_integration_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/filters/http/health_check/health_check_integration_test.cc b/test/extensions/filters/http/health_check/health_check_integration_test.cc new file mode 100644 index 000000000000..8c0f18d21231 --- /dev/null +++ b/test/extensions/filters/http/health_check/health_check_integration_test.cc @@ -0,0 +1,185 @@ +#include "test/integration/http_protocol_integration.h" + +using testing::HasSubstr; +using testing::Not; + +namespace Envoy { +namespace { + +class HealthCheckIntegrationTest : public HttpProtocolIntegrationTest { +public: + void initialize() override { + config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); + HttpProtocolIntegrationTest::initialize(); + } + absl::string_view request(const std::string port_key, const std::string method, + const std::string endpoint, BufferingStreamDecoderPtr& response) { + response = IntegrationUtil::makeSingleRequest(lookupPort(port_key), method, endpoint, "", + downstreamProtocol(), version_); + EXPECT_TRUE(response->complete()); + return response->headers().getStatusValue(); + } +}; + +// Add a health check filter and verify correct behavior when draining. 
+TEST_P(HealthCheckIntegrationTest, DrainCloseGradual) { + // The probability of drain close increases over time. With a high timeout, + // the probability will be very low, but the rapid retries prevent this from + // increasing total test time. + drain_time_ = std::chrono::seconds(100); + initialize(); + + absl::Notification drain_sequence_started; + test_server_->server().dispatcher().post([this, &drain_sequence_started]() { + test_server_->drainManager().startDrainSequence([] {}); + drain_sequence_started.Notify(); + }); + drain_sequence_started.WaitForNotification(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + EXPECT_FALSE(codec_client_->disconnected()); + + IntegrationStreamDecoderPtr response; + while (!test_server_->counter("http.config_test.downstream_cx_drain_close")->value()) { + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + ASSERT_TRUE(response->waitForEndStream()); + } + EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_drain_close")->value(), 1L); + + ASSERT_TRUE(codec_client_->waitForDisconnect()); + EXPECT_TRUE(response->complete()); + + EXPECT_EQ("200", response->headers().getStatusValue()); + if (downstream_protocol_ == Http::CodecType::HTTP2) { + EXPECT_TRUE(codec_client_->sawGoAway()); + } else { + EXPECT_EQ("close", response->headers().getConnectionValue()); + } +} + +TEST_P(HealthCheckIntegrationTest, DrainCloseImmediate) { + drain_strategy_ = Server::DrainStrategy::Immediate; + drain_time_ = std::chrono::seconds(100); + initialize(); + + absl::Notification drain_sequence_started; + test_server_->server().dispatcher().post([this, &drain_sequence_started]() { + test_server_->drainManager().startDrainSequence([] {}); + drain_sequence_started.Notify(); + }); + drain_sequence_started.WaitForNotification(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + EXPECT_FALSE(codec_client_->disconnected()); + + IntegrationStreamDecoderPtr response; + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + ASSERT_TRUE(response->waitForEndStream()); + + ASSERT_TRUE(codec_client_->waitForDisconnect()); + EXPECT_TRUE(response->complete()); + + EXPECT_EQ("200", response->headers().getStatusValue()); + if (downstream_protocol_ == Http::CodecType::HTTP2) { + EXPECT_TRUE(codec_client_->sawGoAway()); + } else { + EXPECT_EQ("close", response->headers().getConnectionValue()); + } +} + +// Add a health check filter and verify correct computation of health based on upstream status. +TEST_P(HealthCheckIntegrationTest, ComputedHealthCheck) { + config_helper_.prependFilter(R"EOF( +name: health_check +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + pass_through_mode: false + cluster_min_healthy_percentages: + example_cluster_name: { value: 75 } +)EOF"); + HttpProtocolIntegrationTest::initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, {":path", "/healthcheck"}, {":scheme", "http"}, {":authority", "host"}}); + ASSERT_TRUE(response->waitForEndStream()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("503", response->headers().getStatusValue()); +} + +// Add a health check filter and verify correct computation of health based on upstream status. 
+TEST_P(HealthCheckIntegrationTest, ModifyBuffer) { + config_helper_.prependFilter(R"EOF( +name: health_check +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + pass_through_mode: false + cluster_min_healthy_percentages: + example_cluster_name: { value: 75 } +)EOF"); + HttpProtocolIntegrationTest::initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, {":path", "/healthcheck"}, {":scheme", "http"}, {":authority", "host"}}); + ASSERT_TRUE(response->waitForEndStream()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("503", response->headers().getStatusValue()); +} + +TEST_P(HealthCheckIntegrationTest, HealthCheck) { + initialize(); + + BufferingStreamDecoderPtr response; + EXPECT_EQ("200", request("http", "POST", "/healthcheck", response)); + + EXPECT_EQ("200", request("admin", "POST", "/healthcheck/fail", response)); + EXPECT_EQ("503", request("http", "GET", "/healthcheck", response)); + + EXPECT_EQ("200", request("admin", "POST", "/healthcheck/ok", response)); + EXPECT_EQ("200", request("http", "GET", "/healthcheck", response)); +} + +TEST_P(HealthCheckIntegrationTest, HealthCheckWithoutServerStats) { + envoy::config::metrics::v3::StatsMatcher stats_matcher; + stats_matcher.mutable_exclusion_list()->add_patterns()->set_prefix("server."); + config_helper_.addConfigModifier( + [stats_matcher](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + *bootstrap.mutable_stats_config()->mutable_stats_matcher() = stats_matcher; + }); + initialize(); + + BufferingStreamDecoderPtr response; + EXPECT_EQ("200", request("http", "POST", "/healthcheck", response)); + EXPECT_EQ("200", request("admin", "GET", "/stats", response)); + EXPECT_THAT(response->body(), Not(HasSubstr("server."))); + + EXPECT_EQ("200", request("admin", "POST", "/healthcheck/fail", response)); + EXPECT_EQ("503", request("http", "GET", "/healthcheck", response)); + EXPECT_EQ("200", request("admin", "GET", "/stats", response)); + EXPECT_THAT(response->body(), Not(HasSubstr("server."))); + + EXPECT_EQ("200", request("admin", "POST", "/healthcheck/ok", response)); + EXPECT_EQ("200", request("http", "GET", "/healthcheck", response)); + EXPECT_EQ("200", request("admin", "GET", "/stats", response)); + EXPECT_THAT(response->body(), Not(HasSubstr("server."))); +} + +TEST_P(HealthCheckIntegrationTest, HealthCheckWithBufferFilter) { + config_helper_.prependFilter(ConfigHelper::defaultBufferFilter()); + initialize(); + + BufferingStreamDecoderPtr response; + EXPECT_EQ("200", request("http", "GET", "/healthcheck", response)); +} + +INSTANTIATE_TEST_SUITE_P(Protocols, HealthCheckIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecType::HTTP1, Http::CodecType::HTTP2}, + {Http::CodecType::HTTP1})), + HttpProtocolIntegrationTest::protocolTestParamsToString); + +} // namespace +} // namespace Envoy diff --git a/test/extensions/filters/http/health_check/health_check_test.cc b/test/extensions/filters/http/health_check/health_check_test.cc index d2715efa89c6..2eaf8db8acb3 100644 --- a/test/extensions/filters/http/health_check/health_check_test.cc +++ b/test/extensions/filters/http/health_check/health_check_test.cc @@ -252,37 +252,6 @@ TEST_F(HealthCheckFilterNoPassThroughTest, HealthCheckFailedCallbackCalled) { EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers)); } -// 
Verifies that header is not sent on HC requests when -// "envoy.reloadable_features.health_check.immediate_failure_exclude_from_cluster" is disabled. -TEST_F(HealthCheckFilterNoPassThroughTest, - HealthCheckFailedCallbackCalledImmediateFailureExcludeDisabled) { - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.health_check.immediate_failure_exclude_from_cluster", "false"}}); - - EXPECT_CALL(context_, healthCheckFailed()).Times(2).WillRepeatedly(Return(true)); - EXPECT_CALL(callbacks_.stream_info_, healthCheck(true)); - EXPECT_CALL(callbacks_.active_span_, setSampled(false)); - Http::TestResponseHeaderMapImpl health_check_response{{":status", "503"}}; - EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&health_check_response), true)) - .Times(1) - .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) { - filter_->encodeHeaders(headers, end_stream); - EXPECT_EQ("cluster_name", headers.getEnvoyUpstreamHealthCheckedClusterValue()); - EXPECT_EQ(nullptr, headers.EnvoyImmediateHealthCheckFail()); - })); - - EXPECT_CALL(callbacks_.stream_info_, - setResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck)); - - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, - filter_->decodeHeaders(request_headers_, false)); - Buffer::OwnedImpl data("hello"); - EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(data, false)); - Http::TestRequestTrailerMapImpl request_trailers; - EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers)); -} - TEST_F(HealthCheckFilterPassThroughTest, Ok) { EXPECT_CALL(context_, healthCheckFailed()).Times(2).WillRepeatedly(Return(false)); EXPECT_CALL(callbacks_.stream_info_, healthCheck(true)); diff --git a/test/extensions/filters/http/kill_request/kill_request_filter_integration_test.cc b/test/extensions/filters/http/kill_request/kill_request_filter_integration_test.cc index 6d9a173b9965..f146af5d4fd8 100644 --- a/test/extensions/filters/http/kill_request/kill_request_filter_integration_test.cc +++ b/test/extensions/filters/http/kill_request/kill_request_filter_integration_test.cc @@ -74,6 +74,8 @@ TEST_P(KillRequestFilterIntegrationTestAllProtocols, KillRequestCrashEnvoyOnResp "KillRequestFilter is crashing Envoy!!!"); } +// Disabled for coverage per #18569 +#if !defined(ENVOY_CONFIG_COVERAGE) TEST_P(KillRequestFilterIntegrationTestAllProtocols, KillRequestCrashEnvoyWithCustomKillHeader) { const std::string filter_config_with_custom_kill_header = R"EOF( @@ -96,6 +98,7 @@ name: envoy.filters.http.kill_request EXPECT_DEATH(sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 1024), "KillRequestFilter is crashing Envoy!!!"); } +#endif TEST_P(KillRequestFilterIntegrationTestAllProtocols, KillRequestDisabledWhenHeaderIsMissing) { initializeFilter(filter_config_); diff --git a/test/extensions/filters/http/oauth2/BUILD b/test/extensions/filters/http/oauth2/BUILD index 04bfb0b34f0f..8a50c3b43d09 100644 --- a/test/extensions/filters/http/oauth2/BUILD +++ b/test/extensions/filters/http/oauth2/BUILD @@ -18,7 +18,7 @@ envoy_extension_cc_test( deps = [ "//source/extensions/filters/http/oauth2:config", "//test/mocks/server:factory_context_mocks", - "@envoy_api//envoy/extensions/filters/http/oauth2/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/oauth2/v3:pkg_cc_proto", ], ) @@ -49,7 +49,7 @@ envoy_extension_cc_test( "//test/integration:http_integration_lib", 
"//test/mocks/server:server_mocks", "//test/mocks/upstream:upstream_mocks", - "@envoy_api//envoy/extensions/filters/http/oauth2/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/oauth2/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/oauth2/config_test.cc b/test/extensions/filters/http/oauth2/config_test.cc index 74dfe5daa6c4..abfd3206b3cc 100644 --- a/test/extensions/filters/http/oauth2/config_test.cc +++ b/test/extensions/filters/http/oauth2/config_test.cc @@ -1,7 +1,7 @@ #include #include -#include "envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.h" +#include "envoy/extensions/filters/http/oauth2/v3/oauth.pb.h" #include "source/common/protobuf/message_validator_impl.h" #include "source/common/protobuf/utility.h" @@ -86,6 +86,10 @@ TEST(ConfigTest, CreateFilter) { name: token hmac_secret: name: hmac + cookie_names: + bearer_token: BearerToken + oauth_hmac: OauthHMAC + oauth_expires: OauthExpires authorization_endpoint: https://oauth.com/oauth/authorize/ redirect_uri: "%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback" redirect_path_matcher: @@ -139,7 +143,7 @@ TEST(ConfigTest, InvalidHmacSecret) { TEST(ConfigTest, CreateFilterMissingConfig) { OAuth2Config config; - envoy::extensions::filters::http::oauth2::v3alpha::OAuth2 proto_config; + envoy::extensions::filters::http::oauth2::v3::OAuth2 proto_config; NiceMock factory_context; EXPECT_THROW_WITH_MESSAGE( @@ -147,6 +151,48 @@ TEST(ConfigTest, CreateFilterMissingConfig) { EnvoyException, "config must be present for global config"); } +TEST(ConfigTest, WrongCookieName) { + const std::string yaml = R"EOF( +config: + token_endpoint: + cluster: foo + uri: oauth.com/token + timeout: 3s + credentials: + client_id: "secret" + token_secret: + name: token + hmac_secret: + name: hmac + cookie_names: + bearer_token: "?" + authorization_endpoint: https://oauth.com/oauth/authorize/ + redirect_uri: "%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback" + redirect_path_matcher: + path: + exact: /callback + signout_path: + path: + exact: /signout + auth_scopes: + - user + - openid + - email + resources: + - oauth2-resource + - http://example.com + - https://example.com + )EOF"; + + OAuth2Config factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + NiceMock context; + + EXPECT_THROW_WITH_REGEX(factory.createFilterFactoryFromProto(*proto_config, "stats", context), + EnvoyException, "value does not match regex pattern"); +} + } // namespace Oauth2 } // namespace HttpFilters } // namespace Extensions diff --git a/test/extensions/filters/http/oauth2/filter_test.cc b/test/extensions/filters/http/oauth2/filter_test.cc index dc4f4402cd79..52b2e81b9700 100644 --- a/test/extensions/filters/http/oauth2/filter_test.cc +++ b/test/extensions/filters/http/oauth2/filter_test.cc @@ -1,8 +1,8 @@ #include #include -#include "envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.h" -#include "envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.validate.h" +#include "envoy/extensions/filters/http/oauth2/v3/oauth.pb.h" +#include "envoy/extensions/filters/http/oauth2/v3/oauth.pb.validate.h" #include "envoy/http/async_client.h" #include "envoy/http/message.h" @@ -96,7 +96,7 @@ class OAuth2Test : public testing::Test { // Set up proto fields with standard config. 
FilterConfigSharedPtr getConfig() { - envoy::extensions::filters::http::oauth2::v3alpha::OAuth2Config p; + envoy::extensions::filters::http::oauth2::v3::OAuth2Config p; auto* endpoint = p.mutable_token_endpoint(); endpoint->set_cluster("auth.example.com"); endpoint->set_uri("auth.example.com/_oauth"); @@ -119,6 +119,8 @@ class OAuth2Test : public testing::Test { credentials->set_client_id(TEST_CLIENT_ID); credentials->mutable_token_secret()->set_name("secret"); credentials->mutable_hmac_secret()->set_name("hmac"); + // Skipping setting credentials.cookie_names field should give default cookie names: + // BearerToken, OauthHMAC, and OauthExpires. MessageUtil::validate(p, ProtobufMessage::getStrictValidationVisitor()); @@ -141,6 +143,43 @@ class OAuth2Test : public testing::Test { return callbacks; } + // Validates the behavior of the cookie validator. + void expectValidCookies(const CookieNames& cookie_names) { + // Set SystemTime to a fixed point so we get consistent HMAC encodings between test runs. + test_time_.setSystemTime(SystemTime(std::chrono::seconds(0))); + + const auto expires_at_s = DateUtil::nowToSeconds(test_time_.timeSystem()) + 10; + + Http::TestRequestHeaderMapImpl request_headers{ + {Http::Headers::get().Host.get(), "traffic.example.com"}, + {Http::Headers::get().Path.get(), "/anypath"}, + {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, + {Http::Headers::get().Cookie.get(), + fmt::format("{}={};version=test", cookie_names.oauth_expires_, expires_at_s)}, + {Http::Headers::get().Cookie.get(), + absl::StrCat(cookie_names.bearer_token_, "=xyztoken;version=test")}, + {Http::Headers::get().Cookie.get(), + absl::StrCat(cookie_names.oauth_hmac_, "=" + "NGQ3MzVjZGExNGM5NTFiZGJjODBkMjBmYjAyYjNiOTFjMmNjYj" + "IxMTUzNmNiNWU0NjQzMmMxMWUzZmE2ZWJjYg==" + ";version=test")}, + }; + + auto cookie_validator = std::make_shared(test_time_, cookie_names); + EXPECT_EQ(cookie_validator->token(), ""); + cookie_validator->setParams(request_headers, "mock-secret"); + + EXPECT_TRUE(cookie_validator->hmacIsValid()); + EXPECT_TRUE(cookie_validator->timestampIsValid()); + EXPECT_TRUE(cookie_validator->isValid()); + + // If we advance time beyond 10s the timestamp should no longer be valid. + test_time_.advanceTimeWait(std::chrono::seconds(11)); + + EXPECT_FALSE(cookie_validator->timestampIsValid()); + EXPECT_FALSE(cookie_validator->isValid()); + } + NiceMock* attachmentTimeout_timer_{}; NiceMock factory_context_; NiceMock decoder_callbacks_; @@ -245,7 +284,7 @@ TEST_F(OAuth2Test, InvalidCluster) { TEST_F(OAuth2Test, DefaultAuthScope) { // Set up proto fields with no auth scope set. - envoy::extensions::filters::http::oauth2::v3alpha::OAuth2Config p; + envoy::extensions::filters::http::oauth2::v3::OAuth2Config p; auto* endpoint = p.mutable_token_endpoint(); endpoint->set_cluster("auth.example.com"); endpoint->set_uri("auth.example.com/_oauth"); @@ -493,37 +532,12 @@ TEST_F(OAuth2Test, OAuthOptionsRequestAndContinue) { // Validates the behavior of the cookie validator. TEST_F(OAuth2Test, CookieValidator) { - // Set SystemTime to a fixed point so we get consistent HMAC encodings between test runs. 
- test_time_.setSystemTime(SystemTime(std::chrono::seconds(0))); - - const auto expires_at_s = DateUtil::nowToSeconds(test_time_.timeSystem()) + 10; - - Http::TestRequestHeaderMapImpl request_headers{ - {Http::Headers::get().Host.get(), "traffic.example.com"}, - {Http::Headers::get().Path.get(), "/anypath"}, - {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, - {Http::Headers::get().Cookie.get(), - fmt::format("OauthExpires={};version=test", expires_at_s)}, - {Http::Headers::get().Cookie.get(), "BearerToken=xyztoken;version=test"}, - {Http::Headers::get().Cookie.get(), - "OauthHMAC=" - "NGQ3MzVjZGExNGM5NTFiZGJjODBkMjBmYjAyYjNiOTFjMmNjYjIxMTUzNmNiNWU0NjQzMmMxMWUzZmE2ZWJjYg==" - ";version=test"}, - }; - - auto cookie_validator = std::make_shared(test_time_); - EXPECT_EQ(cookie_validator->token(), ""); - cookie_validator->setParams(request_headers, "mock-secret"); - - EXPECT_TRUE(cookie_validator->hmacIsValid()); - EXPECT_TRUE(cookie_validator->timestampIsValid()); - EXPECT_TRUE(cookie_validator->isValid()); - - // If we advance time beyond 10s the timestamp should no longer be valid. - test_time_.advanceTimeWait(std::chrono::seconds(11)); + expectValidCookies(CookieNames{"BearerToken", "OauthHMAC", "OauthExpires"}); +} - EXPECT_FALSE(cookie_validator->timestampIsValid()); - EXPECT_FALSE(cookie_validator->isValid()); +// Validates the behavior of the cookie validator with custom cookie names. +TEST_F(OAuth2Test, CookieValidatorWithCustomNames) { + expectValidCookies(CookieNames{"CustomBearerToken", "CustomOauthHMAC", "CustomOauthExpires"}); } // Validates the behavior of the cookie validator when the expires_at value is not a valid integer. @@ -540,7 +554,8 @@ TEST_F(OAuth2Test, CookieValidatorInvalidExpiresAt) { ";version=test"}, }; - auto cookie_validator = std::make_shared(test_time_); + auto cookie_validator = std::make_shared( + test_time_, CookieNames{"BearerToken", "OauthHMAC", "OauthExpires"}); cookie_validator->setParams(request_headers, "mock-secret"); EXPECT_TRUE(cookie_validator->hmacIsValid()); diff --git a/test/extensions/filters/http/oauth2/oauth_integration_test.cc b/test/extensions/filters/http/oauth2/oauth_integration_test.cc index cf675c65f643..256382e819f8 100644 --- a/test/extensions/filters/http/oauth2/oauth_integration_test.cc +++ b/test/extensions/filters/http/oauth2/oauth_integration_test.cc @@ -77,7 +77,7 @@ class OauthIntegrationTest : public testing::Test, public HttpIntegrationTest { config_helper_.prependFilter(TestEnvironment::substitute(R"EOF( name: oauth typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3alpha.OAuth2 + "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3.OAuth2 config: token_endpoint: cluster: oauth @@ -126,19 +126,24 @@ name: oauth bool validateHmac(const Http::ResponseHeaderMap& headers, absl::string_view host, absl::string_view hmac_secret) { - std::string expires = Http::Utility::parseSetCookieValue(headers, "OauthExpires"); - std::string token = Http::Utility::parseSetCookieValue(headers, "BearerToken"); - std::string hmac = Http::Utility::parseSetCookieValue(headers, "OauthHMAC"); + std::string expires = + Http::Utility::parseSetCookieValue(headers, default_cookie_names_.oauth_expires_); + std::string token = + Http::Utility::parseSetCookieValue(headers, default_cookie_names_.bearer_token_); + std::string hmac = + Http::Utility::parseSetCookieValue(headers, default_cookie_names_.oauth_hmac_); Http::TestRequestHeaderMapImpl validate_headers{{":authority", 
std::string(host)}}; - validate_headers.addReferenceKey(Http::Headers::get().Cookie, absl::StrCat("OauthHMAC=", hmac)); validate_headers.addReferenceKey(Http::Headers::get().Cookie, - absl::StrCat("OauthExpires=", expires)); + absl::StrCat(default_cookie_names_.oauth_hmac_, "=", hmac)); + validate_headers.addReferenceKey( + Http::Headers::get().Cookie, + absl::StrCat(default_cookie_names_.oauth_expires_, "=", expires)); validate_headers.addReferenceKey(Http::Headers::get().Cookie, - absl::StrCat("BearerToken=", token)); + absl::StrCat(default_cookie_names_.bearer_token_, "=", token)); - OAuth2CookieValidator validator{api_->timeSource()}; + OAuth2CookieValidator validator{api_->timeSource(), default_cookie_names_}; validator.setParams(validate_headers, std::string(hmac_secret)); return validator.isValid(); } @@ -191,6 +196,8 @@ name: oauth RELEASE_ASSERT(response->waitForEndStream(), "unexpected timeout"); codec_client_->close(); } + + const CookieNames default_cookie_names_{"BearerToken", "OauthHMAC", "OauthExpires"}; }; // Regular request gets redirected to the login page. diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index 5c63045c7d87..8cc7751ea8e9 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -501,14 +501,15 @@ TEST_F(HttpRateLimitFilterTest, LimitResponse) { EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); + Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl()}; Http::TestResponseHeaderMapImpl response_headers{ {":status", "429"}, {"x-envoy-ratelimited", Http::Headers::get().EnvoyRateLimitedValues.True}}; EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true)); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); - EXPECT_CALL(filter_callbacks_.stream_info_, - setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, std::move(h), nullptr, "", nullptr); @@ -553,14 +554,15 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithDynamicMetadata) { EXPECT_TRUE(TestUtility::protoEqual(returned_dynamic_metadata, *dynamic_metadata)); })); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); + Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl()}; Http::TestResponseHeaderMapImpl response_headers{ {":status", "429"}, {"x-envoy-ratelimited", Http::Headers::get().EnvoyRateLimitedValues.True}}; EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true)); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); - EXPECT_CALL(filter_callbacks_.stream_info_, - setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, std::move(h), nullptr, "", std::move(dynamic_metadata)); @@ -598,6 +600,9 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithHeaders) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); + 
Http::HeaderMapPtr rl_headers{new Http::TestResponseHeaderMapImpl{ {"x-ratelimit-limit", "1000"}, {"x-ratelimit-remaining", "0"}, {"retry-after", "33"}}}; Http::TestResponseHeaderMapImpl expected_headers(*rl_headers); @@ -606,8 +611,6 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithHeaders) { EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), true)); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); - EXPECT_CALL(filter_callbacks_.stream_info_, - setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); Http::HeaderMapPtr request_headers_to_add{ new Http::TestRequestHeaderMapImpl{{"x-rls-rate-limited", "true"}}}; @@ -650,6 +653,9 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithBody) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); + const std::string response_body = "this is a custom over limit response body."; const std::string content_length = std::to_string(response_body.length()); Http::HeaderMapPtr rl_headers{new Http::TestResponseHeaderMapImpl{ @@ -669,8 +675,6 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithBody) { EXPECT_CALL(filter_callbacks_, encodeData(_, true)) .WillOnce( Invoke([&](Buffer::Instance& data, bool) { EXPECT_EQ(data.toString(), response_body); })); - EXPECT_CALL(filter_callbacks_.stream_info_, - setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); Http::HeaderMapPtr request_headers_to_add{ new Http::TestRequestHeaderMapImpl{{"x-rls-rate-limited", "true"}}}; @@ -713,6 +717,9 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithBodyAndContentType) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); + const std::string response_body = R"EOF( { "message": "this is a custom over limit response body as json.", "retry-after": "33" } )EOF"; @@ -738,8 +745,6 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithBodyAndContentType) { EXPECT_CALL(filter_callbacks_, encodeData(_, true)) .WillOnce( Invoke([&](Buffer::Instance& data, bool) { EXPECT_EQ(data.toString(), response_body); })); - EXPECT_CALL(filter_callbacks_.stream_info_, - setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); Http::HeaderMapPtr request_headers_to_add{ new Http::TestRequestHeaderMapImpl{{"x-rls-rate-limited", "true"}}}; @@ -774,6 +779,9 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithFilterHeaders) { request_callbacks_ = &callbacks; }))); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, @@ -790,8 +798,6 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithFilterHeaders) { {"x-ratelimit-reset", "3"}}; EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), true)); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); - EXPECT_CALL(filter_callbacks_.stream_info_, - setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); auto descriptor_statuses = { Envoy::RateLimit::buildDescriptorStatus( @@ -829,12 +835,13 @@ 
TEST_F(HttpRateLimitFilterTest, LimitResponseWithoutEnvoyRateLimitedHeader) { EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); + Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl()}; Http::TestResponseHeaderMapImpl response_headers{{":status", "429"}}; EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true)); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); - EXPECT_CALL(filter_callbacks_.stream_info_, - setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, std::move(h), nullptr, "", nullptr); diff --git a/test/extensions/filters/http/wasm/test_data/test_grpc_stream_cpp.cc b/test/extensions/filters/http/wasm/test_data/test_grpc_stream_cpp.cc index e0ebbe048ca2..a7bd41884a7f 100644 --- a/test/extensions/filters/http/wasm/test_data/test_grpc_stream_cpp.cc +++ b/test/extensions/filters/http/wasm/test_data/test_grpc_stream_cpp.cc @@ -81,16 +81,18 @@ FilterHeadersStatus GrpcStreamContextProto::onRequestHeaders(uint32_t, bool) { std::make_pair("source", "grpc_stream_proto")); if (root()->grpcStreamHandler("bogus service string", "service", "method", initial_metadata, std::unique_ptr( - new MyGrpcStreamHandler())) != WasmResult::ParseFailure) { - logError("unexpected bogus service string OK"); + new MyGrpcStreamHandler())) == WasmResult::ParseFailure) { + logError("expected bogus service parse failure"); } if (root()->grpcStreamHandler(grpc_service_string, "service", "bad method", initial_metadata, std::unique_ptr( - new MyGrpcStreamHandler())) != WasmResult::InternalFailure) { - logError("unexpected bogus method OK"); + new MyGrpcStreamHandler())) == WasmResult::InternalFailure) { + logError("expected bogus method call failure"); + } + if (root()->grpcStreamHandler(grpc_service_string, "service", "method", initial_metadata, + std::unique_ptr(new MyGrpcStreamHandler())) == WasmResult::Ok) { + logError("cluster call succeeded"); } - root()->grpcStreamHandler(grpc_service_string, "service", "method", initial_metadata, - std::unique_ptr(new MyGrpcStreamHandler())); return FilterHeadersStatus::StopIteration; } diff --git a/test/extensions/filters/http/wasm/wasm_filter_test.cc b/test/extensions/filters/http/wasm/wasm_filter_test.cc index 773886c5c9f6..3077c669b51b 100644 --- a/test/extensions/filters/http/wasm/wasm_filter_test.cc +++ b/test/extensions/filters/http/wasm/wasm_filter_test.cc @@ -949,8 +949,6 @@ TEST_P(WasmHttpFilterTest, GrpcCall) { setupFilter(); if (id == "grpc_call_proto") { - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.wasm_cluster_name_envoy_grpc", "false"}}); EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("bogus grpc_service accepted error")))); } else { @@ -1029,8 +1027,6 @@ TEST_P(WasmHttpFilterTest, GrpcCallBadCall) { setupFilter(); if (id == "grpc_call_proto") { - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.wasm_cluster_name_envoy_grpc", "false"}}); EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("bogus grpc_service accepted error")))); } else { @@ -1073,8 +1069,6 @@ TEST_P(WasmHttpFilterTest, GrpcCallFailure) { setupFilter(); if (id == "grpc_call_proto") { - Runtime::LoaderSingleton::getExisting()->mergeValues( - 
{{"envoy.reloadable_features.wasm_cluster_name_envoy_grpc", "false"}}); EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("bogus grpc_service accepted error")))); } else { @@ -1165,8 +1159,6 @@ TEST_P(WasmHttpFilterTest, GrpcCallCancel) { setupFilter(); if (id == "grpc_call_proto") { - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.wasm_cluster_name_envoy_grpc", "false"}}); EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("bogus grpc_service accepted error")))); } else { @@ -1226,8 +1218,6 @@ TEST_P(WasmHttpFilterTest, GrpcCallClose) { setupFilter(); if (id == "grpc_call_proto") { - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.wasm_cluster_name_envoy_grpc", "false"}}); EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("bogus grpc_service accepted error")))); } else { @@ -1287,8 +1277,6 @@ TEST_P(WasmHttpFilterTest, GrpcCallAfterDestroyed) { setupFilter(); if (id == "grpc_call_proto") { - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.wasm_cluster_name_envoy_grpc", "false"}}); EXPECT_CALL(filter(), log_(spdlog::level::err, Eq(absl::string_view("bogus grpc_service accepted error")))); } else { @@ -1397,8 +1385,12 @@ TEST_P(WasmHttpFilterTest, GrpcStream) { setupGrpcStreamTest(callbacks, id); if (id == "grpc_stream_proto") { - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.wasm_cluster_name_envoy_grpc", "false"}}); + EXPECT_CALL(filter(), log_(spdlog::level::err, + Eq(absl::string_view("expected bogus service parse failure")))); + EXPECT_CALL(filter(), log_(spdlog::level::err, + Eq(absl::string_view("expected bogus method call failure")))); + EXPECT_CALL(filter(), + log_(spdlog::level::err, Eq(absl::string_view("cluster call succeeded")))); } else { cluster_manager_.initializeThreadLocalClusters({"cluster"}); EXPECT_CALL(filter(), log_(spdlog::level::err, @@ -1459,8 +1451,12 @@ TEST_P(WasmHttpFilterTest, GrpcStreamCloseLocal) { setupGrpcStreamTest(callbacks, id); if (id == "grpc_stream_proto") { - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.wasm_cluster_name_envoy_grpc", "false"}}); + EXPECT_CALL(filter(), log_(spdlog::level::err, + Eq(absl::string_view("expected bogus service parse failure")))); + EXPECT_CALL(filter(), log_(spdlog::level::err, + Eq(absl::string_view("expected bogus method call failure")))); + EXPECT_CALL(filter(), + log_(spdlog::level::err, Eq(absl::string_view("cluster call succeeded")))); } else { cluster_manager_.initializeThreadLocalClusters({"cluster"}); EXPECT_CALL(filter(), log_(spdlog::level::err, @@ -1520,8 +1516,12 @@ TEST_P(WasmHttpFilterTest, GrpcStreamCloseRemote) { setupGrpcStreamTest(callbacks, id); if (id == "grpc_stream_proto") { - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.wasm_cluster_name_envoy_grpc", "false"}}); + EXPECT_CALL(filter(), log_(spdlog::level::err, + Eq(absl::string_view("expected bogus service parse failure")))); + EXPECT_CALL(filter(), log_(spdlog::level::err, + Eq(absl::string_view("expected bogus method call failure")))); + EXPECT_CALL(filter(), + log_(spdlog::level::err, Eq(absl::string_view("cluster call succeeded")))); } else { cluster_manager_.initializeThreadLocalClusters({"cluster"}); EXPECT_CALL(filter(), log_(spdlog::level::err, @@ -1576,8 +1576,12 @@ TEST_P(WasmHttpFilterTest, GrpcStreamCancel) { setupGrpcStreamTest(callbacks, id); if (id == 
"grpc_stream_proto") { - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.wasm_cluster_name_envoy_grpc", "false"}}); + EXPECT_CALL(filter(), log_(spdlog::level::err, + Eq(absl::string_view("expected bogus service parse failure")))); + EXPECT_CALL(filter(), log_(spdlog::level::err, + Eq(absl::string_view("expected bogus method call failure")))); + EXPECT_CALL(filter(), + log_(spdlog::level::err, Eq(absl::string_view("cluster call succeeded")))); } else { cluster_manager_.initializeThreadLocalClusters({"cluster"}); EXPECT_CALL(filter(), log_(spdlog::level::err, @@ -1627,8 +1631,12 @@ TEST_P(WasmHttpFilterTest, GrpcStreamOpenAtShutdown) { setupGrpcStreamTest(callbacks, id); if (id == "grpc_stream_proto") { - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.wasm_cluster_name_envoy_grpc", "false"}}); + EXPECT_CALL(filter(), log_(spdlog::level::err, + Eq(absl::string_view("expected bogus service parse failure")))); + EXPECT_CALL(filter(), log_(spdlog::level::err, + Eq(absl::string_view("expected bogus method call failure")))); + EXPECT_CALL(filter(), + log_(spdlog::level::err, Eq(absl::string_view("cluster call succeeded")))); } else { cluster_manager_.initializeThreadLocalClusters({"cluster"}); EXPECT_CALL(filter(), log_(spdlog::level::err, diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc index 8499c8e3428d..f5e68eae1d9a 100644 --- a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc +++ b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc @@ -44,11 +44,10 @@ class HttpInspectorTest : public testing::Test { if (include_inline_recv) { EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) - .WillOnce(Return(Api::SysCallSizeResult{static_cast(0), 0})); + .WillOnce(Return(Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN})); - EXPECT_CALL(dispatcher_, - createFileEvent_(_, _, Event::PlatformDefaultTriggerType, - Event::FileReadyType::Read | Event::FileReadyType::Closed)) + EXPECT_CALL(dispatcher_, createFileEvent_(_, _, Event::PlatformDefaultTriggerType, + Event::FileReadyType::Read)) .WillOnce(DoAll(SaveArg<1>(&file_event_callback_), ReturnNew>())); @@ -334,11 +333,10 @@ TEST_F(HttpInspectorTest, InspectHttp2) { TEST_F(HttpInspectorTest, ReadClosed) { init(); - EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)); - EXPECT_CALL(socket_, close()); - EXPECT_CALL(cb_, continueFilterChain(true)); - socket_.close(); - file_event_callback_(Event::FileReadyType::Closed); + EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) + .WillOnce(Return(Api::SysCallSizeResult{0, 0})); + EXPECT_CALL(cb_, continueFilterChain(false)); + file_event_callback_(Event::FileReadyType::Read); EXPECT_EQ(0, cfg_->stats().http2_found_.value()); } diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index 763b774fe438..6a7b0634a8bb 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -703,7 +703,7 @@ TEST_P(ProxyProtocolTest, Fragmented) { } TEST_P(ProxyProtocolTest, V2Fragmented1) { - // A well-formed ipv4/tcp header, delivering part of the signature, then part of + // A well-formed ipv4/tcp message, delivering part of the signature, then part of // the address, then the remainder constexpr 
uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04, @@ -725,7 +725,7 @@ TEST_P(ProxyProtocolTest, V2Fragmented1) { } TEST_P(ProxyProtocolTest, V2Fragmented2) { - // A well-formed ipv4/tcp header, delivering all of the signature + 1, then the remainder + // A well-formed ipv4/tcp message, delivering all of the header + 1, then the remainder constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04, 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02, 'm', 'o', @@ -746,8 +746,32 @@ TEST_P(ProxyProtocolTest, V2Fragmented2) { disconnect(); } -TEST_P(ProxyProtocolTest, V2Fragmented3Error) { - // A well-formed ipv4/tcp header, delivering all of the signature +1, w/ an error +TEST_P(ProxyProtocolTest, V2Fragmented3) { + // A well-formed ipv4/tcp message, delivering all of the header, then the remainder. + // Do not mistakenly consider that remote has closed when it happens to only read the + // header of the message. See: https://github.com/envoyproxy/envoy/pull/18304 + constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, + 0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04, + 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02, 'm', 'o', + 'r', 'e', ' ', 'd', 'a', 't', 'a'}; + connect(); + write(buffer, 16); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + write(buffer + 16, 10); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + write(buffer + 26, 11); + + expectData("more data"); + + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), + "1.2.3.4"); + EXPECT_TRUE(server_connection_->connectionInfoProvider().localAddressRestored()); + + disconnect(); +} + +TEST_P(ProxyProtocolTest, V2Fragmented4Error) { + // A well-formed ipv4/tcp message, delivering all of the header +1, w/ an error // simulated in recv() on the +1 constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04, @@ -813,8 +837,8 @@ TEST_P(ProxyProtocolTest, V2Fragmented3Error) { expectProxyProtoError(); } -TEST_P(ProxyProtocolTest, V2Fragmented4Error) { - // A well-formed ipv4/tcp header, part of the signature with an error introduced +TEST_P(ProxyProtocolTest, V2Fragmented5Error) { + // A well-formed ipv4/tcp message, part of the signature with an error introduced // in recv() on the remainder constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54, 0x0a, 0x21, 0x11, 0x00, 0x0c, 0x01, 0x02, 0x03, 0x04, diff --git a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc index 902855a7b4c8..0a9d074d7138 100644 --- a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc +++ b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc @@ -17,6 +17,7 @@ using testing::InSequence; using testing::Invoke; using testing::InvokeWithoutArgs; using testing::NiceMock; +using testing::Return; using testing::ReturnNew; using testing::ReturnRef; using testing::SaveArg; @@ -46,11 +47,10 @@ class TlsInspectorTest : public testing::TestWithParam Api::SysCallSizeResult { ENVOY_LOG_MISC(error, "In mock syscall recv {} {} {} {}", fd, buffer, length, flag); - return Api::SysCallSizeResult{static_cast(0), 0}; + return 
Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); - EXPECT_CALL(dispatcher_, - createFileEvent_(_, _, Event::PlatformDefaultTriggerType, - Event::FileReadyType::Read | Event::FileReadyType::Closed)) + EXPECT_CALL(dispatcher_, createFileEvent_(_, _, Event::PlatformDefaultTriggerType, + Event::FileReadyType::Read)) .WillOnce( DoAll(SaveArg<1>(&file_event_callback_), ReturnNew>())); filter_->onAccept(cb_); @@ -85,8 +85,10 @@ TEST_P(TlsInspectorTest, MaxClientHelloSize) { // Test that the filter detects Closed events and terminates. TEST_P(TlsInspectorTest, ConnectionClosed) { init(); + EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) + .WillOnce(Return(Api::SysCallSizeResult{0, 0})); EXPECT_CALL(cb_, continueFilterChain(false)); - file_event_callback_(Event::FileReadyType::Closed); + file_event_callback_(Event::FileReadyType::Read); EXPECT_EQ(1, cfg_->stats().connection_closed_.value()); } diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1 index 21ad6d880835..3fe10beb50e5 100644 --- a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1 +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1 @@ -1,7 +1,7 @@ config { name: "envoy.filters.network.sni_dynamic_forward_proxy" typed_config { - type_url: "type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig" + type_url: "type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3.FilterConfig" value: "\nP\nFenvoy.network.sni_dynamic_fo.filters.network.sni_dynamic_forward_proxy*\006\010\200\200\200\260\002" } } diff --git a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc index 8cdbe6162927..d74e12fa8f77 100644 --- a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc @@ -832,7 +832,7 @@ TEST_F(ConnectionManagerTest, ResponseWithUnknownSequenceID) { TEST_F(ConnectionManagerTest, OnDataWithFilterSendsLocalReply) { initializeFilter(); - writeHessianRequestMessage(buffer_, false, false, 1); + writeHessianRequestMessage(buffer_, false, false, 233333); config_->setupFilterChain(2, 0); config_->expectOnDestroy(); @@ -847,8 +847,10 @@ TEST_F(ConnectionManagerTest, OnDataWithFilterSendsLocalReply) { const std::string fake_response("mock dubbo response"); NiceMock direct_response; EXPECT_CALL(direct_response, encode(_, _, _)) - .WillOnce(Invoke([&](MessageMetadata&, Protocol&, + .WillOnce(Invoke([&](MessageMetadata& metadata, Protocol&, Buffer::Instance& buffer) -> DubboFilters::DirectResponse::ResponseType { + // Validate request id. 
+ EXPECT_EQ(metadata.requestId(), 233333); buffer.add(fake_response); return DubboFilters::DirectResponse::ResponseType::SuccessReply; })); @@ -878,7 +880,7 @@ TEST_F(ConnectionManagerTest, OnDataWithFilterSendsLocalReply) { TEST_F(ConnectionManagerTest, OnDataWithFilterSendsLocalErrorReply) { initializeFilter(); - writeHessianRequestMessage(buffer_, false, false, 1); + writeHessianRequestMessage(buffer_, false, false, 233334); config_->setupFilterChain(2, 0); config_->expectOnDestroy(); @@ -893,8 +895,10 @@ TEST_F(ConnectionManagerTest, OnDataWithFilterSendsLocalErrorReply) { const std::string fake_response("mock dubbo response"); NiceMock direct_response; EXPECT_CALL(direct_response, encode(_, _, _)) - .WillOnce(Invoke([&](MessageMetadata&, Protocol&, + .WillOnce(Invoke([&](MessageMetadata& metadata, Protocol&, Buffer::Instance& buffer) -> DubboFilters::DirectResponse::ResponseType { + // Validate request id. + EXPECT_EQ(metadata.requestId(), 233334); buffer.add(fake_response); return DubboFilters::DirectResponse::ResponseType::ErrorReply; })); diff --git a/test/extensions/filters/network/ext_authz/ext_authz_test.cc b/test/extensions/filters/network/ext_authz/ext_authz_test.cc index 0f2190a17d97..4fa54f5d13ba 100644 --- a/test/extensions/filters/network/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/network/ext_authz/ext_authz_test.cc @@ -206,6 +206,11 @@ TEST_F(ExtAuthzFilterTest, DeniedWithOnData) { stats_store_.gauge("ext_authz.name.active", Stats::Gauge::ImportMode::Accumulate).value()); EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::UnauthorizedExternalService)); + EXPECT_CALL( + filter_callbacks_.connection_.stream_info_, + setResponseCodeDetails(Filters::Common::ExtAuthz::ResponseCodeDetails::get().AuthzDenied)); EXPECT_CALL(*client_, cancel()).Times(0); request_callbacks_->onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Denied)); @@ -276,6 +281,11 @@ TEST_F(ExtAuthzFilterTest, FailClose) { EXPECT_CALL(filter_callbacks_.connection_, close(_)); EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::UnauthorizedExternalService)); + EXPECT_CALL( + filter_callbacks_.connection_.stream_info_, + setResponseCodeDetails(Filters::Common::ExtAuthz::ResponseCodeDetails::get().AuthzError)); request_callbacks_->onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Error)); EXPECT_EQ(0U, stats_store_.counter("ext_authz.name.disabled").value()); @@ -429,7 +439,11 @@ TEST_F(ExtAuthzFilterTest, ImmediateNOK) { EXPECT_EQ(ns, NetworkFilterNames::get().ExtAuthorization); EXPECT_TRUE(TestUtility::protoEqual(returned_dynamic_metadata, dynamic_metadata)); })); - + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::UnauthorizedExternalService)); + EXPECT_CALL( + filter_callbacks_.connection_.stream_info_, + setResponseCodeDetails(Filters::Common::ExtAuthz::ResponseCodeDetails::get().AuthzDenied)); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index 
18c1963ceacc..acd03ec923d1 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -2167,6 +2167,38 @@ stat_prefix: router route: cluster: cluster http_filters: +- name: foo + config_discovery: + config_source: { resource_api_version: V3, ads: {} } + default_config: + "@type": type.googleapis.com/xds.type.v3.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + type_urls: + - type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck +- name: envoy.filters.http.router + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Error: filter config has type URL envoy.extensions.filters.http.router.v3.Router but " + "expect envoy.extensions.filters.http.health_check.v3.HealthCheck."); +} + +TEST_F(HttpConnectionManagerConfigTest, DynamicFilterDefaultRequireTypeUrlWithOldTypedStruct) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: - name: foo config_discovery: config_source: { resource_api_version: V3, ads: {} } diff --git a/test/extensions/filters/network/ratelimit/BUILD b/test/extensions/filters/network/ratelimit/BUILD index f4a3ac2fa52e..ea09ac87dbb3 100644 --- a/test/extensions/filters/network/ratelimit/BUILD +++ b/test/extensions/filters/network/ratelimit/BUILD @@ -29,6 +29,7 @@ envoy_extension_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/ratelimit:ratelimit_mocks", "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/stream_info:stream_info_mocks", "//test/mocks/tracing:tracing_mocks", "@envoy_api//envoy/extensions/filters/network/ratelimit/v3:pkg_cc_proto", diff --git a/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD b/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD index 6519261f14b5..ee11ffa8a5bc 100644 --- a/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD +++ b/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD @@ -22,7 +22,7 @@ envoy_extension_cc_test( "//test/mocks/http:http_mocks", "//test/mocks/upstream:basic_resource_limit_mocks", "//test/mocks/upstream:cluster_manager_mocks", - "@envoy_api//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc index 8a6d9c62a224..6d41a92a4298 100644 --- a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -39,7 +39,7 @@ class SniDynamicProxyFilterIntegrationTest fmt::format(R"EOF( name: envoy.filters.http.dynamic_forward_proxy typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig + "@type": type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3.FilterConfig dns_cache_config: name: foo dns_lookup_family: {} diff --git 
a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc index 5eda1b49de6b..cbd3816f3166 100644 --- a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc @@ -1,4 +1,4 @@ -#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.pb.h" +#include "envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.pb.h" #include "envoy/network/connection.h" #include "source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h" diff --git a/test/extensions/filters/network/thrift_proxy/BUILD b/test/extensions/filters/network/thrift_proxy/BUILD index cd3fe562efe1..cefb499da6a7 100644 --- a/test/extensions/filters/network/thrift_proxy/BUILD +++ b/test/extensions/filters/network/thrift_proxy/BUILD @@ -277,6 +277,7 @@ envoy_extension_cc_test( "//test/mocks/upstream:host_mocks", "//test/test_common:printers_lib", "//test/test_common:registry_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/filter/thrift/router/v2alpha1:pkg_cc_proto", ], ) @@ -353,6 +354,7 @@ envoy_extension_cc_test( "//test/extensions/filters/network/thrift_proxy/driver:generate_fixture", ], extension_names = ["envoy.filters.network.thrift_proxy"], + shard_count = 4, deps = [ ":integration_lib", ":utility_lib", @@ -378,6 +380,7 @@ envoy_extension_cc_test( "//test/mocks/upstream:host_mocks", "//test/test_common:printers_lib", "//test/test_common:registry_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc index 7ac6ee7bf94b..ff9dd784f622 100644 --- a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc @@ -1750,9 +1750,9 @@ payload_passthrough: true EXPECT_EQ(1U, store_.counter("test.response_reply").value()); EXPECT_EQ(0U, store_.counter("test.response_exception").value()); EXPECT_EQ(0U, store_.counter("test.response_invalid_type").value()); - // In payload_passthrough mode, Envoy cannot detect response error. - EXPECT_EQ(1U, store_.counter("test.response_success").value()); - EXPECT_EQ(0U, store_.counter("test.response_error").value()); + EXPECT_EQ(1U, store_.counter("test.response_passthrough").value()); + EXPECT_EQ(0U, store_.counter("test.response_success").value()); + EXPECT_EQ(1U, store_.counter("test.response_error").value()); } TEST_F(ThriftConnectionManagerTest, PayloadPassthroughRequestAndInvalidResponse) { diff --git a/test/extensions/filters/network/thrift_proxy/integration_test.cc b/test/extensions/filters/network/thrift_proxy/integration_test.cc index d13f9ddd2975..179861e24496 100644 --- a/test/extensions/filters/network/thrift_proxy/integration_test.cc +++ b/test/extensions/filters/network/thrift_proxy/integration_test.cc @@ -1,5 +1,6 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "source/common/common/fmt.h" #include "source/extensions/filters/network/thrift_proxy/buffer_helper.h" #include "test/extensions/filters/network/thrift_proxy/integration.h" @@ -148,6 +149,11 @@ class ThriftConnManagerIntegrationTest // while oneway's are handled by the "poke" method. 
All other requests // are handled by "execute". FakeUpstream* getExpectedUpstream(bool oneway) { + int upstreamIdx = getExpectedUpstreamIdx(oneway); + return fake_upstreams_[upstreamIdx].get(); + } + + int getExpectedUpstreamIdx(bool oneway) { int upstreamIdx = 2; if (multiplexed_) { upstreamIdx = 0; @@ -157,7 +163,7 @@ class ThriftConnManagerIntegrationTest upstreamIdx = 1; } - return fake_upstreams_[upstreamIdx].get(); + return upstreamIdx; } TransportType transport_; @@ -225,8 +231,29 @@ TEST_P(ThriftConnManagerIntegrationTest, Success) { Stats::CounterSharedPtr counter = test_server_->counter("thrift.thrift_stats.request_call"); EXPECT_EQ(1U, counter->value()); + int upstream_idx = getExpectedUpstreamIdx(false); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_rq_call", upstream_idx)); + EXPECT_EQ(1U, counter->value()); + if (payload_passthrough_ && + (transport_ == TransportType::Framed || transport_ == TransportType::Header) && + protocol_ != ProtocolType::Twitter) { + counter = test_server_->counter("thrift.thrift_stats.response_passthrough"); + EXPECT_EQ(1U, counter->value()); + } else { + counter = test_server_->counter("thrift.thrift_stats.response_passthrough"); + EXPECT_EQ(0U, counter->value()); + } + counter = test_server_->counter("thrift.thrift_stats.response_reply"); + EXPECT_EQ(1U, counter->value()); counter = test_server_->counter("thrift.thrift_stats.response_success"); EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_resp_reply", upstream_idx)); + EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_resp_success", upstream_idx)); + EXPECT_EQ(1U, counter->value()); } TEST_P(ThriftConnManagerIntegrationTest, IDLException) { @@ -252,13 +279,28 @@ TEST_P(ThriftConnManagerIntegrationTest, IDLException) { Stats::CounterSharedPtr counter = test_server_->counter("thrift.thrift_stats.request_call"); EXPECT_EQ(1U, counter->value()); - counter = test_server_->counter("thrift.thrift_stats.response_error"); - if (payload_passthrough_ && transport_ == TransportType::Framed && + int upstream_idx = getExpectedUpstreamIdx(false); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_rq_call", upstream_idx)); + if (payload_passthrough_ && + (transport_ == TransportType::Framed || transport_ == TransportType::Header) && protocol_ != ProtocolType::Twitter) { - EXPECT_EQ(0U, counter->value()); - } else { + counter = test_server_->counter("thrift.thrift_stats.response_passthrough"); EXPECT_EQ(1U, counter->value()); + } else { + counter = test_server_->counter("thrift.thrift_stats.response_passthrough"); + EXPECT_EQ(0U, counter->value()); } + counter = test_server_->counter("thrift.thrift_stats.response_reply"); + EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter("thrift.thrift_stats.response_error"); + EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_resp_reply", upstream_idx)); + EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_resp_error", upstream_idx)); + EXPECT_EQ(1U, counter->value()); } TEST_P(ThriftConnManagerIntegrationTest, Exception) { @@ -284,8 +326,15 @@ TEST_P(ThriftConnManagerIntegrationTest, Exception) { Stats::CounterSharedPtr counter = test_server_->counter("thrift.thrift_stats.request_call"); EXPECT_EQ(1U, 
counter->value()); + int upstream_idx = getExpectedUpstreamIdx(false); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_rq_call", upstream_idx)); + EXPECT_EQ(1U, counter->value()); counter = test_server_->counter("thrift.thrift_stats.response_exception"); EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_resp_exception", upstream_idx)); + EXPECT_EQ(1U, counter->value()); } TEST_P(ThriftConnManagerIntegrationTest, EarlyClose) { @@ -361,6 +410,10 @@ TEST_P(ThriftConnManagerIntegrationTest, EarlyUpstreamClose) { Stats::CounterSharedPtr counter = test_server_->counter("thrift.thrift_stats.request_call"); EXPECT_EQ(1U, counter->value()); + int upstream_idx = getExpectedUpstreamIdx(false); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_rq_call", upstream_idx)); + EXPECT_EQ(1U, counter->value()); counter = test_server_->counter("thrift.thrift_stats.response_exception"); EXPECT_EQ(1U, counter->value()); } @@ -492,10 +545,18 @@ TEST_P(ThriftTwitterConnManagerIntegrationTest, Success) { EXPECT_TRUE(TestUtility::buffersEqual( Buffer::OwnedImpl(tcp_client->data().substr(upgrade_response_size)), response_bytes_)); + // 2 requests on downstream but the first is an upgrade, so only one on upstream side Stats::CounterSharedPtr counter = test_server_->counter("thrift.thrift_stats.request_call"); EXPECT_EQ(2U, counter->value()); + int upstream_idx = getExpectedUpstreamIdx(false); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_rq_call", upstream_idx)); + EXPECT_EQ(1U, counter->value()); counter = test_server_->counter("thrift.thrift_stats.response_success"); EXPECT_EQ(2U, counter->value()); + counter = test_server_->counter( + fmt::format("cluster.cluster_{}.thrift.upstream_resp_success", upstream_idx)); + EXPECT_EQ(1U, counter->value()); #endif } diff --git a/test/extensions/filters/network/thrift_proxy/mocks.h b/test/extensions/filters/network/thrift_proxy/mocks.h index b3eddda2cb35..97b59928e580 100644 --- a/test/extensions/filters/network/thrift_proxy/mocks.h +++ b/test/extensions/filters/network/thrift_proxy/mocks.h @@ -10,6 +10,7 @@ #include "source/extensions/filters/network/thrift_proxy/protocol.h" #include "source/extensions/filters/network/thrift_proxy/router/router.h" #include "source/extensions/filters/network/thrift_proxy/router/router_ratelimit.h" +#include "source/extensions/filters/network/thrift_proxy/thrift.h" #include "source/extensions/filters/network/thrift_proxy/transport.h" #include "test/mocks/network/mocks.h" @@ -65,6 +66,7 @@ class MockProtocol : public Protocol { MOCK_METHOD(void, setType, (ProtocolType)); MOCK_METHOD(bool, readMessageBegin, (Buffer::Instance & buffer, MessageMetadata& metadata)); MOCK_METHOD(bool, readMessageEnd, (Buffer::Instance & buffer)); + MOCK_METHOD(bool, peekReplyPayload, (Buffer::Instance & buffer, ReplyType& reply_type)); MOCK_METHOD(bool, readStructBegin, (Buffer::Instance & buffer, std::string& name)); MOCK_METHOD(bool, readStructEnd, (Buffer::Instance & buffer)); MOCK_METHOD(bool, readFieldBegin, @@ -360,8 +362,6 @@ class MockShadowWriter : public ShadowWriter { ~MockShadowWriter() override; MOCK_METHOD(Upstream::ClusterManager&, clusterManager, (), ()); - MOCK_METHOD(std::string&, statPrefix, (), (const)); - MOCK_METHOD(Stats::Scope&, scope, (), ()); MOCK_METHOD(Event::Dispatcher&, dispatcher, (), ()); MOCK_METHOD(absl::optional>, submit, (const std::string&, 
MessageMetadataSharedPtr, TransportType, ProtocolType), ()); diff --git a/test/extensions/filters/network/thrift_proxy/requirements.txt b/test/extensions/filters/network/thrift_proxy/requirements.txt index f5868c4c7d44..4b7d0cd7ca83 100644 --- a/test/extensions/filters/network/thrift_proxy/requirements.txt +++ b/test/extensions/filters/network/thrift_proxy/requirements.txt @@ -1,5 +1,5 @@ -thrift==0.13.0 \ - --hash=sha256:9af1c86bf73433afc6010ed376a6c6aca2b54099cc0d61895f640870a9ae7d89 six==1.16.0 \ - --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 \ - --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 + --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ + --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 +thrift==0.15.0 \ + --hash=sha256:87c8205a71cf8bbb111cb99b1f7495070fbc9cabb671669568854210da5b3e29 diff --git a/test/extensions/filters/network/thrift_proxy/router_test.cc b/test/extensions/filters/network/thrift_proxy/router_test.cc index 22bcf7ca0910..c001013f0188 100644 --- a/test/extensions/filters/network/thrift_proxy/router_test.cc +++ b/test/extensions/filters/network/thrift_proxy/router_test.cc @@ -1,5 +1,6 @@ #include +#include "envoy/config/core/v3/base.pb.h" #include "envoy/config/filter/thrift/router/v2alpha1/router.pb.h" #include "envoy/config/filter/thrift/router/v2alpha1/router.pb.validate.h" #include "envoy/tcp/conn_pool.h" @@ -111,14 +112,15 @@ class ThriftRouterTestBase { route_ = new NiceMock(); route_ptr_.reset(route_); + stats_ = std::make_shared("test", context_.scope(), context_.localInfo()); if (!use_real_shadow_writer) { - router_ = std::make_unique(context_.clusterManager(), "test", context_.scope(), - context_.runtime(), shadow_writer_); + router_ = std::make_unique(context_.clusterManager(), *stats_, context_.runtime(), + shadow_writer_); } else { - shadow_writer_impl_ = std::make_shared( - context_.clusterManager(), "test", context_.scope(), dispatcher_, context_.threadLocal()); - router_ = std::make_unique(context_.clusterManager(), "test", context_.scope(), - context_.runtime(), *shadow_writer_impl_); + shadow_writer_impl_ = std::make_shared(context_.clusterManager(), *stats_, + dispatcher_, context_.threadLocal()); + router_ = std::make_unique(context_.clusterManager(), *stats_, context_.runtime(), + *shadow_writer_impl_); } EXPECT_EQ(nullptr, router_->downstreamConnection()); @@ -136,6 +138,102 @@ class ThriftRouterTestBase { metadata_->setSequenceId(sequence_id); } + void verifyMetadataMatchCriteriaFromRequest(bool route_entry_has_match) { + ProtobufWkt::Struct request_struct; + ProtobufWkt::Value val; + + // Populate metadata like StreamInfo.setDynamicMetadata() would. + auto& fields_map = *request_struct.mutable_fields(); + val.set_string_value("v3.1"); + fields_map["version"] = val; + val.set_string_value("devel"); + fields_map["stage"] = val; + val.set_string_value("1"); + fields_map["xkey_in_request"] = val; + (*callbacks_.stream_info_.metadata_ + .mutable_filter_metadata())[Envoy::Config::MetadataFilters::get().ENVOY_LB] = + request_struct; + + // Populate route entry's metadata which will be overridden. 
+ val.set_string_value("v3.0"); + fields_map = *request_struct.mutable_fields(); + fields_map["version"] = val; + fields_map.erase("xkey_in_request"); + Envoy::Router::MetadataMatchCriteriaImpl route_entry_matches(request_struct); + + if (route_entry_has_match) { + ON_CALL(route_entry_, metadataMatchCriteria()).WillByDefault(Return(&route_entry_matches)); + } else { + ON_CALL(route_entry_, metadataMatchCriteria()).WillByDefault(Return(nullptr)); + } + + auto match = router_->metadataMatchCriteria()->metadataMatchCriteria(); + EXPECT_EQ(match.size(), 3); + auto it = match.begin(); + + // Note: metadataMatchCriteria() keeps its entries sorted, so the order for checks + // below matters. + + // `stage` was only set by the request, not by the route entry. + EXPECT_EQ((*it)->name(), "stage"); + EXPECT_EQ((*it)->value().value().string_value(), "devel"); + it++; + + // `version` should be what came from the request and override the route entry's. + EXPECT_EQ((*it)->name(), "version"); + EXPECT_EQ((*it)->value().value().string_value(), "v3.1"); + it++; + + // `xkey_in_request` was only set by the request + EXPECT_EQ((*it)->name(), "xkey_in_request"); + EXPECT_EQ((*it)->value().value().string_value(), "1"); + } + + void verifyMetadataMatchCriteriaFromRoute(bool route_entry_has_match) { + ProtobufWkt::Struct route_struct; + ProtobufWkt::Value val; + + auto& fields_map = *route_struct.mutable_fields(); + val.set_string_value("v3.1"); + fields_map["version"] = val; + val.set_string_value("devel"); + fields_map["stage"] = val; + + Envoy::Router::MetadataMatchCriteriaImpl route_entry_matches(route_struct); + + if (route_entry_has_match) { + ON_CALL(route_entry_, metadataMatchCriteria()).WillByDefault(Return(&route_entry_matches)); + + EXPECT_NE(nullptr, router_->metadataMatchCriteria()); + auto match = router_->metadataMatchCriteria()->metadataMatchCriteria(); + EXPECT_EQ(match.size(), 2); + auto it = match.begin(); + + // Note: metadataMatchCriteria() keeps its entries sorted, so the order for checks + // below matters. + + // `stage` was set by the route entry. + EXPECT_EQ((*it)->name(), "stage"); + EXPECT_EQ((*it)->value().value().string_value(), "devel"); + it++; + + // `version` was set by the route entry. 
+ EXPECT_EQ((*it)->name(), "version"); + EXPECT_EQ((*it)->value().value().string_value(), "v3.1"); + } else { + ON_CALL(callbacks_.route_->route_entry_, metadataMatchCriteria()) + .WillByDefault(Return(nullptr)); + + EXPECT_EQ(nullptr, router_->metadataMatchCriteria()); + } + } + + void initializeUpstreamZone() { + upstream_locality_.set_zone("other_zone_name"); + ON_CALL(*context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_, locality()) + .WillByDefault(ReturnRef(upstream_locality_)); + } + void startRequest(MessageType msg_type, std::string method = "method", const bool strip_service_name = false, const TransportType transport_type = TransportType::Framed, @@ -482,6 +580,7 @@ class ThriftRouterTestBase { NiceMock context_; std::unique_ptr router_; + std::shared_ptr stats_; MockShadowWriter shadow_writer_; std::shared_ptr shadow_writer_impl_; @@ -496,7 +595,7 @@ class ThriftRouterTestBase { int32_t protocols_requested_{}; NiceMock* route_{}; NiceMock route_entry_; - NiceMock* host_{}; + envoy::config::core::v3::Locality upstream_locality_; Tcp::ConnectionPool::ConnectionStatePtr conn_state_; RouteConstSharedPtr route_ptr_; @@ -585,6 +684,15 @@ TEST_F(ThriftRouterTest, PoolRemoteConnectionFailure) { putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.poolFailure( ConnectionPool::PoolFailureReason::RemoteConnectionFailure); + + EXPECT_EQ(1UL, context_.cluster_manager_.thread_local_cluster_.cluster_.info_->statsScope() + .counterFromString("thrift.upstream_resp_exception_local") + .value()); + EXPECT_EQ(1UL, context_.cluster_manager_.thread_local_cluster_.cluster_.info_->statsScope() + .counterFromString("thrift.upstream_resp_exception") + .value()); + EXPECT_EQ(0UL, context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->stats_ + .rq_error_.value()); } TEST_F(ThriftRouterTest, PoolLocalConnectionFailure) { @@ -600,6 +708,9 @@ TEST_F(ThriftRouterTest, PoolLocalConnectionFailure) { putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.poolFailure( ConnectionPool::PoolFailureReason::LocalConnectionFailure); + + EXPECT_EQ(0UL, context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->stats_ + .rq_error_.value()); } TEST_F(ThriftRouterTest, PoolTimeout) { @@ -623,6 +734,15 @@ TEST_F(ThriftRouterTest, PoolTimeout) { putResult(Upstream::Outlier::Result::LocalOriginTimeout, _)); context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.poolFailure( ConnectionPool::PoolFailureReason::Timeout); + + EXPECT_EQ(1UL, context_.cluster_manager_.thread_local_cluster_.cluster_.info_->statsScope() + .counterFromString("thrift.upstream_resp_exception_local") + .value()); + EXPECT_EQ(1UL, context_.cluster_manager_.thread_local_cluster_.cluster_.info_->statsScope() + .counterFromString("thrift.upstream_resp_exception") + .value()); + EXPECT_EQ(0UL, context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->stats_ + .rq_error_.value()); } TEST_F(ThriftRouterTest, PoolOverflowFailure) { @@ -643,6 +763,15 @@ TEST_F(ThriftRouterTest, PoolOverflowFailure) { })); context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.poolFailure( ConnectionPool::PoolFailureReason::Overflow, true); + + EXPECT_EQ(1UL, context_.cluster_manager_.thread_local_cluster_.cluster_.info_->statsScope() + .counterFromString("thrift.upstream_resp_exception_local") + .value()); + EXPECT_EQ(1UL, 
context_.cluster_manager_.thread_local_cluster_.cluster_.info_->statsScope() + .counterFromString("thrift.upstream_resp_exception") + .value()); + EXPECT_EQ(0UL, context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->stats_ + .rq_error_.value()); } TEST_F(ThriftRouterTest, PoolConnectionFailureWithOnewayMessage) { @@ -658,6 +787,12 @@ TEST_F(ThriftRouterTest, PoolConnectionFailureWithOnewayMessage) { context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.poolFailure( ConnectionPool::PoolFailureReason::RemoteConnectionFailure); + EXPECT_EQ(0UL, context_.cluster_manager_.thread_local_cluster_.cluster_.info_->statsScope() + .counterFromString("thrift.upstream_resp_exception") + .value()); + EXPECT_EQ(0UL, context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->stats_ + .rq_error_.value()); + destroyRouter(); } @@ -697,6 +832,42 @@ TEST_F(ThriftRouterTest, NoCluster) { EXPECT_EQ(1U, context_.scope().counterFromString("test.unknown_cluster").value()); } +// Test the case where both dynamic metadata match criteria +// and route metadata match criteria is not empty. +TEST_F(ThriftRouterTest, MetadataMatchCriteriaFromRequest) { + initializeRouter(); + initializeMetadata(MessageType::Call); + + verifyMetadataMatchCriteriaFromRequest(true); +} + +// Test the case where route metadata match criteria is empty +// but with non-empty dynamic metadata match criteria. +TEST_F(ThriftRouterTest, MetadataMatchCriteriaFromRequestNoRouteEntryMatch) { + initializeRouter(); + initializeMetadata(MessageType::Call); + + verifyMetadataMatchCriteriaFromRequest(false); +} + +// Test the case where dynamic metadata match criteria is empty +// but with non-empty route metadata match criteria. +TEST_F(ThriftRouterTest, MetadataMatchCriteriaFromRoute) { + initializeRouter(); + startRequest(MessageType::Call); + + verifyMetadataMatchCriteriaFromRoute(true); +} + +// Test the case where both dynamic metadata match criteria +// and route metadata match criteria is empty. 
+TEST_F(ThriftRouterTest, MetadataMatchCriteriaFromRouteNoRouteEntryMatch) { + initializeRouter(); + startRequest(MessageType::Call); + + verifyMetadataMatchCriteriaFromRoute(false); +} + TEST_F(ThriftRouterTest, ClusterMaintenanceMode) { initializeRouter(); initializeMetadata(MessageType::Call); @@ -1123,6 +1294,8 @@ TEST_F(ThriftRouterTest, PoolTimeoutUpstreamTimeMeasurement) { startRequest(MessageType::Call); dispatcher_.globalTimeSystem().advanceTimeWait(std::chrono::milliseconds(500)); + EXPECT_CALL(cluster_scope, counter("thrift.upstream_resp_exception")); + EXPECT_CALL(cluster_scope, counter("thrift.upstream_resp_exception_local")); EXPECT_CALL(cluster_scope, histogram("thrift.upstream_rq_time", Stats::Histogram::Unit::Milliseconds)) .Times(0); @@ -1287,6 +1460,9 @@ TEST_P(ThriftRouterFieldTypeTest, Exception) { EXPECT_EQ(1UL, context_.cluster_manager_.thread_local_cluster_.cluster_.info_->statsScope() .counterFromString("thrift.upstream_resp_exception") .value()); + EXPECT_EQ(1UL, context_.cluster_manager_.thread_local_cluster_.cluster_.info_->statsScope() + .counterFromString("thrift.upstream_resp_exception_remote") + .value()); } TEST_P(ThriftRouterFieldTypeTest, UnknownMessageTypes) { @@ -1603,6 +1779,95 @@ TEST_F(ThriftRouterTest, ShadowRequests) { shadow_writer_impl_ = nullptr; } +TEST_F(ThriftRouterTest, UpstreamZoneCallSuccess) { + initializeRouter(); + initializeUpstreamZone(); + startRequest(MessageType::Call); + connectUpstream(); + sendTrivialStruct(FieldType::I32); + completeRequest(); + returnResponse(); + + EXPECT_EQ(1UL, context_.cluster_manager_.thread_local_cluster_.cluster_.info_->statsScope() + .counterFromString("zone.zone_name.other_zone_name.thrift.upstream_resp_reply") + .value()); + EXPECT_EQ(1UL, + context_.cluster_manager_.thread_local_cluster_.cluster_.info_->statsScope() + .counterFromString("zone.zone_name.other_zone_name.thrift.upstream_resp_success") + .value()); + EXPECT_EQ(1UL, context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->stats_ + .rq_success_.value()); +} + +TEST_F(ThriftRouterTest, UpstreamZoneCallError) { + initializeRouter(); + initializeUpstreamZone(); + startRequest(MessageType::Call); + connectUpstream(); + sendTrivialStruct(FieldType::I32); + completeRequest(); + returnResponse(MessageType::Reply, false); + + EXPECT_EQ(1UL, context_.cluster_manager_.thread_local_cluster_.cluster_.info_->statsScope() + .counterFromString("zone.zone_name.other_zone_name.thrift.upstream_resp_reply") + .value()); + EXPECT_EQ(1UL, context_.cluster_manager_.thread_local_cluster_.cluster_.info_->statsScope() + .counterFromString("zone.zone_name.other_zone_name.thrift.upstream_resp_error") + .value()); + EXPECT_EQ(1UL, context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->stats_ + .rq_error_.value()); +} + +TEST_F(ThriftRouterTest, UpstreamZoneCallException) { + initializeRouter(); + initializeUpstreamZone(); + startRequest(MessageType::Call); + connectUpstream(); + sendTrivialStruct(FieldType::I32); + completeRequest(); + returnResponse(MessageType::Exception); + EXPECT_EQ(1UL, + context_.cluster_manager_.thread_local_cluster_.cluster_.info_->statsScope() + .counterFromString("zone.zone_name.other_zone_name.thrift.upstream_resp_exception") + .value()); + EXPECT_EQ(1UL, context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.host_->stats_ + .rq_error_.value()); +} + +TEST_F(ThriftRouterTest, UpstreamZoneCallWithRqTime) { + NiceMock cluster_scope; + 
ON_CALL(*context_.cluster_manager_.thread_local_cluster_.cluster_.info_, statsScope()) + .WillByDefault(ReturnRef(cluster_scope)); + + initializeRouter(); + initializeUpstreamZone(); + startRequest(MessageType::Call); + connectUpstream(); + sendTrivialStruct(FieldType::I32); + completeRequest(); + + dispatcher_.globalTimeSystem().advanceTimeWait(std::chrono::milliseconds(500)); + EXPECT_CALL(cluster_scope, histogram("thrift.upstream_resp_size", Stats::Histogram::Unit::Bytes)); + EXPECT_CALL(cluster_scope, + deliverHistogramToSinks( + testing::Property(&Stats::Metric::name, "thrift.upstream_resp_size"), _)); + + EXPECT_CALL(cluster_scope, + histogram("thrift.upstream_rq_time", Stats::Histogram::Unit::Milliseconds)); + EXPECT_CALL(cluster_scope, + deliverHistogramToSinks( + testing::Property(&Stats::Metric::name, "thrift.upstream_rq_time"), _)); + + EXPECT_CALL(cluster_scope, histogram("zone.zone_name.other_zone_name.thrift.upstream_rq_time", + Stats::Histogram::Unit::Milliseconds)); + EXPECT_CALL(cluster_scope, + deliverHistogramToSinks( + testing::Property(&Stats::Metric::name, + "zone.zone_name.other_zone_name.thrift.upstream_rq_time"), + 500)); + returnResponse(); +} + } // namespace Router } // namespace ThriftProxy } // namespace NetworkFilters diff --git a/test/extensions/filters/network/thrift_proxy/shadow_writer_test.cc b/test/extensions/filters/network/thrift_proxy/shadow_writer_test.cc index 5532da024e32..1a15a105ea1d 100644 --- a/test/extensions/filters/network/thrift_proxy/shadow_writer_test.cc +++ b/test/extensions/filters/network/thrift_proxy/shadow_writer_test.cc @@ -1,5 +1,6 @@ #include +#include "envoy/config/core/v3/base.pb.h" #include "envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.h" #include "envoy/tcp/conn_pool.h" @@ -38,14 +39,16 @@ struct MockNullResponseDecoder : public NullResponseDecoder { class ShadowWriterTest : public testing::Test { public: ShadowWriterTest() { - shadow_writer_ = std::make_shared(cm_, "test", context_.scope(), dispatcher_, - context_.threadLocal()); + stats_ = std::make_shared("test", context_.scope(), context_.localInfo()); + shadow_writer_ = + std::make_shared(cm_, *stats_, dispatcher_, context_.threadLocal()); metadata_ = std::make_shared(); metadata_->setMethodName("ping"); metadata_->setMessageType(MessageType::Call); metadata_->setSequenceId(1); - + upstream_locality_.set_zone("other_zone_name"); host_ = std::make_shared>(); + ON_CALL(*host_, locality()).WillByDefault(ReturnRef(upstream_locality_)); } void testPoolReady(bool oneway = false) { @@ -147,6 +150,10 @@ class ShadowWriterTest : public testing::Test { MessageMetadataSharedPtr response_metadata = std::make_shared(); response_metadata->setMessageType(message_type); response_metadata->setSequenceId(1); + if (message_type == MessageType::Reply) { + const auto reply_type = success ? 
ReplyType::Success : ReplyType::Error; + response_metadata->setReplyType(reply_type); + } auto transport_ptr = NamedTransportConfigFactory::getFactory(TransportType::Framed).createTransport(); @@ -181,20 +188,36 @@ class ShadowWriterTest : public testing::Test { EXPECT_EQ(1UL, cluster_.cluster_.info_->statsScope() .counterFromString("thrift.upstream_resp_reply") .value()); + EXPECT_EQ(1UL, + cluster_.cluster_.info_->statsScope() + .counterFromString("zone.zone_name.other_zone_name.thrift.upstream_resp_reply") + .value()); if (success) { EXPECT_EQ(1UL, cluster_.cluster_.info_->statsScope() .counterFromString("thrift.upstream_resp_success") .value()); + EXPECT_EQ(1UL, cluster_.cluster_.info_->statsScope() + .counterFromString( + "zone.zone_name.other_zone_name.thrift.upstream_resp_success") + .value()); } else { EXPECT_EQ(1UL, cluster_.cluster_.info_->statsScope() .counterFromString("thrift.upstream_resp_error") .value()); + EXPECT_EQ( + 1UL, cluster_.cluster_.info_->statsScope() + .counterFromString("zone.zone_name.other_zone_name.thrift.upstream_resp_error") + .value()); } break; case MessageType::Exception: EXPECT_EQ(1UL, cluster_.cluster_.info_->statsScope() .counterFromString("thrift.upstream_resp_exception") .value()); + EXPECT_EQ(1UL, cluster_.cluster_.info_->statsScope() + .counterFromString( + "zone.zone_name.other_zone_name.thrift.upstream_resp_exception") + .value()); break; default: NOT_REACHED_GCOVR_EXCL_LINE; @@ -247,6 +270,8 @@ class ShadowWriterTest : public testing::Test { MessageMetadataSharedPtr metadata_; NiceMock conn_pool_; std::shared_ptr> host_; + envoy::config::core::v3::Locality upstream_locality_; + std::shared_ptr stats_; std::shared_ptr shadow_writer_; }; @@ -412,22 +437,14 @@ TEST_F(ShadowWriterTest, TestNullResponseDecoder) { EXPECT_TRUE(decoder_ptr->passthroughEnabled()); metadata_->setMessageType(MessageType::Reply); + metadata_->setReplyType(ReplyType::Success); EXPECT_EQ(FilterStatus::Continue, decoder_ptr->messageBegin(metadata_)); + EXPECT_TRUE(decoder_ptr->responseSuccess()); Buffer::OwnedImpl buffer; decoder_ptr->upstreamData(buffer); - EXPECT_EQ(FilterStatus::Continue, decoder_ptr->messageEnd()); - // First reply field. 
- { - FieldType field_type; - int16_t field_id = 0; - EXPECT_EQ(FilterStatus::Continue, decoder_ptr->messageBegin(metadata_)); - EXPECT_EQ(FilterStatus::Continue, decoder_ptr->fieldBegin("", field_type, field_id)); - EXPECT_TRUE(decoder_ptr->responseSuccess()); - } - EXPECT_EQ(FilterStatus::Continue, decoder_ptr->transportBegin(nullptr)); EXPECT_EQ(FilterStatus::Continue, decoder_ptr->transportEnd()); } diff --git a/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc b/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc index 9bead20a44ba..8319b926e95c 100644 --- a/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc +++ b/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc @@ -20,7 +20,7 @@ namespace ThriftProxy { class ThriftTranslationIntegrationTest : public testing::TestWithParam< - std::tuple>, + std::tuple>, public BaseThriftIntegrationTest { public: static void SetUpTestSuite() { // NOLINT(readability-identifier-naming) @@ -42,13 +42,11 @@ class ThriftTranslationIntegrationTest } void initialize() override { - TransportType downstream_transport, upstream_transport; - ProtocolType downstream_protocol, upstream_protocol; - std::tie(downstream_transport, downstream_protocol, upstream_transport, upstream_protocol) = - GetParam(); + std::tie(downstream_transport_, downstream_protocol_, upstream_transport_, upstream_protocol_, + passthrough_) = GetParam(); - auto upstream_transport_proto = transportTypeToProto(upstream_transport); - auto upstream_protocol_proto = protocolTypeToProto(upstream_protocol); + auto upstream_transport_proto = transportTypeToProto(upstream_transport_); + auto upstream_protocol_proto = protocolTypeToProto(upstream_protocol_); envoy::extensions::filters::network::thrift_proxy::v3::ThriftProtocolOptions proto_opts; proto_opts.set_transport(upstream_transport_proto); @@ -61,27 +59,43 @@ class ThriftTranslationIntegrationTest (*opts)[NetworkFilterNames::get().ThriftProxy].PackFrom(proto_opts); }); + if (passthrough_) { + config_helper_.addFilterConfigModifier< + envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy>( + "thrift", [](Protobuf::Message& filter) { + auto& conn_manager = + dynamic_cast( + filter); + conn_manager.set_payload_passthrough(true); + }); + } + // Invent some varying, but deterministic, values to add. We use the add method instead of // execute because the default execute params contains a set and the ordering can vary across // generated payloads. 
std::vector args({ - fmt::format("{}", (static_cast(downstream_transport) << 8) + - static_cast(downstream_protocol)), - fmt::format("{}", (static_cast(upstream_transport) << 8) + - static_cast(upstream_protocol)), + fmt::format("{}", (static_cast(downstream_transport_) << 8) + + static_cast(downstream_protocol_)), + fmt::format("{}", (static_cast(upstream_transport_) << 8) + + static_cast(upstream_protocol_)), }); - PayloadOptions downstream_opts(downstream_transport, downstream_protocol, DriverMode::Success, + PayloadOptions downstream_opts(downstream_transport_, downstream_protocol_, DriverMode::Success, {}, "add", args); preparePayloads(downstream_opts, downstream_request_bytes_, downstream_response_bytes_); - PayloadOptions upstream_opts(upstream_transport, upstream_protocol, DriverMode::Success, {}, + PayloadOptions upstream_opts(upstream_transport_, upstream_protocol_, DriverMode::Success, {}, "add", args); preparePayloads(upstream_opts, upstream_request_bytes_, upstream_response_bytes_); BaseThriftIntegrationTest::initialize(); } + TransportType downstream_transport_; + ProtocolType downstream_protocol_; + TransportType upstream_transport_; + ProtocolType upstream_protocol_; + bool passthrough_; Buffer::OwnedImpl downstream_request_bytes_; Buffer::OwnedImpl downstream_response_bytes_; Buffer::OwnedImpl upstream_request_bytes_; @@ -89,17 +103,22 @@ class ThriftTranslationIntegrationTest }; static std::string paramToString( - const TestParamInfo>& + const TestParamInfo>& params) { TransportType downstream_transport, upstream_transport; ProtocolType downstream_protocol, upstream_protocol; - std::tie(downstream_transport, downstream_protocol, upstream_transport, upstream_protocol) = - params.param; - - return fmt::format("From{}{}To{}{}", transportNameForTest(downstream_transport), - protocolNameForTest(downstream_protocol), - transportNameForTest(upstream_transport), - protocolNameForTest(upstream_protocol)); + bool passthrough; + std::tie(downstream_transport, downstream_protocol, upstream_transport, upstream_protocol, + passthrough) = params.param; + + auto result = + fmt::format("From{}{}To{}{}", transportNameForTest(downstream_transport), + protocolNameForTest(downstream_protocol), + transportNameForTest(upstream_transport), protocolNameForTest(upstream_protocol)); + if (passthrough) { + result = fmt::format("{}Passthrough", result); + } + return result; } INSTANTIATE_TEST_SUITE_P( @@ -107,7 +126,7 @@ INSTANTIATE_TEST_SUITE_P( Combine(Values(TransportType::Framed, TransportType::Unframed, TransportType::Header), Values(ProtocolType::Binary, ProtocolType::Compact), Values(TransportType::Framed, TransportType::Unframed, TransportType::Header), - Values(ProtocolType::Binary, ProtocolType::Compact)), + Values(ProtocolType::Binary, ProtocolType::Compact), Values(false, true)), paramToString); // Tests that the proxy will translate between different downstream and upstream transports and @@ -135,8 +154,32 @@ TEST_P(ThriftTranslationIntegrationTest, Translates) { Stats::CounterSharedPtr counter = test_server_->counter("thrift.thrift_stats.request_call"); EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter("cluster.cluster_0.thrift.upstream_rq_call"); + EXPECT_EQ(1U, counter->value()); + if (passthrough_ && + (downstream_transport_ == TransportType::Framed || + downstream_transport_ == TransportType::Header) && + (upstream_transport_ == TransportType::Framed || + upstream_transport_ == TransportType::Header) && + downstream_protocol_ == upstream_protocol_ && 
downstream_protocol_ != ProtocolType::Twitter) { + counter = test_server_->counter("thrift.thrift_stats.request_passthrough"); + EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter("thrift.thrift_stats.response_passthrough"); + EXPECT_EQ(1U, counter->value()); + } else { + counter = test_server_->counter("thrift.thrift_stats.request_passthrough"); + EXPECT_EQ(0U, counter->value()); + counter = test_server_->counter("thrift.thrift_stats.response_passthrough"); + EXPECT_EQ(0U, counter->value()); + } + counter = test_server_->counter("thrift.thrift_stats.response_reply"); + EXPECT_EQ(1U, counter->value()); counter = test_server_->counter("thrift.thrift_stats.response_success"); EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter("cluster.cluster_0.thrift.upstream_resp_reply"); + EXPECT_EQ(1U, counter->value()); + counter = test_server_->counter("cluster.cluster_0.thrift.upstream_resp_success"); + EXPECT_EQ(1U, counter->value()); } } // namespace ThriftProxy diff --git a/test/extensions/filters/udp/dns_filter/BUILD b/test/extensions/filters/udp/dns_filter/BUILD index c8aad7a22df8..514152020ff3 100644 --- a/test/extensions/filters/udp/dns_filter/BUILD +++ b/test/extensions/filters/udp/dns_filter/BUILD @@ -28,15 +28,21 @@ envoy_extension_cc_test_library( envoy_extension_cc_test( name = "dns_filter_test", srcs = ["dns_filter_test.cc"], + args = [ + # Force creation of c-ares DnsResolverImpl when running test on macOS. + "--runtime-feature-disable-for-tests=envoy.restart_features.use_apple_api_for_dns_lookups", + ], extension_names = ["envoy.filters.udp_listener.dns_filter"], deps = [ ":dns_filter_test_lib", "//source/extensions/filters/udp/dns_filter:dns_filter_lib", + "//source/extensions/network/dns_resolver/cares:config", "//test/mocks/server:instance_mocks", "//test/mocks/server:listener_factory_context_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", - "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", + "//test/test_common:registry_lib", + "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3:pkg_cc_proto", ], ) @@ -62,7 +68,7 @@ envoy_extension_cc_test( "//source/extensions/filters/udp/dns_filter:config", "//source/extensions/filters/udp/dns_filter:dns_filter_lib", "//test/integration:integration_lib", - "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc index e610191885f5..23280bd51238 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc @@ -83,18 +83,21 @@ name: listener_0 listener_filters: name: "envoy.filters.udp.dns_filter" typed_config: - '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig' + '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3.DnsFilterConfig' stat_prefix: "my_prefix" client_config: resolver_timeout: 1s - dns_resolution_config: - resolvers: - - socket_address: - address: {} - port_value: {} - dns_resolver_options: - use_tcp_for_dns_lookups: false - no_default_search_domain: false + typed_dns_resolver_config: + name: envoy.network.dns_resolver.cares + typed_config: + "@type": 
type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig
+          resolvers:
+          - socket_address:
+              address: {}
+              port_value: {}
+          dns_resolver_options:
+            use_tcp_for_dns_lookups: false
+            no_default_search_domain: false
       max_pending_lookups: 256
     server_config:
       inline_dns_table:
@@ -151,7 +154,7 @@ name: listener_1
 listener_filters:
   name: "envoy.filters.udp.dns_filter"
   typed_config:
-    '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig'
+    '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3.DnsFilterConfig'
     stat_prefix: "external_resolver"
     server_config:
       inline_dns_table:
diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc
index db9d906fff1f..e212f9775e11 100644
--- a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc
+++ b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc
@@ -1,5 +1,5 @@
-#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h"
-#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.validate.h"
+#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.h"
+#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.validate.h"
 
 #include "source/common/common/logger.h"
 #include "source/extensions/filters/udp/dns_filter/dns_filter_constants.h"
@@ -9,6 +9,7 @@
 #include "test/mocks/server/instance.h"
 #include "test/mocks/server/listener_factory_context.h"
 #include "test/test_common/environment.h"
+#include "test/test_common/registry.h"
 #include "test/test_common/simulated_time_system.h"
 
 #include "dns_filter_test_utils.h"
@@ -68,7 +69,7 @@ class DnsFilterTest : public testing::Test, public Event::TestUsingSimulatedTime
   }
 
   void setup(const std::string& yaml) {
-    envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig config;
+    envoy::extensions::filters::udp::dns_filter::v3::DnsFilterConfig config;
     TestUtility::loadFromYamlAndValidate(yaml, config);
     auto store = stats_store_.createScope("dns_scope");
     ON_CALL(listener_factory_, scope()).WillByDefault(ReturnRef(*store));
@@ -77,11 +78,21 @@ class DnsFilterTest : public testing::Test, public Event::TestUsingSimulatedTime
     ON_CALL(listener_factory_, random()).WillByDefault(ReturnRef(random_));
 
     resolver_ = std::make_shared<Network::MockDnsResolver>();
-    ON_CALL(dispatcher_, createDnsResolver(_, _))
-        .WillByDefault(DoAll(SaveArg<1>(&dns_resolver_options_), Return(resolver_)));
+    NiceMock<Network::MockDnsResolverFactory> dns_resolver_factory_;
+    Registry::InjectFactory<Network::DnsResolverFactory> registered_dns_factory_(
+        dns_resolver_factory_);
+    ON_CALL(dns_resolver_factory_, createDnsResolver(_, _, _))
+        .WillByDefault(DoAll(SaveArg<2>(&typed_dns_resolver_config_), Return(resolver_)));
     config_ = std::make_shared<DnsFilterEnvoyConfig>(listener_factory_, config);
     filter_ = std::make_unique<DnsFilter>(callbacks_, config_);
+    // Verify typed DNS resolver config is c-ares.
+ EXPECT_EQ(typed_dns_resolver_config_.name(), std::string(Network::CaresDnsResolver)); + EXPECT_EQ(typed_dns_resolver_config_.typed_config().type_url(), + "type.googleapis.com/" + "envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig"); + typed_dns_resolver_config_.typed_config().UnpackTo(&cares_); + dns_resolver_options_.MergeFrom(cares_.dns_resolver_options()); } void sendQueryFromClient(const std::string& peer_address, const std::string& buffer) { @@ -95,6 +106,8 @@ class DnsFilterTest : public testing::Test, public Event::TestUsingSimulatedTime const Network::Address::InstanceConstSharedPtr listener_address_; envoy::config::core::v3::DnsResolverOptions dns_resolver_options_; + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config_; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares_; NiceMock random_; Api::ApiPtr api_; DnsFilterEnvoyConfigSharedPtr config_; @@ -176,17 +189,20 @@ stat_prefix: "my_prefix" stat_prefix: "my_prefix" client_config: resolver_timeout: 1s - dns_resolution_config: - resolvers: - - socket_address: - address: "1.1.1.1" - port_value: 53 - - socket_address: - address: "8.8.8.8" - port_value: 53 - - socket_address: - address: "8.8.4.4" - port_value: 53 + typed_dns_resolver_config: + name: envoy.network.dns_resolver.cares + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig + resolvers: + - socket_address: + address: "1.1.1.1" + port_value: 53 + - socket_address: + address: "8.8.8.8" + port_value: 53 + - socket_address: + address: "8.8.4.4" + port_value: 53 max_pending_lookups: 1 server_config: inline_dns_table: @@ -203,11 +219,14 @@ stat_prefix: "my_prefix" stat_prefix: "my_prefix" client_config: resolver_timeout: 1s - dns_resolution_config: - resolvers: - - socket_address: - address: "1.1.1.1" - port_value: 53 + typed_dns_resolver_config: + name: envoy.network.dns_resolver.cares + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig + resolvers: + - socket_address: + address: "1.1.1.1" + port_value: 53 max_pending_lookups: 256 server_config: external_dns_table: @@ -218,11 +237,14 @@ stat_prefix: "my_prefix" stat_prefix: "my_prefix" client_config: resolver_timeout: 1s - dns_resolution_config: - resolvers: - - socket_address: - address: "1.1.1.1" - port_value: 53 + typed_dns_resolver_config: + name: envoy.network.dns_resolver.cares + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig + resolvers: + - socket_address: + address: "1.1.1.1" + port_value: 53 max_pending_lookups: 256 server_config: external_dns_table: @@ -233,14 +255,17 @@ stat_prefix: "my_prefix" stat_prefix: "my_prefix" client_config: resolver_timeout: 1s - dns_resolution_config: - dns_resolver_options: - use_tcp_for_dns_lookups: false - no_default_search_domain: false - resolvers: - - socket_address: - address: "1.1.1.1" - port_value: 53 + typed_dns_resolver_config: + name: envoy.network.dns_resolver.cares + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig + dns_resolver_options: + use_tcp_for_dns_lookups: false + no_default_search_domain: false + resolvers: + - socket_address: + address: "1.1.1.1" + port_value: 53 max_pending_lookups: 256 server_config: external_dns_table: @@ -251,14 +276,17 @@ stat_prefix: "my_prefix" stat_prefix: "my_prefix" client_config: resolver_timeout: 1s - 
dns_resolution_config: - dns_resolver_options: - use_tcp_for_dns_lookups: true - no_default_search_domain: true - resolvers: - - socket_address: - address: "1.1.1.1" - port_value: 53 + typed_dns_resolver_config: + name: envoy.network.dns_resolver.cares + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig + dns_resolver_options: + use_tcp_for_dns_lookups: true + no_default_search_domain: true + resolvers: + - socket_address: + address: "1.1.1.1" + port_value: 53 max_pending_lookups: 256 server_config: external_dns_table: @@ -2141,6 +2169,167 @@ TEST_F(DnsFilterTest, DnsResolverOptionsSetFalse) { EXPECT_EQ(false, dns_resolver_options_.no_default_search_domain()); } +TEST_F(DnsFilterTest, DEPRECATED_FEATURE_TEST(DnsResolutionConfigExist)) { + const std::string dns_resolution_config_exist = R"EOF( +stat_prefix: "my_prefix" +client_config: + resolver_timeout: 1s + dns_resolution_config: + dns_resolver_options: + use_tcp_for_dns_lookups: false + no_default_search_domain: false + resolvers: + - socket_address: + address: "1.1.1.1" + port_value: 53 + max_pending_lookups: 256 +server_config: + external_dns_table: + filename: {} +)EOF"; + + InSequence s; + + std::string temp_path = + TestEnvironment::writeStringToFileForTest("dns_table.yaml", max_records_table_yaml); + std::string config_to_use = fmt::format(dns_resolution_config_exist, temp_path); + setup(config_to_use); + + EXPECT_EQ(false, dns_resolver_options_.use_tcp_for_dns_lookups()); + EXPECT_EQ(false, dns_resolver_options_.no_default_search_domain()); + + // address matches + auto resolvers = envoy::config::core::v3::Address(); + resolvers.mutable_socket_address()->set_address("1.1.1.1"); + resolvers.mutable_socket_address()->set_port_value(53); + + EXPECT_EQ(true, TestUtility::protoEqual(cares_.resolvers(0), resolvers)); +} + +// test typed_dns_resolver_config exits which overrides dns_resolution_config. +TEST_F(DnsFilterTest, DEPRECATED_FEATURE_TEST(TypedDnsResolverConfigOverrideDnsResolutionConfig)) { + const std::string typed_dns_resolver_config_exist = R"EOF( +stat_prefix: "my_prefix" +client_config: + resolver_timeout: 1s + dns_resolution_config: + dns_resolver_options: + use_tcp_for_dns_lookups: false + no_default_search_domain: false + resolvers: + - socket_address: + address: "1.1.1.1" + port_value: 53 + typed_dns_resolver_config: + name: envoy.network.dns_resolver.cares + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig + resolvers: + - socket_address: + address: "1.2.3.4" + port_value: 80 + dns_resolver_options: + use_tcp_for_dns_lookups: true + no_default_search_domain: true + max_pending_lookups: 256 +server_config: + external_dns_table: + filename: {} +)EOF"; + + InSequence s; + + std::string temp_path = + TestEnvironment::writeStringToFileForTest("dns_table.yaml", max_records_table_yaml); + std::string config_to_use = fmt::format(typed_dns_resolver_config_exist, temp_path); + setup(config_to_use); + + EXPECT_EQ(true, dns_resolver_options_.use_tcp_for_dns_lookups()); + EXPECT_EQ(true, dns_resolver_options_.no_default_search_domain()); + + // address matches + auto resolvers = envoy::config::core::v3::Address(); + resolvers.mutable_socket_address()->set_address("1.2.3.4"); + resolvers.mutable_socket_address()->set_port_value(80); + EXPECT_EQ(true, TestUtility::protoEqual(cares_.resolvers(0), resolvers)); +} + +// test typed_dns_resolver_config exits. 
+TEST_F(DnsFilterTest, TypedDnsResolverConfigExist) { + const std::string typed_dns_resolver_config_exist = R"EOF( +stat_prefix: "my_prefix" +client_config: + resolver_timeout: 1s + typed_dns_resolver_config: + name: envoy.network.dns_resolver.cares + typed_config: + "@type": type.googleapis.com/envoy.extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig + resolvers: + - socket_address: + address: "1.2.3.4" + port_value: 80 + dns_resolver_options: + use_tcp_for_dns_lookups: true + no_default_search_domain: true + max_pending_lookups: 256 +server_config: + external_dns_table: + filename: {} +)EOF"; + + InSequence s; + + std::string temp_path = + TestEnvironment::writeStringToFileForTest("dns_table.yaml", max_records_table_yaml); + std::string config_to_use = fmt::format(typed_dns_resolver_config_exist, temp_path); + setup(config_to_use); + + EXPECT_EQ(true, dns_resolver_options_.use_tcp_for_dns_lookups()); + EXPECT_EQ(true, dns_resolver_options_.no_default_search_domain()); + + // address matches + auto resolvers = envoy::config::core::v3::Address(); + resolvers.mutable_socket_address()->set_address("1.2.3.4"); + resolvers.mutable_socket_address()->set_port_value(80); + EXPECT_EQ(true, TestUtility::protoEqual(cares_.resolvers(0), resolvers)); +} + +// test when no DNS related config exists, an empty typed_dns_resolver_config is the parameter. +TEST_F(DnsFilterTest, NoDnsConfigExist) { + const std::string no_dns_config_exist = R"EOF( +stat_prefix: "my_prefix" +client_config: + resolver_timeout: 1s + max_pending_lookups: 256 +server_config: + external_dns_table: + filename: {} +)EOF"; + + InSequence s; + + std::string temp_path = + TestEnvironment::writeStringToFileForTest("dns_table.yaml", max_records_table_yaml); + std::string config_to_use = fmt::format(no_dns_config_exist, temp_path); + setup(config_to_use); + + EXPECT_EQ(false, dns_resolver_options_.use_tcp_for_dns_lookups()); + EXPECT_EQ(false, dns_resolver_options_.no_default_search_domain()); + + // No address + EXPECT_EQ(0, cares_.resolvers().size()); +} + +// Verify downstream send and receive error handling. 
+TEST_F(DnsFilterTest, SendReceiveErrorHandling) {
+  InSequence s;
+
+  setup(forward_query_off_config);
+
+  filter_->onReceiveError(Api::IoError::IoErrorCode::UnknownError);
+  EXPECT_EQ(1, config_->stats().downstream_rx_errors_.value());
+}
+
 TEST_F(DnsFilterTest, DEPRECATED_FEATURE_TEST(DeprecatedKnownSuffixes)) {
   InSequence s;
 
diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_utils_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_utils_test.cc
index 0416435dce46..e07d67113db8 100644
--- a/test/extensions/filters/udp/dns_filter/dns_filter_utils_test.cc
+++ b/test/extensions/filters/udp/dns_filter/dns_filter_utils_test.cc
@@ -1,5 +1,5 @@
-#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h"
-#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.validate.h"
+#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.h"
+#include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.validate.h"
 
 #include "source/common/network/address_impl.h"
 #include "source/extensions/filters/udp/dns_filter/dns_filter_utils.h"
diff --git a/test/extensions/filters/udp/udp_proxy/BUILD b/test/extensions/filters/udp/udp_proxy/BUILD
index b1619e62a6b1..4ab2bbdcf384 100644
--- a/test/extensions/filters/udp/udp_proxy/BUILD
+++ b/test/extensions/filters/udp/udp_proxy/BUILD
@@ -47,8 +47,11 @@ envoy_extension_cc_test(
     srcs = ["udp_proxy_integration_test.cc"],
    extension_names = ["envoy.filters.udp_listener.udp_proxy"],
     deps = [
+        "//envoy/network:filter_interface",
+        "//envoy/server:filter_config_interface",
         "//source/extensions/filters/udp/udp_proxy:config",
         "//test/integration:integration_lib",
+        "//test/test_common:registry_lib",
         "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto",
     ],
 )
diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc
index f2c4dd185a06..93b1c5087871 100644
--- a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc
+++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc
@@ -1,18 +1,61 @@
 #include
 
 #include "envoy/config/bootstrap/v3/bootstrap.pb.h"
+#include "envoy/network/filter.h"
+#include "envoy/server/filter_config.h"
 
 #include "test/integration/integration.h"
 #include "test/test_common/network_utility.h"
+#include "test/test_common/registry.h"
 
 namespace Envoy {
 namespace {
 
+class UdpReverseFilter : public Network::UdpListenerReadFilter {
+public:
+  UdpReverseFilter(Network::UdpReadFilterCallbacks& callbacks) : UdpListenerReadFilter(callbacks) {}
+
+  // Network::UdpListenerReadFilter
+  Network::FilterStatus onData(Network::UdpRecvData& data) override {
+    std::string content = data.buffer_->toString();
+    std::reverse(content.begin(), content.end());
+
+    data.buffer_->drain(data.buffer_->length());
+    data.buffer_->add(content);
+
+    return Network::FilterStatus::Continue;
+  }
+
+  Network::FilterStatus onReceiveError(Api::IoError::IoErrorCode) override {
+    return Network::FilterStatus::Continue;
+  }
+};
+
+class UdpReverseFilterConfigFactory
+    : public Server::Configuration::NamedUdpListenerFilterConfigFactory {
+public:
+  // NamedUdpListenerFilterConfigFactory
+  Network::UdpListenerFilterFactoryCb
+  createFilterFactoryFromProto(const Protobuf::Message&,
+                               Server::Configuration::ListenerFactoryContext&) override {
+    return [](Network::UdpListenerFilterManager& filter_manager,
+              Network::UdpReadFilterCallbacks& callbacks) -> void {
+      filter_manager.addReadFilter(std::make_unique<UdpReverseFilter>(callbacks));
+    };
+  }
+
+  ProtobufTypes::MessagePtr createEmptyConfigProto() override {
+    return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()};
+  }
+
+  std::string name() const override { return "test.udp_listener.reverse"; }
+};
+
 class UdpProxyIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,
                                 public BaseIntegrationTest {
 public:
   UdpProxyIntegrationTest()
-      : BaseIntegrationTest(GetParam(), ConfigHelper::baseUdpListenerConfig()) {}
+      : BaseIntegrationTest(GetParam(), ConfigHelper::baseUdpListenerConfig()),
+        registration_(factory_) {}
 
   void setup(uint32_t upstream_count,
              absl::optional<uint64_t> max_rx_datagram_size = absl::nullopt) {
@@ -73,9 +116,30 @@ name: udp_proxy
     BaseIntegrationTest::initialize();
   }
 
+  void setupMultiple() {
+    FakeUpstreamConfig::UdpConfig config;
+    config.max_rx_datagram_size_ = absl::nullopt;
+    setUdpFakeUpstream(config);
+
+    config_helper_.addListenerFilter(R"EOF(
+name: udp_proxy
+typed_config:
+  '@type': type.googleapis.com/envoy.extensions.filters.udp.udp_proxy.v3.UdpProxyConfig
+  stat_prefix: foo
+  cluster: cluster_0
+)EOF");
+    // Add a reverse filter for reversing payload prior to UDP proxy
+    config_helper_.addListenerFilter(R"EOF(
+name: test.udp_listener.reverse
+)EOF");
+
+    BaseIntegrationTest::initialize();
+  }
+
   void requestResponseWithListenerAddress(
       const Network::Address::Instance& listener_address, std::string request = "hello",
-      std::string response = "world1",
+      std::string expected_request = "hello", std::string response = "world1",
+      std::string expected_response = "world1",
       uint64_t max_rx_datagram_size = Network::DEFAULT_UDP_MAX_DATAGRAM_SIZE) {
     // Send datagram to be proxied.
     Network::Test::UdpSyncPeer client(version_, max_rx_datagram_size);
@@ -84,32 +148,36 @@ name: udp_proxy
     // Wait for the upstream datagram.
     Network::UdpRecvData request_datagram;
     ASSERT_TRUE(fake_upstreams_[0]->waitForUdpDatagram(request_datagram));
-    EXPECT_EQ(request, request_datagram.buffer_->toString());
+    EXPECT_EQ(expected_request, request_datagram.buffer_->toString());
 
     // Respond from the upstream.
     fake_upstreams_[0]->sendUdpDatagram(response, request_datagram.addresses_.peer_);
     Network::UdpRecvData response_datagram;
     client.recv(response_datagram);
-    EXPECT_EQ(response, response_datagram.buffer_->toString());
+    EXPECT_EQ(expected_response, response_datagram.buffer_->toString());
     EXPECT_EQ(listener_address.asString(), response_datagram.addresses_.peer_->asString());
 
-    EXPECT_EQ(request.size(), test_server_->counter("udp.foo.downstream_sess_rx_bytes")->value());
+    EXPECT_EQ(expected_request.size(),
+              test_server_->counter("udp.foo.downstream_sess_rx_bytes")->value());
     EXPECT_EQ(1, test_server_->counter("udp.foo.downstream_sess_rx_datagrams")->value());
-    EXPECT_EQ(request.size(),
+    EXPECT_EQ(expected_request.size(),
              test_server_->counter("cluster.cluster_0.upstream_cx_tx_bytes_total")->value());
     EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.udp.sess_tx_datagrams")->value());
-    EXPECT_EQ(response.size(),
+    EXPECT_EQ(expected_response.size(),
              test_server_->counter("cluster.cluster_0.upstream_cx_rx_bytes_total")->value());
     EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.udp.sess_rx_datagrams")->value());
 
     // The stat is incremented after the send so there is a race condition and we must wait for
     // the counter to be incremented.
-    test_server_->waitForCounterEq("udp.foo.downstream_sess_tx_bytes", response.size());
+    test_server_->waitForCounterEq("udp.foo.downstream_sess_tx_bytes", expected_response.size());
     test_server_->waitForCounterEq("udp.foo.downstream_sess_tx_datagrams", 1);
 
     EXPECT_EQ(1, test_server_->counter("udp.foo.downstream_sess_total")->value());
     EXPECT_EQ(1, test_server_->gauge("udp.foo.downstream_sess_active")->value());
   }
+
+  UdpReverseFilterConfigFactory factory_;
+  Registry::InjectFactory<Server::Configuration::NamedUdpListenerFilterConfigFactory> registration_;
 };
 
 INSTANTIATE_TEST_SUITE_P(IpVersions, UdpProxyIntegrationTest,
@@ -190,6 +258,8 @@ TEST_P(UdpProxyIntegrationTest, LargePacketSizesOnLoopback) {
   const auto listener_address = Network::Utility::resolveUrl(
       fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port));
   requestResponseWithListenerAddress(*listener_address, std::string(max_rx_datagram_size, 'a'),
+                                     std::string(max_rx_datagram_size, 'a'),
+                                     std::string(max_rx_datagram_size, 'b'),
                                      std::string(max_rx_datagram_size, 'b'),
                                      max_rx_datagram_size);
 }
@@ -288,5 +358,14 @@ TEST_P(UdpProxyIntegrationTest, MultipleUpstreams) {
   EXPECT_EQ("world2", response_datagram.buffer_->toString());
 }
 
+// Make sure the UDP proxy filter on the chain will work.
+TEST_P(UdpProxyIntegrationTest, MultipleFilters) {
+  setupMultiple();
+  const uint32_t port = lookupPort("listener_0");
+  const auto listener_address = Network::Utility::resolveUrl(
+      fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port));
+  requestResponseWithListenerAddress(*listener_address, "hello", "olleh");
+}
+
 } // namespace
 } // namespace Envoy
diff --git a/test/extensions/key_value/file_based/alternate_protocols_cache_impl_test.cc b/test/extensions/key_value/file_based/alternate_protocols_cache_impl_test.cc
index 76f1a0651af2..113a3c713b10 100644
--- a/test/extensions/key_value/file_based/alternate_protocols_cache_impl_test.cc
+++ b/test/extensions/key_value/file_based/alternate_protocols_cache_impl_test.cc
@@ -31,6 +31,7 @@ class AlternateProtocolsCacheManagerTest : public testing::Test,
   std::unique_ptr factory_;
   Http::AlternateProtocolsCacheManagerSharedPtr manager_;
   envoy::config::core::v3::AlternateProtocolsCacheOptions options_;
+  testing::NiceMock<Event::MockDispatcher> dispatcher_;
 };
 
 TEST_F(AlternateProtocolsCacheManagerTest, GetCacheWithFlushingAndConcurrency) {
@@ -42,7 +43,7 @@ TEST_F(AlternateProtocolsCacheManagerTest, GetCacheWithFlushingAndConcurrency) {
   options_.mutable_key_value_store_config()->set_name("envoy.common.key_value");
   options_.mutable_key_value_store_config()->mutable_typed_config()->PackFrom(kv_config);
   initialize();
-  manager_->getCache(options_);
+  manager_->getCache(options_, dispatcher_);
 }
 
 } // namespace
diff --git a/test/extensions/network/dns_resolver/apple/BUILD b/test/extensions/network/dns_resolver/apple/BUILD
new file mode 100644
index 000000000000..d949920ad9d9
--- /dev/null
+++ b/test/extensions/network/dns_resolver/apple/BUILD
@@ -0,0 +1,40 @@
+load(
+    "//bazel:envoy_build_system.bzl",
+    "envoy_cc_test",
+    "envoy_package",
+)
+
+licenses(["notice"])  # Apache 2
+
+envoy_package()
+
+envoy_cc_test(
+    name = "apple_dns_impl_test",
+    srcs = select({
+        "//bazel:apple": ["apple_dns_impl_test.cc"],
+        "//conditions:default": [],
+    }),
+    external_deps = ["abseil_synchronization"],
+    deps = [
+        "//envoy/event:dispatcher_interface",
+        "//envoy/network:dns_interface",
+        "//source/common/event:dispatcher_includes",
+        "//envoy/event:file_event_interface",
+        "//source/common/stats:isolated_store_lib",
+
"//source/common/event:dispatcher_lib", + "//source/common/network:address_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", + "//source/common/common:random_generator_lib", + "//test/test_common:environment_lib", + "//test/test_common:network_utility_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "//test/test_common:threadsafe_singleton_injector_lib", + "//test/mocks/event:event_mocks", + ] + select({ + "//bazel:apple": [ + "//source/extensions/network/dns_resolver/apple:config", + ], + "//conditions:default": [], + }), +) diff --git a/test/common/network/apple_dns_impl_test.cc b/test/extensions/network/dns_resolver/apple/apple_dns_impl_test.cc similarity index 78% rename from test/common/network/apple_dns_impl_test.cc rename to test/extensions/network/dns_resolver/apple/apple_dns_impl_test.cc index b7e7904ad7e6..c936f1b3e519 100644 --- a/test/common/network/apple_dns_impl_test.cc +++ b/test/extensions/network/dns_resolver/apple/apple_dns_impl_test.cc @@ -11,9 +11,10 @@ #include "envoy/network/dns.h" #include "source/common/network/address_impl.h" -#include "source/common/network/apple_dns_impl.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/network/utility.h" #include "source/common/stats/isolated_store_impl.h" +#include "source/extensions/network/dns_resolver/apple/apple_dns_impl.h" #include "test/mocks/event/mocks.h" #include "test/test_common/environment.h" @@ -38,6 +39,14 @@ namespace Envoy { namespace Network { namespace { +void expectAppleTypedDnsResolverConfig( + const envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) { + EXPECT_EQ(typed_dns_resolver_config.name(), std::string(Network::AppleDnsResolver)); + EXPECT_EQ( + typed_dns_resolver_config.typed_config().type_url(), + "type.googleapis.com/envoy.extensions.network.dns_resolver.apple.v3.AppleDnsResolverConfig"); +} + class MockDnsService : public Network::DnsService { public: MockDnsService() = default; @@ -61,7 +70,11 @@ class AppleDnsImplTest : public testing::Test { : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")) {} void SetUp() override { - resolver_ = dispatcher_->createDnsResolver({}, envoy::config::core::v3::DnsResolverOptions()); + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + Network::DnsResolverFactory& dns_resolver_factory = + Network::createDefaultDnsResolverFactory(typed_dns_resolver_config); + resolver_ = + dns_resolver_factory.createDnsResolver(*dispatcher_, *api_, typed_dns_resolver_config); } ActiveDnsQuery* resolveWithExpectations(const std::string& address, @@ -75,13 +88,44 @@ class AppleDnsImplTest : public testing::Test { EXPECT_EQ(expected_status, status); if (expected_results) { EXPECT_FALSE(results.empty()); + absl::optional is_v4{}; for (const auto& result : results) { - if (lookup_family == DnsLookupFamily::V4Only || - lookup_family == DnsLookupFamily::V4Preferred) { + switch (lookup_family) { + case DnsLookupFamily::V4Only: EXPECT_NE(nullptr, result.address_->ip()->ipv4()); - } else if (lookup_family == DnsLookupFamily::V6Only || - lookup_family == DnsLookupFamily::Auto) { + break; + case DnsLookupFamily::V6Only: EXPECT_NE(nullptr, result.address_->ip()->ipv6()); + break; + // In CI these modes could return either IPv4 or IPv6 with the non-mocked API calls. + // But regardless of the family all returned addresses need to be one _or_ the other. 
+ case DnsLookupFamily::V4Preferred: + case DnsLookupFamily::Auto: + // Set the expectation for subsequent responses based on the first one. + if (!is_v4.has_value()) { + if (result.address_->ip()->ipv4()) { + is_v4 = true; + } else { + is_v4 = false; + } + } + + if (is_v4.value()) { + EXPECT_NE(nullptr, result.address_->ip()->ipv4()); + } else { + EXPECT_NE(nullptr, result.address_->ip()->ipv6()); + } + break; + // All could be either IPv4 or IPv6. + case DnsLookupFamily::All: + if (result.address_->ip()->ipv4()) { + EXPECT_NE(nullptr, result.address_->ip()->ipv4()); + } else { + EXPECT_NE(nullptr, result.address_->ip()->ipv6()); + } + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; } } } @@ -122,15 +166,84 @@ class AppleDnsImplTest : public testing::Test { DnsResolverSharedPtr resolver_; }; -TEST_F(AppleDnsImplTest, InvalidConfigOptions) { - auto dns_resolver_options = envoy::config::core::v3::DnsResolverOptions(); - EXPECT_DEATH( - dispatcher_->createDnsResolver({nullptr}, dns_resolver_options), - "defining custom resolvers is not possible when using Apple APIs for DNS resolution"); - dns_resolver_options.set_use_tcp_for_dns_lookups(true); - EXPECT_DEATH( - dispatcher_->createDnsResolver({}, dns_resolver_options), - "using TCP for DNS lookups is not possible when using Apple APIs for DNS resolution"); +// By default in MacOS, it creates an AppleDnsResolver typed config. +TEST_F(AppleDnsImplTest, DefaultAppleDnsResolverConstruction) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::config::cluster::v3::Cluster config; + typed_dns_resolver_config = Network::makeDnsResolverConfig(config); + expectAppleTypedDnsResolverConfig(typed_dns_resolver_config); +} + +// If typed apple DNS resolver config exits, use it. +TEST_F(AppleDnsImplTest, TypedAppleDnsResolverConfigExist) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::config::cluster::v3::Cluster config; + + typed_dns_resolver_config.mutable_typed_config()->set_type_url( + "type.googleapis.com/envoy.extensions.network.dns_resolver.apple.v3.AppleDnsResolverConfig"); + typed_dns_resolver_config.set_name(std::string(Network::AppleDnsResolver)); + config.mutable_typed_dns_resolver_config()->MergeFrom(typed_dns_resolver_config); + EXPECT_TRUE(config.has_typed_dns_resolver_config()); + EXPECT_TRUE(checkUseAppleApiForDnsLookups(typed_dns_resolver_config)); + typed_dns_resolver_config.Clear(); + typed_dns_resolver_config = Network::makeDnsResolverConfig(config); + expectAppleTypedDnsResolverConfig(typed_dns_resolver_config); +} + +// Test default DNS resolver typed config creation based on build system and configuration is +// expected. +TEST_F(AppleDnsImplTest, MakeDefaultDnsResolverTestInApple) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + Network::makeDefaultDnsResolverConfig(typed_dns_resolver_config); + expectAppleTypedDnsResolverConfig(typed_dns_resolver_config); +} + +// Test default DNS resolver factory creation based on build system and configuration is +// expected. 
+TEST_F(AppleDnsImplTest, MakeDefaultDnsResolverFactoryTestInApple) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + Network::DnsResolverFactory& dns_resolver_factory = + Envoy::Network::createDefaultDnsResolverFactory(typed_dns_resolver_config); + EXPECT_EQ(dns_resolver_factory.name(), std::string(AppleDnsResolver)); + expectAppleTypedDnsResolverConfig(typed_dns_resolver_config); +} + +// Test DNS resolver factory creation from proto without typed config. +TEST_F(AppleDnsImplTest, MakeDnsResolverFactoryFromProtoTestInAppleWithoutTypedConfig) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + Network::DnsResolverFactory& dns_resolver_factory = + Envoy::Network::createDnsResolverFactoryFromProto(envoy::config::bootstrap::v3::Bootstrap(), + typed_dns_resolver_config); + EXPECT_EQ(dns_resolver_factory.name(), std::string(AppleDnsResolver)); + expectAppleTypedDnsResolverConfig(typed_dns_resolver_config); +} + +// Test DNS resolver factory creation from proto with valid typed config +TEST_F(AppleDnsImplTest, MakeDnsResolverFactoryFromProtoTestInAppleWithGoodTypedConfig) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; + + typed_dns_resolver_config.mutable_typed_config()->set_type_url( + "type.googleapis.com/envoy.extensions.network.dns_resolver.apple.v3.AppleDnsResolverConfig"); + typed_dns_resolver_config.set_name(std::string(Network::AppleDnsResolver)); + config.mutable_typed_dns_resolver_config()->MergeFrom(typed_dns_resolver_config); + Network::DnsResolverFactory& dns_resolver_factory = + Envoy::Network::createDnsResolverFactoryFromProto(config, typed_dns_resolver_config); + EXPECT_EQ(dns_resolver_factory.name(), std::string(AppleDnsResolver)); + expectAppleTypedDnsResolverConfig(typed_dns_resolver_config); +} + +// Test DNS resolver factory creation from proto with invalid typed config +TEST_F(AppleDnsImplTest, MakeDnsResolverFactoryFromProtoTestInAppleWithInvalidTypedConfig) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; + + typed_dns_resolver_config.mutable_typed_config()->set_type_url("type.googleapis.com/foo"); + typed_dns_resolver_config.set_name("bar"); + config.mutable_typed_dns_resolver_config()->MergeFrom(typed_dns_resolver_config); + EXPECT_THROW_WITH_MESSAGE( + Envoy::Network::createDnsResolverFactoryFromProto(config, typed_dns_resolver_config), + Envoy::EnvoyException, "Didn't find a registered implementation for name: 'bar'"); } // Validate that when AppleDnsResolverImpl is destructed with outstanding requests, @@ -149,22 +262,32 @@ TEST_F(AppleDnsImplTest, LocalLookup) { dispatcher_->run(Event::Dispatcher::RunType::Block); } -TEST_F(AppleDnsImplTest, DnsIpAddressVersion) { +TEST_F(AppleDnsImplTest, DnsIpAddressVersionAuto) { EXPECT_NE(nullptr, resolveWithExpectations("google.com", DnsLookupFamily::Auto, DnsResolver::ResolutionStatus::Success, true)); dispatcher_->run(Event::Dispatcher::RunType::Block); +} +TEST_F(AppleDnsImplTest, DnsIpAddressVersionV4Preferred) { EXPECT_NE(nullptr, resolveWithExpectations("google.com", DnsLookupFamily::V4Preferred, DnsResolver::ResolutionStatus::Success, true)); dispatcher_->run(Event::Dispatcher::RunType::Block); +} +TEST_F(AppleDnsImplTest, DnsIpAddressVersionV4Only) { EXPECT_NE(nullptr, resolveWithExpectations("google.com", DnsLookupFamily::V4Only, 
DnsResolver::ResolutionStatus::Success, true)); dispatcher_->run(Event::Dispatcher::RunType::Block); +} +TEST_F(AppleDnsImplTest, DnsIpAddressVersionV6Only) { EXPECT_NE(nullptr, resolveWithExpectations("google.com", DnsLookupFamily::V6Only, DnsResolver::ResolutionStatus::Success, true)); dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_NE(nullptr, resolveWithExpectations("google.com", DnsLookupFamily::All, + DnsResolver::ResolutionStatus::Success, true)); + dispatcher_->run(Event::Dispatcher::RunType::Block); } // dns_sd is very opaque and does not explicitly call out the state that is kept across queries. @@ -206,6 +329,10 @@ TEST_F(AppleDnsImplTest, DnsIpAddressVersionInvalid) { EXPECT_NE(nullptr, resolveWithExpectations("invalidDnsName", DnsLookupFamily::V6Only, DnsResolver::ResolutionStatus::Failure, false)); dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_NE(nullptr, resolveWithExpectations("invalidDnsName", DnsLookupFamily::All, + DnsResolver::ResolutionStatus::Failure, false)); + dispatcher_->run(Event::Dispatcher::RunType::Block); } TEST_F(AppleDnsImplTest, CallbackException) { @@ -305,10 +432,8 @@ class AppleDnsImplFakeApiTest : public testing::Test { Network::Address::Ipv4Instance address(&addr4); absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll( // Have the API call synchronously call the provided callback. WithArgs<5, 6>(Invoke([&](DNSServiceGetAddrInfoReply callback, void* context) -> void { @@ -332,7 +457,8 @@ class AppleDnsImplFakeApiTest : public testing::Test { enum AddressType { V4, V6, Both }; - void fallbackWith(DnsLookupFamily dns_lookup_family, AddressType address_type) { + void fallbackWith(DnsLookupFamily dns_lookup_family, AddressType address_type, + uint32_t expected_address_size = 1) { const std::string hostname = "foo.com"; sockaddr_in addr4; addr4.sin_family = AF_INET; @@ -349,10 +475,8 @@ class AppleDnsImplFakeApiTest : public testing::Test { DNSServiceGetAddrInfoReply reply_callback; absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -361,10 +485,10 @@ class AppleDnsImplFakeApiTest : public testing::Test { auto query = resolver_->resolve( hostname, dns_lookup_family, - [&dns_callback_executed, dns_lookup_family, address_type]( + [&dns_callback_executed, dns_lookup_family, address_type, expected_address_size]( DnsResolver::ResolutionStatus status, std::list&& response) -> void { EXPECT_EQ(DnsResolver::ResolutionStatus::Success, status); - EXPECT_EQ(1, response.size()); + EXPECT_EQ(expected_address_size, response.size()); if (dns_lookup_family == DnsLookupFamily::Auto) { if (address_type == AddressType::V4) { @@ -381,6 +505,23 @@ class AppleDnsImplFakeApiTest : public testing::Test { EXPECT_NE(nullptr, response.front().address_->ip()->ipv4()); } } + + if (dns_lookup_family == DnsLookupFamily::All) { + switch 
(address_type) { + case AddressType::V4: + EXPECT_NE(nullptr, response.front().address_->ip()->ipv4()); + break; + case AddressType::V6: + EXPECT_NE(nullptr, response.front().address_->ip()->ipv6()); + break; + case AddressType::Both: + EXPECT_NE(nullptr, response.front().address_->ip()->ipv4()); + EXPECT_NE(nullptr, response.back().address_->ip()->ipv6()); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } dns_callback_executed.Notify(); }); ASSERT_NE(nullptr, query); @@ -429,10 +570,8 @@ TEST_F(AppleDnsImplFakeApiTest, ErrorInSocketAccess) { DNSServiceGetAddrInfoReply reply_callback; absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(-1)); @@ -466,10 +605,8 @@ TEST_F(AppleDnsImplFakeApiTest, InvalidFileEvent) { DNSServiceGetAddrInfoReply reply_callback; absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -503,10 +640,8 @@ TEST_F(AppleDnsImplFakeApiTest, ErrorInProcessResult) { DNSServiceGetAddrInfoReply reply_callback; absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -558,10 +693,8 @@ TEST_F(AppleDnsImplFakeApiTest, QuerySynchronousCompletion) { Network::Address::Ipv4Instance address(&addr4); absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll( // Have the API call synchronously call the provided callback. 
WithArgs<5, 6>(Invoke([&](DNSServiceGetAddrInfoReply callback, void* context) -> void { @@ -618,10 +751,8 @@ TEST_F(AppleDnsImplFakeApiTest, MultipleAddresses) { DNSServiceGetAddrInfoReply reply_callback; absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -649,6 +780,7 @@ TEST_F(AppleDnsImplFakeApiTest, MultipleAddresses) { dns_callback_executed.WaitForNotification(); } +// TODO: write a TEST_P harness to eliminate duplication. TEST_F(AppleDnsImplFakeApiTest, AutoOnlyV6IfBothV6andV4) { fallbackWith(DnsLookupFamily::Auto, AddressType::Both); } @@ -673,6 +805,18 @@ TEST_F(AppleDnsImplFakeApiTest, V4PreferredV4IfOnlyV4) { fallbackWith(DnsLookupFamily::V4Preferred, AddressType::V4); } +TEST_F(AppleDnsImplFakeApiTest, AllIfBothV6andV4) { + fallbackWith(DnsLookupFamily::All, AddressType::Both, 2 /* expected_address_size*/); +} + +TEST_F(AppleDnsImplFakeApiTest, AllV6IfOnlyV6) { + fallbackWith(DnsLookupFamily::All, AddressType::V6); +} + +TEST_F(AppleDnsImplFakeApiTest, AllV4IfOnlyV4) { + fallbackWith(DnsLookupFamily::All, AddressType::V4); +} + TEST_F(AppleDnsImplFakeApiTest, MultipleAddressesSecondOneFails) { const std::string hostname = "foo.com"; sockaddr_in addr4; @@ -684,10 +828,8 @@ TEST_F(AppleDnsImplFakeApiTest, MultipleAddressesSecondOneFails) { DNSServiceGetAddrInfoReply reply_callback; absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -734,10 +876,8 @@ TEST_F(AppleDnsImplFakeApiTest, MultipleQueries) { absl::Notification dns_callback_executed2; // Start first query. - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -805,10 +945,8 @@ TEST_F(AppleDnsImplFakeApiTest, MultipleQueriesOneFails) { absl::Notification dns_callback_executed2; // Start first query. 
- EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -869,10 +1007,8 @@ TEST_F(AppleDnsImplFakeApiTest, ResultWithOnlyNonAdditiveReplies) { DNSServiceGetAddrInfoReply reply_callback; absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -904,10 +1040,8 @@ TEST_F(AppleDnsImplFakeApiTest, ResultWithNullAddress) { Network::Address::Ipv4Instance address(&addr4); DNSServiceGetAddrInfoReply reply_callback; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); @@ -941,10 +1075,8 @@ TEST_F(AppleDnsImplFakeApiTest, DeallocateOnDestruction) { DNSServiceGetAddrInfoReply reply_callback; absl::Notification dns_callback_executed; - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, 0, + StrEq(hostname.c_str()), _, _)) .WillOnce(DoAll( SaveArg<5>(&reply_callback), WithArgs<0>(Invoke([](DNSServiceRef* ref) -> void { *ref = new _DNSServiceRef_t{}; })), diff --git a/test/extensions/network/dns_resolver/cares/BUILD b/test/extensions/network/dns_resolver/cares/BUILD new file mode 100644 index 000000000000..511848fc8c69 --- /dev/null +++ b/test/extensions/network/dns_resolver/cares/BUILD @@ -0,0 +1,41 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "dns_impl_test", + srcs = ["dns_impl_test.cc"], + args = [ + # Force creation of c-ares DnsResolverImpl when running test on macOS. + "--runtime-feature-disable-for-tests=envoy.restart_features.use_apple_api_for_dns_lookups", + ], + # TODO(envoyproxy/windows-dev): Under winsock2 this is behaving unusually for windows, even as + # 127.0.0.1 and ::1 are explicitly added to `c:\windows\system32\drivers\etc\hosts` ... 
see: + # https://gist.github.com/wrowe/24fe5b93b58bb444bce7ecc134905395 + tags = ["fails_on_clang_cl"], + deps = [ + "//envoy/event:dispatcher_interface", + "//envoy/network:address_interface", + "//envoy/network:dns_interface", + "//source/common/buffer:buffer_lib", + "//source/common/event:dispatcher_includes", + "//source/common/event:dispatcher_lib", + "//source/common/network:address_lib", + "//source/common/network:filter_lib", + "//source/common/network:listen_socket_lib", + "//source/common/stats:stats_lib", + "//source/common/stream_info:stream_info_lib", + "//source/extensions/network/dns_resolver/cares:config", + "//test/mocks/network:network_mocks", + "//test/test_common:environment_lib", + "//test/test_common:network_utility_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) diff --git a/test/common/network/dns_impl_test.cc b/test/extensions/network/dns_resolver/cares/dns_impl_test.cc similarity index 83% rename from test/common/network/dns_impl_test.cc rename to test/extensions/network/dns_resolver/cares/dns_impl_test.cc index 0d5afe756ce2..d60dc911e65f 100644 --- a/test/common/network/dns_impl_test.cc +++ b/test/extensions/network/dns_resolver/cares/dns_impl_test.cc @@ -13,11 +13,11 @@ #include "source/common/common/utility.h" #include "source/common/event/dispatcher_impl.h" #include "source/common/network/address_impl.h" -#include "source/common/network/dns_impl.h" #include "source/common/network/filter_impl.h" #include "source/common/network/listen_socket_impl.h" #include "source/common/network/utility.h" #include "source/common/stream_info/stream_info_impl.h" +#include "source/extensions/network/dns_resolver/cares/dns_impl.h" #include "test/mocks/network/mocks.h" #include "test/test_common/environment.h" @@ -45,6 +45,7 @@ using testing::IsSupersetOf; using testing::NiceMock; using testing::Not; using testing::Return; +using testing::UnorderedElementsAreArray; namespace Envoy { namespace Network { @@ -358,7 +359,29 @@ TEST_F(DnsImplConstructor, SupportsCustomResolvers) { auto addr4 = Network::Utility::parseInternetAddressAndPort("127.0.0.1:54"); char addr6str[INET6_ADDRSTRLEN]; auto addr6 = Network::Utility::parseInternetAddressAndPort("[::1]:54"); - auto resolver = dispatcher_->createDnsResolver({addr4, addr6}, dns_resolver_options_); + + // convert the address and options into typed_dns_resolver_config + envoy::config::core::v3::Address dns_resolvers; + Network::Utility::addressToProtobufAddress( + Network::Address::Ipv4Instance(addr4->ip()->addressAsString(), addr4->ip()->port()), + dns_resolvers); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + cares.add_resolvers()->MergeFrom(dns_resolvers); + Network::Utility::addressToProtobufAddress( + Network::Address::Ipv6Instance(addr6->ip()->addressAsString(), addr6->ip()->port()), + dns_resolvers); + cares.add_resolvers()->MergeFrom(dns_resolvers); + // copy over dns_resolver_options_ + cares.mutable_dns_resolver_options()->MergeFrom(dns_resolver_options_); + + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); + Network::DnsResolverFactory& dns_resolver_factory = + createDnsResolverFactoryFromTypedConfig(typed_dns_resolver_config); + auto resolver = + dns_resolver_factory.createDnsResolver(*dispatcher_, *api_, typed_dns_resolver_config); + auto peer = 
std::make_unique(dynamic_cast(resolver.get())); ares_addr_port_node* resolvers; int result = ares_get_servers_ports(peer->channel(), &resolvers); @@ -373,6 +396,76 @@ TEST_F(DnsImplConstructor, SupportsCustomResolvers) { ares_free_data(resolvers); } +TEST_F(DnsImplConstructor, SupportsMultipleCustomResolversAndDnsOptions) { + char addr4str[INET_ADDRSTRLEN]; + // we pick a port that isn't 53 as the default resolve.conf might be + // set to point to localhost. + auto addr4_a = Network::Utility::parseInternetAddressAndPort("1.2.3.4:80"); + auto addr4_b = Network::Utility::parseInternetAddressAndPort("5.6.7.8:81"); + char addr6str[INET6_ADDRSTRLEN]; + auto addr6_a = Network::Utility::parseInternetAddressAndPort("[::2]:90"); + auto addr6_b = Network::Utility::parseInternetAddressAndPort("[::3]:91"); + + // convert the address and options into typed_dns_resolver_config + envoy::config::core::v3::Address dns_resolvers; + Network::Utility::addressToProtobufAddress( + Network::Address::Ipv4Instance(addr4_a->ip()->addressAsString(), addr4_a->ip()->port()), + dns_resolvers); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + // copy addr4_a + cares.add_resolvers()->MergeFrom(dns_resolvers); + // copy addr4_b + Network::Utility::addressToProtobufAddress( + Network::Address::Ipv4Instance(addr4_b->ip()->addressAsString(), addr4_b->ip()->port()), + dns_resolvers); + cares.add_resolvers()->MergeFrom(dns_resolvers); + // copy addr6_a + Network::Utility::addressToProtobufAddress( + Network::Address::Ipv6Instance(addr6_a->ip()->addressAsString(), addr6_a->ip()->port()), + dns_resolvers); + cares.add_resolvers()->MergeFrom(dns_resolvers); + // copy addr6_b + Network::Utility::addressToProtobufAddress( + Network::Address::Ipv6Instance(addr6_b->ip()->addressAsString(), addr6_b->ip()->port()), + dns_resolvers); + cares.add_resolvers()->MergeFrom(dns_resolvers); + + // copy over dns_resolver_options_ + cares.mutable_dns_resolver_options()->MergeFrom(dns_resolver_options_); + + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); + Network::DnsResolverFactory& dns_resolver_factory = + createDnsResolverFactoryFromTypedConfig(typed_dns_resolver_config); + auto resolver = + dns_resolver_factory.createDnsResolver(*dispatcher_, *api_, typed_dns_resolver_config); + auto peer = std::make_unique(dynamic_cast(resolver.get())); + ares_addr_port_node* resolvers; + int result = ares_get_servers_ports(peer->channel(), &resolvers); + EXPECT_EQ(result, ARES_SUCCESS); + // check v4 + EXPECT_EQ(resolvers->family, AF_INET); + EXPECT_EQ(resolvers->udp_port, 80); + EXPECT_STREQ(inet_ntop(AF_INET, &resolvers->addr.addr4, addr4str, INET_ADDRSTRLEN), "1.2.3.4"); + EXPECT_EQ(resolvers->next->family, AF_INET); + EXPECT_EQ(resolvers->next->udp_port, 81); + EXPECT_STREQ(inet_ntop(AF_INET, &resolvers->next->addr.addr4, addr4str, INET_ADDRSTRLEN), + "5.6.7.8"); + // check v6 + EXPECT_EQ(resolvers->next->next->family, AF_INET6); + EXPECT_EQ(resolvers->next->next->udp_port, 90); + EXPECT_STREQ(inet_ntop(AF_INET6, &resolvers->next->next->addr.addr6, addr6str, INET6_ADDRSTRLEN), + "::2"); + EXPECT_EQ(resolvers->next->next->next->family, AF_INET6); + EXPECT_EQ(resolvers->next->next->next->udp_port, 91); + EXPECT_STREQ( + inet_ntop(AF_INET6, &resolvers->next->next->next->addr.addr6, addr6str, INET6_ADDRSTRLEN), + "::3"); + + 
ares_free_data(resolvers); +} + // Custom instance that dispatches everything to a regular instance except for asString(), where // it borks the port. class CustomInstance : public Address::Instance { @@ -409,7 +502,25 @@ class CustomInstance : public Address::Instance { TEST_F(DnsImplConstructor, SupportCustomAddressInstances) { auto test_instance(std::make_shared("127.0.0.1", 45)); EXPECT_EQ(test_instance->asString(), "127.0.0.1:borked_port_45"); - auto resolver = dispatcher_->createDnsResolver({test_instance}, dns_resolver_options_); + + // Construct a typed_dns_resolver_config based on the IP address and port number. + envoy::config::core::v3::Address dns_resolvers; + Network::Utility::addressToProtobufAddress( + Network::Address::Ipv4Instance(test_instance->ip()->addressAsString(), + test_instance->ip()->port()), + dns_resolvers); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + cares.add_resolvers()->MergeFrom(dns_resolvers); + // copy over dns_resolver_options_ + cares.mutable_dns_resolver_options()->MergeFrom(dns_resolver_options_); + + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); + Network::DnsResolverFactory& dns_resolver_factory = + createDnsResolverFactoryFromTypedConfig(typed_dns_resolver_config); + auto resolver = + dns_resolver_factory.createDnsResolver(*dispatcher_, *api_, typed_dns_resolver_config); auto peer = std::make_unique(dynamic_cast(resolver.get())); ares_addr_port_node* resolvers; int result = ares_get_servers_ports(peer->channel(), &resolvers); @@ -424,9 +535,22 @@ TEST_F(DnsImplConstructor, SupportCustomAddressInstances) { TEST_F(DnsImplConstructor, BadCustomResolvers) { envoy::config::core::v3::Address pipe_address; pipe_address.mutable_pipe()->set_path("foo"); - auto pipe_instance = Network::Utility::protobufAddressToAddress(pipe_address); - EXPECT_THROW_WITH_MESSAGE(dispatcher_->createDnsResolver({pipe_instance}, dns_resolver_options_), - EnvoyException, "DNS resolver 'foo' is not an IP address"); + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; + cares.add_resolvers()->MergeFrom(pipe_address); + // copy over dns_resolver_options_ + cares.mutable_dns_resolver_options()->MergeFrom(dns_resolver_options_); + + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); + EXPECT_THROW_WITH_MESSAGE( + { + Network::DnsResolverFactory& dns_resolver_factory = + createDnsResolverFactoryFromTypedConfig(typed_dns_resolver_config); + auto resolver = + dns_resolver_factory.createDnsResolver(*dispatcher_, *api_, typed_dns_resolver_config); + }, + EnvoyException, "DNS resolver 'foo' is not an IP address"); } class DnsImplTest : public testing::TestWithParam { @@ -434,6 +558,30 @@ class DnsImplTest : public testing::TestWithParam { DnsImplTest() : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")) {} + envoy::config::core::v3::TypedExtensionConfig getTypedDnsResolverConfig( + const std::vector& resolver_inst, + const envoy::config::core::v3::DnsResolverOptions& dns_resolver_options) { + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig cares; 
+ auto dns_resolvers = envoy::config::core::v3::Address(); + + // Setup the DNS resolver address. Could be either IPv4 or IPv6. + for (const auto& resolver : resolver_inst) { + if (resolver->ip() != nullptr) { + dns_resolvers.mutable_socket_address()->set_address(resolver->ip()->addressAsString()); + dns_resolvers.mutable_socket_address()->set_port_value(resolver->ip()->port()); + cares.add_resolvers()->MergeFrom(dns_resolvers); + } + } + // Copy over the dns_resolver_options_. + cares.mutable_dns_resolver_options()->MergeFrom(dns_resolver_options); + // setup the typed config + typed_dns_resolver_config.mutable_typed_config()->PackFrom(cares); + typed_dns_resolver_config.set_name(std::string(Network::CaresDnsResolver)); + + return typed_dns_resolver_config; + } + void SetUp() override { // Instantiate TestDnsServer and listen on a random port on the loopback address. server_ = std::make_unique(*dispatcher_); @@ -441,14 +589,21 @@ class DnsImplTest : public testing::TestWithParam { Network::Test::getCanonicalLoopbackAddress(GetParam())); listener_ = dispatcher_->createListener(socket_, *server_, true); updateDnsResolverOptions(); + // Create a resolver options on stack here to emulate what actually happens in envoy bootstrap. envoy::config::core::v3::DnsResolverOptions dns_resolver_options = dns_resolver_options_; + envoy::config::core::v3::TypedExtensionConfig typed_dns_resolver_config; + if (setResolverInConstructor()) { - resolver_ = dispatcher_->createDnsResolver({socket_->connectionInfoProvider().localAddress()}, - dns_resolver_options); + typed_dns_resolver_config = getTypedDnsResolverConfig( + {socket_->connectionInfoProvider().localAddress()}, dns_resolver_options); } else { - resolver_ = dispatcher_->createDnsResolver({}, dns_resolver_options); + typed_dns_resolver_config = getTypedDnsResolverConfig({}, dns_resolver_options); } + Network::DnsResolverFactory& dns_resolver_factory = + createDnsResolverFactoryFromTypedConfig(typed_dns_resolver_config); + resolver_ = + dns_resolver_factory.createDnsResolver(*dispatcher_, *api_, typed_dns_resolver_config); // Point c-ares at the listener with no search domains and TCP-only. 
peer_ = std::make_unique(dynamic_cast(resolver_.get())); @@ -502,7 +657,7 @@ class DnsImplTest : public testing::TestWithParam { if (address == "localhost" && lookup_family == DnsLookupFamily::V4Only) { EXPECT_THAT(address_as_string_list, IsSupersetOf(expected_results)); } else { - EXPECT_EQ(expected_results, address_as_string_list); + EXPECT_THAT(address_as_string_list, UnorderedElementsAreArray(expected_results)); } for (const auto& expected_absent_result : expected_absent_results) { @@ -850,6 +1005,33 @@ TEST_P(DnsImplTest, V4PreferredV4IfOnlyV4) { dispatcher_->run(Event::Dispatcher::RunType::Block); } +TEST_P(DnsImplTest, AllIfBothV6andV4) { + server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A); + server_->addHosts("some.good.domain", {"1::2"}, RecordType::AAAA); + + EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::All, + DnsResolver::ResolutionStatus::Success, + {{"201.134.56.7"}, {"1::2"}}, {}, absl::nullopt)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_P(DnsImplTest, AllV6IfOnlyV6) { + server_->addHosts("some.good.domain", {"1::2"}, RecordType::AAAA); + + EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::All, + DnsResolver::ResolutionStatus::Success, {{"1::2"}}, {}, + absl::nullopt)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_P(DnsImplTest, AllV4IfOnlyV4) { + server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A); + EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::All, + DnsResolver::ResolutionStatus::Success, + {{"201.134.56.7"}}, {}, absl::nullopt)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + // Validate working of cancellation provided by ActiveDnsQuery return. TEST_P(DnsImplTest, Cancel) { server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A); diff --git a/test/extensions/stats_sinks/common/statsd/statsd_test.cc b/test/extensions/stats_sinks/common/statsd/statsd_test.cc index bdcb16fb0238..3d20059442c1 100644 --- a/test/extensions/stats_sinks/common/statsd/statsd_test.cc +++ b/test/extensions/stats_sinks/common/statsd/statsd_test.cc @@ -141,6 +141,20 @@ TEST_F(TcpStatsdSinkTest, SiSuffix) { tls_.shutdownThread(); } +TEST_F(TcpStatsdSinkTest, ScaledPercent) { + expectCreateConnection(); + + NiceMock items; + items.name_ = "items"; + items.unit_ = Stats::Histogram::Unit::Percent; + + EXPECT_CALL(*connection_, write(BufferStringEqual("envoy.items:0.5|h\n"), _)); + sink_->onHistogramComplete(items, Stats::Histogram::PercentScale / 2); + + EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush)); + tls_.shutdownThread(); +} + // Verify that when there is no statsd host we correctly empty all output buffers so we don't // infinitely buffer. 
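The ScaledPercent expectations here and in the UDP sink test below encode the same convention: a histogram with Stats::Histogram::Unit::Percent receives samples pre-scaled by Stats::Histogram::PercentScale, and the statsd sinks divide by that constant before serializing, so PercentScale / 2 is emitted as "0.5" (the TCP sink additionally appends a trailing newline). A small illustration of that arithmetic; statsdPercentLine() is a hypothetical helper written for this note, not part of the sink API:

#include <cstdint>
#include <string>
#include "absl/strings/str_cat.h"

// Turn a pre-scaled percent sample into the "<prefix>.<name>:<value>|h" form asserted above.
std::string statsdPercentLine(const std::string& prefix, const std::string& name,
                              uint64_t scaled_sample, uint64_t percent_scale) {
  const double value = static_cast<double>(scaled_sample) / static_cast<double>(percent_scale);
  return absl::StrCat(prefix, ".", name, ":", value, "|h");
}

// statsdPercentLine("envoy", "items", percent_scale / 2, percent_scale) == "envoy.items:0.5|h"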
TEST_F(TcpStatsdSinkTest, NoHost) { diff --git a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc index ed2f816fee84..f57a94360b4a 100644 --- a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc +++ b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc @@ -363,6 +363,23 @@ TEST(UdpStatsdSinkTest, SiSuffix) { tls_.shutdownThread(); } +TEST(UdpStatsdSinkTest, ScaledPercent) { + NiceMock snapshot; + auto writer_ptr = std::make_shared>(); + NiceMock tls_; + UdpStatsdSink sink(tls_, writer_ptr, false); + + NiceMock items; + items.name_ = "items"; + items.unit_ = Stats::Histogram::Unit::Percent; + + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), + write("envoy.items:0.5|h")); + sink.onHistogramComplete(items, Stats::Histogram::PercentScale / 2); + + tls_.shutdownThread(); +} + TEST(UdpStatsdSinkWithTagsTest, CheckActualStats) { NiceMock snapshot; auto writer_ptr = std::make_shared>(); diff --git a/test/extensions/tracers/xray/localized_sampling_test.cc b/test/extensions/tracers/xray/localized_sampling_test.cc index c52fc80703fb..4e47b99c3ef0 100644 --- a/test/extensions/tracers/xray/localized_sampling_test.cc +++ b/test/extensions/tracers/xray/localized_sampling_test.cc @@ -24,24 +24,43 @@ class LocalizedSamplingStrategyTest : public ::testing::Test { TEST_F(LocalizedSamplingStrategyTest, EmptyRules) { NiceMock random_generator; LocalizedSamplingStrategy strategy{"", random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); } TEST_F(LocalizedSamplingStrategyTest, BadJson) { NiceMock random_generator; LocalizedSamplingStrategy strategy{"{{}", random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); } TEST_F(LocalizedSamplingStrategyTest, EmptyRulesDefaultRate) { NiceMock random_generator; LocalizedSamplingStrategy strategy{"{{}", random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); // Make a copy of default_manifest_(LocalizedSamplingManifest object) since the // object returned is a const reference and defaultRule() function is not a // 'const member function' of LocalizedSamplingManifest class. 
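For orientation while reading the many negative cases below: assembled from the field names used throughout these JSON literals, a fully valid version-2 sampling document looks roughly like the sketch that follows. Any validation failure (malformed JSON, wrong version, out-of-range rate, negative fixed_target, wrong field type) leaves manifest().hasCustomRules() false; the default rule then comes from the document's own "default" block when that block is itself valid (rate 0.1 in most of these tests) and from the built-in 5% rule otherwise.

// Illustrative manifest only; the values are placeholders taken from the tests.
constexpr auto example_rules_json = R"EOF(
{
  "version": 2,
  "rules": [
    {
      "description": "X-Ray rule",
      "host": "*",
      "http_method": "*",
      "url_path": "/api/move/*",
      "fixed_target": 0,
      "rate": 0.05
    }
  ],
  "default": {
    "fixed_target": 1,
    "rate": 0.1
  }
}
)EOF";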
- LocalizedSamplingManifest default_manifest_copy{strategy.defaultManifest()}; - ASSERT_EQ(default_manifest_copy.defaultRule().rate(), 0.05); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + // default sampling rate of 0.05 + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.05); +} + +TEST_F(LocalizedSamplingStrategyTest, MissingRulesUseCustomDefault) { + NiceMock random_generator; + constexpr auto rules_json = R"EOF( +{ + "version": 2, + "rules": [], + "default": { + "fixed_target": 1, + "rate": 0.1 + } +} + )EOF"; + LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.1); } TEST_F(LocalizedSamplingStrategyTest, ValidCustomRules) { @@ -66,10 +85,40 @@ TEST_F(LocalizedSamplingStrategyTest, ValidCustomRules) { } )EOF"; LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; - ASSERT_FALSE(strategy.usingDefaultManifest()); + ASSERT_TRUE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.1); +} + +TEST_F(LocalizedSamplingStrategyTest, InvalidDefaultRuleRate) { + NiceMock random_generator; + constexpr auto rules_json = R"EOF( +{ + "version": 2, + "rules": [ + { + "description": "X-Ray rule", + "host": "*", + "http_method": "*", + "url_path": "/api/move/*", + "fixed_target": 0, + "rate": 0.5 + } + ], + "default": { + "fixed_target": 1, + "rate": 1.5 + } +} + )EOF"; + LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + // default sampling rate of 0.05 + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.05); } -TEST_F(LocalizedSamplingStrategyTest, InvalidRate) { +TEST_F(LocalizedSamplingStrategyTest, InvalidRulesRate) { NiceMock random_generator; constexpr auto rules_json = R"EOF( { @@ -91,7 +140,9 @@ TEST_F(LocalizedSamplingStrategyTest, InvalidRate) { } )EOF"; LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0); } TEST_F(LocalizedSamplingStrategyTest, InvalidFixedTarget) { @@ -116,7 +167,7 @@ TEST_F(LocalizedSamplingStrategyTest, InvalidFixedTarget) { } )EOF"; LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); } TEST_F(LocalizedSamplingStrategyTest, DefaultRuleMissingRate) { @@ -140,7 +191,10 @@ TEST_F(LocalizedSamplingStrategyTest, DefaultRuleMissingRate) { } )EOF"; LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + // default sampling rate of 0.05 + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.05); } TEST_F(LocalizedSamplingStrategyTest, DefaultRuleMissingFixedTarget) { @@ -164,7 +218,10 @@ TEST_F(LocalizedSamplingStrategyTest, DefaultRuleMissingFixedTarget) { } )EOF"; LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; - 
ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + // default sampling rate of 0.05 + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.05); } TEST_F(LocalizedSamplingStrategyTest, WrongVersion) { @@ -189,7 +246,10 @@ TEST_F(LocalizedSamplingStrategyTest, WrongVersion) { } )EOF"; LocalizedSamplingStrategy strategy{wrong_version, random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + // default sampling rate of 0.05 + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.05); } TEST_F(LocalizedSamplingStrategyTest, MissingVersion) { @@ -213,7 +273,10 @@ TEST_F(LocalizedSamplingStrategyTest, MissingVersion) { } )EOF"; LocalizedSamplingStrategy strategy{missing_version, random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + // default sampling rate of 0.05 + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.05); } TEST_F(LocalizedSamplingStrategyTest, MissingDefaultRules) { @@ -234,7 +297,10 @@ TEST_F(LocalizedSamplingStrategyTest, MissingDefaultRules) { } )EOF"; LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + // default sampling rate of 0.05 + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.05); } TEST_F(LocalizedSamplingStrategyTest, CustomRuleHostIsNotString) { @@ -259,7 +325,9 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleHostIsNotString) { } )EOF"; LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.1); } TEST_F(LocalizedSamplingStrategyTest, CustomRuleHttpMethodIsNotString) { @@ -284,7 +352,9 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleHttpMethodIsNotString) { } )EOF"; LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.1); } TEST_F(LocalizedSamplingStrategyTest, CustomRuleUrlPathIsNotString) { @@ -309,7 +379,10 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleUrlPathIsNotString) { } )EOF"; LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + // custom default rate + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.1); } TEST_F(LocalizedSamplingStrategyTest, CustomRuleMissingFixedTarget) { @@ -333,7 +406,10 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleMissingFixedTarget) { } )EOF"; LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest 
manifest_copy{strategy.manifest()}; + // custom default rate + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.1); } TEST_F(LocalizedSamplingStrategyTest, CustomRuleMissingRate) { @@ -357,7 +433,10 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleMissingRate) { } )EOF"; LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + // custom default rate + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.1); } TEST_F(LocalizedSamplingStrategyTest, CustomRuleArrayElementWithWrongType) { @@ -382,10 +461,13 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleArrayElementWithWrongType) { } )EOF"; LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + // custom default rate + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.1); } -TEST_F(LocalizedSamplingStrategyTest, CustomRuleNegativeFixedRate) { +TEST_F(LocalizedSamplingStrategyTest, CustomRuleNegativeFixedTarget) { NiceMock random_generator; constexpr auto rules_json = R"EOF( { @@ -407,7 +489,10 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleNegativeFixedRate) { } )EOF"; LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + // custom default rate + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.1); } TEST_F(LocalizedSamplingStrategyTest, CustomRuleNegativeRate) { @@ -422,17 +507,20 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleNegativeRate) { "http_method": "*", "url_path": "/api/move/*", "fixed_target": 0, - "rate": 0.05 + "rate": -0.05 } ], "default": { "fixed_target": 1, - "rate": -0.1 + "rate": 0.1 } } )EOF"; LocalizedSamplingStrategy strategy{rules_json, random_generator, time_system_}; - ASSERT_TRUE(strategy.usingDefaultManifest()); + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + LocalizedSamplingManifest manifest_copy{strategy.manifest()}; + // custom default rate + ASSERT_EQ(manifest_copy.defaultRule().rate(), 0.1); } TEST_F(LocalizedSamplingStrategyTest, TraceOnlyFromReservoir) { @@ -459,12 +547,11 @@ TEST_F(LocalizedSamplingStrategyTest, TraceOnlyFromReservoir) { )EOF"; LocalizedSamplingStrategy strategy{rules_json, rng, time_system_}; - ASSERT_FALSE(strategy.usingDefaultManifest()); + ASSERT_TRUE(strategy.manifest().hasCustomRules()); SamplingRequest req; ASSERT_TRUE(strategy.shouldTrace(req)); // first one should be traced - int i = 10; - while (i-- > 0) { + for (int i = 0; i < 10; ++i) { ASSERT_FALSE(strategy.shouldTrace(req)); } } @@ -493,11 +580,10 @@ TEST_F(LocalizedSamplingStrategyTest, TraceFromReservoirAndByRate) { )EOF"; LocalizedSamplingStrategy strategy{rules_json, rng, time_system_}; - ASSERT_FALSE(strategy.usingDefaultManifest()); + ASSERT_TRUE(strategy.manifest().hasCustomRules()); SamplingRequest req; - int i = 10; - while (i-- > 0) { + for (int i = 0; i < 10; ++i) { ASSERT_TRUE(strategy.shouldTrace(req)); } } @@ -530,12 +616,11 @@ TEST_F(LocalizedSamplingStrategyTest, NoMatchingHost) { )EOF"; LocalizedSamplingStrategy strategy{rules_json, rng, time_system_}; - ASSERT_FALSE(strategy.usingDefaultManifest()); + 
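A rough model of the per-rule decision exercised by the reservoir tests above and the no-match tests that follow; this is an illustration consistent with the assertions, not the actual implementation: a rule's fixed_target reservoir admits the first N requests in a given second, and requests beyond that are admitted only when a random percentage falls below the configured rate.

// Sketch only; `taken_this_second` would be reset once per second.
bool shouldTraceSketch(uint64_t& taken_this_second, uint64_t fixed_target, double rate,
                       uint64_t random_percent /* 0-99, e.g. the mocked rng value */) {
  if (taken_this_second < fixed_target) {
    ++taken_this_second; // Reservoir: trace the first `fixed_target` requests each second.
    return true;
  }
  return static_cast<double>(random_percent) < rate * 100.0; // Otherwise sample by rate.
}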
ASSERT_TRUE(strategy.manifest().hasCustomRules()); SamplingRequest req; req.host_ = "amazon.com"; // host does not match, so default rules apply. - int i = 10; - while (i-- > 0) { + for (int i = 0; i < 10; ++i) { ASSERT_FALSE(strategy.shouldTrace(req)); } } @@ -568,12 +653,11 @@ TEST_F(LocalizedSamplingStrategyTest, NoMatchingHttpMethod) { )EOF"; LocalizedSamplingStrategy strategy{rules_json, rng, time_system_}; - ASSERT_FALSE(strategy.usingDefaultManifest()); + ASSERT_TRUE(strategy.manifest().hasCustomRules()); SamplingRequest req; req.http_method_ = "GET"; // method does not match, so default rules apply. - int i = 10; - while (i-- > 0) { + for (int i = 0; i < 10; ++i) { ASSERT_FALSE(strategy.shouldTrace(req)); } } @@ -606,12 +690,61 @@ TEST_F(LocalizedSamplingStrategyTest, NoMatchingPath) { )EOF"; LocalizedSamplingStrategy strategy{rules_json, rng, time_system_}; - ASSERT_FALSE(strategy.usingDefaultManifest()); + ASSERT_TRUE(strategy.manifest().hasCustomRules()); SamplingRequest req; req.http_url_ = "/"; // method does not match, so default rules apply. - int i = 10; - while (i-- > 0) { + for (int i = 0; i < 10; ++i) { + ASSERT_FALSE(strategy.shouldTrace(req)); + } +} + +TEST_F(LocalizedSamplingStrategyTest, CustomDefaultRule) { + NiceMock rng; + // this following value doesn't affect the test + EXPECT_CALL(rng, random()).WillRepeatedly(Return(50 /*50 percent*/)); + + constexpr auto rules_json = R"EOF( +{ + "version": 2, + "default": { + "fixed_target": 0, + "rate": 0 + } +} + )EOF"; + + LocalizedSamplingStrategy strategy{rules_json, rng, time_system_}; + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + + SamplingRequest req; + req.http_url_ = "/"; + for (int i = 0; i < 10; ++i) { + ASSERT_FALSE(strategy.shouldTrace(req)); + } +} + +TEST_F(LocalizedSamplingStrategyTest, InvalidCustomDefaultRule) { + NiceMock rng; + // this following value doesn't affect the test + EXPECT_CALL(rng, random()).WillRepeatedly(Return(50 /*50 percent*/)); + constexpr auto rules_json = R"EOF( +{ +"version": 2, +"default": { + "fixed_target": 0, + "rate": 2.0 + } +} +)EOF"; + + LocalizedSamplingStrategy strategy{rules_json, rng, time_system_}; + ASSERT_FALSE(strategy.manifest().hasCustomRules()); + + SamplingRequest req; + req.http_url_ = "/"; + ASSERT_TRUE(strategy.shouldTrace(req)); // The default rule traces the first request each second + for (int i = 0; i < 10; ++i) { ASSERT_FALSE(strategy.shouldTrace(req)); } } diff --git a/test/extensions/tracers/xray/tracer_test.cc b/test/extensions/tracers/xray/tracer_test.cc index 8f81c9138b47..71541239aa49 100644 --- a/test/extensions/tracers/xray/tracer_test.cc +++ b/test/extensions/tracers/xray/tracer_test.cc @@ -36,13 +36,13 @@ struct MockDaemonBroker : DaemonBroker { }; struct TraceProperties { - TraceProperties(const std::string span_name, const std::string origin_name, - const std::string aws_key_value, const std::string operation_name, - const std::string http_method, const std::string http_url, - const std::string user_agent) + TraceProperties(const std::string& span_name, const std::string& origin_name, + const std::string& aws_key_value, const std::string& operation_name, + const std::string& http_method, const std::string& http_url, + const std::string& user_agent, const std::string& direction) : span_name(span_name), origin_name(origin_name), aws_key_value(aws_key_value), operation_name(operation_name), http_method(http_method), http_url(http_url), - user_agent(user_agent) {} + user_agent(user_agent), direction(direction) {} const std::string 
span_name; const std::string origin_name; const std::string aws_key_value; @@ -50,17 +50,19 @@ struct TraceProperties { const std::string http_method; const std::string http_url; const std::string user_agent; + const std::string direction; }; class XRayTracerTest : public ::testing::Test { public: XRayTracerTest() : broker_(std::make_unique("127.0.0.1:2000")), - expected_(std::make_unique("Service 1", "AWS::Service::Proxy", - "test_value", "Create", "POST", "/first/second", - "Mozilla/5.0 (Macintosh; Intel Mac OS X)")) {} + expected_(std::make_unique( + "Service 1", "AWS::Service::Proxy", "test_value", "egress hostname", "POST", + "/first/second", "Mozilla/5.0 (Macintosh; Intel Mac OS X)", "egress")) {} absl::flat_hash_map aws_metadata_; NiceMock server_; + NiceMock config_; std::unique_ptr broker_; std::unique_ptr expected_; void commonAsserts(daemon::Segment& s); @@ -75,14 +77,15 @@ void XRayTracerTest::commonAsserts(daemon::Segment& s) { EXPECT_EQ(expected_->http_url, s.http().request().fields().at("url").string_value().c_str()); EXPECT_EQ(expected_->user_agent, s.http().request().fields().at(Tracing::Tags::get().UserAgent).string_value().c_str()); + EXPECT_EQ(expected_->direction, s.annotations().at("direction").c_str()); } TEST_F(XRayTracerTest, SerializeSpanTest) { constexpr uint32_t expected_status_code = 202; constexpr uint32_t expected_content_length = 1337; - constexpr auto expected_client_ip = "10.0.0.100"; - constexpr auto expected_x_forwarded_for = false; - constexpr auto expected_upstream_address = "10.0.0.200"; + constexpr absl::string_view expected_client_ip = "10.0.0.100"; + constexpr bool expected_x_forwarded_for = false; + constexpr absl::string_view expected_upstream_address = "10.0.0.200"; auto on_send = [&](const std::string& json) { ASSERT_FALSE(json.empty()); @@ -92,7 +95,7 @@ TEST_F(XRayTracerTest, SerializeSpanTest) { commonAsserts(s); EXPECT_FALSE(s.trace_id().empty()); EXPECT_FALSE(s.id().empty()); - EXPECT_EQ(1, s.annotations().size()); + EXPECT_EQ(2, s.annotations().size()); EXPECT_TRUE(s.parent_id().empty()); EXPECT_FALSE(s.fault()); /*server error*/ EXPECT_FALSE(s.error()); /*client error*/ @@ -101,20 +104,21 @@ TEST_F(XRayTracerTest, SerializeSpanTest) { s.http().response().fields().at(Tracing::Tags::get().Status).number_value()); EXPECT_EQ(expected_content_length, s.http().response().fields().at("content_length").number_value()); - EXPECT_STREQ(expected_client_ip, - s.http().request().fields().at("client_ip").string_value().c_str()); + EXPECT_EQ(expected_client_ip, + s.http().request().fields().at("client_ip").string_value().c_str()); EXPECT_EQ(expected_x_forwarded_for, s.http().request().fields().at("x_forwarded_for").bool_value()); - EXPECT_STREQ(expected_upstream_address, - s.annotations().at(Tracing::Tags::get().UpstreamAddress).c_str()); + EXPECT_EQ(expected_upstream_address, + s.annotations().at(Tracing::Tags::get().UpstreamAddress).c_str()); }; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Egress)); EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto span = tracer.startSpan(config_, expected_->operation_name, + server_.timeSource().systemTime(), 
absl::nullopt /*headers*/); span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); @@ -126,7 +130,7 @@ TEST_F(XRayTracerTest, SerializeSpanTest) { } TEST_F(XRayTracerTest, SerializeSpanTestServerError) { - constexpr auto expected_error = "true"; + constexpr absl::string_view expected_error = "true"; constexpr uint32_t expected_status_code = 503; auto on_send = [&](const std::string& json) { @@ -144,12 +148,13 @@ TEST_F(XRayTracerTest, SerializeSpanTestServerError) { s.http().response().fields().at(Tracing::Tags::get().Status).number_value()); }; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Egress)); EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto span = tracer.startSpan(config_, expected_->operation_name, + server_.timeSource().systemTime(), absl::nullopt /*headers*/); span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); @@ -177,12 +182,13 @@ TEST_F(XRayTracerTest, SerializeSpanTestClientError) { s.http().response().fields().at(Tracing::Tags::get().Status).number_value()); }; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Egress)); EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto span = tracer.startSpan(config_, expected_->operation_name, + server_.timeSource().systemTime(), absl::nullopt /*headers*/); span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); @@ -209,12 +215,13 @@ TEST_F(XRayTracerTest, SerializeSpanTestClientErrorWithThrottle) { s.http().response().fields().at(Tracing::Tags::get().Status).number_value()); }; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Egress)); EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto span = tracer.startSpan(config_, expected_->operation_name, + server_.timeSource().systemTime(), absl::nullopt /*headers*/); span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); 
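Every serialization test in this file now follows the same setup: the span is started with a Tracing config whose operationName() determines the new "direction" annotation, which is why the expected annotation count in SerializeSpanTest rose from 1 to 2. Condensed, the recurring pattern is the following (names as used in this diff; Tracing::MockConfig is assumed to be the mock type behind the fixture's config_ member):

NiceMock<Tracing::MockConfig> config;
ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Egress));
// startSpan() now takes the tracing config as its first argument...
auto span = tracer.startSpan(config, "operation name" /* placeholder */,
                             server_.timeSource().systemTime(), absl::nullopt /*headers*/);
// ...and the serialized segment is expected to carry annotations["direction"] == "egress"
// alongside the upstream address annotation.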
@@ -235,12 +242,13 @@ TEST_F(XRayTracerTest, SerializeSpanTestWithEmptyValue) { EXPECT_FALSE(s.http().request().fields().contains(Tracing::Tags::get().Status)); }; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Egress)); EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto span = tracer.startSpan(config_, expected_->operation_name, + server_.timeSource().systemTime(), absl::nullopt /*headers*/); span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); @@ -249,8 +257,9 @@ TEST_F(XRayTracerTest, SerializeSpanTestWithEmptyValue) { } TEST_F(XRayTracerTest, SerializeSpanTestWithStatusCodeNotANumber) { - constexpr auto expected_status_code = "ok"; // status code which is not a number - constexpr auto expected_content_length = "huge"; // response length which is not a number + constexpr absl::string_view expected_status_code = "ok"; // status code which is not a number + constexpr absl::string_view expected_content_length = + "huge"; // response length which is not a number auto on_send = [&](const std::string& json) { ASSERT_FALSE(json.empty()); @@ -265,12 +274,13 @@ TEST_F(XRayTracerTest, SerializeSpanTestWithStatusCodeNotANumber) { EXPECT_FALSE(s.http().request().fields().contains("content_length")); }; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Egress)); EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto span = tracer.startSpan(config_, expected_->operation_name, + server_.timeSource().systemTime(), absl::nullopt /*headers*/); span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); @@ -318,15 +328,15 @@ TEST_F(XRayTracerTest, GetTraceId) { } TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { - NiceMock config; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); const auto& broker = *broker_; Tracer tracer{expected_->span_name, "", aws_metadata_, std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; // Span id taken from random generator EXPECT_CALL(server_.api_.random_, random()).WillOnce(Return(999)); - auto parent_span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto parent_span = tracer.startSpan(config_, expected_->operation_name, + server_.timeSource().systemTime(), absl::nullopt /*headers*/); const XRay::Span* xray_parent_span = static_cast(parent_span.get()); auto on_send = [&](const std::string& json) { @@ -345,17 +355,17 @@ TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { 
// Span id taken from random generator EXPECT_CALL(server_.api_.random_, random()).WillOnce(Return(262626262626)); - auto child = - parent_span->spawnChild(config, expected_->operation_name, server_.timeSource().systemTime()); + auto child = parent_span->spawnChild(config_, expected_->operation_name, + server_.timeSource().systemTime()); child->finishSpan(); } TEST_F(XRayTracerTest, UseExistingHeaderInformation) { + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); XRayHeader xray_header; xray_header.trace_id_ = "a"; xray_header.parent_id_ = "b"; - constexpr auto span_name = "my span"; - constexpr auto operation_name = "my operation"; + constexpr absl::string_view span_name = "my span"; Tracer tracer{span_name, "", @@ -363,7 +373,7 @@ TEST_F(XRayTracerTest, UseExistingHeaderInformation) { std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(operation_name, server_.timeSource().systemTime(), xray_header); + auto span = tracer.startSpan(config_, "ingress", server_.timeSource().systemTime(), xray_header); const XRay::Span* xray_span = static_cast(span.get()); EXPECT_STREQ(xray_header.trace_id_.c_str(), xray_span->traceId().c_str()); @@ -371,13 +381,13 @@ TEST_F(XRayTracerTest, UseExistingHeaderInformation) { } TEST_F(XRayTracerTest, DontStartSpanOnNonSampledSpans) { + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); XRayHeader xray_header; xray_header.trace_id_ = "a"; xray_header.parent_id_ = "b"; xray_header.sample_decision_ = SamplingDecision::NotSampled; // not sampled means we should panic on calling startSpan - constexpr auto span_name = "my span"; - constexpr auto operation_name = "my operation"; + constexpr absl::string_view span_name = "my span"; Tracer tracer{span_name, "", @@ -386,18 +396,18 @@ TEST_F(XRayTracerTest, DontStartSpanOnNonSampledSpans) { server_.timeSource(), server_.api().randomGenerator()}; Tracing::SpanPtr span; - ASSERT_DEATH(span = - tracer.startSpan(operation_name, server_.timeSource().systemTime(), xray_header), - "panic: not reached"); + ASSERT_DEATH( + span = tracer.startSpan(config_, "ingress", server_.timeSource().systemTime(), xray_header), + "panic: not reached"); } TEST_F(XRayTracerTest, UnknownSpanStillSampled) { + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); XRayHeader xray_header; xray_header.trace_id_ = "a"; xray_header.parent_id_ = "b"; xray_header.sample_decision_ = SamplingDecision::Unknown; - constexpr auto span_name = "my span"; - constexpr auto operation_name = "my operation"; + constexpr absl::string_view span_name = "my span"; Tracer tracer{span_name, "", @@ -405,7 +415,7 @@ TEST_F(XRayTracerTest, UnknownSpanStillSampled) { std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(operation_name, server_.timeSource().systemTime(), xray_header); + auto span = tracer.startSpan(config_, "ingress", server_.timeSource().systemTime(), xray_header); const XRay::Span* xray_span = static_cast(span.get()); EXPECT_STREQ(xray_header.trace_id_.c_str(), xray_span->traceId().c_str()); @@ -416,8 +426,8 @@ TEST_F(XRayTracerTest, UnknownSpanStillSampled) { } TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeader) { - constexpr auto span_name = "my span"; - constexpr auto operation_name = "my operation"; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); + constexpr absl::string_view span_name = "my 
span"; Tracer tracer{span_name, "", @@ -425,7 +435,7 @@ TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeader) { std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(operation_name, server_.timeSource().systemTime(), + auto span = tracer.startSpan(config_, "ingress", server_.timeSource().systemTime(), absl::nullopt /*headers*/); Http::TestRequestHeaderMapImpl request_headers; span->injectContext(request_headers); @@ -437,7 +447,7 @@ TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeader) { } TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeaderNonSampled) { - constexpr auto span_name = "my span"; + constexpr absl::string_view span_name = "my span"; Tracer tracer{span_name, "", aws_metadata_, @@ -455,7 +465,7 @@ TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeaderNonSampled) { } TEST_F(XRayTracerTest, TraceIDFormatTest) { - constexpr auto span_name = "my span"; + constexpr absl::string_view span_name = "my span"; Tracer tracer{span_name, "", aws_metadata_, @@ -479,6 +489,8 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, XRayDaemonTest, TestUtility::ipTestParamsToString); TEST_P(XRayDaemonTest, VerifyUdpPacketContents) { + NiceMock config_; + ON_CALL(config_, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); absl::flat_hash_map aws_metadata; NiceMock server; Network::Test::UdpSyncPeer xray_fake_daemon(GetParam()); @@ -486,8 +498,8 @@ TEST_P(XRayDaemonTest, VerifyUdpPacketContents) { Tracer tracer{"my_segment", "origin", aws_metadata, std::make_unique(daemon_endpoint), server.timeSource(), server.api().randomGenerator()}; - auto span = tracer.startSpan("ingress" /*operation name*/, server.timeSource().systemTime(), - absl::nullopt /*headers*/); + auto span = tracer.startSpan(config_, "ingress" /*operation name*/, + server.timeSource().systemTime(), absl::nullopt /*headers*/); span->setTag(Tracing::Tags::get().HttpStatusCode, "202"); span->finishSpan(); diff --git a/test/extensions/tracers/xray/xray_tracer_impl_test.cc b/test/extensions/tracers/xray/xray_tracer_impl_test.cc index ba64f355aa97..c28faf5f8458 100644 --- a/test/extensions/tracers/xray/xray_tracer_impl_test.cc +++ b/test/extensions/tracers/xray/xray_tracer_impl_test.cc @@ -61,7 +61,7 @@ TEST_F(XRayDriverTest, XRayTraceHeaderSampled) { } TEST_F(XRayDriverTest, XRayTraceHeaderSamplingUnknown) { - request_headers_.addCopy(XRayTraceHeader, "Root=1-272793;Parent=5398ad8"); + request_headers_.addCopy(XRayTraceHeader, "Root=1-272793;Parent=5398ad8;Sampled="); XRayConfiguration config{"" /*daemon_endpoint*/, "test_segment_name", "" /*sampling_rules*/, "" /*origin*/, aws_metadata_}; diff --git a/test/extensions/transport_sockets/tcp_stats/BUILD b/test/extensions/transport_sockets/tcp_stats/BUILD new file mode 100644 index 000000000000..525b3d61ce8d --- /dev/null +++ b/test/extensions/transport_sockets/tcp_stats/BUILD @@ -0,0 +1,42 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "tcp_stats_test", + srcs = ["tcp_stats_test.cc"], + extension_names = ["envoy.transport_sockets.tcp_stats"], + deps = [ + "//source/extensions/transport_sockets/raw_buffer:config", + "//source/extensions/transport_sockets/tcp_stats:config", + "//source/extensions/transport_sockets/tcp_stats:tcp_stats_lib", + "//test/mocks/buffer:buffer_mocks", + "//test/mocks/network:io_handle_mocks", + 
"//test/mocks/network:network_mocks", + "//test/mocks/network:transport_socket_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", + "@envoy_api//envoy/extensions/transport_sockets/tcp_stats/v3:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "tcp_stats_integration_test", + srcs = ["tcp_stats_integration_test.cc"], + extension_names = ["envoy.transport_sockets.tcp_stats"], + deps = [ + "//source/extensions/filters/network/tcp_proxy:config", + "//source/extensions/transport_sockets/tcp_stats:config", + "//test/integration:http_integration_lib", + "//test/integration:integration_lib", + "@envoy_api//envoy/extensions/transport_sockets/tcp_stats/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/transport_sockets/tcp_stats/tcp_stats_integration_test.cc b/test/extensions/transport_sockets/tcp_stats/tcp_stats_integration_test.cc new file mode 100644 index 000000000000..2b8d37ee37e2 --- /dev/null +++ b/test/extensions/transport_sockets/tcp_stats/tcp_stats_integration_test.cc @@ -0,0 +1,125 @@ +#if defined(__linux__) +#include "envoy/extensions/transport_sockets/tcp_stats/v3/tcp_stats.pb.h" + +#include "test/integration/integration.h" + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace TcpStats { +namespace { +class TcpStatsSocketIntegrationTest : public testing::TestWithParam, + public BaseIntegrationTest { +public: + TcpStatsSocketIntegrationTest() + : BaseIntegrationTest(GetParam(), ConfigHelper::tcpProxyConfig()) {} + + void initialize() override { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + envoy::config::core::v3::TransportSocket inner_socket; + inner_socket.set_name("envoy.transport_sockets.raw_buffer"); + envoy::extensions::transport_sockets::tcp_stats::v3::Config proto_config; + proto_config.mutable_transport_socket()->MergeFrom(inner_socket); + + auto* cluster_transport_socket = + bootstrap.mutable_static_resources()->mutable_clusters(0)->mutable_transport_socket(); + cluster_transport_socket->set_name("envoy.transport_sockets.tcp_stats"); + cluster_transport_socket->mutable_typed_config()->PackFrom(proto_config); + + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + auto* listener_transport_socket = + listener->mutable_filter_chains(0)->mutable_transport_socket(); + listener_transport_socket->set_name("envoy.transport_sockets.tcp_stats"); + listener_transport_socket->mutable_typed_config()->PackFrom(proto_config); + + listener->set_stat_prefix("test"); + }); + BaseIntegrationTest::initialize(); + } + + FakeRawConnectionPtr fake_upstream_connection_; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, TcpStatsSocketIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +// Verify that: +// * Stats are in the correct scope/namespace. +// * The syscall to get the data is producing meaningful results. 
+TEST_P(TcpStatsSocketIntegrationTest, Basic) { + initialize(); + + auto begin = std::chrono::steady_clock::now(); // NO_CHECK_FORMAT(real_time) + + auto listener_port = lookupPort("listener_0"); + auto tcp_client = makeTcpConnection(listener_port); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_)); + + ASSERT_TRUE(tcp_client->write("data")); + ASSERT_TRUE(fake_upstream_connection_->waitForData(4)); + ASSERT_TRUE(fake_upstream_connection_->write("response")); + tcp_client->waitForData("response"); + + tcp_client->close(); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + + auto end = std::chrono::steady_clock::now(); // NO_CHECK_FORMAT(real_time) + + // Record the duration of the test to use as an upper bound on the round trip time measurement. + std::chrono::microseconds test_duration_us = + std::chrono::duration_cast(end - begin); + + // Validate that these stats exist (in the correct namespace), and Wait for values to be available + // before validating values and ranges. Gauges/counters and histograms go through slightly + // different paths, so check each to avoid test flakes. + test_server_->waitUntilHistogramHasSamples("cluster.cluster_0.tcp_stats.cx_rtt_us"); + test_server_->waitForCounterGe("cluster.cluster_0.tcp_stats.cx_tx_segments", 1); + test_server_->waitUntilHistogramHasSamples("listener.test.tcp_stats.cx_rtt_us"); + test_server_->waitForCounterGe("listener.test.tcp_stats.cx_tx_segments", 1); + + auto validateCounterRange = [this](const std::string& name, uint64_t lower, uint64_t upper) { + auto counter = test_server_->counter(absl::StrCat("cluster.cluster_0.tcp_stats.", name)); + EXPECT_GE(counter->value(), lower); + EXPECT_LE(counter->value(), upper); + }; + auto validateGaugeRange = [this](const std::string& name, int64_t lower, int64_t upper) { + auto counter = test_server_->gauge(absl::StrCat("cluster.cluster_0.tcp_stats.", name)); + EXPECT_GE(counter->value(), lower); + EXPECT_LE(counter->value(), upper); + }; + auto validateHistogramRange = [this](const std::string& name, int64_t lower, int64_t upper) { + auto histogram = test_server_->histogram(absl::StrCat("cluster.cluster_0.tcp_stats.", name)); + auto& summary = histogram->cumulativeStatistics(); + + // With only 1 sample, the `sampleSum()` is the one value that has been recorded. + EXPECT_EQ(1, summary.sampleCount()); + EXPECT_GE(summary.sampleSum(), lower); + EXPECT_LE(summary.sampleSum(), upper); + }; + + // These values are intentionally very loose to avoid test flakes. They're just trying to verify + // that the values are at least in the approximate range of what we expect, and in the units we + // expect. + validateCounterRange("cx_tx_segments", 2, 20); + validateCounterRange("cx_rx_segments", 2, 20); + validateCounterRange("cx_tx_data_segments", 1, 10); + validateCounterRange("cx_rx_data_segments", 1, 10); + validateCounterRange("cx_tx_retransmitted_segments", 0, 10); + + // After the connection is closed, there should be no unsent or unacked data. 
+ validateGaugeRange("cx_tx_unsent_bytes", 0, 0); + validateGaugeRange("cx_tx_unacked_segments", 0, 0); + + validateHistogramRange("cx_tx_percent_retransmitted_segments", 0, + Stats::Histogram::PercentScale); // 0-100% + validateHistogramRange("cx_rtt_us", 1, test_duration_us.count()); + validateHistogramRange("cx_rtt_variance_us", 1, test_duration_us.count()); +} + +} // namespace +} // namespace TcpStats +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy +#endif diff --git a/test/extensions/transport_sockets/tcp_stats/tcp_stats_test.cc b/test/extensions/transport_sockets/tcp_stats/tcp_stats_test.cc new file mode 100644 index 000000000000..15836373be8c --- /dev/null +++ b/test/extensions/transport_sockets/tcp_stats/tcp_stats_test.cc @@ -0,0 +1,329 @@ +#if defined(__linux__) +#define DO_NOT_INCLUDE_NETINET_TCP_H 1 + +#include + +#include "envoy/extensions/transport_sockets/tcp_stats/v3/tcp_stats.pb.h" + +#include "source/extensions/transport_sockets/tcp_stats/config.h" +#include "source/extensions/transport_sockets/tcp_stats/tcp_stats.h" + +#include "test/mocks/network/io_handle.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/network/transport_socket.h" +#include "test/mocks/server/transport_socket_factory_context.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::AtLeast; +using testing::Return; +using testing::ReturnNull; + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace TcpStats { +namespace { + +class TcpStatsTest : public testing::Test { +public: + void initialize(bool enable_periodic) { + envoy::extensions::transport_sockets::tcp_stats::v3::Config proto_config; + if (enable_periodic) { + proto_config.mutable_update_period()->MergeFrom( + ProtobufUtil::TimeUtil::MillisecondsToDuration(1000)); + } + config_ = std::make_shared(proto_config, store_); + ON_CALL(transport_callbacks_, ioHandle()).WillByDefault(ReturnRef(io_handle_)); + ON_CALL(io_handle_, getOption(IPPROTO_TCP, TCP_INFO, _, _)) + .WillByDefault(Invoke([this](int, int, void* optval, socklen_t* optlen) { + ASSERT(*optlen == sizeof(tcp_info_)); + memcpy(optval, &tcp_info_, sizeof(tcp_info_)); + return Api::SysCallIntResult{0, 0}; + })); + createTcpStatsSocket(enable_periodic, timer_, inner_socket_, tcp_stats_socket_); + } + + void createTcpStatsSocket(bool enable_periodic, NiceMock*& timer, + NiceMock*& inner_socket_out, + std::unique_ptr& tcp_stats_socket) { + if (enable_periodic) { + timer = new NiceMock(&transport_callbacks_.connection_.dispatcher_); + EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(1000), _)).Times(AtLeast(1)); + } + auto inner_socket = std::make_unique>(); + inner_socket_out = inner_socket.get(); + tcp_stats_socket = std::make_unique(config_, std::move(inner_socket)); + tcp_stats_socket->setTransportSocketCallbacks(transport_callbacks_); + tcp_stats_socket->onConnected(); + } + + uint64_t counterValue(absl::string_view name) { + auto opt_ref = store_.findCounterByString(absl::StrCat("tcp_stats.", name)); + ASSERT(opt_ref.has_value()); + return opt_ref.value().get().value(); + } + + int64_t gaugeValue(absl::string_view name) { + auto opt_ref = store_.findGaugeByString(absl::StrCat("tcp_stats.", name)); + ASSERT(opt_ref.has_value()); + return opt_ref.value().get().value(); + } + + absl::optional histogramValue(absl::string_view name) { + std::vector values = store_.histogramValues(absl::StrCat("tcp_stats.", name), true); + ASSERT(values.size() <= 1, + absl::StrCat(name, " 
didn't have <=1 value, instead had ", values.size())); + if (values.empty()) { + return absl::nullopt; + } else { + return values[0]; + } + } + + Stats::TestUtil::TestStore store_; + NiceMock* inner_socket_; + NiceMock io_handle_; + std::shared_ptr config_; + std::unique_ptr tcp_stats_socket_; + NiceMock transport_callbacks_; + NiceMock* timer_; + struct tcp_info tcp_info_; +}; + +// Validate that the configured update_period is honored, and that stats are updated when the timer +// fires. +TEST_F(TcpStatsTest, Periodic) { + initialize(true); + + EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(1000), _)); + tcp_info_.tcpi_notsent_bytes = 42; + timer_->callback_(); + EXPECT_EQ(42, gaugeValue("cx_tx_unsent_bytes")); + + EXPECT_CALL(*timer_, disableTimer()); + tcp_stats_socket_->closeSocket(Network::ConnectionEvent::RemoteClose); +} + +// Validate that stats are updated when the connection is closed. Gauges should be set to zero, +// and counters should be appropriately updated. +TEST_F(TcpStatsTest, CloseSocket) { + initialize(false); + + tcp_info_.tcpi_segs_out = 42; + tcp_info_.tcpi_notsent_bytes = 1; + tcp_info_.tcpi_unacked = 2; + EXPECT_CALL(*inner_socket_, closeSocket(Network::ConnectionEvent::RemoteClose)); + tcp_stats_socket_->closeSocket(Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(42, counterValue("cx_tx_segments")); + EXPECT_EQ(0, gaugeValue("cx_tx_unsent_bytes")); + EXPECT_EQ(0, gaugeValue("cx_tx_unacked_segments")); +} + +TEST_F(TcpStatsTest, SyscallFailureShortRead) { + initialize(true); + tcp_info_.tcpi_notsent_bytes = 42; + EXPECT_CALL(io_handle_, getOption(IPPROTO_TCP, TCP_INFO, _, _)) + .WillOnce(Invoke([this](int, int, void* optval, socklen_t* optlen) { + *optlen = *optlen - 1; + memcpy(optval, &tcp_info_, sizeof(*optlen)); + return Api::SysCallIntResult{0, 0}; + })); + EXPECT_LOG_CONTAINS( + "debug", + fmt::format("Failed getsockopt(IPPROTO_TCP, TCP_INFO): rc 0 errno 0 optlen {}", + sizeof(tcp_info_) - 1), + timer_->callback_()); + + // Not updated on failed syscall. + EXPECT_EQ(0, gaugeValue("cx_tx_unsent_bytes")); +} + +TEST_F(TcpStatsTest, SyscallFailureReturnCode) { + initialize(true); + tcp_info_.tcpi_notsent_bytes = 42; + EXPECT_CALL(io_handle_, getOption(IPPROTO_TCP, TCP_INFO, _, _)) + .WillOnce(Return(Api::SysCallIntResult{-1, 42})); + EXPECT_LOG_CONTAINS( + "debug", + fmt::format("Failed getsockopt(IPPROTO_TCP, TCP_INFO): rc -1 errno 42 optlen {}", + sizeof(tcp_info_)), + timer_->callback_()); + + // Not updated on failed syscall. + EXPECT_EQ(0, gaugeValue("cx_tx_unsent_bytes")); +} + +// Validate that the emitted values are correct, that delta updates from a counter move the value by +// the delta (not the entire value), and that multiple sockets interact correctly (stats are +// summed). +TEST_F(TcpStatsTest, Values) { + initialize(true); + + NiceMock* timer2; + NiceMock* inner_socket2; + std::unique_ptr tcp_stats_socket2; + createTcpStatsSocket(true, timer2, inner_socket2, tcp_stats_socket2); + + // After the first call, stats should be set to exactly these values. 
+ tcp_info_.tcpi_total_retrans = 1; + tcp_info_.tcpi_segs_out = 2; + tcp_info_.tcpi_segs_in = 3; + tcp_info_.tcpi_data_segs_out = 4; + tcp_info_.tcpi_data_segs_in = 5; + tcp_info_.tcpi_notsent_bytes = 6; + tcp_info_.tcpi_unacked = 7; + tcp_info_.tcpi_rtt = 8; + tcp_info_.tcpi_rttvar = 9; + timer_->callback_(); + EXPECT_EQ(1, counterValue("cx_tx_retransmitted_segments")); + EXPECT_EQ(2, counterValue("cx_tx_segments")); + EXPECT_EQ(3, counterValue("cx_rx_segments")); + EXPECT_EQ(4, counterValue("cx_tx_data_segments")); + EXPECT_EQ(5, counterValue("cx_rx_data_segments")); + EXPECT_EQ(6, gaugeValue("cx_tx_unsent_bytes")); + EXPECT_EQ(7, gaugeValue("cx_tx_unacked_segments")); + EXPECT_EQ(8U, histogramValue("cx_rtt_us")); + EXPECT_EQ(9U, histogramValue("cx_rtt_variance_us")); + EXPECT_EQ((1U * Stats::Histogram::PercentScale) / 4U, + histogramValue("cx_tx_percent_retransmitted_segments")); + + // Trigger the timer again with unchanged values. The metrics should be unchanged (but the + // histograms should have emitted the value again). + timer_->callback_(); + EXPECT_EQ(1, counterValue("cx_tx_retransmitted_segments")); + EXPECT_EQ(2, counterValue("cx_tx_segments")); + EXPECT_EQ(3, counterValue("cx_rx_segments")); + EXPECT_EQ(4, counterValue("cx_tx_data_segments")); + EXPECT_EQ(5, counterValue("cx_rx_data_segments")); + EXPECT_EQ(6, gaugeValue("cx_tx_unsent_bytes")); + EXPECT_EQ(7, gaugeValue("cx_tx_unacked_segments")); + EXPECT_EQ(8U, histogramValue("cx_rtt_us")); + EXPECT_EQ(9U, histogramValue("cx_rtt_variance_us")); + // No more packets were transmitted (numerator and denominator deltas are zero), so no value + // should be emitted. + EXPECT_EQ(absl::nullopt, histogramValue("cx_tx_percent_retransmitted_segments")); + + // Set stats on 2nd socket. Values should be combined. + tcp_info_.tcpi_total_retrans = 1; + tcp_info_.tcpi_segs_out = 1; + tcp_info_.tcpi_segs_in = 1; + tcp_info_.tcpi_data_segs_out = 1; + tcp_info_.tcpi_data_segs_in = 1; + tcp_info_.tcpi_notsent_bytes = 1; + tcp_info_.tcpi_unacked = 1; + tcp_info_.tcpi_rtt = 1; + tcp_info_.tcpi_rttvar = 1; + timer2->callback_(); + EXPECT_EQ(2, counterValue("cx_tx_retransmitted_segments")); + EXPECT_EQ(3, counterValue("cx_tx_segments")); + EXPECT_EQ(4, counterValue("cx_rx_segments")); + EXPECT_EQ(5, counterValue("cx_tx_data_segments")); + EXPECT_EQ(6, counterValue("cx_rx_data_segments")); + EXPECT_EQ(7, gaugeValue("cx_tx_unsent_bytes")); + EXPECT_EQ(8, gaugeValue("cx_tx_unacked_segments")); + EXPECT_EQ(1U, histogramValue("cx_rtt_us")); + EXPECT_EQ(1U, histogramValue("cx_rtt_variance_us")); + EXPECT_EQ(Stats::Histogram::PercentScale /* 100% */, + histogramValue("cx_tx_percent_retransmitted_segments")); + + // Update the first socket again. 
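The arithmetic these expectations encode: counters advance by the delta between consecutive TCP_INFO reads, gauges track the latest absolute value, and the retransmit percentage is the retransmit delta over the data-segments-sent delta, scaled to Stats::Histogram::PercentScale, with no sample emitted when nothing was sent. A hedged model of just the percentage part, consistent with the (1 * PercentScale) / 4, 100%, and absl::nullopt expectations in this test (not the actual implementation):

#include <cstdint>
#include "absl/types/optional.h"

struct TcpInfoSample {
  uint64_t total_retrans{0};
  uint64_t data_segs_out{0};
};

absl::optional<uint64_t> percentRetransmitted(const TcpInfoSample& prev, const TcpInfoSample& cur,
                                              uint64_t percent_scale) {
  const uint64_t sent_delta = cur.data_segs_out - prev.data_segs_out;
  if (sent_delta == 0) {
    return absl::nullopt; // Nothing sent since the last update: emit no histogram sample.
  }
  const uint64_t retrans_delta = cur.total_retrans - prev.total_retrans;
  return (retrans_delta * percent_scale) / sent_delta;
}

// {0,0} -> {1,4} yields (1 * percent_scale) / 4; an unchanged read yields nullopt;
// {1,4} -> {2,5} yields percent_scale, i.e. 100%.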
+ tcp_info_.tcpi_total_retrans = 2; + tcp_info_.tcpi_segs_out = 3; + tcp_info_.tcpi_segs_in = 4; + tcp_info_.tcpi_data_segs_out = 5; + tcp_info_.tcpi_data_segs_in = 6; + tcp_info_.tcpi_notsent_bytes = 7; + tcp_info_.tcpi_unacked = 8; + tcp_info_.tcpi_rtt = 9; + tcp_info_.tcpi_rttvar = 10; + timer_->callback_(); + EXPECT_EQ(3, counterValue("cx_tx_retransmitted_segments")); + EXPECT_EQ(4, counterValue("cx_tx_segments")); + EXPECT_EQ(5, counterValue("cx_rx_segments")); + EXPECT_EQ(6, counterValue("cx_tx_data_segments")); + EXPECT_EQ(7, counterValue("cx_rx_data_segments")); + EXPECT_EQ(8, gaugeValue("cx_tx_unsent_bytes")); + EXPECT_EQ(9, gaugeValue("cx_tx_unacked_segments")); + EXPECT_EQ(9U, histogramValue("cx_rtt_us")); + EXPECT_EQ(10U, histogramValue("cx_rtt_variance_us")); + // Delta of 1 on numerator and denominator. + EXPECT_EQ(Stats::Histogram::PercentScale /* 100% */, + histogramValue("cx_tx_percent_retransmitted_segments")); +} + +class TcpStatsSocketFactoryTest : public testing::Test { +public: + void initialize() { + envoy::extensions::transport_sockets::tcp_stats::v3::Config proto_config; + auto inner_factory = std::make_unique>(); + inner_factory_ = inner_factory.get(); + factory_ = + std::make_unique(context_, proto_config, std::move(inner_factory)); + } + + NiceMock context_; + NiceMock* inner_factory_; + std::unique_ptr factory_; +}; + +// Test createTransportSocket returns nullptr if inner call returns nullptr +TEST_F(TcpStatsSocketFactoryTest, CreateSocketReturnsNullWhenInnerFactoryReturnsNull) { + initialize(); + EXPECT_CALL(*inner_factory_, createTransportSocket(_)).WillOnce(ReturnNull()); + EXPECT_EQ(nullptr, factory_->createTransportSocket(nullptr)); +} + +// Test implementsSecureTransport calls inner factory +TEST_F(TcpStatsSocketFactoryTest, ImplementsSecureTransportCallInnerFactory) { + initialize(); + EXPECT_CALL(*inner_factory_, implementsSecureTransport()).WillOnce(Return(true)); + EXPECT_TRUE(factory_->implementsSecureTransport()); + + EXPECT_CALL(*inner_factory_, implementsSecureTransport()).WillOnce(Return(false)); + EXPECT_FALSE(factory_->implementsSecureTransport()); +} + +} // namespace +} // namespace TcpStats +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy + +#else // #if defined(__linux__) + +#include "envoy/extensions/transport_sockets/tcp_stats/v3/tcp_stats.pb.h" + +#include "test/mocks/server/transport_socket_factory_context.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace TcpStats { + +TEST(TcpStatsTest, ConfigErrorOnUnsupportedPlatform) { + envoy::extensions::transport_sockets::tcp_stats::v3::Config proto_config; + proto_config.mutable_transport_socket()->set_name("envoy.transport_sockets.raw_buffer"); + NiceMock context; + + envoy::config::core::v3::TransportSocket transport_socket_config; + transport_socket_config.set_name("envoy.transport_sockets.tcp_stats"); + transport_socket_config.mutable_typed_config()->PackFrom(proto_config); + auto& config_factory = Config::Utility::getAndCheckFactory< + Server::Configuration::DownstreamTransportSocketConfigFactory>(transport_socket_config); + EXPECT_THROW_WITH_MESSAGE(config_factory.createTransportSocketFactory(proto_config, context, {}), + EnvoyException, + "envoy.transport_sockets.tcp_stats is not supported on this platform."); +} + +} // namespace TcpStats +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy + +#endif // #if defined(__linux__) diff 
--git a/test/extensions/transport_sockets/tls/cert_validator/default_validator_test.cc b/test/extensions/transport_sockets/tls/cert_validator/default_validator_test.cc index 68925f05930d..036c3679fa1f 100644 --- a/test/extensions/transport_sockets/tls/cert_validator/default_validator_test.cc +++ b/test/extensions/transport_sockets/tls/cert_validator/default_validator_test.cc @@ -16,20 +16,6 @@ namespace Extensions { namespace TransportSockets { namespace Tls { -TEST(DefaultCertValidatorTest, TestDnsNameMatching) { - EXPECT_TRUE(DefaultCertValidator::dnsNameMatch("lyft.com", "lyft.com")); - EXPECT_TRUE(DefaultCertValidator::dnsNameMatch("a.lyft.com", "*.lyft.com")); - EXPECT_TRUE(DefaultCertValidator::dnsNameMatch("a.LYFT.com", "*.lyft.COM")); - EXPECT_FALSE(DefaultCertValidator::dnsNameMatch("a.b.lyft.com", "*.lyft.com")); - EXPECT_FALSE(DefaultCertValidator::dnsNameMatch("foo.test.com", "*.lyft.com")); - EXPECT_FALSE(DefaultCertValidator::dnsNameMatch("lyft.com", "*.lyft.com")); - EXPECT_FALSE(DefaultCertValidator::dnsNameMatch("alyft.com", "*.lyft.com")); - EXPECT_FALSE(DefaultCertValidator::dnsNameMatch("alyft.com", "*lyft.com")); - EXPECT_FALSE(DefaultCertValidator::dnsNameMatch("lyft.com", "*lyft.com")); - EXPECT_FALSE(DefaultCertValidator::dnsNameMatch("", "*lyft.com")); - EXPECT_FALSE(DefaultCertValidator::dnsNameMatch("lyft.com", "")); -} - TEST(DefaultCertValidatorTest, TestVerifySubjectAltNameDNSMatched) { bssl::UniquePtr cert = readCertFromFile(TestEnvironment::substitute( "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem")); diff --git a/test/extensions/transport_sockets/tls/handshaker_factory_test.cc b/test/extensions/transport_sockets/tls/handshaker_factory_test.cc index 23903a2b73fa..0c0907c3e056 100644 --- a/test/extensions/transport_sockets/tls/handshaker_factory_test.cc +++ b/test/extensions/transport_sockets/tls/handshaker_factory_test.cc @@ -26,6 +26,11 @@ namespace TransportSockets { namespace Tls { namespace { +using ::testing::MockFunction; +using ::testing::Ref; +using ::testing::Return; +using ::testing::WithArg; + // Test-only custom process object which accepts an `SslCtxCb` for in-test SSL_CTX // manipulation. class CustomProcessObjectForTest : public ProcessObject { @@ -48,7 +53,29 @@ class CustomProcessObjectForTest : public ProcessObject { // case, using a process context to modify the SSL_CTX. class HandshakerFactoryImplForTest : public Extensions::TransportSockets::Tls::HandshakerFactoryImpl { - std::string name() const override { return "envoy.testonly_handshaker"; } +public: + using CreateHandshakerHook = + std::function; + + static constexpr char kFactoryName[] = "envoy.testonly_handshaker"; + + std::string name() const override { return kFactoryName; } + + Ssl::HandshakerFactoryCb + createHandshakerCb(const Protobuf::Message& message, Ssl::HandshakerFactoryContext& context, + ProtobufMessage::ValidationVisitor& validation_visitor) override { + if (handshaker_cb_) { + handshaker_cb_(message, context, validation_visitor); + } + + // The default HandshakerImpl doesn't take a config or use the HandshakerFactoryContext. 
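+  // The callback returned below matches the default behavior: it constructs a plain
+  // HandshakerImpl for every new connection. The optional handshaker_cb_ hook above only lets a
+  // test observe the factory arguments; it does not change which handshaker gets created.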
+ return [](bssl::UniquePtr ssl, int ssl_extended_socket_info_index, + Ssl::HandshakeCallbacks* handshake_callbacks) { + return std::make_shared(std::move(ssl), ssl_extended_socket_info_index, + handshake_callbacks); + }; + } Ssl::SslCtxCb sslctxCb(Ssl::HandshakerFactoryContext& handshaker_factory_context) const override { // Get process object, cast to custom process object, and return custom @@ -56,6 +83,8 @@ class HandshakerFactoryImplForTest return CustomProcessObjectForTest::get(handshaker_factory_context.api().processContext()) ->getSslCtxCb(); } + + CreateHandshakerHook handshaker_cb_; }; class HandshakerFactoryTest : public testing::Test { @@ -65,9 +94,9 @@ class HandshakerFactoryTest : public testing::Test { std::make_unique(time_system_)), registered_factory_(handshaker_factory_) { // UpstreamTlsContext proto expects to use the newly-registered handshaker. - envoy::config::core::v3::TypedExtensionConfig* custom_handshaker_ = + envoy::config::core::v3::TypedExtensionConfig* custom_handshaker = tls_context_.mutable_common_tls_context()->mutable_custom_handshaker(); - custom_handshaker_->set_name("envoy.testonly_handshaker"); + custom_handshaker->set_name(HandshakerFactoryImplForTest::kFactoryName); } // Helper for downcasting a socket to a test socket so we can examine its @@ -87,7 +116,7 @@ class HandshakerFactoryTest : public testing::Test { }; TEST_F(HandshakerFactoryTest, SetMockFunctionCb) { - testing::MockFunction cb; + MockFunction cb; EXPECT_CALL(cb, Call); CustomProcessObjectForTest custom_process_object_for_test(cb.AsStdFunction()); @@ -96,8 +125,7 @@ TEST_F(HandshakerFactoryTest, SetMockFunctionCb) { NiceMock mock_factory_ctx; EXPECT_CALL(mock_factory_ctx.api_, processContext()) - .WillRepeatedly( - testing::Return(std::reference_wrapper(*process_context_impl))); + .WillRepeatedly(Return(std::reference_wrapper(*process_context_impl))); Extensions::TransportSockets::Tls::ClientSslSocketFactory socket_factory( /*config=*/ @@ -122,8 +150,7 @@ TEST_F(HandshakerFactoryTest, SetSpecificSslCtxOption) { NiceMock mock_factory_ctx; EXPECT_CALL(mock_factory_ctx.api_, processContext()) - .WillRepeatedly( - testing::Return(std::reference_wrapper(*process_context_impl))); + .WillRepeatedly(Return(std::reference_wrapper(*process_context_impl))); Extensions::TransportSockets::Tls::ClientSslSocketFactory socket_factory( /*config=*/ @@ -140,6 +167,36 @@ TEST_F(HandshakerFactoryTest, SetSpecificSslCtxOption) { EXPECT_TRUE(SSL_CTX_get_options(ssl_ctx) & SSL_OP_NO_TLSv1); } +TEST_F(HandshakerFactoryTest, HandshakerContextProvidesObjectsFromParentContext) { + CustomProcessObjectForTest custom_process_object_for_test( + /*cb=*/[](SSL_CTX* ssl_ctx) { SSL_CTX_set_options(ssl_ctx, SSL_OP_NO_TLSv1); }); + auto process_context_impl = std::make_unique( + static_cast(custom_process_object_for_test)); + + NiceMock mock_factory_ctx; + EXPECT_CALL(mock_factory_ctx.api_, processContext()) + .WillRepeatedly(Return(std::reference_wrapper(*process_context_impl))); + + MockFunction mock_factory_cb; + handshaker_factory_.handshaker_cb_ = mock_factory_cb.AsStdFunction(); + + EXPECT_CALL(mock_factory_cb, Call) + .WillOnce(WithArg<1>([&](Ssl::HandshakerFactoryContext& context) { + // Check that the objects available via the context are the same ones + // provided to the parent context. 
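+        // Ref() only matches a reference to the exact same object, so these expectations check
+        // object identity rather than value equality.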
+ EXPECT_THAT(context.api(), Ref(mock_factory_ctx.api_)); + EXPECT_THAT(context.options(), Ref(mock_factory_ctx.options_)); + })); + + Extensions::TransportSockets::Tls::ClientSslSocketFactory socket_factory( + /*config=*/ + std::make_unique( + tls_context_, "", mock_factory_ctx), + *context_manager_, stats_store_); + + std::unique_ptr socket = socket_factory.createTransportSocket(nullptr); +} + } // namespace } // namespace Tls } // namespace TransportSockets diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index 24b2e1422c7e..24b1952640a5 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -272,6 +272,13 @@ class TestUtilOptions : public TestUtilOptionsBase { } const std::string& notExpectedClientStats() const { return not_expected_client_stats_; } + TestUtilOptions& setExpectedVerifyErrorCode(int code) { + expected_verify_error_code_ = code; + return *this; + } + + int expectedVerifyErrorCode() const { return expected_verify_error_code_; } + private: const std::string client_ctx_yaml_; const std::string server_ctx_yaml_; @@ -295,6 +302,7 @@ class TestUtilOptions : public TestUtilOptionsBase { bool ocsp_stapling_enabled_{false}; std::string expected_transport_failure_reason_contains_; std::string not_expected_client_stats_; + int expected_verify_error_code_{-1}; }; void testUtil(const TestUtilOptions& options) { @@ -495,7 +503,12 @@ void testUtil(const TestUtilOptions& options) { .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { close_second_time(); })); } - dispatcher->run(Event::Dispatcher::RunType::Block); + if (options.expectedVerifyErrorCode() != -1) { + EXPECT_LOG_CONTAINS("debug", X509_verify_cert_error_string(options.expectedVerifyErrorCode()), + dispatcher->run(Event::Dispatcher::RunType::Block)); + } else { + dispatcher->run(Event::Dispatcher::RunType::Block); + } if (!options.expectedServerStats().empty()) { EXPECT_EQ(1UL, server_stats_store.counter(options.expectedServerStats()).value()); @@ -658,6 +671,7 @@ void testUtilV2(const TestUtilOptionsV2& options) { ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, server_stats_store, server_names); + EXPECT_FALSE(server_ssl_socket_factory.usesProxyProtocolOptions()); Event::DispatcherPtr dispatcher(server_api->allocateDispatcher("test_thread")); auto socket = std::make_shared( @@ -702,9 +716,11 @@ void testUtilV2(const TestUtilOptionsV2& options) { ? 
options.transportSocketOptions()->serverNameOverride().value() : options.clientCtxProto().sni(); socket->setRequestedServerName(sni); + Network::TransportSocketPtr transport_socket = + server_ssl_socket_factory.createTransportSocket(nullptr); + EXPECT_FALSE(transport_socket->startSecureTransport()); server_connection = dispatcher->createServerConnection( - std::move(socket), server_ssl_socket_factory.createTransportSocket(nullptr), - stream_info); + std::move(socket), std::move(transport_socket), stream_info); server_connection->addConnectionCallbacks(server_connection_callbacks); })); @@ -1498,7 +1514,8 @@ TEST_P(SslSocketTest, FailedClientAuthCaVerification) { )EOF"; TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam()); - testUtil(test_options.setExpectedServerStats("ssl.fail_verify_error")); + testUtil(test_options.setExpectedServerStats("ssl.fail_verify_error") + .setExpectedVerifyErrorCode(X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY)); } TEST_P(SslSocketTest, FailedClientAuthSanVerificationNoClientCert) { @@ -1897,7 +1914,8 @@ TEST_P(SslSocketTest, FailedClientCertificateHashVerificationWrongCA) { TEST_SAN_URI_CERT_256_HASH, "\""); TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam()); - testUtil(test_options.setExpectedServerStats("ssl.fail_verify_error")); + testUtil(test_options.setExpectedServerStats("ssl.fail_verify_error") + .setExpectedVerifyErrorCode(X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY)); } TEST_P(SslSocketTest, CertificatesWithPassword) { @@ -4352,7 +4370,8 @@ TEST_P(SslSocketTest, RevokedCertificate) { )EOF"; TestUtilOptions revoked_test_options(revoked_client_ctx_yaml, server_ctx_yaml, false, GetParam()); - testUtil(revoked_test_options.setExpectedServerStats("ssl.fail_verify_error")); + testUtil(revoked_test_options.setExpectedServerStats("ssl.fail_verify_error") + .setExpectedVerifyErrorCode(X509_V_ERR_CERT_REVOKED)); // This should succeed, since the cert isn't revoked. const std::string successful_client_ctx_yaml = R"EOF( @@ -4394,7 +4413,8 @@ TEST_P(SslSocketTest, RevokedCertificateCRLInTrustedCA) { )EOF"; TestUtilOptions revoked_test_options(revoked_client_ctx_yaml, server_ctx_yaml, false, GetParam()); - testUtil(revoked_test_options.setExpectedServerStats("ssl.fail_verify_error")); + testUtil(revoked_test_options.setExpectedServerStats("ssl.fail_verify_error") + .setExpectedVerifyErrorCode(X509_V_ERR_CERT_REVOKED)); // This should succeed, since the cert isn't revoked. const std::string successful_client_ctx_yaml = R"EOF( @@ -4483,17 +4503,20 @@ TEST_P(SslSocketTest, RevokedIntermediateCertificate) { // Ensure that incomplete crl chains fail with revoked certificates. TestUtilOptions incomplete_revoked_test_options(revoked_client_ctx_yaml, incomplete_server_ctx_yaml, false, GetParam()); - testUtil(incomplete_revoked_test_options.setExpectedServerStats("ssl.fail_verify_error")); + testUtil(incomplete_revoked_test_options.setExpectedServerStats("ssl.fail_verify_error") + .setExpectedVerifyErrorCode(X509_V_ERR_CERT_REVOKED)); // Ensure that incomplete crl chains fail with unrevoked certificates. 
TestUtilOptions incomplete_unrevoked_test_options(unrevoked_client_ctx_yaml, incomplete_server_ctx_yaml, false, GetParam()); - testUtil(incomplete_unrevoked_test_options.setExpectedServerStats("ssl.fail_verify_error")); + testUtil(incomplete_unrevoked_test_options.setExpectedServerStats("ssl.fail_verify_error") + .setExpectedVerifyErrorCode(X509_V_ERR_UNABLE_TO_GET_CRL)); // Ensure that complete crl chains fail with revoked certificates. TestUtilOptions complete_revoked_test_options(revoked_client_ctx_yaml, complete_server_ctx_yaml, false, GetParam()); - testUtil(complete_revoked_test_options.setExpectedServerStats("ssl.fail_verify_error")); + testUtil(complete_revoked_test_options.setExpectedServerStats("ssl.fail_verify_error") + .setExpectedVerifyErrorCode(X509_V_ERR_CERT_REVOKED)); // Ensure that complete crl chains succeed with unrevoked certificates. TestUtilOptions complete_unrevoked_test_options(unrevoked_client_ctx_yaml, @@ -4566,17 +4589,20 @@ TEST_P(SslSocketTest, RevokedIntermediateCertificateCRLInTrustedCA) { // Ensure that incomplete crl chains fail with revoked certificates. TestUtilOptions incomplete_revoked_test_options(revoked_client_ctx_yaml, incomplete_server_ctx_yaml, false, GetParam()); - testUtil(incomplete_revoked_test_options.setExpectedServerStats("ssl.fail_verify_error")); + testUtil(incomplete_revoked_test_options.setExpectedServerStats("ssl.fail_verify_error") + .setExpectedVerifyErrorCode(X509_V_ERR_CERT_REVOKED)); // Ensure that incomplete crl chains fail with unrevoked certificates. TestUtilOptions incomplete_unrevoked_test_options(unrevoked_client_ctx_yaml, incomplete_server_ctx_yaml, false, GetParam()); - testUtil(incomplete_unrevoked_test_options.setExpectedServerStats("ssl.fail_verify_error")); + testUtil(incomplete_unrevoked_test_options.setExpectedServerStats("ssl.fail_verify_error") + .setExpectedVerifyErrorCode(X509_V_ERR_UNABLE_TO_GET_CRL)); // Ensure that complete crl chains fail with revoked certificates. TestUtilOptions complete_revoked_test_options(revoked_client_ctx_yaml, complete_server_ctx_yaml, false, GetParam()); - testUtil(complete_revoked_test_options.setExpectedServerStats("ssl.fail_verify_error")); + testUtil(complete_revoked_test_options.setExpectedServerStats("ssl.fail_verify_error") + .setExpectedVerifyErrorCode(X509_V_ERR_CERT_REVOKED)); // Ensure that complete crl chains succeed with unrevoked certificates. 
TestUtilOptions complete_unrevoked_test_options(unrevoked_client_ctx_yaml, diff --git a/test/extensions/transport_sockets/tls/utility_test.cc b/test/extensions/transport_sockets/tls/utility_test.cc index 531599c01be5..22aef56c88ec 100644 --- a/test/extensions/transport_sockets/tls/utility_test.cc +++ b/test/extensions/transport_sockets/tls/utility_test.cc @@ -1,6 +1,7 @@ #include #include +#include "source/common/common/c_smart_ptr.h" #include "source/extensions/transport_sockets/tls/utility.h" #include "test/extensions/transport_sockets/tls/ssl_test_utility.h" @@ -21,6 +22,23 @@ namespace TransportSockets { namespace Tls { namespace { +using X509StoreContextPtr = CSmartPtr<X509_STORE_CTX, X509_STORE_CTX_free>; +using X509StorePtr = CSmartPtr<X509_STORE, X509_STORE_free>; + +TEST(UtilityTest, TestDnsNameMatching) { + EXPECT_TRUE(Utility::dnsNameMatch("lyft.com", "lyft.com")); + EXPECT_TRUE(Utility::dnsNameMatch("a.lyft.com", "*.lyft.com")); + EXPECT_TRUE(Utility::dnsNameMatch("a.LYFT.com", "*.lyft.COM")); + EXPECT_FALSE(Utility::dnsNameMatch("a.b.lyft.com", "*.lyft.com")); + EXPECT_FALSE(Utility::dnsNameMatch("foo.test.com", "*.lyft.com")); + EXPECT_FALSE(Utility::dnsNameMatch("lyft.com", "*.lyft.com")); + EXPECT_FALSE(Utility::dnsNameMatch("alyft.com", "*.lyft.com")); + EXPECT_FALSE(Utility::dnsNameMatch("alyft.com", "*lyft.com")); + EXPECT_FALSE(Utility::dnsNameMatch("lyft.com", "*lyft.com")); + EXPECT_FALSE(Utility::dnsNameMatch("", "*lyft.com")); + EXPECT_FALSE(Utility::dnsNameMatch("lyft.com", "")); +} + TEST(UtilityTest, TestGetSubjectAlternateNamesWithDNS) { bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute( "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem")); @@ -164,6 +182,18 @@ TEST(UtilityTest, SslErrorDescriptionTest) { "Unknown BoringSSL error had occurred"); } +TEST(UtilityTest, TestGetX509ErrorInfo) { + auto cert = readCertFromFile(TestEnvironment::substitute( + "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem")); + X509StoreContextPtr store_ctx = X509_STORE_CTX_new(); + X509StorePtr ssl_ctx = X509_STORE_new(); + EXPECT_TRUE(X509_STORE_CTX_init(store_ctx.get(), ssl_ctx.get(), cert.get(), nullptr)); + X509_STORE_CTX_set_error(store_ctx.get(), X509_V_ERR_UNSPECIFIED); + EXPECT_EQ(Utility::getX509VerificationErrorInfo(store_ctx.get()), + "X509_verify_cert: certificate verification error at depth 0: unknown certificate " + "verification error"); +} + } // namespace } // namespace Tls } // namespace TransportSockets diff --git a/test/extensions/upstreams/http/tcp/upstream_request_test.cc b/test/extensions/upstreams/http/tcp/upstream_request_test.cc index c5310d25fe1b..c03955f25d02 100644 --- a/test/extensions/upstreams/http/tcp/upstream_request_test.cc +++ b/test/extensions/upstreams/http/tcp/upstream_request_test.cc @@ -97,6 +97,7 @@ class TcpUpstreamTest : public ::testing::Test { .Times(AnyNumber()) .WillRepeatedly(Return(&request_)); EXPECT_CALL(mock_router_filter_, cluster()).Times(AnyNumber()); + EXPECT_CALL(mock_router_filter_, callbacks()).Times(AnyNumber()); mock_router_filter_.requests_.push_back(std::make_unique( mock_router_filter_, std::make_unique>())); auto data = std::make_unique>(); diff --git a/test/extensions/watchdog/profile_action/BUILD b/test/extensions/watchdog/profile_action/BUILD index 9a36ed413146..18b1f5f938ed 100644 --- a/test/extensions/watchdog/profile_action/BUILD +++ b/test/extensions/watchdog/profile_action/BUILD @@ -32,7 +32,7 @@ envoy_extension_cc_test( "//test/test_common:simulated_time_system_lib",
"//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/watchdog/profile_action/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/watchdog/profile_action/v3:pkg_cc_proto", ], ) @@ -48,6 +48,6 @@ envoy_extension_cc_test( "//test/common/stats:stat_test_utility_lib", "//test/mocks/event:event_mocks", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/watchdog/profile_action/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/watchdog/profile_action/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/watchdog/profile_action/config_test.cc b/test/extensions/watchdog/profile_action/config_test.cc index 157128a62af9..6da8f22ef483 100644 --- a/test/extensions/watchdog/profile_action/config_test.cc +++ b/test/extensions/watchdog/profile_action/config_test.cc @@ -1,4 +1,4 @@ -#include "envoy/extensions/watchdog/profile_action/v3alpha/profile_action.pb.h" +#include "envoy/extensions/watchdog/profile_action/v3/profile_action.pb.h" #include "envoy/registry/registry.h" #include "envoy/server/guarddog_config.h" @@ -30,8 +30,8 @@ TEST(ProfileActionFactoryTest, CanCreateAction) { "config": { "name": "envoy.watchdog.profile_action", "typed_config": { - "@type": "type.googleapis.com/udpa.type.v1.TypedStruct", - "type_url": "type.googleapis.com/envoy.extensions.watchdog.profile_action.v3alpha.ProfileActionConfig", + "@type": "type.googleapis.com/xds.type.v3.TypedStruct", + "type_url": "type.googleapis.com/envoy.extensions.watchdog.profile_action.v3.ProfileActionConfig", "value": { "profile_duration": "2s", "profile_path": "/tmp/envoy/", diff --git a/test/extensions/watchdog/profile_action/profile_action_test.cc b/test/extensions/watchdog/profile_action/profile_action_test.cc index d35bea645dc1..463958aab4a7 100644 --- a/test/extensions/watchdog/profile_action/profile_action_test.cc +++ b/test/extensions/watchdog/profile_action/profile_action_test.cc @@ -3,7 +3,7 @@ #include "envoy/common/time.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/event/dispatcher.h" -#include "envoy/extensions/watchdog/profile_action/v3alpha/profile_action.pb.h" +#include "envoy/extensions/watchdog/profile_action/v3/profile_action.pb.h" #include "envoy/filesystem/filesystem.h" #include "envoy/server/guarddog_config.h" #include "envoy/thread/thread.h" @@ -92,7 +92,7 @@ class ProfileActionTest : public testing::Test { TEST_F(ProfileActionTest, CanDoSingleProfile) { // Create configuration. - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config; + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig config; config.set_profile_path(test_path_); config.mutable_profile_duration()->set_seconds(1); @@ -132,7 +132,7 @@ TEST_F(ProfileActionTest, CanDoSingleProfile) { TEST_F(ProfileActionTest, CanDoMultipleProfiles) { // Create configuration. - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config; + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig config; config.set_profile_path(test_path_); config.mutable_profile_duration()->set_seconds(1); // Create the ProfileAction before we start running the dispatcher @@ -188,7 +188,7 @@ TEST_F(ProfileActionTest, CanDoMultipleProfiles) { TEST_F(ProfileActionTest, CannotTriggerConcurrentProfiles) { // Create configuration. 
- envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config; + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig config; TestUtility::loadFromJson(absl::Substitute(R"EOF({ "profile_path": "$0", })EOF", test_path_), config); // Create the ProfileAction before we start running the dispatcher @@ -229,7 +229,7 @@ TEST_F(ProfileActionTest, CannotTriggerConcurrentProfiles) { TEST_F(ProfileActionTest, ShouldNotProfileIfDirectoryDoesNotExist) { // Create configuration. - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config; + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig config; const std::string nonexistant_path = test_path_ + "/nonexistant_dir/"; TestUtility::loadFromJson( absl::Substitute(R"EOF({ "profile_path": "$0", })EOF", nonexistant_path), config); @@ -264,7 +264,7 @@ TEST_F(ProfileActionTest, ShouldNotProfileIfDirectoryDoesNotExist) { TEST_F(ProfileActionTest, ShouldNotProfileIfNoTids) { // Create configuration. - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config; + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig config; TestUtility::loadFromJson(absl::Substitute(R"EOF({ "profile_path": "$0"})EOF", test_path_), config); // Create the ProfileAction before we start running the dispatcher @@ -296,7 +296,7 @@ TEST_F(ProfileActionTest, ShouldNotProfileIfNoTids) { TEST_F(ProfileActionTest, ShouldSaturatedMaxProfiles) { // Create configuration that we'll run until it saturates. - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config; + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig config; config.set_profile_path(test_path_); config.mutable_profile_duration()->set_seconds(1); config.set_max_profiles(1); @@ -358,7 +358,7 @@ TEST_F(ProfileActionTest, ShouldSaturatedMaxProfiles) { // interfere with an existing profile the action is running. // The successfully captured profile should be updated only if we captured the profile. TEST_F(ProfileActionTest, ShouldUpdateCountersCorrectly) { - envoy::extensions::watchdog::profile_action::v3alpha::ProfileActionConfig config; + envoy::extensions::watchdog::profile_action::v3::ProfileActionConfig config; config.set_profile_path(test_path_); config.mutable_profile_duration()->set_seconds(1); diff --git a/test/fuzz/fuzz_runner.cc b/test/fuzz/fuzz_runner.cc index 618116a2f382..5a657476e168 100644 --- a/test/fuzz/fuzz_runner.cc +++ b/test/fuzz/fuzz_runner.cc @@ -31,6 +31,8 @@ void Runner::setupEnvironment(int argc, char** argv, spdlog::level::level_enum d // state. 
ProcessWide process_wide; TestEnvironment::initializeOptions(argc, argv); + static auto* test_thread = new Envoy::Thread::TestThread; + UNREFERENCED_PARAMETER(test_thread); const auto environment_log_level = TestEnvironment::getOptions().logLevel(); // We only override the default log level if it looks like we're debugging; diff --git a/test/fuzz/main.cc b/test/fuzz/main.cc index b339336620de..47784b8b71ba 100644 --- a/test/fuzz/main.cc +++ b/test/fuzz/main.cc @@ -14,6 +14,7 @@ #include "source/common/common/assert.h" #include "source/common/common/logger.h" +#include "source/common/common/thread.h" #include "test/fuzz/fuzz_runner.h" #include "test/test_common/environment.h" diff --git a/test/integration/BUILD b/test/integration/BUILD index 3c4709acfeea..7861e4f63a6c 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -34,7 +34,6 @@ envoy_cc_test_library( "//source/common/config:protobuf_link_hacks", "//source/common/protobuf:utility_lib", "//source/common/version:version_lib", - "//source/extensions/filters/network/redis_proxy:config", "//test/common/grpc:grpc_client_integration_lib", "//test/config:v2_link_hacks", "//test/test_common:network_utility_lib", @@ -148,6 +147,21 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "leds_integration_test", + srcs = ["leds_integration_test.cc"], + deps = [ + ":http_integration_lib", + "//test/config:utility_lib", + "//test/test_common:network_utility_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + "@envoy_api//envoy/type/v3:pkg_cc_proto", + ], +) + envoy_proto_library( name = "filter_manager_integration_proto", srcs = [":filter_manager_integration_test.proto"], @@ -230,7 +244,6 @@ envoy_cc_test( ], deps = [ ":http_protocol_integration_lib", - "//source/extensions/filters/http/health_check:config", "//test/test_common:utility_lib", ], ) @@ -355,13 +368,12 @@ envoy_cc_test( "multiplexed_integration_test.cc", "multiplexed_integration_test.h", ], - shard_count = 4, + shard_count = 8, deps = [ ":http_protocol_integration_lib", "//source/common/buffer:buffer_lib", "//source/common/http:header_map_lib", "//source/extensions/filters/http/buffer:config", - "//source/extensions/filters/http/health_check:config", "//test/integration/filters:metadata_stop_all_filter_config_lib", "//test/integration/filters:on_local_reply_filter_config_lib", "//test/integration/filters:request_metadata_filter_config_lib", @@ -464,8 +476,7 @@ envoy_cc_test( ], deps = [ ":http_integration_lib", - "//source/extensions/filters/http/health_check:config", - "@envoy_api//envoy/extensions/filters/http/health_check/v3:pkg_cc_proto", + "//test/integration/filters:set_response_code_filter_lib", ], ) @@ -492,7 +503,6 @@ envoy_cc_test_library( ":http_protocol_integration_lib", "//source/common/http:header_map_lib", "//source/extensions/filters/http/buffer:config", - "//source/extensions/filters/http/health_check:config", "//test/common/http/http2:http2_frame", "//test/integration/filters:continue_after_local_reply_filter_lib", "//test/integration/filters:continue_headers_only_inject_body", @@ -536,7 +546,6 @@ envoy_cc_test( "//source/common/http:header_map_lib", "//source/extensions/access_loggers/grpc:http_config", "//source/extensions/filters/http/buffer:config", - "//source/extensions/filters/http/health_check:config", "//test/integration/filters:encoder_decoder_buffer_filter_lib", 
"//test/integration/filters:random_pause_filter_lib", "//test/test_common:utility_lib", @@ -557,7 +566,6 @@ envoy_cc_test( "//source/common/stats:histogram_lib", "//source/common/stats:stats_matcher_lib", "//source/extensions/filters/http/buffer:config", - "//source/extensions/filters/http/health_check:config", "//test/common/stats:stat_test_utility_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -750,7 +758,9 @@ envoy_cc_test_library( ":fake_upstream_lib", ":integration_tcp_client_lib", ":utility_lib", + "//source/common/common:thread_lib", "//source/common/config:api_version_lib", + "//source/extensions/network/dns_resolver/cares:config", "//source/extensions/transport_sockets/tls:context_config_lib", "//source/extensions/transport_sockets/tls:context_lib", "//source/extensions/transport_sockets/tls:ssl_socket_lib", @@ -769,7 +779,10 @@ envoy_cc_test_library( "@envoy_api//envoy/extensions/transport_sockets/quic/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", - ], + ] + select({ + "//bazel:apple": ["//source/extensions/network/dns_resolver/apple:config"], + "//conditions:default": [], + }), ) envoy_cc_test_library( @@ -942,7 +955,6 @@ envoy_cc_test( ":http_integration_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", - "//source/extensions/filters/http/health_check:config", "//test/integration/filters:clear_route_cache_filter_lib", "//test/integration/filters:encoder_decoder_buffer_filter_lib", "//test/integration/filters:invalid_header_filter_lib", @@ -1437,6 +1449,7 @@ envoy_cc_test( deps = [ ":http_integration_lib", ":http_protocol_integration_lib", + "//source/extensions/access_loggers/grpc:http_config", "//source/extensions/filters/listener/tls_inspector:config", "//source/extensions/filters/listener/tls_inspector:tls_inspector_lib", "//source/extensions/filters/network/tcp_proxy:config", @@ -1631,11 +1644,12 @@ envoy_cc_test( "//source/common/event:dispatcher_lib", "//source/common/network:connection_lib", "//source/common/network:utility_lib", - "//source/extensions/filters/http/health_check:config", "//source/extensions/filters/network/tcp_proxy:config", "//test/common/grpc:grpc_client_integration_lib", "//test/config:v2_link_hacks", "//test/integration/filters:address_restore_listener_filter_lib", + "//test/integration/filters:set_response_code_filter_config_proto_cc_proto", + "//test/integration/filters:set_response_code_filter_lib", "//test/test_common:network_utility_lib", "//test/test_common:resources_lib", "//test/test_common:utility_lib", @@ -1734,6 +1748,7 @@ envoy_cc_test( "//test/common/http/http2:http2_frame", "//test/config:v2_link_hacks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) @@ -1841,3 +1856,17 @@ envoy_cc_test( "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", ], ) + +envoy_cc_test( + name = "typed_metadata_integration_test", + srcs = [ + "typed_metadata_integration_test.cc", + ], + deps = [ + ":http_protocol_integration_lib", + "//source/common/protobuf", + "//test/integration/filters:listener_typed_metadata_filter_lib", + "//test/server:utility_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/integration/ads_integration.cc b/test/integration/ads_integration.cc index 53716bfa5e7d..f97ad753fe52 100644 --- a/test/integration/ads_integration.cc +++ 
b/test/integration/ads_integration.cc @@ -22,9 +22,16 @@ using testing::AssertionResult; namespace Envoy { AdsIntegrationTest::AdsIntegrationTest() - : HttpIntegrationTest(Http::CodecType::HTTP2, ipVersion(), - ConfigHelper::adsBootstrap( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { + : HttpIntegrationTest( + Http::CodecType::HTTP2, ipVersion(), + ConfigHelper::adsBootstrap((sotwOrDelta() == Grpc::SotwOrDelta::Sotw) || + (sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw) + ? "GRPC" + : "DELTA_GRPC")) { + if (sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw || + sotwOrDelta() == Grpc::SotwOrDelta::UnifiedDelta) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.unified_mux", "true"); + } use_lds_ = false; create_xds_upstream_ = true; tls_xds_upstream_ = true; @@ -60,6 +67,26 @@ AdsIntegrationTest::buildTlsClusterLoadAssignment(const std::string& name) { name, Network::Test::getLoopbackAddressString(ipVersion()), 8443); } +envoy::config::endpoint::v3::ClusterLoadAssignment +AdsIntegrationTest::buildClusterLoadAssignmentWithLeds(const std::string& name, + const std::string& collection_name) { + return ConfigHelper::buildClusterLoadAssignmentWithLeds(name, collection_name); +} + +envoy::service::discovery::v3::Resource +AdsIntegrationTest::buildLbEndpointResource(const std::string& lb_endpoint_resource_name, + const std::string& version) { + envoy::service::discovery::v3::Resource resource; + resource.set_name(lb_endpoint_resource_name); + resource.set_version(version); + + envoy::config::endpoint::v3::LbEndpoint lb_endpoint = + ConfigHelper::buildLbEndpoint(Network::Test::getLoopbackAddressString(ipVersion()), + fake_upstreams_[0]->localAddress()->ip()->port()); + resource.mutable_resource()->PackFrom(lb_endpoint); + return resource; +} + envoy::config::listener::v3::Listener AdsIntegrationTest::buildListener(const std::string& name, const std::string& route_config, const std::string& stat_prefix) { @@ -101,6 +128,8 @@ void AdsIntegrationTest::makeSingleRequest() { void AdsIntegrationTest::initialize() { initializeAds(false); } void AdsIntegrationTest::initializeAds(const bool rate_limiting) { + config_helper_.addRuntimeOverride("envoy.restart_features.explicit_wildcard_resource", + oldDssOrNewDss() == OldDssOrNewDss::Old ? "false" : "true"); config_helper_.addConfigModifier([this, &rate_limiting]( envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* ads_config = bootstrap.mutable_dynamic_resources()->mutable_ads_config(); diff --git a/test/integration/ads_integration.h b/test/integration/ads_integration.h index adc0a66ffe02..c56dc2cdc9e3 100644 --- a/test/integration/ads_integration.h +++ b/test/integration/ads_integration.h @@ -15,7 +15,34 @@ namespace Envoy { -class AdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public HttpIntegrationTest { +// Support parameterizing over old DSS vs new DSS. Can be dropped when old DSS goes away. +enum class OldDssOrNewDss { Old, New }; + +// Base class that supports parameterizing over old DSS vs new DSS. Can be replaced with +// Grpc::BaseGrpcClientIntegrationParamTest when old DSS is removed. +class AdsDeltaSotwIntegrationSubStateParamTest + : public Grpc::BaseGrpcClientIntegrationParamTest, + public testing::TestWithParam> { +public: + ~AdsDeltaSotwIntegrationSubStateParamTest() override = default; + static std::string protocolTestParamsToString( + const ::testing::TestParamInfo>& p) { + return fmt::format( + "{}_{}_{}_{}", std::get<0>(p.param) == Network::Address::IpVersion::v4 ? 
"IPv4" : "IPv6", + std::get<1>(p.param) == Grpc::ClientType::GoogleGrpc ? "GoogleGrpc" : "EnvoyGrpc", + std::get<2>(p.param) == Grpc::SotwOrDelta::Delta ? "Delta" : "StateOfTheWorld", + std::get<3>(p.param) == OldDssOrNewDss::Old ? "OldDSS" : "NewDSS"); + } + Network::Address::IpVersion ipVersion() const override { return std::get<0>(GetParam()); } + Grpc::ClientType clientType() const override { return std::get<1>(GetParam()); } + Grpc::SotwOrDelta sotwOrDelta() const { return std::get<2>(GetParam()); } + OldDssOrNewDss oldDssOrNewDss() const { return std::get<3>(GetParam()); } +}; + +class AdsIntegrationTest : public AdsDeltaSotwIntegrationSubStateParamTest, + public HttpIntegrationTest { public: AdsIntegrationTest(); @@ -34,6 +61,12 @@ class AdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht envoy::config::endpoint::v3::ClusterLoadAssignment buildTlsClusterLoadAssignment(const std::string& name); + envoy::config::endpoint::v3::ClusterLoadAssignment + buildClusterLoadAssignmentWithLeds(const std::string& name, const std::string& collection_name); + + envoy::service::discovery::v3::Resource + buildLbEndpointResource(const std::string& lb_endpoint_resource_name, const std::string& version); + envoy::config::listener::v3::Listener buildListener(const std::string& name, const std::string& route_config, const std::string& stat_prefix = "ads_test"); @@ -56,4 +89,12 @@ class AdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht envoy::admin::v3::RoutesConfigDump getRoutesConfigDump(); }; +// When old delta subscription state goes away, we could replace this macro back with +// DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS. +#define ADS_INTEGRATION_PARAMS \ + testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ + testing::ValuesIn(TestEnvironment::getsGrpcVersionsForTest()), \ + testing::Values(Grpc::SotwOrDelta::Sotw, Grpc::SotwOrDelta::Delta), \ + testing::Values(OldDssOrNewDss::Old, OldDssOrNewDss::New)) + } // namespace Envoy diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index f2d0edad3ca8..9013a1d74973 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -26,8 +26,8 @@ using testing::AssertionResult; namespace Envoy { -INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsIntegrationTest, - DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); +INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDeltaWildcard, AdsIntegrationTest, + ADS_INTEGRATION_PARAMS); // Validate basic config delivery and upgrade. TEST_P(AdsIntegrationTest, Basic) { @@ -487,7 +487,8 @@ TEST_P(AdsIntegrationTest, CdsEdsReplacementWarming) { {buildTlsCluster("cluster_0")}, {}, "2"); // Inconsistent SotW and delta behaviors for warming, see // https://github.com/envoyproxy/envoy/issues/11477#issuecomment-657855029. - if (sotw_or_delta_ != Grpc::SotwOrDelta::Delta) { + // TODO (dmitri-d) this should be remove when legacy mux implementations have been removed. + if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) { EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", {"cluster_0"}, {}, {})); } @@ -516,46 +517,6 @@ TEST_P(AdsIntegrationTest, DuplicateInitialClusters) { test_server_->waitForCounterGe("cluster_manager.cds.update_rejected", 1); } -// Validates that removing a redis cluster does not crash Envoy. -// Regression test for issue https://github.com/envoyproxy/envoy/issues/7990. 
-TEST_P(AdsIntegrationTest, RedisClusterRemoval) { - initialize(); - - // Send initial configuration with a redis cluster and a redis proxy listener. - EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, true)); - sendDiscoveryResponse( - Config::TypeUrl::get().Cluster, {buildRedisCluster("redis_cluster")}, - {buildRedisCluster("redis_cluster")}, {}, "1"); - - EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", - {"redis_cluster"}, {"redis_cluster"}, {})); - sendDiscoveryResponse( - Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment("redis_cluster")}, - {buildClusterLoadAssignment("redis_cluster")}, {}, "1"); - - EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); - EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, {}, {})); - sendDiscoveryResponse( - Config::TypeUrl::get().Listener, {buildRedisListener("listener_0", "redis_cluster")}, - {buildRedisListener("listener_0", "redis_cluster")}, {}, "1"); - - EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", - {"redis_cluster"}, {}, {})); - - EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "1", {}, {}, {})); - - // Validate that redis listener is successfully created. - test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); - - // Now send a CDS update, removing redis cluster added above. - sendDiscoveryResponse( - Config::TypeUrl::get().Cluster, {buildCluster("cluster_2")}, {buildCluster("cluster_2")}, - {"redis_cluster"}, "2"); - - // Validate that the cluster is removed successfully. - test_server_->waitForCounterGe("cluster_manager.cluster_removed", 1); -} - // Validate that the request with duplicate clusters in the subsequent requests (warming clusters) // is rejected. TEST_P(AdsIntegrationTest, DuplicateWarmingClusters) { @@ -674,7 +635,8 @@ TEST_P(AdsIntegrationTest, CdsPausedDuringWarming) { test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); // CDS is resumed and EDS response was acknowledged. - if (sotw_or_delta_ == Grpc::SotwOrDelta::Delta) { + // TODO (dmitri-d) remove the conditional when legacy mux implementations are removed. + if (sotw_or_delta_ != Grpc::SotwOrDelta::Sotw) { // Envoy will ACK both Cluster messages. Since they arrived while CDS was paused, they aren't // sent until CDS is unpaused. Since version 3 has already arrived by the time the version 2 // ACK goes out, they're both acknowledging version 3. @@ -756,7 +718,8 @@ TEST_P(AdsIntegrationTest, RemoveWarmingCluster) { test_server_->waitForGaugeEq("cluster_manager.active_clusters", 3); // CDS is resumed and EDS response was acknowledged. - if (sotw_or_delta_ == Grpc::SotwOrDelta::Delta) { + // TODO (dmitri-d) remove the conditional when legacy mux implementations are removed. + if (sotw_or_delta_ != Grpc::SotwOrDelta::Sotw) { // Envoy will ACK both Cluster messages. Since they arrived while CDS was paused, they aren't // sent until CDS is unpaused. Since version 3 has already arrived by the time the version 2 // ACK goes out, they're both acknowledging version 3. 
@@ -1044,13 +1007,20 @@ TEST_P(AdsIntegrationTest, RdsAfterLdsInvalidated) { test_server_->waitForCounterGe("listener_manager.listener_create_success", 2); } -class AdsFailIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, +class AdsFailIntegrationTest : public AdsDeltaSotwIntegrationSubStateParamTest, public HttpIntegrationTest { public: AdsFailIntegrationTest() - : HttpIntegrationTest(Http::CodecType::HTTP2, ipVersion(), - ConfigHelper::adsBootstrap( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { + : HttpIntegrationTest( + Http::CodecType::HTTP2, ipVersion(), + ConfigHelper::adsBootstrap((sotwOrDelta() == Grpc::SotwOrDelta::Sotw) || + (sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw) + ? "GRPC" + : "DELTA_GRPC")) { + if (sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw || + sotwOrDelta() == Grpc::SotwOrDelta::UnifiedDelta) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.unified_mux", "true"); + } create_xds_upstream_ = true; use_lds_ = false; sotw_or_delta_ = sotwOrDelta(); @@ -1059,6 +1029,8 @@ class AdsFailIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, void TearDown() override { cleanUpXdsConnection(); } void initialize() override { + config_helper_.addRuntimeOverride("envoy.restart_features.explicit_wildcard_resource", + oldDssOrNewDss() == OldDssOrNewDss::Old ? "false" : "true"); config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* grpc_service = bootstrap.mutable_dynamic_resources()->mutable_ads_config()->add_grpc_services(); @@ -1072,8 +1044,8 @@ class AdsFailIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, } }; -INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsFailIntegrationTest, - DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); +INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDeltaWildcard, AdsFailIntegrationTest, + ADS_INTEGRATION_PARAMS); // Validate that we don't crash on failed ADS stream. TEST_P(AdsFailIntegrationTest, ConnectDisconnect) { @@ -1084,13 +1056,20 @@ TEST_P(AdsFailIntegrationTest, ConnectDisconnect) { xds_stream_->finishGrpcStream(Grpc::Status::Internal); } -class AdsConfigIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, +class AdsConfigIntegrationTest : public AdsDeltaSotwIntegrationSubStateParamTest, public HttpIntegrationTest { public: AdsConfigIntegrationTest() - : HttpIntegrationTest(Http::CodecType::HTTP2, ipVersion(), - ConfigHelper::adsBootstrap( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { + : HttpIntegrationTest( + Http::CodecType::HTTP2, ipVersion(), + ConfigHelper::adsBootstrap((sotwOrDelta() == Grpc::SotwOrDelta::Sotw) || + (sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw) + ? "GRPC" + : "DELTA_GRPC")) { + if (sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw || + sotwOrDelta() == Grpc::SotwOrDelta::UnifiedDelta) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.unified_mux", "true"); + } create_xds_upstream_ = true; use_lds_ = false; sotw_or_delta_ = sotwOrDelta(); @@ -1099,6 +1078,8 @@ class AdsConfigIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, void TearDown() override { cleanUpXdsConnection(); } void initialize() override { + config_helper_.addRuntimeOverride("envoy.restart_features.explicit_wildcard_resource", + oldDssOrNewDss() == OldDssOrNewDss::Old ? 
"false" : "true"); config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* grpc_service = bootstrap.mutable_dynamic_resources()->mutable_ads_config()->add_grpc_services(); @@ -1121,8 +1102,8 @@ class AdsConfigIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, } }; -INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsConfigIntegrationTest, - DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); +INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDeltaWildcard, AdsConfigIntegrationTest, + ADS_INTEGRATION_PARAMS); // This is s regression validating that we don't crash on EDS static Cluster that uses ADS. TEST_P(AdsConfigIntegrationTest, EdsClusterWithAdsConfigSource) { @@ -1228,7 +1209,8 @@ TEST_P(AdsIntegrationTest, NodeMessage) { envoy::service::discovery::v3::DiscoveryRequest sotw_request; envoy::service::discovery::v3::DeltaDiscoveryRequest delta_request; const envoy::config::core::v3::Node* node = nullptr; - if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) { + if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw || + sotw_or_delta_ == Grpc::SotwOrDelta::UnifiedSotw) { EXPECT_TRUE(xds_stream_->waitForGrpcMessage(*dispatcher_, sotw_request)); EXPECT_TRUE(sotw_request.has_node()); node = &sotw_request.node(); @@ -1269,13 +1251,20 @@ TEST_P(AdsIntegrationTest, SetNodeAlways) { }; // Check if EDS cluster defined in file is loaded before ADS request and used as xDS server -class AdsClusterFromFileIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, +class AdsClusterFromFileIntegrationTest : public AdsDeltaSotwIntegrationSubStateParamTest, public HttpIntegrationTest { public: AdsClusterFromFileIntegrationTest() - : HttpIntegrationTest(Http::CodecType::HTTP2, ipVersion(), - ConfigHelper::adsBootstrap( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { + : HttpIntegrationTest( + Http::CodecType::HTTP2, ipVersion(), + ConfigHelper::adsBootstrap((sotwOrDelta() == Grpc::SotwOrDelta::Sotw) || + (sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw) + ? "GRPC" + : "DELTA_GRPC")) { + if (sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw || + sotwOrDelta() == Grpc::SotwOrDelta::UnifiedDelta) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.unified_mux", "true"); + } create_xds_upstream_ = true; use_lds_ = false; sotw_or_delta_ = sotwOrDelta(); @@ -1284,6 +1273,8 @@ class AdsClusterFromFileIntegrationTest : public Grpc::DeltaSotwIntegrationParam void TearDown() override { cleanUpXdsConnection(); } void initialize() override { + config_helper_.addRuntimeOverride("envoy.restart_features.explicit_wildcard_resource", + oldDssOrNewDss() == OldDssOrNewDss::Old ? "false" : "true"); config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* grpc_service = bootstrap.mutable_dynamic_resources()->mutable_ads_config()->add_grpc_services(); @@ -1335,8 +1326,8 @@ class AdsClusterFromFileIntegrationTest : public Grpc::DeltaSotwIntegrationParam } }; -INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsClusterFromFileIntegrationTest, - DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); +INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDeltaWildcard, AdsClusterFromFileIntegrationTest, + ADS_INTEGRATION_PARAMS); // Validate if ADS cluster defined as EDS will be loaded from file and connection with ADS cluster // will be established. 
@@ -1398,8 +1389,8 @@ class AdsIntegrationTestWithRtds : public AdsIntegrationTest { } }; -INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsIntegrationTestWithRtds, - DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); +INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDeltaWildcard, AdsIntegrationTestWithRtds, + ADS_INTEGRATION_PARAMS); TEST_P(AdsIntegrationTestWithRtds, Basic) { initialize(); @@ -1452,8 +1443,8 @@ class AdsIntegrationTestWithRtdsAndSecondaryClusters : public AdsIntegrationTest } }; -INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsIntegrationTestWithRtdsAndSecondaryClusters, - DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); +INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDeltaWildcard, + AdsIntegrationTestWithRtdsAndSecondaryClusters, ADS_INTEGRATION_PARAMS); TEST_P(AdsIntegrationTestWithRtdsAndSecondaryClusters, Basic) { initialize(); @@ -1537,19 +1528,20 @@ class XdsTpAdsIntegrationTest : public AdsIntegrationTest { lds_config->mutable_api_config_source()->set_transport_api_version( envoy::config::core::v3::V3); auto* ads_config = bootstrap.mutable_dynamic_resources()->mutable_ads_config(); - ads_config->set_set_node_on_first_message_only(true); + ads_config->set_set_node_on_first_message_only(false); }); AdsIntegrationTest::initialize(); } }; INSTANTIATE_TEST_SUITE_P( - IpVersionsClientTypeDelta, XdsTpAdsIntegrationTest, + IpVersionsClientTypeDeltaWildcard, XdsTpAdsIntegrationTest, testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), // There should be no variation across clients. testing::Values(Grpc::ClientType::EnvoyGrpc), // Only delta xDS is supported for XdsTp - testing::Values(Grpc::SotwOrDelta::Delta))); + testing::Values(Grpc::SotwOrDelta::Delta, Grpc::SotwOrDelta::UnifiedDelta), + testing::Values(OldDssOrNewDss::Old, OldDssOrNewDss::New))); TEST_P(XdsTpAdsIntegrationTest, Basic) { initialize(); @@ -1656,4 +1648,489 @@ TEST_P(XdsTpAdsIntegrationTest, Basic) { makeSingleRequest(); } +// Basic CDS/EDS/LEDS update that warms and makes active a single cluster. +TEST_P(XdsTpAdsIntegrationTest, BasicWithLeds) { + initialize(); + const auto cds_type_url = Config::getTypeUrl(); + const auto eds_type_url = + Config::getTypeUrl(); + const auto leds_type_url = Config::getTypeUrl(); + + // Receive CDS request, and send a cluster with EDS. + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, + {"xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {}, true)); + const std::string cluster_name = "xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "baz?xds.node.cluster=cluster_name&xds.node.id=node_name"; + auto cluster_resource = buildCluster(cluster_name); + const std::string endpoints_name = + "xdstp://test/envoy.config.endpoint.v3.ClusterLoadAssignment/foo-cluster/baz"; + cluster_resource.mutable_eds_cluster_config()->set_service_name(endpoints_name); + sendDiscoveryResponse(cds_type_url, {}, {cluster_resource}, + {}, "1"); + + // Receive EDS request, and send ClusterLoadAssignment with one locality, that uses LEDS. 
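+  // The locality references an xdstp:// glob collection (note the trailing "*"), so the
+  // endpoints themselves arrive as separate LbEndpoint resources over LEDS instead of being
+  // inlined in the ClusterLoadAssignment.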
+ const auto leds_resource_prefix = + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/"; + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {}, {endpoints_name}, {})); + sendDiscoveryResponse( + eds_type_url, {}, + {buildClusterLoadAssignmentWithLeds(endpoints_name, absl::StrCat(leds_resource_prefix, "*"))}, + {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); + + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + + // Receive LEDS request, and send 2 endpoints. + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, + {absl::StrCat(leds_resource_prefix, "*?xds.node.cluster=cluster_name&xds.node.id=node_name")}, + {})); + const auto endpoint1_name = absl::StrCat(leds_resource_prefix, "endpoint_0", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + const auto endpoint2_name = absl::StrCat(leds_resource_prefix, "endpoint_1", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + sendExplicitResourcesDeltaDiscoveryResponse( + Config::TypeUrl::get().LbEndpoint, + {buildLbEndpointResource(endpoint1_name, "2"), buildLbEndpointResource(endpoint2_name, "2")}, + {}); + + // Receive the EDS ack. + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {}, {}, {})); + + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2); + + // LDS/RDS xDS initialization (LDS via xdstp:// glob collection) + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, + {"xdstp://test/envoy.config.listener.v3.Listener/foo-listener/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {})); + + // Receive the LEDS ack. + EXPECT_TRUE(compareDiscoveryRequest(leds_type_url, "2", {}, {}, {})); +} + +// CDS/EDS/LEDS update that warms and makes active a single cluster. While +// waiting for LEDS a new EDS update arrives. +TEST_P(XdsTpAdsIntegrationTest, LedsClusterWarmingUpdatingEds) { + initialize(); + const auto cds_type_url = Config::getTypeUrl(); + const auto eds_type_url = + Config::getTypeUrl(); + const auto leds_type_url = Config::getTypeUrl(); + + // Receive CDS request, and send a cluster with EDS. + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, + {"xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {}, true)); + const std::string cluster_name = "xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "baz?xds.node.cluster=cluster_name&xds.node.id=node_name"; + auto cluster_resource = buildCluster(cluster_name); + const std::string endpoints_name = + "xdstp://test/envoy.config.endpoint.v3.ClusterLoadAssignment/foo-cluster/baz"; + cluster_resource.mutable_eds_cluster_config()->set_service_name(endpoints_name); + sendDiscoveryResponse(cds_type_url, {}, {cluster_resource}, + {}, "1"); + + // Receive EDS request, and send ClusterLoadAssignment with one locality, that uses LEDS. 
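+  // Two collection prefixes (foo-endpoints/bar-endpoints) are used so that the EDS update sent
+  // while the cluster is still warming can switch the locality to a different LEDS collection.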
+ const auto leds_resource_prefix_foo = + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/"; + const auto leds_resource_prefix_bar = + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/bar-endpoints/"; + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {}, {endpoints_name}, {})); + sendDiscoveryResponse( + eds_type_url, {}, + {buildClusterLoadAssignmentWithLeds(endpoints_name, + absl::StrCat(leds_resource_prefix_foo, "*"))}, + {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); + + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + + // Receive LEDS request, and send an updated EDS response. + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, + {absl::StrCat(leds_resource_prefix_foo, + "*?xds.node.cluster=cluster_name&xds.node.id=node_name")}, + {})); + sendDiscoveryResponse( + eds_type_url, {}, + {buildClusterLoadAssignmentWithLeds(endpoints_name, + absl::StrCat(leds_resource_prefix_bar, "*"))}, + {}, "2"); + // Receive the EDS ack. + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {}, {}, {})); + + // Send the old LEDS response, and ensure it is rejected. + const auto endpoint1_name_foo = + absl::StrCat(leds_resource_prefix_foo, "endpoint_0", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + const auto endpoint2_name_foo = + absl::StrCat(leds_resource_prefix_foo, "endpoint_1", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + sendExplicitResourcesDeltaDiscoveryResponse(Config::TypeUrl::get().LbEndpoint, + {buildLbEndpointResource(endpoint1_name_foo, "2"), + buildLbEndpointResource(endpoint2_name_foo, "2")}, + {}); + + // Receive the new LEDS request and EDS ack. + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, + {absl::StrCat(leds_resource_prefix_bar, + "*?xds.node.cluster=cluster_name&xds.node.id=node_name")}, + {absl::StrCat(leds_resource_prefix_foo, + "*?xds.node.cluster=cluster_name&xds.node.id=node_name")})); + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "2", {}, {}, {})); + + // Send the new LEDS response + const auto endpoint1_name_bar = + absl::StrCat(leds_resource_prefix_bar, "endpoint_0", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + const auto endpoint2_name_bar = + absl::StrCat(leds_resource_prefix_bar, "endpoint_1", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + sendExplicitResourcesDeltaDiscoveryResponse(Config::TypeUrl::get().LbEndpoint, + {buildLbEndpointResource(endpoint1_name_bar, "3"), + buildLbEndpointResource(endpoint2_name_bar, "3")}, + {}); + + // The cluster should be warmed up. + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2); + + // Receive the LEDS ack. + EXPECT_TRUE(compareDiscoveryRequest(leds_type_url, "3", {}, {}, {})); + + // LDS/RDS xDS initialization (LDS via xdstp:// glob collection) + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, + {"xdstp://test/envoy.config.listener.v3.Listener/foo-listener/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {})); + + // Receive the LEDS ack. + EXPECT_TRUE(compareDiscoveryRequest(leds_type_url, "2", {}, {}, {})); +} + +// CDS/EDS/LEDS update that warms and makes active a single cluster. While +// waiting for LEDS a new CDS update arrives. 
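+// The replacement cluster references a different ClusterLoadAssignment and LEDS collection, so
+// interest in the original collection should be dropped and the cluster should only become
+// active once endpoints for the new collection arrive.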
+TEST_P(XdsTpAdsIntegrationTest, LedsClusterWarmingUpdatingCds) { + initialize(); + const auto cds_type_url = Config::getTypeUrl(); + const auto eds_type_url = + Config::getTypeUrl(); + const auto leds_type_url = Config::getTypeUrl(); + + // Receive CDS request, and send a cluster with EDS. + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, + {"xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {}, true)); + const std::string cluster1_name = "xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "cluster1?xds.node.cluster=cluster_name&xds.node.id=node_name"; + auto cluster1_resource = buildCluster(cluster1_name); + const std::string endpoints1_name = + "xdstp://test/envoy.config.endpoint.v3.ClusterLoadAssignment/foo-cluster/cluster1"; + cluster1_resource.mutable_eds_cluster_config()->set_service_name(endpoints1_name); + sendDiscoveryResponse(cds_type_url, {}, {cluster1_resource}, + {}, "1"); + + // Receive EDS request, and send ClusterLoadAssignment with one locality, that uses LEDS. + const auto leds_resource_prefix1 = + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints1/"; + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {}, {endpoints1_name}, {})); + sendDiscoveryResponse( + eds_type_url, {}, + {buildClusterLoadAssignmentWithLeds(endpoints1_name, + absl::StrCat(leds_resource_prefix1, "*"))}, + {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); + + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + + // Receive LEDS request, and send an updated CDS response (removing previous + // cluster and adding a new one). + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, + {absl::StrCat(leds_resource_prefix1, + "*?xds.node.cluster=cluster_name&xds.node.id=node_name")}, + {})); + const std::string cluster2_name = "xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "cluster2?xds.node.cluster=cluster_name&xds.node.id=node_name"; + auto cluster2_resource = buildCluster(cluster2_name); + const std::string endpoints2_name = + "xdstp://test/envoy.config.endpoint.v3.ClusterLoadAssignment/foo-cluster/cluster2"; + cluster2_resource.mutable_eds_cluster_config()->set_service_name(endpoints2_name); + sendDiscoveryResponse(cds_type_url, {}, {cluster2_resource}, + {cluster1_name}, "2"); + + // Receive the EDS ack. + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {}, {}, {})); + + // Send the old LEDS response. + const auto endpoint1_name_cluster1 = absl::StrCat( + leds_resource_prefix1, "endpoint_0", "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + const auto endpoint2_name_cluster1 = absl::StrCat( + leds_resource_prefix1, "endpoint_1", "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + sendExplicitResourcesDeltaDiscoveryResponse( + Config::TypeUrl::get().LbEndpoint, + {buildLbEndpointResource(endpoint1_name_cluster1, "2"), + buildLbEndpointResource(endpoint2_name_cluster1, "2")}, + {}); + + // Receive EDS request, and send ClusterLoadAssignment with one locality, that uses LEDS. 
+ const auto leds_resource_prefix2 = + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints2/"; + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {}, {endpoints2_name}, {endpoints1_name})); + sendDiscoveryResponse( + eds_type_url, {}, + {buildClusterLoadAssignmentWithLeds(endpoints2_name, + absl::StrCat(leds_resource_prefix2, "*"))}, + {}, "2"); + + // The server should remove interest in the old LEDS. + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, {}, + {absl::StrCat(leds_resource_prefix1, + "*?xds.node.cluster=cluster_name&xds.node.id=node_name")})); + + // Receive CDS ack. + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "2", {}, {}, {})); + + // Receive the EDS ack. + EXPECT_TRUE(compareDiscoveryRequest(leds_type_url, "2", {}, {}, {})); + + // Receive the new LEDS request and EDS ack. + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, + {absl::StrCat(leds_resource_prefix2, + "*?xds.node.cluster=cluster_name&xds.node.id=node_name")}, + {})); + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "2", {}, {}, {})); + + // Send 2 endpoints using LEDS. + const auto endpoint1_name_cluster2 = absl::StrCat( + leds_resource_prefix2, "endpoint_0", "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + const auto endpoint2_name_cluster2 = absl::StrCat( + leds_resource_prefix2, "endpoint_1", "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + sendExplicitResourcesDeltaDiscoveryResponse( + Config::TypeUrl::get().LbEndpoint, + {buildLbEndpointResource(endpoint1_name_cluster2, "2"), + buildLbEndpointResource(endpoint2_name_cluster2, "2")}, + {}); + + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2); + + // LDS/RDS xDS initialization (LDS via xdstp:// glob collection) + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, + {"xdstp://test/envoy.config.listener.v3.Listener/foo-listener/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {})); + + // Receive the LEDS ack. + EXPECT_TRUE(compareDiscoveryRequest(leds_type_url, "2", {}, {}, {})); +} + +// Timeout on LEDS update activates the cluster. +TEST_P(XdsTpAdsIntegrationTest, LedsTimeout) { + initialize(); + const auto cds_type_url = Config::getTypeUrl(); + const auto eds_type_url = + Config::getTypeUrl(); + const auto leds_type_url = Config::getTypeUrl(); + + // Receive CDS request, and send a cluster with EDS. + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, + {"xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {}, true)); + const std::string cluster_name = "xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "baz?xds.node.cluster=cluster_name&xds.node.id=node_name"; + auto cluster_resource = buildCluster(cluster_name); + const std::string endpoints_name = + "xdstp://test/envoy.config.endpoint.v3.ClusterLoadAssignment/foo-cluster/baz"; + cluster_resource.mutable_eds_cluster_config()->set_service_name(endpoints_name); + sendDiscoveryResponse(cds_type_url, {}, {cluster_resource}, + {}, "1"); + + // Receive EDS request, and send ClusterLoadAssignment with one locality, that uses LEDS. 
+ EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {}, {endpoints_name}, {})); + const auto leds_resource_prefix = + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/"; + + auto cla_with_leds = + buildClusterLoadAssignmentWithLeds(endpoints_name, absl::StrCat(leds_resource_prefix, "*")); + // Set a short timeout for the initial fetch. + cla_with_leds.mutable_endpoints(0) + ->mutable_leds_cluster_locality_config() + ->mutable_leds_config() + ->mutable_initial_fetch_timeout() + ->set_nanos(100 * 1000 * 1000); + sendDiscoveryResponse( + eds_type_url, {}, {cla_with_leds}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); + // Receive LEDS request, and wait for the initial fetch timeout. + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, + {absl::StrCat(leds_resource_prefix, "*?xds.node.cluster=cluster_name&xds.node.id=node_name")}, + {})); + + // The cluster should be warming. Wait until initial fetch timeout. + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + + test_server_->waitForCounterEq( + "cluster.xdstp_//test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "baz?xds.node.cluster=cluster_name&xds.node.id=node_name.leds.init_fetch_timeout", + 1); + + // After timeout the cluster should be active, not warming. + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + test_server_->waitForGaugeEq("cluster_manager.active_clusters", 3); + + // Receive the EDS ack. + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {}, {}, {})); + + // LDS/RDS xDS initialization (LDS via xdstp:// glob collection) + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, + {"xdstp://test/envoy.config.listener.v3.Listener/foo-listener/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {})); +} + +// Modifying a cluster to alternate use of EDS with and without LEDS. +TEST_P(XdsTpAdsIntegrationTest, EdsAlternatingLedsUsage) { + initialize(); + const auto cds_type_url = Config::getTypeUrl(); + const auto eds_type_url = + Config::getTypeUrl(); + const auto leds_type_url = Config::getTypeUrl(); + + // Receive CDS request, and send a cluster with EDS. + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, + {"xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {}, true)); + const std::string cluster_name = "xdstp://test/envoy.config.cluster.v3.Cluster/foo-cluster/" + "baz?xds.node.cluster=cluster_name&xds.node.id=node_name"; + auto cluster_resource = buildCluster(cluster_name); + const std::string endpoints_name = + "xdstp://test/envoy.config.endpoint.v3.ClusterLoadAssignment/foo-cluster/baz"; + cluster_resource.mutable_eds_cluster_config()->set_service_name(endpoints_name); + sendDiscoveryResponse(cds_type_url, {}, {cluster_resource}, + {}, "1"); + + // Receive EDS request, and send ClusterLoadAssignment with one locality, + // that doesn't use LEDS. 
+ EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", {}, + {endpoints_name}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {}, + {buildClusterLoadAssignment(endpoints_name)}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "1", {}, {}, {})); + + // LDS/RDS xDS initialization (LDS via xdstp:// glob collection) + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, + {"xdstp://test/envoy.config.listener.v3.Listener/foo-listener/" + "*?xds.node.cluster=cluster_name&xds.node.id=node_name"}, + {})); + const std::string route_name_0 = + "xdstp://test/envoy.config.route.v3.RouteConfiguration/route_config_0"; + sendDiscoveryResponse( + Config::TypeUrl::get().Listener, {}, + {buildListener("xdstp://test/envoy.config.listener.v3.Listener/foo-listener/" + "bar?xds.node.cluster=cluster_name&xds.node.id=node_name", + route_name_0)}, + {}, "1"); + + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "", {}, + {route_name_0}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().RouteConfiguration, {}, {buildRouteConfig(route_name_0, cluster_name)}, + {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "1", {}, {}, {})); + + test_server_->waitForCounterEq("listener_manager.listener_create_success", 1); + makeSingleRequest(); + + // Send a new EDS update that uses LEDS. + const auto leds_resource_prefix = + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/"; + sendDiscoveryResponse( + eds_type_url, {}, + {buildClusterLoadAssignmentWithLeds(endpoints_name, absl::StrCat(leds_resource_prefix, "*"))}, + {}, "2"); + + // Receive LEDS request. + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, + {absl::StrCat(leds_resource_prefix, "*?xds.node.cluster=cluster_name&xds.node.id=node_name")}, + {})); + + // Make sure that traffic can still be sent to the endpoint (still using the + // EDS without LEDS). + makeSingleRequest(); + + // Receive the EDS ack. + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "2", {}, {}, {})); + + // Send LEDS response with 2 endpoints. + const auto endpoint1_name = absl::StrCat(leds_resource_prefix, "endpoint_0", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + const auto endpoint2_name = absl::StrCat(leds_resource_prefix, "endpoint_1", + "?xds.node.cluster=cluster_name&xds.node.id=node_name"); + sendExplicitResourcesDeltaDiscoveryResponse( + Config::TypeUrl::get().LbEndpoint, + {buildLbEndpointResource(endpoint1_name, "1"), buildLbEndpointResource(endpoint2_name, "1")}, + {}); + + // Receive the LEDS ack. + EXPECT_TRUE(compareDiscoveryRequest(leds_type_url, "1", {}, {}, {})); + + // Make sure that traffic can still be sent to the endpoint (now using the + // EDS with LEDS). + makeSingleRequest(); + + // Send a new EDS update that doesn't use LEDS. + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {}, + {buildClusterLoadAssignment(endpoints_name)}, {}, "3"); + + // The server should remove interest in the old LEDS. + EXPECT_TRUE(compareDiscoveryRequest( + leds_type_url, "", {}, {}, + {absl::StrCat(leds_resource_prefix, + "*?xds.node.cluster=cluster_name&xds.node.id=node_name")})); + + // Receive the EDS ack. 
+ EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "3", {}, {}, {})); + + // Remove the LEDS endpoints. + sendExplicitResourcesDeltaDiscoveryResponse(Config::TypeUrl::get().LbEndpoint, {}, + {endpoint1_name, endpoint2_name}); + + // Receive the LEDS ack. + EXPECT_TRUE(compareDiscoveryRequest(leds_type_url, "3", {}, {}, {})); + + // Make sure that traffic can still be sent to the endpoint (now using the + // EDS without LEDS). + makeSingleRequest(); +} + } // namespace Envoy diff --git a/test/integration/base_integration_test.cc b/test/integration/base_integration_test.cc index 3caf9f43b3ec..831beb77ac00 100644 --- a/test/integration/base_integration_test.cc +++ b/test/integration/base_integration_test.cc @@ -17,7 +17,6 @@ #include "source/common/common/assert.h" #include "source/common/common/fmt.h" -#include "source/common/common/thread.h" #include "source/common/config/api_version.h" #include "source/common/event/libevent.h" #include "source/common/network/utility.h" @@ -94,7 +93,6 @@ Network::ClientConnectionPtr BaseIntegrationTest::makeClientConnectionWithOption } void BaseIntegrationTest::initialize() { - Thread::MainThread::initTestThread(); RELEASE_ASSERT(!initialized_, ""); RELEASE_ASSERT(Event::Libevent::Global::initialized(), ""); initialized_ = true; @@ -488,13 +486,14 @@ AssertionResult BaseIntegrationTest::compareDiscoveryRequest( const std::vector& expected_resource_names_added, const std::vector& expected_resource_names_removed, bool expect_node, const Protobuf::int32 expected_error_code, const std::string& expected_error_substring) { - if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) { + if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw || + sotw_or_delta_ == Grpc::SotwOrDelta::UnifiedSotw) { return compareSotwDiscoveryRequest(expected_type_url, expected_version, expected_resource_names, expect_node, expected_error_code, expected_error_substring); } else { return compareDeltaDiscoveryRequest(expected_type_url, expected_resource_names_added, expected_resource_names_removed, expected_error_code, - expected_error_substring); + expected_error_substring, expect_node); } } @@ -574,16 +573,33 @@ AssertionResult BaseIntegrationTest::waitForPortAvailable(uint32_t port, return AssertionFailure() << "Timeout waiting for port availability"; } +envoy::service::discovery::v3::DeltaDiscoveryResponse +BaseIntegrationTest::createExplicitResourcesDeltaDiscoveryResponse( + const std::string& type_url, + const std::vector& added_or_updated, + const std::vector& removed) { + envoy::service::discovery::v3::DeltaDiscoveryResponse response; + response.set_system_version_info("system_version_info_this_is_a_test"); + response.set_type_url(type_url); + *response.mutable_resources() = {added_or_updated.begin(), added_or_updated.end()}; + *response.mutable_removed_resources() = {removed.begin(), removed.end()}; + static int next_nonce_counter = 0; + response.set_nonce(absl::StrCat("nonce", next_nonce_counter++)); + return response; +} + AssertionResult BaseIntegrationTest::compareDeltaDiscoveryRequest( const std::string& expected_type_url, const std::vector& expected_resource_subscriptions, const std::vector& expected_resource_unsubscriptions, FakeStreamPtr& xds_stream, - const Protobuf::int32 expected_error_code, const std::string& expected_error_substring) { + const Protobuf::int32 expected_error_code, const std::string& expected_error_substring, + bool expect_node) { envoy::service::discovery::v3::DeltaDiscoveryRequest request; VERIFY_ASSERTION(xds_stream->waitForGrpcMessage(*dispatcher_, request)); // 
Verify all we care about node. - if (!request.has_node() || request.node().id().empty() || request.node().cluster().empty()) { + if (expect_node && + (!request.has_node() || request.node().id().empty() || request.node().cluster().empty())) { return AssertionFailure() << "Weird node field"; } last_node_.CopyFrom(request.node()); diff --git a/test/integration/base_integration_test.h b/test/integration/base_integration_test.h index 6b7039c34da2..528b1c0cc2af 100644 --- a/test/integration/base_integration_test.h +++ b/test/integration/base_integration_test.h @@ -9,6 +9,7 @@ #include "envoy/server/process_context.h" #include "envoy/service/discovery/v3/discovery.pb.h" +#include "source/common/common/thread.h" #include "source/common/config/api_version.h" #include "source/extensions/transport_sockets/tls/context_manager_impl.h" @@ -144,7 +145,8 @@ class BaseIntegrationTest : protected Logger::Loggable { void sendDiscoveryResponse(const std::string& type_url, const std::vector& state_of_the_world, const std::vector& added_or_updated, const std::vector& removed, const std::string& version) { - if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) { + if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw || + sotw_or_delta_ == Grpc::SotwOrDelta::UnifiedSotw) { sendSotwDiscoveryResponse(type_url, state_of_the_world, version); } else { sendDeltaDiscoveryResponse(type_url, added_or_updated, removed, version); @@ -156,10 +158,10 @@ class BaseIntegrationTest : protected Logger::Loggable { const std::vector& expected_resource_subscriptions, const std::vector& expected_resource_unsubscriptions, const Protobuf::int32 expected_error_code = Grpc::Status::WellKnownGrpcStatus::Ok, - const std::string& expected_error_message = "") { + const std::string& expected_error_message = "", bool expect_node = true) { return compareDeltaDiscoveryRequest(expected_type_url, expected_resource_subscriptions, expected_resource_unsubscriptions, xds_stream_, - expected_error_code, expected_error_message); + expected_error_code, expected_error_message, expect_node); } AssertionResult compareDeltaDiscoveryRequest( @@ -167,7 +169,7 @@ class BaseIntegrationTest : protected Logger::Loggable { const std::vector& expected_resource_subscriptions, const std::vector& expected_resource_unsubscriptions, FakeStreamPtr& stream, const Protobuf::int32 expected_error_code = Grpc::Status::WellKnownGrpcStatus::Ok, - const std::string& expected_error_message = ""); + const std::string& expected_error_message = "", bool expect_node = true); AssertionResult compareSotwDiscoveryRequest( const std::string& expected_type_url, const std::string& expected_version, @@ -205,29 +207,41 @@ class BaseIntegrationTest : protected Logger::Loggable { stream->sendGrpcMessage(response); } + // Sends a DeltaDiscoveryResponse with a given list of added resources. + // Note that the resources are expected to be of the same type, and match type_url. 
+ void sendExplicitResourcesDeltaDiscoveryResponse( + const std::string& type_url, + const std::vector& added_or_updated, + const std::vector& removed) { + xds_stream_->sendGrpcMessage( + createExplicitResourcesDeltaDiscoveryResponse(type_url, added_or_updated, removed)); + } + + envoy::service::discovery::v3::DeltaDiscoveryResponse + createExplicitResourcesDeltaDiscoveryResponse( + const std::string& type_url, + const std::vector& added_or_updated, + const std::vector& removed); + template envoy::service::discovery::v3::DeltaDiscoveryResponse createDeltaDiscoveryResponse(const std::string& type_url, const std::vector& added_or_updated, const std::vector& removed, const std::string& version, const std::vector& aliases) { - envoy::service::discovery::v3::DeltaDiscoveryResponse response; - response.set_system_version_info("system_version_info_this_is_a_test"); - response.set_type_url(type_url); + std::vector resources; for (const auto& message : added_or_updated) { - auto* resource = response.add_resources(); + envoy::service::discovery::v3::Resource resource; ProtobufWkt::Any temp_any; temp_any.PackFrom(message); - resource->mutable_resource()->PackFrom(message); - resource->set_name(intResourceName(message)); - resource->set_version(version); + resource.mutable_resource()->PackFrom(message); + resource.set_name(intResourceName(message)); + resource.set_version(version); for (const auto& alias : aliases) { - resource->add_aliases(alias); + resource.add_aliases(alias); } + resources.emplace_back(resource); } - *response.mutable_removed_resources() = {removed.begin(), removed.end()}; - static int next_nonce_counter = 0; - response.set_nonce(absl::StrCat("nonce", next_nonce_counter++)); - return response; + return createExplicitResourcesDeltaDiscoveryResponse(type_url, resources, removed); } private: @@ -254,8 +268,10 @@ class BaseIntegrationTest : protected Logger::Loggable { * * @param port the port to connect to. * @param raw_http the data to send. - * @param response the response data will be sent here - * @param if the connection should be terminated once '\r\n\r\n' has been read. + * @param response the response data will be sent here. + * @param disconnect_after_headers_complete if the connection should be terminated once "\r\n\r\n" + * has been read. + * @param transport_socket the transport socket of the created client connection. **/ void sendRawHttpAndWaitForResponse(int port, const char* raw_http, std::string* response, bool disconnect_after_headers_complete = false, @@ -346,9 +362,14 @@ class BaseIntegrationTest : protected Logger::Loggable { void mergeOptions(envoy::config::core::v3::Http2ProtocolOptions& options) { upstream_config_.http2_options_.MergeFrom(options); } + void mergeOptions(envoy::config::listener::v3::QuicProtocolOptions& options) { + upstream_config_.quic_options_.MergeFrom(options); + } std::unique_ptr upstream_stats_store_; + Thread::TestThread test_thread_; + // Make sure the test server will be torn down after any fake client. // The test server owns the runtime, which is often accessed by client and // fake upstream codecs and must outlast them. 
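For orientation, the new sendExplicitResourcesDeltaDiscoveryResponse() helper above takes resources that are already wrapped in envoy.service.discovery.v3.Resource protos and fills in the system version and an auto-incrementing nonce itself. A minimal usage sketch, assuming the XdsTpAdsIntegrationTest fixture and its buildLbEndpointResource() helper used earlier in this diff:

    // Push two LbEndpoint resources under a LEDS collection and remove a third in one delta message.
    const std::string prefix = "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/";
    sendExplicitResourcesDeltaDiscoveryResponse(
        Config::TypeUrl::get().LbEndpoint,
        {buildLbEndpointResource(absl::StrCat(prefix, "endpoint_0"), "1"),
         buildLbEndpointResource(absl::StrCat(prefix, "endpoint_1"), "1")},
        {absl::StrCat(prefix, "endpoint_2")});

Unlike createDeltaDiscoveryResponse(), nothing is re-packed here, so the caller controls each resource's name and version directly.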
diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index bbffa27e9569..df05bec16f24 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -34,7 +34,14 @@ class CdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht CdsIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP2, ipVersion(), ConfigHelper::discoveredClustersBootstrap( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { + sotwOrDelta() == Grpc::SotwOrDelta::Sotw || + sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw + ? "GRPC" + : "DELTA_GRPC")) { + if (sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw || + sotwOrDelta() == Grpc::SotwOrDelta::UnifiedDelta) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.unified_mux", "true"); + } use_lds_ = false; sotw_or_delta_ = sotwOrDelta(); } @@ -106,7 +113,7 @@ class CdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht EXPECT_TRUE(xds_stream_->waitForHeadersComplete()); Envoy::Http::LowerCaseString path_string(":path"); std::string expected_method( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw + sotwOrDelta() == Grpc::SotwOrDelta::Sotw || sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw ? "/envoy.service.cluster.v3.ClusterDiscoveryService/StreamClusters" : "/envoy.service.cluster.v3.ClusterDiscoveryService/DeltaClusters"); EXPECT_EQ(xds_stream_->headers().get(path_string)[0]->value(), expected_method); @@ -261,6 +268,7 @@ TEST_P(CdsIntegrationTest, TwoClusters) { // resources it already has: the reconnected stream need not start with a state-of-the-world update. TEST_P(CdsIntegrationTest, VersionsRememberedAfterReconnect) { SKIP_IF_XDS_IS(Grpc::SotwOrDelta::Sotw); + SKIP_IF_XDS_IS(Grpc::SotwOrDelta::UnifiedSotw); // Calls our initialize(), which includes establishing a listener, route, and cluster. testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); diff --git a/test/integration/drain_close_integration_test.cc b/test/integration/drain_close_integration_test.cc index 65b84f28bf0e..f0abdb4dbcbd 100644 --- a/test/integration/drain_close_integration_test.cc +++ b/test/integration/drain_close_integration_test.cc @@ -5,13 +5,12 @@ namespace { using DrainCloseIntegrationTest = HttpProtocolIntegrationTest; -// Add a health check filter and verify correct behavior when draining. TEST_P(DrainCloseIntegrationTest, DrainCloseGradual) { + autonomous_upstream_ = true; // The probability of drain close increases over time. With a high timeout, // the probability will be very low, but the rapid retries prevent this from // increasing total test time. 
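+  // (With the default gradual drain strategy the drain-close probability ramps up as drain_time_
+  // elapses, so a 100 second window keeps early responses drain-free and the retries only add a
+  // handful of quick round trips.)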
drain_time_ = std::chrono::seconds(100); - config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); initialize(); absl::Notification drain_sequence_started; @@ -43,9 +42,9 @@ TEST_P(DrainCloseIntegrationTest, DrainCloseGradual) { } TEST_P(DrainCloseIntegrationTest, DrainCloseImmediate) { + autonomous_upstream_ = true; drain_strategy_ = Server::DrainStrategy::Immediate; drain_time_ = std::chrono::seconds(100); - config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); initialize(); absl::Notification drain_sequence_started; diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index 9ba6ef3333ac..2f33f3c3f519 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -510,7 +510,7 @@ FakeUpstream::FakeUpstream(Network::TransportSocketFactoryPtr&& transport_socket FakeUpstream::FakeUpstream(Network::TransportSocketFactoryPtr&& transport_socket_factory, Network::SocketPtr&& listen_socket, const FakeUpstreamConfig& config) : http_type_(config.upstream_protocol_), http2_options_(config.http2_options_), - http3_options_(config.http3_options_), + http3_options_(config.http3_options_), quic_options_(config.quic_options_), socket_(Network::SocketSharedPtr(listen_socket.release())), socket_factory_(std::make_unique(socket_)), api_(Api::createApiForTest(stats_store_)), time_system_(config.time_system_), @@ -729,9 +729,11 @@ testing::AssertionResult FakeUpstream::waitForUdpDatagram(Network::UdpRecvData& return AssertionSuccess(); } -void FakeUpstream::onRecvDatagram(Network::UdpRecvData& data) { +Network::FilterStatus FakeUpstream::onRecvDatagram(Network::UdpRecvData& data) { absl::MutexLock lock(&lock_); received_datagrams_.emplace_back(std::move(data)); + + return Network::FilterStatus::StopIteration; } AssertionResult FakeUpstream::runOnDispatcherThreadAndWait(std::function cb, diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index 54a5f56f531e..f667bcb2a225 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -212,7 +212,7 @@ class FakeStream : public Http::RequestDecoder, // Http::RequestDecoder void decodeHeaders(Http::RequestHeaderMapPtr&& headers, bool end_stream) override; void decodeTrailers(Http::RequestTrailerMapPtr&& trailers) override; - const StreamInfo::StreamInfo& streamInfo() const override { + StreamInfo::StreamInfo& streamInfo() override { RELEASE_ASSERT(false, "initialize if this is needed"); return *stream_info_; } @@ -579,6 +579,7 @@ struct FakeUpstreamConfig { absl::optional udp_fake_upstream_; envoy::config::core::v3::Http2ProtocolOptions http2_options_; envoy::config::core::v3::Http3ProtocolOptions http3_options_; + envoy::config::listener::v3::QuicProtocolOptions quic_options_; uint32_t max_request_headers_kb_ = Http::DEFAULT_MAX_REQUEST_HEADERS_KB; uint32_t max_request_headers_count_ = Http::DEFAULT_MAX_HEADERS_COUNT; envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction @@ -723,8 +724,12 @@ class FakeUpstream : Logger::Loggable, : UdpListenerReadFilter(callbacks), parent_(parent) {} // Network::UdpListenerReadFilter - void onData(Network::UdpRecvData& data) override { parent_.onRecvDatagram(data); } - void onReceiveError(Api::IoError::IoErrorCode) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + Network::FilterStatus onData(Network::UdpRecvData& data) override { + return parent_.onRecvDatagram(data); + } + Network::FilterStatus onReceiveError(Api::IoError::IoErrorCode) override { + 
NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } private: FakeUpstream& parent_; @@ -756,7 +761,9 @@ class FakeUpstream : Logger::Loggable, if (is_quic) { #if defined(ENVOY_ENABLE_QUIC) udp_listener_config_.listener_factory_ = std::make_unique( - envoy::config::listener::v3::QuicProtocolOptions(), 1, parent_.quic_stat_names_); + parent_.quic_options_, 1, parent_.quic_stat_names_); + // Initialize QUICHE flags. + quiche::FlagRegistry::getInstance(); #else ASSERT(false, "Running a test that requires QUIC without compiling QUIC"); #endif @@ -810,13 +817,14 @@ class FakeUpstream : Logger::Loggable, void threadRoutine(); SharedConnectionWrapper& consumeConnection() ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_); - void onRecvDatagram(Network::UdpRecvData& data); + Network::FilterStatus onRecvDatagram(Network::UdpRecvData& data); AssertionResult runOnDispatcherThreadAndWait(std::function cb, std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); const envoy::config::core::v3::Http2ProtocolOptions http2_options_; const envoy::config::core::v3::Http3ProtocolOptions http3_options_; + envoy::config::listener::v3::QuicProtocolOptions quic_options_; Network::SocketSharedPtr socket_; Network::ListenSocketFactoryPtr socket_factory_; ConditionalInitializer server_initialized_; diff --git a/test/integration/filters/BUILD b/test/integration/filters/BUILD index 79e08f2533a4..95d9735eadbd 100644 --- a/test/integration/filters/BUILD +++ b/test/integration/filters/BUILD @@ -618,3 +618,19 @@ envoy_cc_test_library( "@com_google_absl//absl/strings:str_format", ], ) + +envoy_cc_test_library( + name = "listener_typed_metadata_filter_lib", + srcs = [ + "listener_typed_metadata_filter.cc", + ], + deps = [ + "//envoy/http:filter_interface", + "//envoy/network:listener_interface", + "//envoy/registry", + "//source/common/protobuf", + "//source/extensions/filters/http/common:factory_base_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "//test/extensions/filters/http/common:empty_http_filter_config_lib", + ], +) diff --git a/test/integration/filters/address_restore_listener_filter.cc b/test/integration/filters/address_restore_listener_filter.cc index 84cd33d2c14e..ed1605924d8d 100644 --- a/test/integration/filters/address_restore_listener_filter.cc +++ b/test/integration/filters/address_restore_listener_filter.cc @@ -10,14 +10,22 @@ namespace Envoy { // The FakeOriginalDstListenerFilter restore desired local address without the dependency of OS. +// Ipv6 and Ipv4 addresses are restored to the corresponding loopback ip address and port 80. 
class FakeOriginalDstListenerFilter : public Network::ListenerFilter { public: // Network::ListenerFilter Network::FilterStatus onAccept(Network::ListenerFilterCallbacks& cb) override { FANCY_LOG(debug, "in FakeOriginalDstListenerFilter::onAccept"); Network::ConnectionSocket& socket = cb.socket(); - socket.connectionInfoProvider().restoreLocalAddress( - std::make_shared("127.0.0.2", 80)); + auto local_address = socket.connectionInfoProvider().localAddress(); + if (local_address != nullptr && + local_address->ip()->version() == Network::Address::IpVersion::v6) { + socket.connectionInfoProvider().restoreLocalAddress( + std::make_shared("::1", 80)); + } else { + socket.connectionInfoProvider().restoreLocalAddress( + std::make_shared("127.0.0.1", 80)); + } FANCY_LOG(debug, "current local socket address is {} restored = {}", socket.connectionInfoProvider().localAddress()->asString(), socket.connectionInfoProvider().localAddressRestored()); diff --git a/test/integration/filters/listener_typed_metadata_filter.cc b/test/integration/filters/listener_typed_metadata_filter.cc new file mode 100644 index 000000000000..d8744b0dd9b0 --- /dev/null +++ b/test/integration/filters/listener_typed_metadata_filter.cc @@ -0,0 +1,80 @@ +#include "envoy/http/filter.h" +#include "envoy/network/listener.h" +#include "envoy/registry/registry.h" + +#include "source/common/protobuf/protobuf.h" +#include "source/extensions/filters/http/common/pass_through_filter.h" + +#include "test/extensions/filters/http/common/empty_http_filter_config.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { + +constexpr absl::string_view kFilterName = "listener-typed-metadata-filter"; +constexpr absl::string_view kMetadataKey = "test.listener.typed.metadata"; +constexpr absl::string_view kExpectedMetadataValue = "hello world"; + +// A test filter that verifies the typed metadata attached to the listener is stored correctly. +class Baz : public Config::TypedMetadata::Object { +public: + std::string item_; +}; + +class BazTypedMetadataFactory : public Network::ListenerTypedMetadataFactory { +public: + std::string name() const override { return std::string(kMetadataKey); } + + std::unique_ptr + parse(const ProtobufWkt::Struct&) const override { + ADD_FAILURE() << "Filter should not parse struct-typed metadata."; + return nullptr; + } + std::unique_ptr + parse(const ProtobufWkt::Any& d) const override { + ProtobufWkt::StringValue v; + EXPECT_TRUE(d.UnpackTo(&v)); + auto object = std::make_unique(); + object->item_ = v.value(); + return object; + } +}; + +class ListenerTypedMetadataFilter : public Http::PassThroughFilter { +public: + ListenerTypedMetadataFilter() = default; + + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override { + decoder_callbacks_->sendLocalReply(Envoy::Http::Code::OK, "", nullptr, absl::nullopt, + "Successfully handled request."); + return Http::FilterHeadersStatus::Continue; + } +}; + +class ListenerTypedMetadataFilterFactory + : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig { +public: + ListenerTypedMetadataFilterFactory() : EmptyHttpFilterConfig(std::string(kFilterName)) {} + +private: + Http::FilterFactoryCb createFilter(const std::string&, + Server::Configuration::FactoryContext& context) override { + + // Main assertions to ensure the metadata from the listener was parsed correctly. 
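+    // The listener's typed metadata entry is expected to be a google.protobuf.StringValue holding
+    // kExpectedMetadataValue; BazTypedMetadataFactory::parse() above unpacks it into Baz::item_,
+    // which the lookup below reads back.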
+ const auto& typed_metadata = context.listenerTypedMetadata(); + const Baz* value = typed_metadata.get(std::string(kMetadataKey)); + EXPECT_NE(value, nullptr); + EXPECT_EQ(value->item_, kExpectedMetadataValue); + + return [](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter(std::make_shared()); + }; + } +}; + +REGISTER_FACTORY(BazTypedMetadataFactory, Network::ListenerTypedMetadataFactory); +REGISTER_FACTORY(ListenerTypedMetadataFilterFactory, + Server::Configuration::NamedHttpFilterConfigFactory); +} // namespace +} // namespace Envoy diff --git a/test/integration/health_check_integration_test.cc b/test/integration/health_check_integration_test.cc index dcd6efdee915..ed44be1f0595 100644 --- a/test/integration/health_check_integration_test.cc +++ b/test/integration/health_check_integration_test.cc @@ -1,6 +1,7 @@ #include #include "envoy/config/core/v3/health_check.pb.h" +#include "envoy/type/v3/range.pb.h" #include "test/common/grpc/grpc_client_integration.h" #include "test/common/http/http2/http2_frame.h" @@ -170,7 +171,8 @@ class HttpHealthCheckIntegrationTestBase // Adds a HTTP active health check specifier to the given cluster, and waits for the first health // check probe to be received. - void initHttpHealthCheck(uint32_t cluster_idx) { + void initHttpHealthCheck(uint32_t cluster_idx, int unhealthy_threshold = 1, + std::unique_ptr retriable_range = nullptr) { const envoy::type::v3::CodecClientType codec_client_type = (Http::CodecType::HTTP1 == upstream_protocol_) ? envoy::type::v3::CodecClientType::HTTP1 : envoy::type::v3::CodecClientType::HTTP2; @@ -179,6 +181,12 @@ class HttpHealthCheckIntegrationTestBase auto* health_check = addHealthCheck(cluster_data.cluster_); health_check->mutable_http_health_check()->set_path("/healthcheck"); health_check->mutable_http_health_check()->set_codec_client_type(codec_client_type); + health_check->mutable_unhealthy_threshold()->set_value(unhealthy_threshold); + if (retriable_range != nullptr) { + auto* range = health_check->mutable_http_health_check()->add_retriable_statuses(); + range->set_start(retriable_range->start()); + range->set_end(retriable_range->end()); + } // Introduce the cluster using compareDiscoveryRequest / sendDiscoveryResponse. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, true)); @@ -246,6 +254,117 @@ TEST_P(HttpHealthCheckIntegrationTest, SingleEndpointUnhealthyHttp) { EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.health_check.failure")->value()); } +// Tests that a retriable status response does not mark endpoint unhealthy until threshold is +// reached +TEST_P(HttpHealthCheckIntegrationTest, SingleEndpointUnhealthyThresholdHttp) { + const uint32_t cluster_idx = 0; + initialize(); + auto retriable_range = std::make_unique(); + retriable_range->set_start(400); + retriable_range->set_end(401); + initHttpHealthCheck(cluster_idx, 2, std::move(retriable_range)); + + // Responds with healthy status. 
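+  // (The first probe is answered with a 200 below; the retriable [400, 401) range and
+  // unhealthy_threshold of 2 configured above only come into play for the later probes.)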
+ clusters_[cluster_idx].host_stream_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + clusters_[cluster_idx].host_stream_->encodeData(0, true); + + // Wait for health check + test_server_->waitForCounterEq("cluster.cluster_1.health_check.attempt", 1); + test_server_->waitForCounterEq("cluster.cluster_1.health_check.success", 1); + EXPECT_EQ(0, test_server_->counter("cluster.cluster_1.health_check.failure")->value()); + test_server_->waitForGaugeEq("cluster.cluster_1.membership_healthy", 1); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_1.membership_total")->value()); + + // Wait until the next attempt is made. + test_server_->waitForCounterEq("cluster.cluster_1.health_check.attempt", 2); + + // Respond with retriable status + ASSERT_TRUE(clusters_[cluster_idx].host_fake_connection_->waitForNewStream( + *dispatcher_, clusters_[cluster_idx].host_stream_)); + ASSERT_TRUE(clusters_[cluster_idx].host_stream_->waitForEndStream(*dispatcher_)); + + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getPathValue(), "/healthcheck"); + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getMethodValue(), "GET"); + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getHostValue(), + clusters_[cluster_idx].name_); + clusters_[cluster_idx].host_stream_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "400"}}, false); + clusters_[cluster_idx].host_stream_->encodeData(0, true); + + // Wait for second health check + test_server_->waitForCounterEq("cluster.cluster_1.health_check.failure", 1); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.health_check.success")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_1.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_1.membership_healthy")->value()); + + // Wait until the next attempt is made. + test_server_->waitForCounterEq("cluster.cluster_1.health_check.attempt", 3); + + // Respond with retriable status a second time, matching unhealthy threshold + ASSERT_TRUE(clusters_[cluster_idx].host_fake_connection_->waitForNewStream( + *dispatcher_, clusters_[cluster_idx].host_stream_)); + ASSERT_TRUE(clusters_[cluster_idx].host_stream_->waitForEndStream(*dispatcher_)); + + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getPathValue(), "/healthcheck"); + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getMethodValue(), "GET"); + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getHostValue(), + clusters_[cluster_idx].name_); + clusters_[cluster_idx].host_stream_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "400"}}, false); + clusters_[cluster_idx].host_stream_->encodeData(0, true); + + // Wait for third health check + test_server_->waitForCounterEq("cluster.cluster_1.health_check.failure", 2); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.health_check.success")->value()); + test_server_->waitForGaugeEq("cluster.cluster_1.membership_healthy", 0); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_1.membership_total")->value()); + + // Wait until the next attempt is made. + test_server_->waitForCounterEq("cluster.cluster_1.health_check.attempt", 4); + + // Respond with healthy status again. 
+ ASSERT_TRUE(clusters_[cluster_idx].host_fake_connection_->waitForNewStream( + *dispatcher_, clusters_[cluster_idx].host_stream_)); + ASSERT_TRUE(clusters_[cluster_idx].host_stream_->waitForEndStream(*dispatcher_)); + + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getPathValue(), "/healthcheck"); + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getMethodValue(), "GET"); + EXPECT_EQ(clusters_[cluster_idx].host_stream_->headers().getHostValue(), + clusters_[cluster_idx].name_); + clusters_[cluster_idx].host_stream_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + clusters_[cluster_idx].host_stream_->encodeData(0, true); + + // Wait for fourth health check + test_server_->waitForCounterEq("cluster.cluster_1.health_check.success", 2); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_1.health_check.failure")->value()); + test_server_->waitForGaugeEq("cluster.cluster_1.membership_healthy", 1); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_1.membership_total")->value()); +} + +// Tests that expected statuses takes precedence over retriable statuses +TEST_P(HttpHealthCheckIntegrationTest, SingleEndpointExpectedAndRetriablePrecedence) { + const uint32_t cluster_idx = 0; + initialize(); + auto retriable_range = std::make_unique(); + retriable_range->set_start(200); + retriable_range->set_end(201); + initHttpHealthCheck(cluster_idx, 2, std::move(retriable_range)); + + // Responds with healthy status. + clusters_[cluster_idx].host_stream_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + clusters_[cluster_idx].host_stream_->encodeData(0, true); + + // Wait for health check + test_server_->waitForCounterEq("cluster.cluster_1.health_check.attempt", 1); + test_server_->waitForCounterEq("cluster.cluster_1.health_check.success", 1); + EXPECT_EQ(0, test_server_->counter("cluster.cluster_1.health_check.failure")->value()); + test_server_->waitForGaugeEq("cluster.cluster_1.membership_healthy", 1); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_1.membership_total")->value()); +} + // Verify that immediate health check fail causes cluster exclusion. TEST_P(HttpHealthCheckIntegrationTest, SingleEndpointImmediateHealthcheckFailHttp) { const uint32_t cluster_idx = 0; diff --git a/test/integration/http2_flood_integration_test.cc b/test/integration/http2_flood_integration_test.cc index a5c9d864fb7d..ddd9810a86b9 100644 --- a/test/integration/http2_flood_integration_test.cc +++ b/test/integration/http2_flood_integration_test.cc @@ -1499,23 +1499,7 @@ TEST_P(Http2FloodMitigationTest, UpstreamFloodDetectionIsOnByDefault) { "cluster.cluster_0.http2.outbound_control_flood"); } -class Http2ManyStreamsTest - : public testing::TestWithParam>, - public Http2RawFrameIntegrationTest { -protected: - Http2ManyStreamsTest() : Http2RawFrameIntegrationTest(std::get<0>(GetParam())) { - config_helper_.addRuntimeOverride("envoy.reloadable_features.improved_stream_limit_handling", - useImprovedStreamLimitHandling() ? 
"true" : "false"); - } - - bool useImprovedStreamLimitHandling() const { return std::get<1>(GetParam()); } -}; - -INSTANTIATE_TEST_SUITE_P( - IpVersionsAndRuntimeFeature, Http2ManyStreamsTest, - testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), testing::Bool())); - -TEST_P(Http2ManyStreamsTest, UpstreamRstStreamStormOnDownstreamCloseRegressionTest) { +TEST_P(Http2FloodMitigationTest, UpstreamRstStreamStormOnDownstreamCloseRegressionTest) { const uint32_t num_requests = 80; envoy::config::core::v3::Http2ProtocolOptions config; @@ -1555,8 +1539,7 @@ TEST_P(Http2ManyStreamsTest, UpstreamRstStreamStormOnDownstreamCloseRegressionTe // The disconnect shouldn't trigger an outbound control frame flood. EXPECT_EQ(0, test_server_->counter("cluster.cluster_0.http2.outbound_control_flood")->value()); // Verify that the upstream connections are still active. - EXPECT_EQ(useImprovedStreamLimitHandling() ? 2 : 1, - test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); } } // namespace Envoy diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 771e2ac186ee..711ca5d1bddf 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -433,11 +433,15 @@ void HttpIntegrationTest::cleanupUpstreamAndDownstream() { void HttpIntegrationTest::sendRequestAndVerifyResponse( const Http::TestRequestHeaderMapImpl& request_headers, const int request_size, const Http::TestResponseHeaderMapImpl& response_headers, const int response_size, - const int backend_idx) { + const int backend_idx, + absl::optional expected_response_headers) { codec_client_ = makeHttpConnection(lookupPort("http")); auto response = sendRequestAndWaitForResponse(request_headers, request_size, response_headers, response_size, backend_idx); - verifyResponse(std::move(response), "200", response_headers, std::string(response_size, 'a')); + verifyResponse(std::move(response), "200", + (expected_response_headers.has_value()) ? 
*expected_response_headers + : response_headers, + std::string(response_size, 'a')); EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(request_size, upstream_request_->bodyLength()); @@ -1414,6 +1418,63 @@ void HttpIntegrationTest::testAdminDrain(Http::CodecType admin_request_type) { } } +void HttpIntegrationTest::simultaneousRequest(uint32_t request1_bytes, uint32_t request2_bytes, + uint32_t response1_bytes, uint32_t response2_bytes) { + FakeStreamPtr upstream_request1; + FakeStreamPtr upstream_request2; + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Start request 1 + auto encoder_decoder1 = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}); + Http::RequestEncoder* encoder1 = &encoder_decoder1.first; + auto response1 = std::move(encoder_decoder1.second); + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request1)); + + // Start request 2 + auto encoder_decoder2 = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}); + Http::RequestEncoder* encoder2 = &encoder_decoder2.first; + auto response2 = std::move(encoder_decoder2.second); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request2)); + + // Finish request 1 + codec_client_->sendData(*encoder1, request1_bytes, true); + ASSERT_TRUE(upstream_request1->waitForEndStream(*dispatcher_)); + + // Finish request 2 + codec_client_->sendData(*encoder2, request2_bytes, true); + ASSERT_TRUE(upstream_request2->waitForEndStream(*dispatcher_)); + + // Respond to request 2 + upstream_request2->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request2->encodeData(response2_bytes, true); + ASSERT_TRUE(response2->waitForEndStream()); + EXPECT_TRUE(upstream_request2->complete()); + EXPECT_EQ(request2_bytes, upstream_request2->bodyLength()); + EXPECT_TRUE(response2->complete()); + EXPECT_EQ("200", response2->headers().getStatusValue()); + EXPECT_EQ(response2_bytes, response2->body().size()); + + // Respond to request 1 + upstream_request1->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request1->encodeData(response1_bytes, true); + ASSERT_TRUE(response1->waitForEndStream()); + EXPECT_TRUE(upstream_request1->complete()); + EXPECT_EQ(request1_bytes, upstream_request1->bodyLength()); + EXPECT_TRUE(response1->complete()); + EXPECT_EQ("200", response1->headers().getStatusValue()); + EXPECT_EQ(response1_bytes, response1->body().size()); +} + std::string HttpIntegrationTest::downstreamProtocolStatsRoot() const { switch (downstreamProtocol()) { case Http::CodecClient::Type::HTTP1: diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index d6ba99bf7a02..01327b015a92 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -186,7 +186,9 @@ class HttpIntegrationTest : public BaseIntegrationTest { void sendRequestAndVerifyResponse(const Http::TestRequestHeaderMapImpl& request_headers, const int request_size, const Http::TestResponseHeaderMapImpl& response_headers, - const int response_size, const int backend_idx); + const int response_size, const int backend_idx, + absl::optional + expected_response_headers 
= absl::nullopt); // Check for completion of upstream_request_, and a simple "200" response. void checkSimpleRequestSuccess(uint64_t expected_request_size, uint64_t expected_response_size, @@ -245,6 +247,8 @@ class HttpIntegrationTest : public BaseIntegrationTest { void testEnvoyProxying1xx(bool continue_before_upstream_complete = false, bool with_encoder_filter = false, bool with_multiple_1xx_headers = false); + void simultaneousRequest(uint32_t request1_bytes, uint32_t request2_bytes, + uint32_t response1_bytes, uint32_t response2_bytes); // HTTP/2 client tests. void testDownstreamResetBeforeResponseComplete(); diff --git a/test/integration/http_protocol_integration.cc b/test/integration/http_protocol_integration.cc index 0538a024d254..72dfbac95d8a 100644 --- a/test/integration/http_protocol_integration.cc +++ b/test/integration/http_protocol_integration.cc @@ -58,4 +58,72 @@ std::string HttpProtocolIntegrationTest::protocolTestParamsToString( upstreamToString(params.param.upstream_protocol)); } +void HttpProtocolIntegrationTest::expectUpstreamBytesSentAndReceived( + BytesCountExpectation h1_expectation, BytesCountExpectation h2_expectation, const int id) { + auto integer_near = [](int x, int y) -> bool { return std::abs(x - y) <= (x / 20); }; + std::string access_log = waitForAccessLog(access_log_name_, id); + std::vector log_entries = absl::StrSplit(access_log, ' '); + int wire_bytes_sent = std::stoi(log_entries[0]), wire_bytes_received = std::stoi(log_entries[1]), + header_bytes_sent = std::stoi(log_entries[2]), + header_bytes_received = std::stoi(log_entries[3]); + if (upstreamProtocol() == Http::CodecType::HTTP1) { + EXPECT_EQ(h1_expectation.wire_bytes_sent_, wire_bytes_sent) + << "expect: " << h1_expectation.wire_bytes_sent_ << ", actual: " << wire_bytes_sent; + EXPECT_EQ(h1_expectation.wire_bytes_received_, wire_bytes_received) + << "expect: " << h1_expectation.wire_bytes_received_ << ", actual: " << wire_bytes_received; + EXPECT_EQ(h1_expectation.header_bytes_sent_, header_bytes_sent) + << "expect: " << h1_expectation.header_bytes_sent_ << ", actual: " << header_bytes_sent; + EXPECT_EQ(h1_expectation.header_bytes_received_, header_bytes_received) + << "expect: " << h1_expectation.header_bytes_received_ + << ", actual: " << header_bytes_received; + } + if (upstreamProtocol() == Http::CodecType::HTTP2) { + // Because of non-deterministic h2 compression, the same plain text length don't map to the + // same number of wire bytes. 
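+    // integer_near() above therefore allows roughly 5% slack (x / 20) when comparing the four
+    // space-separated access-log fields parsed at the top of this helper: wire bytes sent,
+    // wire bytes received, header bytes sent and header bytes received.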
+ EXPECT_TRUE(integer_near(h2_expectation.wire_bytes_sent_, wire_bytes_sent)) + << "expect: " << h2_expectation.wire_bytes_sent_ << ", actual: " << wire_bytes_sent; + EXPECT_TRUE(integer_near(h2_expectation.wire_bytes_received_, wire_bytes_received)) + << "expect: " << h2_expectation.wire_bytes_received_ << ", actual: " << wire_bytes_received; + EXPECT_TRUE(integer_near(h2_expectation.header_bytes_sent_, header_bytes_sent)) + << "expect: " << h2_expectation.header_bytes_sent_ << ", actual: " << header_bytes_sent; + EXPECT_TRUE(integer_near(h2_expectation.header_bytes_received_, header_bytes_received)) + << "expect: " << h2_expectation.header_bytes_received_ + << ", actual: " << header_bytes_received; + } +} + +void HttpProtocolIntegrationTest::expectDownstreamBytesSentAndReceived( + BytesCountExpectation h1_expectation, BytesCountExpectation h2_expectation, const int id) { + auto integer_near = [](int x, int y) -> bool { return std::abs(x - y) <= (x / 10); }; + std::string access_log = waitForAccessLog(access_log_name_, id); + std::vector log_entries = absl::StrSplit(access_log, ' '); + int wire_bytes_sent = std::stoi(log_entries[0]), wire_bytes_received = std::stoi(log_entries[1]), + header_bytes_sent = std::stoi(log_entries[2]), + header_bytes_received = std::stoi(log_entries[3]); + if (downstreamProtocol() == Http::CodecType::HTTP1) { + EXPECT_TRUE(integer_near(h1_expectation.wire_bytes_sent_, wire_bytes_sent)) + << "expect: " << h1_expectation.wire_bytes_sent_ << ", actual: " << wire_bytes_sent; + EXPECT_EQ(h1_expectation.wire_bytes_received_, wire_bytes_received) + << "expect: " << h1_expectation.wire_bytes_received_ << ", actual: " << wire_bytes_received; + EXPECT_TRUE(integer_near(h1_expectation.header_bytes_sent_, header_bytes_sent)) + << "expect: " << h1_expectation.header_bytes_sent_ << ", actual: " << header_bytes_sent; + EXPECT_EQ(h1_expectation.header_bytes_received_, header_bytes_received) + << "expect: " << h1_expectation.header_bytes_received_ + << ", actual: " << header_bytes_received; + } + if (downstreamProtocol() == Http::CodecType::HTTP2) { + // Because of non-deterministic h2 compression, the same plain text length don't map to the + // same number of wire bytes. + EXPECT_TRUE(integer_near(h2_expectation.wire_bytes_sent_, wire_bytes_sent)) + << "expect: " << h2_expectation.wire_bytes_sent_ << ", actual: " << wire_bytes_sent; + EXPECT_TRUE(integer_near(h2_expectation.wire_bytes_received_, wire_bytes_received)) + << "expect: " << h2_expectation.wire_bytes_received_ << ", actual: " << wire_bytes_received; + EXPECT_TRUE(integer_near(h2_expectation.header_bytes_sent_, header_bytes_sent)) + << "expect: " << h2_expectation.header_bytes_sent_ << ", actual: " << header_bytes_sent; + EXPECT_TRUE(integer_near(h2_expectation.header_bytes_received_, header_bytes_received)) + << "expect: " << h2_expectation.header_bytes_received_ + << ", actual: " << header_bytes_received; + } +} + } // namespace Envoy diff --git a/test/integration/http_protocol_integration.h b/test/integration/http_protocol_integration.h index d66f53692919..ba47a134d948 100644 --- a/test/integration/http_protocol_integration.h +++ b/test/integration/http_protocol_integration.h @@ -58,6 +58,24 @@ class HttpProtocolIntegrationTest : public testing::TestWithParamset_value(false); + test::integration::filters::SetResponseCodeFilterConfig response_code; + response_code.set_code(403); - // The http health_check filter doesn't support per filter config. So specify one - // and expect the exception will be raised. 
auto* virtual_host = hcm.mutable_route_config()->mutable_virtual_hosts(0); auto* config = virtual_host->mutable_typed_per_filter_config(); - (*config)["envoy.filters.http.health_check"].PackFrom(health_check); + (*config)["set-response-code-filter"].PackFrom(response_code); auto* filter = hcm.mutable_http_filters()->Add(); - filter->set_name("envoy.filters.http.health_check"); - filter->mutable_typed_config()->PackFrom(health_check); + filter->set_name("set-response-code-filter"); + filter->mutable_typed_config()->PackFrom(response_code); // keep router the last auto size = hcm.http_filters_size(); hcm.mutable_http_filters()->SwapElements(size - 2, size - 1); }); - EXPECT_DEATH(initialize(), "The filter envoy.filters.http.health_check doesn't support virtual " - "host-specific configurations"); + EXPECT_DEATH( + initialize(), + "The filter set-response-code-filter doesn't support virtual host-specific configurations"); } TEST_F(HTTPTypedPerFilterConfigTest, RejectUnknownHttpFilterInTypedPerFilterConfig) { diff --git a/test/integration/idle_timeout_integration_test.cc b/test/integration/idle_timeout_integration_test.cc index 6d246bf90e5b..0acf5ef9515c 100644 --- a/test/integration/idle_timeout_integration_test.cc +++ b/test/integration/idle_timeout_integration_test.cc @@ -203,6 +203,36 @@ TEST_P(IdleTimeoutIntegrationTest, IdleTimeoutWithTwoRequests) { test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_idle_timeout", 1); } +// Max connection duration reached after a connection is created. +TEST_P(IdleTimeoutIntegrationTest, MaxConnectionDurationBasic) { + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + ConfigHelper::HttpProtocolOptions protocol_options; + auto* http_protocol_options = protocol_options.mutable_common_http_protocol_options(); + auto* max_connection_duration = http_protocol_options->mutable_max_connection_duration(); + max_connection_duration->set_seconds(1); + ConfigHelper::setProtocolOptions(*bootstrap.mutable_static_resources()->mutable_clusters(0), + protocol_options); + }); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024); + waitForNextUpstreamRequest(); + + upstream_request_->encodeHeaders(default_response_headers_, false); + upstream_request_->encodeData(512, true); + ASSERT_TRUE(response->waitForEndStream()); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_TRUE(response->complete()); + test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_total", 1); + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_200", 1); + + // Do not send any requests and validate that the max connection duration is reached. + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_max_duration_reached", 1); +} + // Per-stream idle timeout after having sent downstream headers. 
TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterDownstreamHeaders) { enable_per_stream_idle_timeout_ = true; diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index 6fdc3bd7467c..d87aa4b39998 100644 --- a/test/integration/integration_admin_test.cc +++ b/test/integration/integration_admin_test.cc @@ -34,48 +34,6 @@ INSTANTIATE_TEST_SUITE_P(Protocols, IntegrationAdminTest, {Http::CodecType::HTTP1})), HttpProtocolIntegrationTest::protocolTestParamsToString); -TEST_P(IntegrationAdminTest, HealthCheck) { - initialize(); - - BufferingStreamDecoderPtr response; - EXPECT_EQ("200", request("http", "POST", "/healthcheck", response)); - - EXPECT_EQ("200", request("admin", "POST", "/healthcheck/fail", response)); - EXPECT_EQ("503", request("http", "GET", "/healthcheck", response)); - - EXPECT_EQ("200", request("admin", "POST", "/healthcheck/ok", response)); - EXPECT_EQ("200", request("http", "GET", "/healthcheck", response)); -} - -TEST_P(IntegrationAdminTest, HealthCheckWithoutServerStats) { - envoy::config::metrics::v3::StatsMatcher stats_matcher; - stats_matcher.mutable_exclusion_list()->add_patterns()->set_prefix("server."); - initialize(stats_matcher); - - BufferingStreamDecoderPtr response; - EXPECT_EQ("200", request("http", "POST", "/healthcheck", response)); - EXPECT_EQ("200", request("admin", "GET", "/stats", response)); - EXPECT_THAT(response->body(), Not(HasSubstr("server."))); - - EXPECT_EQ("200", request("admin", "POST", "/healthcheck/fail", response)); - EXPECT_EQ("503", request("http", "GET", "/healthcheck", response)); - EXPECT_EQ("200", request("admin", "GET", "/stats", response)); - EXPECT_THAT(response->body(), Not(HasSubstr("server."))); - - EXPECT_EQ("200", request("admin", "POST", "/healthcheck/ok", response)); - EXPECT_EQ("200", request("http", "GET", "/healthcheck", response)); - EXPECT_EQ("200", request("admin", "GET", "/stats", response)); - EXPECT_THAT(response->body(), Not(HasSubstr("server."))); -} - -TEST_P(IntegrationAdminTest, HealthCheckWithBufferFilter) { - config_helper_.prependFilter(ConfigHelper::defaultBufferFilter()); - initialize(); - - BufferingStreamDecoderPtr response; - EXPECT_EQ("200", request("http", "GET", "/healthcheck", response)); -} - TEST_P(IntegrationAdminTest, AdminLogging) { initialize(); diff --git a/test/integration/integration_admin_test.h b/test/integration/integration_admin_test.h index b190cef6edba..7d036b98a723 100644 --- a/test/integration/integration_admin_test.h +++ b/test/integration/integration_admin_test.h @@ -15,7 +15,6 @@ namespace Envoy { class IntegrationAdminTest : public HttpProtocolIntegrationTest { public: void initialize() override { - config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); config_helper_.addConfigModifier( [](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { auto& hist_settings = @@ -30,14 +29,6 @@ class IntegrationAdminTest : public HttpProtocolIntegrationTest { HttpIntegrationTest::initialize(); } - void initialize(envoy::config::metrics::v3::StatsMatcher stats_matcher) { - config_helper_.addConfigModifier( - [stats_matcher](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { - *bootstrap.mutable_stats_config()->mutable_stats_matcher() = stats_matcher; - }); - initialize(); - } - absl::string_view request(const std::string port_key, const std::string method, const std::string endpoint, BufferingStreamDecoderPtr& response) { response = IntegrationUtil::makeSingleRequest(lookupPort(port_key), method, 
endpoint, "", diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 5e8d025aa1ad..6b036954ea21 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -304,15 +304,12 @@ TEST_P(IntegrationTest, RouterDirectResponseEmptyBody) { } TEST_P(IntegrationTest, ConnectionClose) { - config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); + autonomous_upstream_ = true; initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); - auto response = - codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{{":method", "GET"}, - {":path", "/healthcheck"}, - {":authority", "host"}, - {"connection", "close"}}); + auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, {":path", "/"}, {":authority", "host"}, {"connection", "close"}}); ASSERT_TRUE(response->waitForEndStream()); ASSERT_TRUE(codec_client_->waitForDisconnect()); @@ -562,6 +559,55 @@ name: matcher second_codec->close(); } +// Verifies routing via the match tree API. +TEST_P(IntegrationTest, MatchTreeRouting) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.experimental_matching_api", "true"); + + const std::string vhost_yaml = R"EOF( + name: vhost + domains: ["matcher.com"] + matcher: + matcher_tree: + input: + name: request-headers + typed_config: + "@type": type.googleapis.com/envoy.type.matcher.v3.HttpRequestHeaderMatchInput + header_name: match-header + exact_match_map: + map: + "route": + action: + name: route + typed_config: + "@type": type.googleapis.com/envoy.config.route.v3.Route + match: + prefix: / + route: + cluster: cluster_0 + )EOF"; + + envoy::config::route::v3::VirtualHost virtual_host; + TestUtility::loadFromYaml(vhost_yaml, virtual_host); + + config_helper_.addVirtualHost(virtual_host); + autonomous_upstream_ = true; + + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + Http::TestRequestHeaderMapImpl headers{{":method", "GET"}, + {":path", "/whatever"}, + {":scheme", "http"}, + {"match-header", "route"}, + {":authority", "matcher.com"}}; + auto response = codec_client_->makeHeaderOnlyRequest(headers); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_THAT(response->headers(), HttpStatusIs("200")); + + codec_client_->close(); +} + // This is a regression for https://github.com/envoyproxy/envoy/issues/2715 and validates that a // pending request is not sent on a connection that has been half-closed. TEST_P(IntegrationTest, UpstreamDisconnectWithTwoRequests) { @@ -1278,17 +1324,8 @@ TEST_P(IntegrationTest, Connect) { EXPECT_EQ(normalizeDate(response1), normalizeDate(response2)); } -// Test that Envoy by default returns HTTP code 502 on upstream protocol error. -TEST_P(IntegrationTest, UpstreamProtocolErrorDefault) { - testRouterUpstreamProtocolError("502", "UPE"); -} - -// Test runtime overwrite to return 503 on upstream protocol error. -TEST_P(IntegrationTest, UpstreamProtocolErrorRuntimeOverwrite) { - config_helper_.addRuntimeOverride( - "envoy.reloadable_features.return_502_for_upstream_protocol_errors", "false"); - testRouterUpstreamProtocolError("503", "UC"); -} +// Test that Envoy returns HTTP code 502 on upstream protocol error. 
+TEST_P(IntegrationTest, UpstreamProtocolError) { testRouterUpstreamProtocolError("502", "UPE"); }
 
 TEST_P(IntegrationTest, TestHead) {
   initialize();
diff --git a/test/integration/leds_integration_test.cc b/test/integration/leds_integration_test.cc
new file mode 100644
index 000000000000..c7ceae29e151
--- /dev/null
+++ b/test/integration/leds_integration_test.cc
@@ -0,0 +1,816 @@
+#include "envoy/config/bootstrap/v3/bootstrap.pb.h"
+#include "envoy/config/cluster/v3/cluster.pb.h"
+#include "envoy/config/core/v3/health_check.pb.h"
+#include "envoy/config/endpoint/v3/endpoint.pb.h"
+#include "envoy/type/v3/http.pb.h"
+
+#include "test/common/grpc/grpc_client_integration.h"
+#include "test/config/utility.h"
+#include "test/integration/http_integration.h"
+#include "test/test_common/network_utility.h"
+
+#include "gtest/gtest.h"
+
+namespace Envoy {
+namespace {
+
+// Integration test for LEDS features. CDS is consumed via filesystem subscription,
+// and EDS and LEDS are consumed using the delta-xDS gRPC protocol.
+class LedsIntegrationTest : public Grpc::GrpcClientIntegrationParamTest,
+                            public HttpIntegrationTest {
+protected:
+  struct FakeUpstreamInfo {
+    FakeHttpConnectionPtr connection_;
+    FakeUpstream* upstream_{};
+    absl::flat_hash_map<std::string, FakeStreamPtr> stream_by_resource_name_;
+
+    static constexpr char default_stream_name[] = "default";
+
+    // Used for cases where only a single stream is needed.
+    FakeStreamPtr& defaultStream() { return stream_by_resource_name_[default_stream_name]; }
+  };
+
+  LedsIntegrationTest()
+      : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()),
+        codec_client_type_(envoy::type::v3::HTTP1) {
+    use_lds_ = false;
+    create_xds_upstream_ = false;
+    // LEDS is only supported by delta-xDS.
+    sotw_or_delta_ = Grpc::SotwOrDelta::Delta;
+  }
+
+  ~LedsIntegrationTest() override {
+    // First disconnect upstream connections to avoid FIN messages causing unexpected
+    // disconnects on the fake servers.
+    for (auto& host_upstream_info : hosts_upstreams_info_) {
+      resetFakeUpstreamInfo(host_upstream_info);
+    }
+
+    resetFakeUpstreamInfo(leds_upstream_info_);
+    resetFakeUpstreamInfo(eds_upstream_info_);
+  }
+
+  // A helper function to set the endpoints' health status.
+  void setEndpointsHealthStatus(
+      const absl::flat_hash_set<uint32_t>& endpoints_idxs,
+      envoy::config::core::v3::HealthStatus health_status, absl::string_view collection_prefix,
+      absl::flat_hash_map<std::string, envoy::config::endpoint::v3::LbEndpoint>&
+          updated_endpoints) {
+    for (const auto endpoint_idx : endpoints_idxs) {
+      const std::string endpoint_name = absl::StrCat(collection_prefix, "endpoint", endpoint_idx);
+      envoy::config::endpoint::v3::LbEndpoint endpoint;
+      // Shift fake_upstreams_ by 2 (due to EDS and LEDS fake upstreams).
+      setUpstreamAddress(endpoint_idx + 2, endpoint);
+      endpoint.set_health_status(health_status);
+      updated_endpoints.emplace(endpoint_name, endpoint);
+    }
+  }
+
+  // Sets endpoints in a specific locality (using the LEDS helper).
+  // We need to supply the endpoints via LEDS to provide health status. Use a
+  // filesystem delivery to simplify test mechanics.
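+  // The first four index sets list the endpoints to update, grouped by the health
+  // status they should be assigned (healthy, degraded, unhealthy, unknown); the fifth
+  // lists the endpoints to remove, followed by the target locality index and whether
+  // to wait for the LEDS ack.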
+ void setEndpoints(const absl::flat_hash_set& healthy_endpoints_idxs, + const absl::flat_hash_set& degraded_endpoints_idxs, + const absl::flat_hash_set& unhealthy_endpoints_idxs, + const absl::flat_hash_set& unknown_endpoints_idxs, + const absl::flat_hash_set& removed_endpoints_idxs, + uint32_t locality_idx = 0, bool await_update = true) { + const auto& collection_prefix = localities_prefixes_[locality_idx]; + absl::flat_hash_map updated_endpoints; + std::vector removed_endpoints; + setEndpointsHealthStatus(healthy_endpoints_idxs, envoy::config::core::v3::HEALTHY, + collection_prefix, updated_endpoints); + setEndpointsHealthStatus(degraded_endpoints_idxs, envoy::config::core::v3::DEGRADED, + collection_prefix, updated_endpoints); + setEndpointsHealthStatus(unhealthy_endpoints_idxs, envoy::config::core::v3::UNHEALTHY, + collection_prefix, updated_endpoints); + setEndpointsHealthStatus(unknown_endpoints_idxs, envoy::config::core::v3::UNKNOWN, + collection_prefix, updated_endpoints); + + for (const auto removed_endpoint_idx : removed_endpoints_idxs) { + const std::string endpoint_name = + absl::StrCat(collection_prefix, "endpoint", removed_endpoint_idx); + removed_endpoints.emplace_back(endpoint_name); + } + + sendDeltaLedsResponse(updated_endpoints, removed_endpoints, "7", locality_idx); + + if (await_update) { + // Receive LEDS ack. + EXPECT_TRUE(compareDeltaDiscoveryRequest( + Config::TypeUrl::get().LbEndpoint, {}, {}, + leds_upstream_info_.stream_by_resource_name_[localities_prefixes_[locality_idx]])); + } + } + + // Sends an LEDS response to a specific locality, containing the updated + // endpoints map (resource name to endpoint data), and the list of resource + // names to remove. + void sendDeltaLedsResponse( + const absl::flat_hash_map& + to_update_map, + const std::vector& to_delete_list, const std::string& version, + uint32_t locality_idx) { + auto& locality_stream = + leds_upstream_info_.stream_by_resource_name_[localities_prefixes_[locality_idx]]; + ASSERT(locality_stream != nullptr); + envoy::service::discovery::v3::DeltaDiscoveryResponse response; + response.set_system_version_info(version); + response.set_type_url(Config::TypeUrl::get().LbEndpoint); + + for (const auto& endpoint_name : to_delete_list) { + *response.add_removed_resources() = endpoint_name; + } + for (const auto& [resource_name, lb_endpoint] : to_update_map) { + auto* resource = response.add_resources(); + resource->set_name(resource_name); + resource->set_version(version); + resource->mutable_resource()->PackFrom(lb_endpoint); + } + locality_stream->sendGrpcMessage(response); + } + + void createUpstreams() override { + // Add the EDS upstream. + eds_upstream_info_.upstream_ = &addFakeUpstream(FakeHttpConnection::Type::HTTP2); + // Add the LEDS upstream. + leds_upstream_info_.upstream_ = &addFakeUpstream(FakeHttpConnection::Type::HTTP2); + + // Create backends and initialize their wrapper. + HttpIntegrationTest::createUpstreams(); + // Store all hosts upstreams info in a single place so it would be easily + // accessible. + ASSERT(fake_upstreams_.size() == fake_upstreams_count_ + 2); + hosts_upstreams_info_.reserve(fake_upstreams_count_); + // Skip the first 2 fake upstreams as they are reserved for EDS and LEDS. + for (size_t i = 2; i < fake_upstreams_.size(); ++i) { + FakeUpstreamInfo host_info; + host_info.upstream_ = &(*fake_upstreams_[i]); + hosts_upstreams_info_.emplace_back(std::move(host_info)); + } + } + + // Initialize a gRPC stream of an upstream server. 
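+  // The HTTP connection to the upstream is created lazily, a stream is registered
+  // under the given resource name (asserting it was not registered before), and the
+  // gRPC stream is started so xDS messages can be exchanged on it.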
+ void initializeStream(FakeUpstreamInfo& upstream_info, + const std::string& resource_name = FakeUpstreamInfo::default_stream_name) { + if (!upstream_info.connection_) { + auto result = + upstream_info.upstream_->waitForHttpConnection(*dispatcher_, upstream_info.connection_); + RELEASE_ASSERT(result, result.message()); + } + if (!upstream_info.stream_by_resource_name_.try_emplace(resource_name, nullptr).second) { + RELEASE_ASSERT(false, + fmt::format("stream with resource name '{}' already exists!", resource_name)); + } + + auto result = upstream_info.connection_->waitForNewStream( + *dispatcher_, upstream_info.stream_by_resource_name_[resource_name]); + RELEASE_ASSERT(result, result.message()); + upstream_info.stream_by_resource_name_[resource_name]->startGrpcStream(); + } + + // A specific function to initialize LEDS streams. This was introduced to + // handle the non-deterministic requests order when more than one locality is + // used. This method first establishes the gRPC stream, fetches the first + // request and reads its requested resource name, and then assigns the stream + // to the internal data-structure. + void initializeAllLedsStreams() { + // Create a set of localities that are expected. + absl::flat_hash_set expected_localities_prefixes(localities_prefixes_.begin(), + localities_prefixes_.end()); + + if (!leds_upstream_info_.connection_) { + auto result = leds_upstream_info_.upstream_->waitForHttpConnection( + *dispatcher_, leds_upstream_info_.connection_); + RELEASE_ASSERT(result, result.message()); + } + + // Wait for the exact number of streams. + for (uint32_t i = 0; i < localities_prefixes_.size(); ++i) { + // Create the stream for the LEDS collection and fetch the name from the + // contents, then validate that this is an expected collection + FakeStreamPtr temp_stream; + envoy::service::discovery::v3::DeltaDiscoveryRequest request; + auto result = leds_upstream_info_.connection_->waitForNewStream(*dispatcher_, temp_stream); + RELEASE_ASSERT(result, result.message()); + temp_stream->startGrpcStream(); + RELEASE_ASSERT(temp_stream->waitForGrpcMessage(*dispatcher_, request), + "LEDS message did not arrive as expected"); + RELEASE_ASSERT(request.resource_names_subscribe().size() == 1, + "Each LEDS request in this test must have a single resource"); + // Remove the "*" from the collection name to match against the set + // contents. + const auto request_collection_name = *request.resource_names_subscribe().begin(); + const auto pos = request_collection_name.find_last_of('*'); + ASSERT(pos != std::string::npos); + const auto request_collection_prefix = request_collection_name.substr(0, pos); + auto set_it = expected_localities_prefixes.find(request_collection_prefix); + ASSERT(set_it != expected_localities_prefixes.end()); + // Associate the stream with the locality prefix. + leds_upstream_info_.stream_by_resource_name_[*set_it] = std::move(temp_stream); + // Remove the locality prefix from the expected set. + expected_localities_prefixes.erase(set_it); + } + } + + void initializeTest(bool http_active_hc, uint32_t localities_num = 1) { + // Set up a single upstream host for the LEDS cluster using HTTP2 (gRPC). + setUpstreamCount(4); + + config_helper_.addConfigModifier([this, http_active_hc]( + envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + // Add a static EDS cluster. 
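+      // Both the EDS and the LEDS cluster below are cloned from the static cluster_0
+      // and switched to HTTP2 so they can carry the delta-xDS gRPC streams.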
+ auto* eds_cluster = bootstrap.mutable_static_resources()->add_clusters(); + eds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); + eds_cluster->set_name("eds_cluster"); + eds_cluster->mutable_load_assignment()->set_cluster_name("eds_cluster"); + ConfigHelper::setHttp2(*eds_cluster); + + // Add a static LEDS cluster. + auto* leds_cluster = bootstrap.mutable_static_resources()->add_clusters(); + leds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); + leds_cluster->set_name("leds_cluster"); + leds_cluster->mutable_load_assignment()->set_cluster_name("leds_cluster"); + ConfigHelper::setHttp2(*leds_cluster); + + // Remove the static cluster (cluster_0) and set up CDS. + bootstrap.mutable_dynamic_resources()->mutable_cds_config()->set_resource_api_version( + envoy::config::core::v3::ApiVersion::V3); + bootstrap.mutable_dynamic_resources()->mutable_cds_config()->set_path(cds_helper_.cds_path()); + bootstrap.mutable_static_resources()->mutable_clusters()->erase( + bootstrap.mutable_static_resources()->mutable_clusters()->begin()); + + // Set the default static cluster to use EDS. + auto& cluster_0 = cluster_; + cluster_0.set_name("cluster_0"); + cluster_0.set_type(envoy::config::cluster::v3::Cluster::EDS); + cluster_0.mutable_connect_timeout()->CopyFrom(Protobuf::util::TimeUtil::SecondsToDuration(5)); + auto* eds_cluster_config = cluster_0.mutable_eds_cluster_config(); + eds_cluster_config->mutable_eds_config()->set_resource_api_version( + envoy::config::core::v3::ApiVersion::V3); + auto* api_config_source = + eds_cluster_config->mutable_eds_config()->mutable_api_config_source(); + api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::DELTA_GRPC); + api_config_source->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); + auto* grpc_service = api_config_source->add_grpc_services(); + setGrpcService(*grpc_service, "eds_cluster", eds_upstream_info_.upstream_->localAddress()); + if (http_active_hc) { + auto* health_check = cluster_0.add_health_checks(); + health_check->mutable_timeout()->set_seconds(30); + // TODO(mattklein123): Consider using simulated time here. + health_check->mutable_interval()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(100)); + health_check->mutable_no_traffic_interval()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(100)); + health_check->mutable_unhealthy_threshold()->set_value(1); + health_check->mutable_healthy_threshold()->set_value(1); + health_check->mutable_http_health_check()->set_path("/healthcheck"); + health_check->mutable_http_health_check()->set_codec_client_type(codec_client_type_); + } + // Set the cluster using CDS. + cds_helper_.setCds({cluster_}); + }); + + // Set validate_clusters to false to allow us to reference a CDS cluster. + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_route_config()->mutable_validate_clusters()->set_value(false); }); + + defer_listener_finalization_ = true; + initialize(); + + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // Create the EDS connection and stream. + initializeStream(eds_upstream_info_); + // Add the assignment and localities. 
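+    // Each locality gets its own priority and an xdstp:// LEDS collection name; the
+    // per-locality prefixes are recorded so that later updates and acks can be matched
+    // to the corresponding LEDS stream.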
+ cluster_load_assignment_.set_cluster_name("cluster_0"); + localities_prefixes_.reserve(localities_num); + for (uint32_t locality_idx = 0; locality_idx < localities_num; ++locality_idx) { + // Setup per locality LEDS config over gRPC. + auto* locality_lb_endpoints = cluster_load_assignment_.add_endpoints(); + locality_lb_endpoints->set_priority(locality_idx); + auto* leds_locality_config = locality_lb_endpoints->mutable_leds_cluster_locality_config(); + auto* leds_config = leds_locality_config->mutable_leds_config(); + + leds_config->set_resource_api_version(envoy::config::core::v3::ApiVersion::V3); + auto* api_config_source = leds_config->mutable_api_config_source(); + api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::DELTA_GRPC); + api_config_source->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); + auto* grpc_service = api_config_source->add_grpc_services(); + setGrpcService(*grpc_service, "leds_cluster", leds_upstream_info_.upstream_->localAddress()); + + const std::string locality_endpoints_prefix = fmt::format( + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/cluster0/locality{}/", locality_idx); + localities_prefixes_.push_back(locality_endpoints_prefix); + const std::string locality_endpoints_collection_name = + absl::StrCat(locality_endpoints_prefix, "*"); + leds_locality_config->set_leds_collection_name(locality_endpoints_collection_name); + } + + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // Do the initial compareDiscoveryRequest / sendDiscoveryResponse for cluster_'s localities + // (ClusterLoadAssignment). + EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, + {"cluster_0"}, {}, + eds_upstream_info_.defaultStream())); + sendDeltaDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {cluster_load_assignment_}, {}, "2", + eds_upstream_info_.defaultStream()); + + // Receive EDS ack. + EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, {}, {}, + eds_upstream_info_.defaultStream())); + + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // Create the LEDS connection and stream(s). + // Wait for all the LEDS streams to be established. Note that if more + // than one locality has issued a LEDS request, the order of the requests + // can be non-deterministic (e.g., the request for "locality1" might be + // received before the request for "locality0"). Therefore we first wait + // for all the streams to be established, and only then verify that all the + // requests arrived as expected. 
+ initializeAllLedsStreams(); + + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + } + + void resetFakeUpstreamInfo(FakeUpstreamInfo& upstream_info) { + if (upstream_info.connection_ == nullptr || upstream_info.upstream_ == nullptr) { + upstream_info.upstream_ = nullptr; + return; + } + AssertionResult result = upstream_info.connection_->close(); + RELEASE_ASSERT(result, result.message()); + result = upstream_info.connection_->waitForDisconnect(); + RELEASE_ASSERT(result, result.message()); + upstream_info.connection_.reset(); + upstream_info.upstream_ = nullptr; + } + + void waitForHealthCheck(uint32_t upstream_info_idx) { + auto& host_info = hosts_upstreams_info_[upstream_info_idx]; + if (host_info.connection_ == nullptr) { + ASSERT_TRUE(host_info.upstream_->waitForHttpConnection(*dispatcher_, host_info.connection_)); + } + ASSERT_TRUE(host_info.connection_->waitForNewStream(*dispatcher_, host_info.defaultStream())); + ASSERT_TRUE(host_info.defaultStream()->waitForEndStream(*dispatcher_)); + + EXPECT_EQ(host_info.defaultStream()->headers().getPathValue(), "/healthcheck"); + EXPECT_EQ(host_info.defaultStream()->headers().getMethodValue(), "GET"); + } + + envoy::type::v3::CodecClientType codec_client_type_{}; + CdsHelper cds_helper_; + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment_; + envoy::config::cluster::v3::Cluster cluster_; + std::vector localities_prefixes_; + std::vector hosts_upstreams_info_; + FakeUpstreamInfo eds_upstream_info_; + FakeUpstreamInfo leds_upstream_info_; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, LedsIntegrationTest, GRPC_CLIENT_INTEGRATION_PARAMS, + Grpc::GrpcClientIntegrationParamTest::protocolTestParamsToString); + +// Validates basic LEDS request response behavior. +TEST_P(LedsIntegrationTest, BasicLeds) { + initializeTest(true); + + // Send an endpoint update with an unknown state using LEDS. + setEndpoints({}, {}, {}, {0}, {}); + + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1); + // Waiting for the endpoint to become Healthy to end warming and move the cluster to active. + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // There should be a single backend in the cluster, and not yet healthy. + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Wait for the first health-check and verify the host is healthy. This should warm the initial + // cluster. + waitForHealthCheck(0); + hosts_upstreams_info_[0].defaultStream()->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 1); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + + // Wait for our statically specified listener to become ready, and register its port in the + // test framework's downstream listener port map. + test_server_->waitUntilListenersReady(); + registerTestServerPorts({"http"}); + + // The endpoint sent a valid health-check so the cluster should be active. 
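+  // Three active clusters are expected at this point: the static eds_cluster and
+  // leds_cluster, plus the now-warmed cluster_0 delivered via CDS.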
+ test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + test_server_->waitForGaugeEq("cluster_manager.active_clusters", 3); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); +} + +// Validates adding endpoints using LEDS. +TEST_P(LedsIntegrationTest, LedsAdd) { + initializeTest(true); + + // Send an endpoint update with an unknown state using LEDS. + setEndpoints({}, {}, {}, {0}, {}); + + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1); + // Waiting for the endpoint to become Healthy to end warming and move the cluster to active. + EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value()); + + // There should be a single backend in the cluster, and not yet healthy. + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Wait for the first health-check and verify the host is healthy. This should warm the initial + // cluster. + waitForHealthCheck(0); + hosts_upstreams_info_[0].defaultStream()->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 1); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + + // Wait for our statically specified listener to become ready, and register its port in the + // test framework's downstream listener port map. + test_server_->waitUntilListenersReady(); + registerTestServerPorts({"http"}); + + // The cluster should have now a single healthy host. + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Add 2 more endpoints using LEDS in unknown state. + setEndpoints({}, {}, {}, {1, 2}, {}); + + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 2); + + // There should be additional 2 backends in the cluster, and only one healthy. + EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Send health-check responses back from the new hosts. + for (int i = 1; i < 3; ++i) { + waitForHealthCheck(i); + hosts_upstreams_info_[i].defaultStream()->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + } + + // Verify that Envoy observes the healthy endpoints. + test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 3); + EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); +} + +// Verify that updating the same endpoint doesn't change anything. +TEST_P(LedsIntegrationTest, LedsUpdateSameEndpoint) { + initializeTest(true); + + // Send an endpoint update with an unknown state using LEDS. + setEndpoints({}, {}, {}, {0}, {}); + + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1); + // Waiting for the endpoint to become Healthy to end warming and move the cluster to active. 
+  EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value());
+  EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value());
+
+  // There should be a single backend in the cluster, and not yet healthy.
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+  EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_healthy")->value());
+
+  // Wait for the first health-check and verify the host is healthy. This should warm the initial
+  // cluster.
+  waitForHealthCheck(0);
+  hosts_upstreams_info_[0].defaultStream()->encodeHeaders(
+      Http::TestResponseHeaderMapImpl{{":status", "200"}}, true);
+  test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 1);
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+
+  // Wait for our statically specified listener to become ready, and register its port in the
+  // test framework's downstream listener port map.
+  test_server_->waitUntilListenersReady();
+  registerTestServerPorts({"http"});
+
+  // The cluster should now have a single healthy host.
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value());
+
+  // "Update" the endpoint by sending the same state. The endpoint should still
+  // be healthy, as the active health check cleared it.
+  setEndpoints({}, {}, {}, {0}, {});
+
+  test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 2);
+
+  // The membership should be unchanged: still a single backend, and it stays healthy.
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value());
+
+  // Verify that Envoy observes the healthy endpoint.
+  test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 1);
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value());
+}
+
+// Verify endpoint removal using LEDS.
+TEST_P(LedsIntegrationTest, EndpointRemoval) {
+  // Disable active health-checking so Envoy will remove the endpoint even though it
+  // is still healthy.
+  initializeTest(false);
+
+  // Send an update with 2 endpoints in an unknown health state using LEDS.
+  setEndpoints({}, {}, {}, {0, 1}, {});
+
+  test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1);
+  // Without active health-checking the cluster does not wait for a healthy endpoint,
+  // so it is already active.
+  EXPECT_EQ(3, test_server_->gauge("cluster_manager.active_clusters")->value());
+
+  // There should be two backends in the cluster.
+  EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+
+  // Wait for our statically specified listener to become ready, and register its port in the
+  // test framework's downstream listener port map.
+  test_server_->waitUntilListenersReady();
+  registerTestServerPorts({"http"});
+
+  // Remove one of the endpoints.
+  setEndpoints({}, {}, {}, {}, {0});
+  test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 2);
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value());
+}
+
+// Verify that a config removing an unknown endpoint is a no-op (similar to CDS).
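+// The removal is still counted as a successful LEDS update and must leave the
+// membership stats untouched.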
+TEST_P(LedsIntegrationTest, UnknownEndpointRemoval) {
+  initializeTest(true);
+
+  // Send a single endpoint update with an unknown health state using LEDS.
+  setEndpoints({}, {}, {}, {0}, {});
+
+  test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1);
+  // Waiting for the endpoint to become Healthy to end warming and move the cluster to active.
+  EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value());
+  EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value());
+
+  // There should be a single backend in the cluster, and not yet healthy.
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+  EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_healthy")->value());
+
+  // Wait for the first health-check and verify the host is healthy. This should warm the initial
+  // cluster.
+  waitForHealthCheck(0);
+  hosts_upstreams_info_[0].defaultStream()->encodeHeaders(
+      Http::TestResponseHeaderMapImpl{{":status", "200"}}, true);
+  test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 1);
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+
+  // Wait for our statically specified listener to become ready, and register its port in the
+  // test framework's downstream listener port map.
+  test_server_->waitUntilListenersReady();
+  registerTestServerPorts({"http"});
+
+  // The endpoint sent a valid health-check response so the cluster should be active.
+  test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0);
+  test_server_->waitForGaugeEq("cluster_manager.active_clusters", 3);
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value());
+
+  // Remove an endpoint that was never added.
+  setEndpoints({}, {}, {}, {}, {2});
+  test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 2);
+  EXPECT_EQ(0, test_server_->counter("cluster.cluster_0.leds.update_rejected")->value());
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+  EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value());
+}
+
+// Validates that endpoints can be added and then moved to other localities without causing crashes
+// (Primarily as a regression test for https://github.com/envoyproxy/envoy/issues/8764).
+TEST_P(LedsIntegrationTest, MoveEndpointsBetweenLocalities) {
+  // Create 2 localities in the cluster, no health-check as part of this test.
+  initializeTest(false, 2);
+
+  EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value());
+  EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value());
+
+  // Send an endpoint update using LEDS for locality 0.
+  setEndpoints({}, {}, {}, {0}, {}, 0);
+
+  // The update only applies to the first locality, so the cluster should still
+  // be warming.
+  test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1);
+  EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+  EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value());
+  EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value());
+
+  // Send an endpoint update using LEDS for locality 1.
+  setEndpoints({}, {}, {}, {1, 2}, {}, 1);
+
+  // All localities should have endpoints so the cluster warm-up should be over.
+  test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 2);
+  test_server_->waitForGaugeGe("cluster_manager.active_clusters", 3);
+
+  // There should be three backends in the cluster, all healthy as there is no
+  // active health-check.
+  EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+  EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_healthy")->value());
+
+  // Wait for our statically specified listener to become ready, and register its port in the
+  // test framework's downstream listener port map.
+  test_server_->waitUntilListenersReady();
+  registerTestServerPorts({"http"});
+
+  // Move one endpoint from locality1 to locality0.
+  setEndpoints({}, {}, {}, {0, 2}, {}, 0);
+  setEndpoints({}, {}, {}, {}, {2}, 1);
+
+  // Wait for the additional 2 LEDS updates.
+  test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 4);
+
+  EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+  EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_healthy")->value());
+
+  // Move one endpoint from locality0 to locality1, and remove the other endpoint.
+  setEndpoints({}, {}, {}, {}, {2}, 0);
+  setEndpoints({}, {}, {}, {0}, {}, 1);
+
+  // Wait for the additional 2 LEDS updates.
+  test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 6);
+
+  EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+  EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_healthy")->value());
+}
+
+// Verify that an endpoint can be in 2 localities at the same time.
+TEST_P(LedsIntegrationTest, LocalitiesShareEndpoint) {
+  // Create 2 localities in the cluster, no health-check as part of this test.
+  initializeTest(false, 2);
+
+  EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value());
+  EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value());
+
+  // Send an endpoint update using LEDS for locality 0.
+  setEndpoints({}, {}, {}, {0}, {}, 0);
+
+  // The update only applies to the first locality, so the cluster should still
+  // be warming.
+  test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1);
+  EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+  EXPECT_EQ(1, test_server_->gauge("cluster_manager.warming_clusters")->value());
+  EXPECT_EQ(2, test_server_->gauge("cluster_manager.active_clusters")->value());
+
+  // Send an endpoint update using LEDS for locality 1 with a different endpoint.
+  setEndpoints({}, {}, {}, {1}, {}, 1);
+
+  // All localities should have endpoints so the cluster warm-up should be over.
+  test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 2);
+  test_server_->waitForGaugeGe("cluster_manager.active_clusters", 3);
+
+  // There should be 2 hosts in the cluster, all healthy as there is no active health-check.
+  EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value());
+  EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_healthy")->value());
+
+  // Wait for our statically specified listener to become ready, and register its port in the
+  // test framework's downstream listener port map.
+  test_server_->waitUntilListenersReady();
+  registerTestServerPorts({"http"});
+
+  // Send a LEDS update to locality 1 with the same endpoint that is in locality 0.
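+  // Both localities now reference endpoint 0; a host that appears in more than one
+  // locality is only counted once, so membership_total is expected to stay at 2.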
+ setEndpoints({}, {}, {}, {0}, {}, 1); + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 3); + + // There should be 2 hosts in the cluster, all healthy as there isn't active health-check. + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Remove the endpoint from one locality. + setEndpoints({}, {}, {}, {}, {0}, 0); + + // Wait for the additional LEDS update. + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 4); + + // There are 2 endpoints left in locality 1. + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); +} + +// Verify that a host stabilized via active health checking which is first removed from LEDS and +// then fails health checking is removed. +TEST_P(LedsIntegrationTest, RemoveAfterHcFail) { + initializeTest(true); + setEndpoints({}, {}, {}, {0}, {}); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.leds.update_success")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Wait for the first HC and verify the host is healthy. + waitForHealthCheck(0); + hosts_upstreams_info_[0].defaultStream()->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 1); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + + // Clear out the host and verify the host is still healthy. + setEndpoints({}, {}, {}, {}, {0}); + + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.leds.update_success")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + + // Fail HC and verify the host is gone. + waitForHealthCheck(0); + hosts_upstreams_info_[0].defaultStream()->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "503"}, {"connection", "close"}}, true); + test_server_->waitForGaugeEq("cluster.cluster_0.membership_healthy", 0); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_total")->value()); +} + +// Validate that health status updates are consumed from LEDS. +TEST_P(LedsIntegrationTest, HealthUpdate) { + initializeTest(false); + // Initial state, no cluster members. + EXPECT_EQ(0, test_server_->counter("cluster.cluster_0.membership_change")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + // 2 healthy endpoints. + setEndpoints({0, 1}, {}, {}, {}, {}); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.membership_change")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + // Drop to 0/2 healthy endpoints (2 unknown health state). 
+ setEndpoints({}, {}, {0, 1}, {}, {}); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.membership_change")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + // Increase to 1/2 healthy endpoints (host 1 will remain unhealthy). + setEndpoints({0}, {}, {1}, {}, {}); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.membership_change")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + // Add host and modify healthy to 2/3 healthy endpoints. + setEndpoints({2}, {}, {1}, {}, {}); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.membership_change")->value()); + EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + // Modify healthy to 2/3 healthy and 1/3 degraded. + setEndpoints({}, {1}, {}, {}, {}); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.membership_change")->value()); + EXPECT_EQ(3, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(2, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_degraded")->value()); +} + +// Validates that in a LEDS response that contains 2 endpoints with the same +// address, only the first will be used. +TEST_P(LedsIntegrationTest, LedsSameAddressEndpoints) { + initializeTest(false); + + // Send a response with 2 endpoints with a different resource name but that + // map to the same address. + const auto& collection_prefix = localities_prefixes_[0]; + absl::flat_hash_map updated_endpoints; + std::vector removed_endpoints; + + const std::vector endpoints_names{ + absl::StrCat(collection_prefix, "endpoint0"), + absl::StrCat(collection_prefix, "endpoint1"), + }; + + for (const auto& endpoint_name : endpoints_names) { + envoy::config::endpoint::v3::LbEndpoint endpoint; + // Shift fake_upstreams_ by 2 (due to EDS and LEDS fake upstreams). + setUpstreamAddress(2, endpoint); + endpoint.set_health_status(envoy::config::core::v3::HEALTHY); + updated_endpoints.emplace(endpoint_name, endpoint); + } + + sendDeltaLedsResponse(updated_endpoints, removed_endpoints, "7", 0); + + // Await for update (LEDS Ack). + EXPECT_TRUE(compareDeltaDiscoveryRequest( + Config::TypeUrl::get().LbEndpoint, {}, {}, + leds_upstream_info_.stream_by_resource_name_[localities_prefixes_[0]])); + + // Verify that the update is successful. + test_server_->waitForCounterEq("cluster.cluster_0.leds.update_success", 1); + + // Wait for our statically specified listener to become ready, and register its port in the + // test framework's downstream listener port map. + test_server_->waitUntilListenersReady(); + registerTestServerPorts({"http"}); + + // Verify that only one endpoint was processed. 
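+  // Only the first resource that maps to the shared address is kept, so the cluster
+  // should report a single healthy member once warming completes.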
+ test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + test_server_->waitForGaugeEq("cluster_manager.active_clusters", 3); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); +} + +} // namespace +} // namespace Envoy diff --git a/test/integration/listener_lds_integration_test.cc b/test/integration/listener_lds_integration_test.cc index 47d7859bf5f0..06eacbaa0758 100644 --- a/test/integration/listener_lds_integration_test.cc +++ b/test/integration/listener_lds_integration_test.cc @@ -250,14 +250,14 @@ TEST_P(ListenerIntegrationTest, RejectsUnsupportedTypedPerFilterConfig) { route: cluster: cluster_0 typed_per_filter_config: - envoy.filters.http.health_check: - "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck - pass_through_mode: false + set-response-code: + "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig + code: 403 http_filters: - - name: envoy.filters.http.health_check + - name: set-response-code typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck - pass_through_mode: false + "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig + code: 402 - name: envoy.filters.http.router )EOF"); sendLdsResponse({listener}, "2"); @@ -554,6 +554,11 @@ TEST_P(ListenerIntegrationTest, ChangeListenerAddress) { EXPECT_EQ(request_size, upstream_request_->bodyLength()); } +struct PerConnection { + std::string response_; + std::unique_ptr client_conn_; + FakeRawConnectionPtr upstream_conn_; +}; class RebalancerTest : public testing::TestWithParam, public BaseIntegrationTest { public: @@ -585,10 +590,7 @@ class RebalancerTest : public testing::TestWithParamset_value(false); virtual_listener_config.set_name("balanced_target_listener"); virtual_listener_config.mutable_connection_balance_config()->mutable_exact_balance(); - - // TODO(lambdai): Replace by getLoopbackAddressUrlString to emulate the real world. - *virtual_listener_config.mutable_address()->mutable_socket_address()->mutable_address() = - "127.0.0.2"; + *virtual_listener_config.mutable_stat_prefix() = target_listener_prefix_; virtual_listener_config.mutable_address()->mutable_socket_address()->set_port_value(80); }); BaseIntegrationTest::initialize(); @@ -604,14 +606,66 @@ class RebalancerTest : public testing::TestWithParam client_conn_; - FakeRawConnectionPtr upstream_conn_; + void verifyBalance(uint32_t repeats = 10) { + // The balancer is balanced as per active connection instead of total connection. + // The below vector maintains all the connections alive. 
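+    // Each PerConnection entry holds the client connection, its buffered response and
+    // the matching upstream connection, so nothing is torn down while the remaining
+    // connections are still being created and balanced.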
+    std::vector<PerConnection> connections;
+    for (uint32_t i = 0; i < repeats * concurrency_; ++i) {
+      connections.emplace_back();
+      connections.back().client_conn_ =
+          createConnectionAndWrite("dummy", connections.back().response_);
+      connections.back().client_conn_->waitForConnection();
+      ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(connections.back().upstream_conn_));
+    }
+    for (auto& conn : connections) {
+      conn.client_conn_->close();
+      while (!conn.client_conn_->closed()) {
+        dispatcher_->run(Event::Dispatcher::RunType::NonBlock);
+      }
+    }
+    ASSERT_EQ(TestUtility::findCounter(test_server_->statStore(),
+                                       absl::StrCat("listener.", target_listener_prefix_,
+                                                    ".worker_0.downstream_cx_total"))
+                  ->value(),
+              repeats);
+    ASSERT_EQ(TestUtility::findCounter(test_server_->statStore(),
+                                       absl::StrCat("listener.", target_listener_prefix_,
+                                                    ".worker_1.downstream_cx_total"))
+                  ->value(),
+              repeats);
+  }
+
+  // The stats prefix that is shared by the ipv6 and ipv4 listeners.
+  std::string target_listener_prefix_{"balanced_listener"};
 };
 
+TEST_P(RebalancerTest, BindToPortUpdate) {
+  concurrency_ = 2;
+  initialize();
+
+  ConfigHelper new_config_helper(
+      version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(config_helper_.bootstrap()));
+
+  new_config_helper.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap)
+                                          -> void {
+    // This virtual listener needs updating.
+    auto& virtual_listener_config = *bootstrap.mutable_static_resources()->mutable_listeners(1);
+    *virtual_listener_config.mutable_address()->mutable_socket_address()->mutable_address() =
+        bootstrap.static_resources().listeners(0).address().socket_address().address();
+    (*(*virtual_listener_config.mutable_metadata()->mutable_filter_metadata())["random_filter_name"]
+          .mutable_fields())["random_key"]
+        .set_number_value(2);
+  });
+  // Create an LDS response with the new config, and reload config.
+  new_config_helper.setLds("1");
+
+  test_server_->waitForCounterEq("listener_manager.listener_modified", 1);
+  test_server_->waitForGaugeEq("listener_manager.total_listeners_draining", 0);
+
+  verifyBalance();
+}
+
 // Verify the connections are distributed evenly on the 2 worker threads of the redirected
 // listener.
 // Currently flaky because the virtual listener create listen socket anyway despite the socket is
@@ -620,36 +674,8 @@ TEST_P(RebalancerTest, DISABLED_RedirectConnectionIsBalancedOnDestinationListene
   auto ip_address_str =
       Network::Test::getLoopbackAddressUrlString(TestEnvironment::getIpVersionsForTest().front());
   concurrency_ = 2;
-  int repeats = 10;
   initialize();
-
-  // The balancer is balanced as per active connection instead of total connection.
-  // The below vector maintains all the connections alive.
- std::vector connections; - for (uint32_t i = 0; i < repeats * concurrency_; ++i) { - connections.emplace_back(); - connections.back().client_conn_ = - createConnectionAndWrite("dummy", connections.back().response_); - connections.back().client_conn_->waitForConnection(); - ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(connections.back().upstream_conn_)); - } - for (auto& conn : connections) { - conn.client_conn_->close(); - while (!conn.client_conn_->closed()) { - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); - } - } - - ASSERT_EQ(TestUtility::findCounter( - test_server_->statStore(), - absl::StrCat("listener.", ip_address_str, "_80.worker_0.downstream_cx_total")) - ->value(), - repeats); - ASSERT_EQ(TestUtility::findCounter( - test_server_->statStore(), - absl::StrCat("listener.", ip_address_str, "_80.worker_1.downstream_cx_total")) - ->value(), - repeats); + verifyBalance(); } INSTANTIATE_TEST_SUITE_P(IpVersions, RebalancerTest, diff --git a/test/integration/load_balancers/custom_lb_policy.h b/test/integration/load_balancers/custom_lb_policy.h index 55e5e60cef81..132e5e5f03b7 100644 --- a/test/integration/load_balancers/custom_lb_policy.h +++ b/test/integration/load_balancers/custom_lb_policy.h @@ -29,6 +29,14 @@ class ThreadAwareLbImpl : public Upstream::ThreadAwareLoadBalancer { Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override { return nullptr; } + OptRef lifetimeCallbacks() override { + return {}; + } + absl::optional + selectExistingConnection(Upstream::LoadBalancerContext*, const Upstream::Host&, + std::vector&) override { + return {}; + } const Upstream::HostSharedPtr host_; }; diff --git a/test/integration/multiplexed_integration_test.cc b/test/integration/multiplexed_integration_test.cc index 27dfeff7580c..730639e26117 100644 --- a/test/integration/multiplexed_integration_test.cc +++ b/test/integration/multiplexed_integration_test.cc @@ -162,8 +162,7 @@ TEST_P(Http2IntegrationTest, CodecStreamIdleTimeout) { } TEST_P(Http2IntegrationTest, Http2DownstreamKeepalive) { - // TODO(#16751) Need to support keepalive. - EXCLUDE_DOWNSTREAM_HTTP3; + EXCLUDE_DOWNSTREAM_HTTP3; // Http3 keepalive doesn't timeout and close connection. constexpr uint64_t interval_ms = 1; constexpr uint64_t timeout_ms = 250; config_helper_.addConfigModifier( @@ -954,7 +953,7 @@ TEST_P(Http2IntegrationTest, BadFrame) { // Send client headers, a GoAway and then a body and ensure the full request and // response are received. 
TEST_P(Http2IntegrationTest, GoAway) { - config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); + autonomous_upstream_ = true; initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -1880,4 +1879,35 @@ TEST_P(Http2IntegrationTest, InvalidTrailers) { EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("invalid")); } +TEST_P(Http2IntegrationTest, InconsistentContentLength) { + useAccessLog("%RESPONSE_CODE_DETAILS%"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"content-length", "1025"}}); + + auto response = std::move(encoder_decoder.second); + request_encoder_ = &encoder_decoder.first; + codec_client_->sendData(*request_encoder_, 1024, false); + codec_client_->sendTrailers(*request_encoder_, + Http::TestRequestTrailerMapImpl{{"trailer", "value"}}); + + // Inconsistency in content-length header and the actually body length should be treated as a + // stream error. + ASSERT_TRUE(response->waitForReset()); + // http3.inconsistent_content_length. + if (downstreamProtocol() == Http::CodecType::HTTP3) { + EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->resetReason()); + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("inconsistent_content_length")); + } else { + EXPECT_EQ(Http::StreamResetReason::ConnectionTermination, response->resetReason()); + // http2.violation.of.messaging.rule + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("violation")); + } +} + } // namespace Envoy diff --git a/test/integration/multiplexed_upstream_integration_test.cc b/test/integration/multiplexed_upstream_integration_test.cc index c55f1b22ff6c..4acf747d777c 100644 --- a/test/integration/multiplexed_upstream_integration_test.cc +++ b/test/integration/multiplexed_upstream_integration_test.cc @@ -193,65 +193,6 @@ TEST_P(Http2UpstreamIntegrationTest, BidirectionalStreamingReset) { EXPECT_EQ(1, downstreamTxResetCounterValue()); } -void Http2UpstreamIntegrationTest::simultaneousRequest(uint32_t request1_bytes, - uint32_t request2_bytes, - uint32_t response1_bytes, - uint32_t response2_bytes) { - FakeStreamPtr upstream_request1; - FakeStreamPtr upstream_request2; - initialize(); - codec_client_ = makeHttpConnection(lookupPort("http")); - - // Start request 1 - auto encoder_decoder1 = - codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, - {":path", "/test/long/url"}, - {":scheme", "http"}, - {":authority", "host"}}); - Http::RequestEncoder* encoder1 = &encoder_decoder1.first; - auto response1 = std::move(encoder_decoder1.second); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request1)); - - // Start request 2 - auto encoder_decoder2 = - codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, - {":path", "/test/long/url"}, - {":scheme", "http"}, - {":authority", "host"}}); - Http::RequestEncoder* encoder2 = &encoder_decoder2.first; - auto response2 = std::move(encoder_decoder2.second); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request2)); - - // Finish request 1 - codec_client_->sendData(*encoder1, request1_bytes, true); - ASSERT_TRUE(upstream_request1->waitForEndStream(*dispatcher_)); - - // Finish request 2 - 
codec_client_->sendData(*encoder2, request2_bytes, true); - ASSERT_TRUE(upstream_request2->waitForEndStream(*dispatcher_)); - - // Respond to request 2 - upstream_request2->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); - upstream_request2->encodeData(response2_bytes, true); - ASSERT_TRUE(response2->waitForEndStream()); - EXPECT_TRUE(upstream_request2->complete()); - EXPECT_EQ(request2_bytes, upstream_request2->bodyLength()); - EXPECT_TRUE(response2->complete()); - EXPECT_EQ("200", response2->headers().getStatusValue()); - EXPECT_EQ(response2_bytes, response2->body().size()); - - // Respond to request 1 - upstream_request1->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); - upstream_request1->encodeData(response1_bytes, true); - ASSERT_TRUE(response1->waitForEndStream()); - EXPECT_TRUE(upstream_request1->complete()); - EXPECT_EQ(request1_bytes, upstream_request1->bodyLength()); - EXPECT_TRUE(response1->complete()); - EXPECT_EQ("200", response1->headers().getStatusValue()); - EXPECT_EQ(response1_bytes, response1->body().size()); -} - TEST_P(Http2UpstreamIntegrationTest, SimultaneousRequest) { simultaneousRequest(1024, 512, 1023, 513); } @@ -261,32 +202,10 @@ TEST_P(Http2UpstreamIntegrationTest, LargeSimultaneousRequestWithBufferLimits) { simultaneousRequest(1024 * 20, 1024 * 14 + 2, 1024 * 10 + 5, 1024 * 16); } -TEST_P(Http2UpstreamIntegrationTest, SimultaneousRequestAlpn) { - if (upstreamProtocol() == Http::CodecType::HTTP3) { - // TODO(alyssawilk) In order to use HTTP/3, and alt-svc entry must exist in the alternate - // protocols cache, but currently there is no easy way to initialize the test with this state. - return; - } - - use_alpn_ = true; - simultaneousRequest(1024, 512, 1023, 513); -} - -TEST_P(Http2UpstreamIntegrationTest, LargeSimultaneousRequestWithBufferLimitsAlpn) { - if (upstreamProtocol() == Http::CodecType::HTTP3) { - // TODO(alyssawilk) In order to use HTTP/3, and alt-svc entry must exist in the alternate - // protocols cache, but currently there is no easy way to initialize the test with this state. - return; - } - - use_alpn_ = true; - config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream. 
- simultaneousRequest(1024 * 20, 1024 * 14 + 2, 1024 * 10 + 5, 1024 * 16); -} - -void Http2UpstreamIntegrationTest::manySimultaneousRequests(uint32_t request_bytes, uint32_t) { +void Http2UpstreamIntegrationTest::manySimultaneousRequests(uint32_t request_bytes, + uint32_t max_response_bytes, + uint32_t num_requests) { TestRandomGenerator rand; - const uint32_t num_requests = 50; std::vector encoders; std::vector responses; std::vector response_bytes; @@ -295,7 +214,7 @@ void Http2UpstreamIntegrationTest::manySimultaneousRequests(uint32_t request_byt codec_client_ = makeHttpConnection(lookupPort("http")); for (uint32_t i = 0; i < num_requests; ++i) { - response_bytes.push_back(rand.random() % (1024 * 2)); + response_bytes.push_back(rand.random() % (max_response_bytes)); auto headers = Http::TestRequestHeaderMapImpl{ {":method", "POST"}, {":path", "/test/long/url"}, @@ -329,7 +248,25 @@ void Http2UpstreamIntegrationTest::manySimultaneousRequests(uint32_t request_byt } TEST_P(Http2UpstreamIntegrationTest, ManySimultaneousRequest) { - manySimultaneousRequests(1024, 1024); + manySimultaneousRequests(1024, 1024, 100); +} + +TEST_P(Http2UpstreamIntegrationTest, TooManySimultaneousRequests) { + manySimultaneousRequests(1024, 1024, 200); +} + +TEST_P(Http2UpstreamIntegrationTest, ManySimultaneousRequestsTightUpstreamLimits) { + if (upstreamProtocol() == Http::CodecType::HTTP2) { + return; + } + envoy::config::core::v3::Http2ProtocolOptions config; + config.mutable_max_concurrent_streams()->set_value(1); + mergeOptions(config); + envoy::config::listener::v3::QuicProtocolOptions options; + options.mutable_quic_protocol_options()->mutable_max_concurrent_streams()->set_value(1); + mergeOptions(options); + + manySimultaneousRequests(1024, 1024, 10); } TEST_P(Http2UpstreamIntegrationTest, ManyLargeSimultaneousRequestWithBufferLimits) { @@ -555,8 +492,6 @@ TEST_P(Http2UpstreamIntegrationTest, ConfigureHttpOverGrpcLogs) { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { - const std::string access_log_name = - TestEnvironment::temporaryPath(TestUtility::uniqueFilename()); // Configure just enough of an upstream access log to reference the upstream headers. const std::string yaml_string = R"EOF( name: router @@ -651,51 +586,4 @@ TEST_P(Http2UpstreamIntegrationTest, UpstreamGoaway) { cleanupUpstreamAndDownstream(); } -#ifdef ENVOY_ENABLE_QUIC - -class MixedUpstreamIntegrationTest : public Http2UpstreamIntegrationTest { -protected: - void initialize() override { - use_alpn_ = true; - Http2UpstreamIntegrationTest::initialize(); - } - void createUpstreams() override { - ASSERT_EQ(upstreamProtocol(), Http::CodecType::HTTP3); - ASSERT_EQ(fake_upstreams_count_, 1); - ASSERT_FALSE(autonomous_upstream_); - - if (use_http2_) { - auto config = configWithType(Http::CodecType::HTTP2); - Network::TransportSocketFactoryPtr factory = createUpstreamTlsContext(config); - addFakeUpstream(std::move(factory), Http::CodecType::HTTP2); - } else { - auto config = configWithType(Http::CodecType::HTTP3); - Network::TransportSocketFactoryPtr factory = createUpstreamTlsContext(config); - addFakeUpstream(std::move(factory), Http::CodecType::HTTP3); - } - } - - bool use_http2_{false}; -}; - -// TODO(alyssawilk) In order to use HTTP/3, and alt-svc entry must exist in the alternate -// protocols cache, but currently there is no easy way to initialize the test with this state. 
-TEST_P(MixedUpstreamIntegrationTest, DISABLED_SimultaneousRequestAutoWithHttp3) { - use_alternate_protocols_cache_ = true; - testRouterRequestAndResponseWithBody(0, 0, false); -} - -TEST_P(MixedUpstreamIntegrationTest, DISABLED_SimultaneousRequestAutoWithHttp2) { - use_alternate_protocols_cache_ = true; - use_http2_ = true; - testRouterRequestAndResponseWithBody(0, 0, false); -} - -INSTANTIATE_TEST_SUITE_P(Protocols, MixedUpstreamIntegrationTest, - testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( - {Http::CodecType::HTTP2}, {Http::CodecType::HTTP3})), - HttpProtocolIntegrationTest::protocolTestParamsToString); - -#endif - } // namespace Envoy diff --git a/test/integration/multiplexed_upstream_integration_test.h b/test/integration/multiplexed_upstream_integration_test.h index 6abcc7946024..14aeb56a49c0 100644 --- a/test/integration/multiplexed_upstream_integration_test.h +++ b/test/integration/multiplexed_upstream_integration_test.h @@ -9,18 +9,15 @@ class Http2UpstreamIntegrationTest : public HttpProtocolIntegrationTest { public: void initialize() override { upstream_tls_ = true; - config_helper_.configureUpstreamTls(use_alpn_, upstreamProtocol() == Http::CodecType::HTTP3, - use_alternate_protocols_cache_); + config_helper_.configureUpstreamTls(use_alpn_, upstreamProtocol() == Http::CodecType::HTTP3); HttpProtocolIntegrationTest::initialize(); } void bidirectionalStreaming(uint32_t bytes); - void simultaneousRequest(uint32_t request1_bytes, uint32_t request2_bytes, - uint32_t response1_bytes, uint32_t response2_bytes); - void manySimultaneousRequests(uint32_t request_bytes, uint32_t response_bytes); + void manySimultaneousRequests(uint32_t request_bytes, uint32_t max_response_bytes, + uint32_t num_streams = 50); bool use_alpn_{false}; - bool use_alternate_protocols_cache_{false}; uint64_t upstreamRxResetCounterValue(); uint64_t upstreamTxResetCounterValue(); diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index a33c7bc0e83d..50a7d249fe9c 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -134,7 +134,10 @@ TEST_P(DownstreamProtocolIntegrationTest, RouterClusterNotFound503) { } // Add a route which redirects HTTP to HTTPS, and verify Envoy sends a 301 -TEST_P(DownstreamProtocolIntegrationTest, RouterRedirect) { +TEST_P(DownstreamProtocolIntegrationTest, RouterRedirectHttpRequest) { + autonomous_upstream_ = true; + useAccessLog("%DOWNSTREAM_WIRE_BYTES_SENT% %DOWNSTREAM_WIRE_BYTES_RECEIVED% " + "%DOWNSTREAM_HEADER_BYTES_SENT% %DOWNSTREAM_HEADER_BYTES_RECEIVED%"); auto host = config_helper_.createVirtualHost("www.redirect.com", "/"); host.set_require_tls(envoy::config::route::v3::VirtualHost::ALL); config_helper_.addVirtualHost(host); @@ -143,14 +146,20 @@ TEST_P(DownstreamProtocolIntegrationTest, RouterRedirect) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/foo", "", downstream_protocol_, version_, "www.redirect.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("301", response->headers().getStatusValue()); - EXPECT_EQ("https://www.redirect.com/foo", - response->headers().get(Http::Headers::get().Location)[0]->value().getStringView()); + if (downstream_protocol_ <= Http::CodecType::HTTP2) { + EXPECT_EQ("301", response->headers().getStatusValue()); + EXPECT_EQ("https://www.redirect.com/foo", + response->headers().get(Http::Headers::get().Location)[0]->value().getStringView()); + 
expectDownstreamBytesSentAndReceived(BytesCountExpectation(145, 45, 111, 23), + BytesCountExpectation(0, 30, 0, 30)); + } else { + // All QUIC requests use https, and should not be redirected. (Even those sent with http scheme + // will be overridden to https by HCM.) + EXPECT_EQ("200", response->headers().getStatusValue()); + } } TEST_P(ProtocolIntegrationTest, UnknownResponsecode) { - config_helper_.addRuntimeOverride( - "envoy.reloadable_features.dont_add_content_length_for_bodiless_requests", "true"); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -164,48 +173,6 @@ TEST_P(ProtocolIntegrationTest, UnknownResponsecode) { EXPECT_EQ("600", response->headers().getStatusValue()); } -// Add a health check filter and verify correct computation of health based on upstream status. -TEST_P(DownstreamProtocolIntegrationTest, ComputedHealthCheck) { - config_helper_.prependFilter(R"EOF( -name: health_check -typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck - pass_through_mode: false - cluster_min_healthy_percentages: - example_cluster_name: { value: 75 } -)EOF"); - initialize(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ - {":method", "GET"}, {":path", "/healthcheck"}, {":scheme", "http"}, {":authority", "host"}}); - ASSERT_TRUE(response->waitForEndStream()); - - EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().getStatusValue()); -} - -// Add a health check filter and verify correct computation of health based on upstream status. -TEST_P(DownstreamProtocolIntegrationTest, ModifyBuffer) { - config_helper_.prependFilter(R"EOF( -name: health_check -typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck - pass_through_mode: false - cluster_min_healthy_percentages: - example_cluster_name: { value: 75 } -)EOF"); - initialize(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ - {":method", "GET"}, {":path", "/healthcheck"}, {":scheme", "http"}, {":authority", "host"}}); - ASSERT_TRUE(response->waitForEndStream()); - - EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().getStatusValue()); -} - // Verifies behavior for https://github.com/envoyproxy/envoy/pull/11248 TEST_P(ProtocolIntegrationTest, AddBodyToRequestAndWaitForIt) { config_helper_.prependFilter(R"EOF( @@ -535,6 +502,27 @@ TEST_P(ProtocolIntegrationTest, 304HeadResponseWithoutContentLengthLegacy) { EXPECT_TRUE(response->headers().get(Http::LowerCaseString("content-length")).empty()); } +// Tests that the response to a HEAD request can have content-length header but empty body. 
+TEST_P(ProtocolIntegrationTest, 200HeadResponseWithContentLength) { + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "HEAD"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"if-none-match", "\"1234567890\""}}); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}, {"content-length", "123"}}, true); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_EQ( + "123", + response->headers().get(Http::LowerCaseString("content-length"))[0]->value().getStringView()); +} + // Tests missing headers needed for H/1 codec first line. TEST_P(DownstreamProtocolIntegrationTest, DownstreamRequestWithFaultyFilter) { if (upstreamProtocol() == Http::CodecType::HTTP3) { @@ -627,6 +615,52 @@ TEST_P(DownstreamProtocolIntegrationTest, MissingHeadersLocalReply) { EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("InvalidHeaderFilter_ready\n")); } +TEST_P(DownstreamProtocolIntegrationTest, MissingHeadersLocalReplyDownstreamBytesCount) { + useAccessLog("%DOWNSTREAM_WIRE_BYTES_SENT% %DOWNSTREAM_WIRE_BYTES_RECEIVED% " + "%DOWNSTREAM_HEADER_BYTES_SENT% %DOWNSTREAM_HEADER_BYTES_RECEIVED%\n"); + config_helper_.addFilter("{ name: invalid-header-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Missing method + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"remove-method", "yes"}, + {"send-reply", "yes"}}); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + expectDownstreamBytesSentAndReceived(BytesCountExpectation(90, 80, 71, 46), + BytesCountExpectation(0, 58, 0, 58)); +} + +TEST_P(DownstreamProtocolIntegrationTest, MissingHeadersLocalReplyUpstreamBytesCount) { + useAccessLog("%UPSTREAM_WIRE_BYTES_SENT% %UPSTREAM_WIRE_BYTES_RECEIVED% " + "%UPSTREAM_HEADER_BYTES_SENT% %UPSTREAM_HEADER_BYTES_RECEIVED%\n"); + config_helper_.addFilter("{ name: invalid-header-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Missing method + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"remove-method", "yes"}, + {"send-reply", "yes"}}); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + expectUpstreamBytesSentAndReceived(BytesCountExpectation(0, 0, 0, 0), + BytesCountExpectation(0, 0, 0, 0)); +} + TEST_P(DownstreamProtocolIntegrationTest, MissingHeadersLocalReplyWithBody) { useAccessLog("%RESPONSE_CODE_DETAILS%"); config_helper_.prependFilter("{ name: invalid-header-filter, typed_config: { \"@type\": " @@ -649,6 +683,30 @@ TEST_P(DownstreamProtocolIntegrationTest, MissingHeadersLocalReplyWithBody) { EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("InvalidHeaderFilter_ready\n")); } 
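The bytes-count tests above pair a four-token access-log format (wire bytes sent, wire bytes received, header bytes sent, header bytes received) with assertions such as expectDownstreamBytesSentAndReceived(BytesCountExpectation(90, 80, 71, 46), BytesCountExpectation(0, 58, 0, 58)). The expectation type and the comparison helper live elsewhere in the test framework and are not part of this patch; the sketch below only illustrates the assumed mapping, reading the four values in the same order as the format string. Every name in it is a hypothetical stand-in, not the helper these tests call, and the two expectation arguments in the real tests cover different codec variants whose pairing is decided by the framework, not shown here.

#include <cassert>
#include <cstdint>
#include <sstream>
#include <string>

// Hypothetical stand-in for the framework's expectation type; the field order is
// assumed to follow the access-log format string:
// WIRE_BYTES_SENT WIRE_BYTES_RECEIVED HEADER_BYTES_SENT HEADER_BYTES_RECEIVED.
struct BytesCountExpectation {
  std::uint64_t wire_bytes_sent;
  std::uint64_t wire_bytes_received;
  std::uint64_t header_bytes_sent;
  std::uint64_t header_bytes_received;
};

// Parses an access-log line such as "90 80 71 46" and compares it field by
// field against the expectation.
bool matchesBytesCount(const std::string& access_log_line, const BytesCountExpectation& want) {
  std::istringstream in(access_log_line);
  std::uint64_t wire_sent = 0, wire_received = 0, header_sent = 0, header_received = 0;
  if (!(in >> wire_sent >> wire_received >> header_sent >> header_received)) {
    return false; // Malformed or truncated log line.
  }
  return wire_sent == want.wire_bytes_sent && wire_received == want.wire_bytes_received &&
         header_sent == want.header_bytes_sent && header_received == want.header_bytes_received;
}

int main() {
  // Values borrowed from MissingHeadersLocalReplyDownstreamBytesCount above.
  assert(matchesBytesCount("90 80 71 46", {90, 80, 71, 46}));
  assert(!matchesBytesCount("90 80 71 47", {90, 80, 71, 46}));
  return 0;
}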
+TEST_P(DownstreamProtocolIntegrationTest, MissingHeadersLocalReplyWithBodyBytesCount) { + useAccessLog("%DOWNSTREAM_WIRE_BYTES_SENT% %DOWNSTREAM_WIRE_BYTES_RECEIVED% " + "%DOWNSTREAM_HEADER_BYTES_SENT% %DOWNSTREAM_HEADER_BYTES_RECEIVED%\n"); + config_helper_.addFilter("{ name: invalid-header-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Missing method + auto response = + codec_client_->makeRequestWithBody(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"remove-method", "yes"}, + {"send-reply", "yes"}}, + 1024); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + expectDownstreamBytesSentAndReceived(BytesCountExpectation(109, 1144, 90, 73), + BytesCountExpectation(0, 58, 0, 58)); +} + // Regression test for https://github.com/envoyproxy/envoy/issues/10270 TEST_P(ProtocolIntegrationTest, LongHeaderValueWithSpaces) { // Header with at least 20kb of spaces surrounded by non-whitespace characters to ensure that @@ -692,6 +750,8 @@ TEST_P(ProtocolIntegrationTest, Retry) { auto& cluster = *bootstrap.mutable_static_resources()->mutable_clusters(0); cluster.mutable_track_cluster_stats()->set_request_response_sizes(true); }); + useAccessLog("%UPSTREAM_WIRE_BYTES_SENT% %UPSTREAM_WIRE_BYTES_RECEIVED% " + "%UPSTREAM_HEADER_BYTES_SENT% %UPSTREAM_HEADER_BYTES_RECEIVED%\n"); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); auto response = codec_client_->makeRequestWithBody( @@ -748,6 +808,12 @@ TEST_P(ProtocolIntegrationTest, Retry) { EXPECT_EQ(find_histo_sample_count("cluster.cluster_0.upstream_rq_headers_size"), 2); EXPECT_EQ(find_histo_sample_count("cluster.cluster_0.upstream_rs_headers_size"), 2); + + // The two requests are sent with https scheme rather than http for QUIC downstream. + const size_t quic_https_extra_bytes = (downstreamProtocol() == Http::CodecType::HTTP3 ? 2u : 0u); + expectUpstreamBytesSentAndReceived( + BytesCountExpectation(2550 + quic_https_extra_bytes, 635, 414 + quic_https_extra_bytes, 54), + BytesCountExpectation(2262, 548, 184, 27)); } TEST_P(ProtocolIntegrationTest, RetryStreaming) { @@ -3166,6 +3232,160 @@ TEST_P(ProtocolIntegrationTest, ResetLargeResponseUponReceivingHeaders) { codec_client_->close(); } +TEST_P(ProtocolIntegrationTest, HeaderOnlyBytesCountUpstream) { + if (downstreamProtocol() != Http::CodecType::HTTP2) { + return; + } + useAccessLog("%UPSTREAM_WIRE_BYTES_SENT% %UPSTREAM_WIRE_BYTES_RECEIVED% " + "%UPSTREAM_HEADER_BYTES_SENT% %UPSTREAM_HEADER_BYTES_RECEIVED%\n"); + testRouterRequestAndResponseWithBody(0, 0, false); + expectUpstreamBytesSentAndReceived(BytesCountExpectation(251, 38, 219, 18), + BytesCountExpectation(168, 13, 168, 13)); +} + +TEST_P(ProtocolIntegrationTest, HeaderOnlyBytesCountDownstream) { + if (upstreamProtocol() != Http::CodecType::HTTP2) { + return; + } + useAccessLog("%DOWNSTREAM_WIRE_BYTES_SENT% %DOWNSTREAM_WIRE_BYTES_RECEIVED% " + "%DOWNSTREAM_HEADER_BYTES_SENT% %DOWNSTREAM_HEADER_BYTES_RECEIVED%"); + testRouterRequestAndResponseWithBody(0, 0, false); + expectDownstreamBytesSentAndReceived(BytesCountExpectation(124, 111, 105, 75), + BytesCountExpectation(68, 64, 68, 64)); +} + +TEST_P(ProtocolIntegrationTest, HeaderAndBodyWireBytesCountUpstream) { + // we only care about upstream protocol. 
+ if (downstreamProtocol() != Http::CodecType::HTTP2) { + return; + } + useAccessLog("%UPSTREAM_WIRE_BYTES_SENT% %UPSTREAM_WIRE_BYTES_RECEIVED% " + "%UPSTREAM_HEADER_BYTES_SENT% %UPSTREAM_HEADER_BYTES_RECEIVED%\n"); + testRouterRequestAndResponseWithBody(100, 100, false); + expectUpstreamBytesSentAndReceived(BytesCountExpectation(371, 158, 228, 27), + BytesCountExpectation(277, 122, 168, 13)); +} + +TEST_P(ProtocolIntegrationTest, HeaderAndBodyWireBytesCountDownstream) { + // we only care about upstream protocol. + if (upstreamProtocol() != Http::CodecType::HTTP2) { + return; + } + useAccessLog("%DOWNSTREAM_WIRE_BYTES_SENT% %DOWNSTREAM_WIRE_BYTES_RECEIVED% " + "%DOWNSTREAM_HEADER_BYTES_SENT% %DOWNSTREAM_HEADER_BYTES_RECEIVED%\n"); + testRouterRequestAndResponseWithBody(100, 100, false); + expectDownstreamBytesSentAndReceived(BytesCountExpectation(244, 231, 114, 84), + BytesCountExpectation(177, 173, 68, 64)); +} + +TEST_P(ProtocolIntegrationTest, TrailersWireBytesCountUpstream) { + // we only care about upstream protocol. + if (downstreamProtocol() != Http::CodecType::HTTP2) { + return; + } + useAccessLog("%UPSTREAM_WIRE_BYTES_SENT% %UPSTREAM_WIRE_BYTES_RECEIVED% " + "%UPSTREAM_HEADER_BYTES_SENT% %UPSTREAM_HEADER_BYTES_RECEIVED%\n"); + config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1()); + config_helper_.addConfigModifier(setEnableUpstreamTrailersHttp1()); + + testTrailers(10, 20, true, true); + + expectUpstreamBytesSentAndReceived(BytesCountExpectation(248, 120, 196, 67), + BytesCountExpectation(172, 81, 154, 52)); +} + +TEST_P(ProtocolIntegrationTest, TrailersWireBytesCountDownstream) { + // we only care about upstream protocol. + if (upstreamProtocol() != Http::CodecType::HTTP2) { + return; + } + useAccessLog("%DOWNSTREAM_WIRE_BYTES_SENT% %DOWNSTREAM_WIRE_BYTES_RECEIVED% " + "%DOWNSTREAM_HEADER_BYTES_SENT% %DOWNSTREAM_HEADER_BYTES_RECEIVED%\n"); + config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1()); + config_helper_.addConfigModifier(setEnableUpstreamTrailersHttp1()); + + testTrailers(10, 20, true, true); + + expectDownstreamBytesSentAndReceived(BytesCountExpectation(206, 132, 156, 76), + BytesCountExpectation(136, 86, 107, 67)); +} + +TEST_P(ProtocolIntegrationTest, DownstreamDisconnectBeforeRequestCompleteWireBytesCountUpstream) { + // we only care about upstream protocol. + if (downstreamProtocol() != Http::CodecType::HTTP2) { + return; + } + useAccessLog("%UPSTREAM_WIRE_BYTES_SENT% %UPSTREAM_WIRE_BYTES_RECEIVED% " + "%UPSTREAM_HEADER_BYTES_SENT% %UPSTREAM_HEADER_BYTES_RECEIVED%\n"); + + testRouterDownstreamDisconnectBeforeRequestComplete(nullptr); + + expectUpstreamBytesSentAndReceived(BytesCountExpectation(187, 0, 156, 0), + BytesCountExpectation(114, 0, 114, 0)); +} + +TEST_P(ProtocolIntegrationTest, DownstreamDisconnectBeforeRequestCompleteWireBytesCountDownstream) { + // we only care about upstream protocol. + if (upstreamProtocol() != Http::CodecType::HTTP2) { + return; + } + useAccessLog("%DOWNSTREAM_WIRE_BYTES_SENT% %DOWNSTREAM_WIRE_BYTES_RECEIVED% " + "%DOWNSTREAM_HEADER_BYTES_SENT% %DOWNSTREAM_HEADER_BYTES_RECEIVED%\n"); + + testRouterDownstreamDisconnectBeforeRequestComplete(nullptr); + + expectDownstreamBytesSentAndReceived(BytesCountExpectation(0, 71, 0, 38), + BytesCountExpectation(0, 28, 0, 28)); +} + +TEST_P(ProtocolIntegrationTest, UpstreamDisconnectBeforeRequestCompleteWireBytesCountUpstream) { + // we only care about upstream protocol. 
+ if (downstreamProtocol() != Http::CodecType::HTTP2) { + return; + } + useAccessLog("%UPSTREAM_WIRE_BYTES_SENT% %UPSTREAM_WIRE_BYTES_RECEIVED% " + "%UPSTREAM_HEADER_BYTES_SENT% %UPSTREAM_HEADER_BYTES_RECEIVED%\n"); + + testRouterUpstreamDisconnectBeforeRequestComplete(); + + expectUpstreamBytesSentAndReceived(BytesCountExpectation(187, 0, 156, 0), + BytesCountExpectation(114, 0, 114, 0)); +} + +TEST_P(ProtocolIntegrationTest, UpstreamDisconnectBeforeResponseCompleteWireBytesCountUpstream) { + // we only care about upstream protocol. + if (downstreamProtocol() != Http::CodecType::HTTP2) { + return; + } + useAccessLog("%UPSTREAM_WIRE_BYTES_SENT% %UPSTREAM_WIRE_BYTES_RECEIVED% " + "%UPSTREAM_HEADER_BYTES_SENT% %UPSTREAM_HEADER_BYTES_RECEIVED%\n"); + + testRouterUpstreamDisconnectBeforeResponseComplete(); + + expectUpstreamBytesSentAndReceived(BytesCountExpectation(159, 47, 128, 27), + BytesCountExpectation(113, 13, 113, 13)); +} + +TEST_P(DownstreamProtocolIntegrationTest, BadRequest) { + // we only care about upstream protocol. + if (downstreamProtocol() != Http::CodecType::HTTP1) { + return; + } + useAccessLog("%DOWNSTREAM_WIRE_BYTES_SENT% %DOWNSTREAM_WIRE_BYTES_RECEIVED% " + "%DOWNSTREAM_HEADER_BYTES_SENT% %DOWNSTREAM_HEADER_BYTES_RECEIVED%\n"); + initialize(); + std::string response; + std::string full_request(100, '\r'); + full_request += "GET / HTTP/1.1\r\n path: /test/long/url\r\n" + "Host: host\r\ncontent-length: 0\r\n" + "transfer-encoding: chunked\r\n\r\n"; + + sendRawHttpAndWaitForResponse(lookupPort("http"), full_request.c_str(), &response, false); + + expectUpstreamBytesSentAndReceived(BytesCountExpectation(156, 200, 117, 0), + BytesCountExpectation(113, 13, 113, 0)); +} + TEST_P(DownstreamProtocolIntegrationTest, PathWithFragmentRejectedByDefault) { initialize(); @@ -3200,4 +3420,54 @@ TEST_P(ProtocolIntegrationTest, FragmentStrippedFromPathWithOverride) { EXPECT_EQ("200", response->headers().getStatusValue()); } +TEST_P(DownstreamProtocolIntegrationTest, ContentLengthSmallerThanPayload) { + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = + codec_client_->makeRequestWithBody(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"content-length", "123"}}, + 1024); + if (downstreamProtocol() == Http::CodecType::HTTP1) { + waitForNextUpstreamRequest(); + // HTTP/1.x requests get the payload length from Content-Length header. The remaining bytes is + // parsed as another request. + EXPECT_EQ(123u, upstream_request_->body().length()); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_TRUE(response->complete()); + } else { + // Inconsistency in content-length header and the actually body length should be treated as a + // stream error. + ASSERT_TRUE(response->waitForReset()); + EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->resetReason()); + } +} + +TEST_P(DownstreamProtocolIntegrationTest, ContentLengthLargerThanPayload) { + if (downstreamProtocol() == Http::CodecType::HTTP1) { + // HTTP/1.x request rely on Content-Length header to determine payload length. So there is no + // inconsistency but the request will hang there waiting for the rest bytes. 
+ return; + } + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = + codec_client_->makeRequestWithBody(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"content-length", "1025"}}, + 1024); + + // Inconsistency in content-length header and the actually body length should be treated as a + // stream error. + ASSERT_TRUE(response->waitForReset()); + EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->resetReason()); +} + } // namespace Envoy diff --git a/test/integration/quic_http_integration_test.cc b/test/integration/quic_http_integration_test.cc index 2b957e0ae46a..843d4c1b945c 100644 --- a/test/integration/quic_http_integration_test.cc +++ b/test/integration/quic_http_integration_test.cc @@ -7,6 +7,7 @@ #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/extensions/transport_sockets/quic/v3/quic_transport.pb.h" +#include "test/common/upstream/utility.h" #include "test/config/utility.h" #include "test/integration/http_integration.h" #include "test/test_common/test_runtime.h" @@ -30,7 +31,6 @@ #include "source/common/quic/active_quic_listener.h" #include "source/common/quic/client_connection_factory_impl.h" #include "source/common/quic/envoy_quic_client_session.h" -#include "source/common/quic/envoy_quic_client_connection.h" #include "source/common/quic/envoy_quic_proof_verifier.h" #include "source/common/quic/envoy_quic_connection_helper.h" #include "source/common/quic/envoy_quic_alarm_factory.h" @@ -63,6 +63,88 @@ class CodecClientCallbacksForTest : public Http::CodecClientCallbacks { Http::StreamResetReason last_stream_reset_reason_{Http::StreamResetReason::LocalReset}; }; +// This class enables testing on QUIC path validation +class TestEnvoyQuicClientConnection : public EnvoyQuicClientConnection { +public: + TestEnvoyQuicClientConnection(const quic::QuicConnectionId& server_connection_id, + Network::Address::InstanceConstSharedPtr& initial_peer_address, + quic::QuicConnectionHelperInterface& helper, + quic::QuicAlarmFactory& alarm_factory, + const quic::ParsedQuicVersionVector& supported_versions, + Network::Address::InstanceConstSharedPtr local_addr, + Event::Dispatcher& dispatcher, + const Network::ConnectionSocket::OptionsSharedPtr& options, + bool validation_failure_on_path_response) + : EnvoyQuicClientConnection(server_connection_id, initial_peer_address, helper, alarm_factory, + supported_versions, local_addr, dispatcher, options), + dispatcher_(dispatcher), + validation_failure_on_path_response_(validation_failure_on_path_response) {} + + AssertionResult + waitForPathResponse(std::chrono::milliseconds timeout = TestUtility::DefaultTimeout) { + bool timer_fired = false; + if (!saw_path_response_) { + Event::TimerPtr timer(dispatcher_.createTimer([this, &timer_fired]() -> void { + timer_fired = true; + dispatcher_.exit(); + })); + timer->enableTimer(timeout); + waiting_for_path_response_ = true; + dispatcher_.run(Event::Dispatcher::RunType::Block); + if (timer_fired) { + return AssertionFailure() << "Timed out waiting for path response\n"; + } + } + return AssertionSuccess(); + } + + bool OnPathResponseFrame(const quic::QuicPathResponseFrame& frame) override { + saw_path_response_ = true; + if (waiting_for_path_response_) { + dispatcher_.exit(); + } + if (!validation_failure_on_path_response_) { + return EnvoyQuicClientConnection::OnPathResponseFrame(frame); + } + 
CancelPathValidation(); + return connected(); + } + + AssertionResult + waitForHandshakeDone(std::chrono::milliseconds timeout = TestUtility::DefaultTimeout) { + bool timer_fired = false; + if (!saw_handshake_done_) { + Event::TimerPtr timer(dispatcher_.createTimer([this, &timer_fired]() -> void { + timer_fired = true; + dispatcher_.exit(); + })); + timer->enableTimer(timeout); + waiting_for_handshake_done_ = true; + dispatcher_.run(Event::Dispatcher::RunType::Block); + if (timer_fired) { + return AssertionFailure() << "Timed out waiting for handshake done\n"; + } + } + return AssertionSuccess(); + } + + bool OnHandshakeDoneFrame(const quic::QuicHandshakeDoneFrame& frame) override { + saw_handshake_done_ = true; + if (waiting_for_handshake_done_) { + dispatcher_.exit(); + } + return EnvoyQuicClientConnection::OnHandshakeDoneFrame(frame); + } + +private: + Event::Dispatcher& dispatcher_; + bool saw_path_response_{false}; + bool saw_handshake_done_{false}; + bool waiting_for_path_response_{false}; + bool waiting_for_handshake_done_{false}; + bool validation_failure_on_path_response_{false}; +}; + // A test that sets up its own client connection with customized quic version and connection ID. class QuicHttpIntegrationTest : public HttpIntegrationTest, public testing::TestWithParam { @@ -72,8 +154,7 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, ConfigHelper::quicHttpProxyConfig()), supported_versions_(quic::CurrentSupportedHttp3Versions()), conn_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *conn_helper_.GetClock()) { - // Enable this flag for test coverage. - SetQuicReloadableFlag(quic_tls_set_signature_algorithm_prefs, true); + SetQuicReloadableFlag(quic_remove_connection_migration_connection_option, true); } ~QuicHttpIntegrationTest() override { @@ -101,9 +182,10 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, // supported by server, this connection will fail. // TODO(danzh) Implement retry upon version mismatch and modify test frame work to specify a // different version set on server side to test that. 
- auto connection = std::make_unique( + auto connection = std::make_unique( getNextConnectionId(), server_addr_, conn_helper_, alarm_factory_, - quic::ParsedQuicVersionVector{supported_versions_[0]}, local_addr, *dispatcher_, nullptr); + quic::ParsedQuicVersionVector{supported_versions_[0]}, local_addr, *dispatcher_, nullptr, + validation_failure_on_path_response_); quic_connection_ = connection.get(); ASSERT(quic_connection_persistent_info_ != nullptr); auto& persistent_info = static_cast(*quic_connection_persistent_info_); @@ -122,11 +204,31 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, IntegrationCodecClientPtr makeRawHttpConnection( Network::ClientConnectionPtr&& conn, absl::optional http2_options) override { - IntegrationCodecClientPtr codec = - HttpIntegrationTest::makeRawHttpConnection(std::move(conn), http2_options); - if (!codec->disconnected()) { - codec->setCodecClientCallbacks(client_codec_callback_); + std::shared_ptr cluster{new NiceMock()}; + cluster->max_response_headers_count_ = 200; + if (http2_options.has_value()) { + cluster->http3_options_ = ConfigHelper::http2ToHttp3ProtocolOptions( + http2_options.value(), quic::kStreamReceiveWindowLimit); } + cluster->http3_options_.set_allow_extended_connect(true); + *cluster->http3_options_.mutable_quic_protocol_options() = client_quic_options_; + Upstream::HostDescriptionConstSharedPtr host_description{Upstream::makeTestHostDescription( + cluster, fmt::format("tcp://{}:80", Network::Test::getLoopbackAddressUrlString(version_)), + timeSystem())}; + // This call may fail in QUICHE because of INVALID_VERSION. QUIC connection doesn't support + // in-connection version negotiation. + auto codec = std::make_unique(*dispatcher_, random_, std::move(conn), + host_description, downstream_protocol_); + if (codec->disconnected()) { + // Connection may get closed during version negotiation or handshake. + // TODO(#8479) QUIC connection doesn't support in-connection version negotiationPropagate + // INVALID_VERSION error to caller and let caller to use server advertised version list to + // create a new connection with mutually supported version and make client codec again. 
+ ENVOY_LOG(error, "Fail to connect to server with error: {}", + codec->connection()->transportFailureReason()); + return codec; + } + codec->setCodecClientCallbacks(client_codec_callback_); return codec; } @@ -226,9 +328,11 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, EnvoyQuicAlarmFactory alarm_factory_; CodecClientCallbacksForTest client_codec_callback_; Network::Address::InstanceConstSharedPtr server_addr_; - EnvoyQuicClientConnection* quic_connection_{nullptr}; + envoy::config::core::v3::QuicProtocolOptions client_quic_options_; + TestEnvoyQuicClientConnection* quic_connection_{nullptr}; std::list designated_connection_ids_; Quic::QuicClientTransportSocketFactory* transport_socket_factory_{nullptr}; + bool validation_failure_on_path_response_{false}; }; INSTANTIATE_TEST_SUITE_P(QuicHttpIntegrationTests, QuicHttpIntegrationTest, @@ -239,6 +343,33 @@ TEST_P(QuicHttpIntegrationTest, GetRequestAndEmptyResponse) { testRouterHeaderOnlyRequestAndResponse(); } +TEST_P(QuicHttpIntegrationTest, Draft29NotSupportedByDefault) { + supported_versions_ = {quic::ParsedQuicVersion::Draft29()}; + initialize(); + codec_client_ = makeRawHttpConnection(makeClientConnection(lookupPort("http")), absl::nullopt); + EXPECT_TRUE(codec_client_->disconnected()); + EXPECT_EQ(quic::QUIC_INVALID_VERSION, + static_cast(codec_client_->connection())->error()); +} + +TEST_P(QuicHttpIntegrationTest, RuntimeEnableDraft29) { + supported_versions_ = {quic::ParsedQuicVersion::Draft29()}; + config_helper_.addRuntimeOverride( + "envoy.reloadable_features.FLAGS_quic_reloadable_flag_quic_disable_version_draft_29", + "false"); + initialize(); + + codec_client_ = makeRawHttpConnection(makeClientConnection(lookupPort("http")), absl::nullopt); + EXPECT_EQ(transport_socket_factory_->clientContextConfig().serverNameIndication(), + codec_client_->connection()->requestedServerName()); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(0); + upstream_request_->encodeHeaders(default_response_headers_, true); + ASSERT_TRUE(response->waitForEndStream()); + codec_client_->close(); + test_server_->waitForCounterEq("http3.quic_version_h3_29", 1u); +} + TEST_P(QuicHttpIntegrationTest, ZeroRtt) { // Make sure both connections use the same PersistentQuicInfoImpl. concurrency_ = 1; @@ -267,6 +398,8 @@ TEST_P(QuicHttpIntegrationTest, ZeroRtt) { EXPECT_TRUE(static_cast( quic::test::QuicSessionPeer::GetMutableCryptoStream(quic_session)) ->EarlyDataAccepted()); + EXPECT_NE(quic_session->ssl(), nullptr); + EXPECT_TRUE(quic_session->ssl()->peerCertificateValidated()); // Close the second connection. 
codec_client_->close(); if (GetParam() == Network::Address::IpVersion::v4) { @@ -366,6 +499,120 @@ TEST_P(QuicHttpIntegrationTest, PortMigration) { cleanupUpstreamAndDownstream(); } +TEST_P(QuicHttpIntegrationTest, PortMigrationOnPathDegrading) { + concurrency_ = 2; + initialize(); + uint32_t old_port = lookupPort("http"); + codec_client_ = makeHttpConnection(old_port); + auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + codec_client_->sendData(*request_encoder_, 1024u, false); + + ASSERT_TRUE(quic_connection_->waitForHandshakeDone()); + auto old_self_addr = quic_connection_->self_address(); + quic_connection_->OnPathDegradingDetected(); + ASSERT_TRUE(quic_connection_->waitForPathResponse()); + auto self_addr = quic_connection_->self_address(); + EXPECT_NE(old_self_addr, self_addr); + + // Send the rest data. + codec_client_->sendData(*request_encoder_, 1024u, true); + waitForNextUpstreamRequest(0, TestUtility::DefaultTimeout); + // Send response headers, and end_stream if there is no response body. + const Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + size_t response_size{5u}; + upstream_request_->encodeHeaders(response_headers, false); + upstream_request_->encodeData(response_size, true); + ASSERT_TRUE(response->waitForEndStream()); + verifyResponse(std::move(response), "200", response_headers, std::string(response_size, 'a')); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(1024u * 2, upstream_request_->bodyLength()); +} + +TEST_P(QuicHttpIntegrationTest, NoPortMigrationWithoutConfig) { + concurrency_ = 2; + initialize(); + client_quic_options_.mutable_num_timeouts_to_trigger_port_migration()->set_value(0); + uint32_t old_port = lookupPort("http"); + codec_client_ = makeHttpConnection(old_port); + auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + codec_client_->sendData(*request_encoder_, 1024u, false); + + ASSERT_TRUE(quic_connection_->waitForHandshakeDone()); + auto old_self_addr = quic_connection_->self_address(); + quic_connection_->OnPathDegradingDetected(); + ASSERT_FALSE(quic_connection_->waitForPathResponse(std::chrono::milliseconds(2000))); + auto self_addr = quic_connection_->self_address(); + EXPECT_EQ(old_self_addr, self_addr); + + // Send the rest data. + codec_client_->sendData(*request_encoder_, 1024u, true); + waitForNextUpstreamRequest(0, TestUtility::DefaultTimeout); + // Send response headers, and end_stream if there is no response body. 
+ const Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + size_t response_size{5u}; + upstream_request_->encodeHeaders(response_headers, false); + upstream_request_->encodeData(response_size, true); + ASSERT_TRUE(response->waitForEndStream()); + verifyResponse(std::move(response), "200", response_headers, std::string(response_size, 'a')); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(1024u * 2, upstream_request_->bodyLength()); +} + +TEST_P(QuicHttpIntegrationTest, PortMigrationFailureOnPathDegrading) { + concurrency_ = 2; + validation_failure_on_path_response_ = true; + initialize(); + uint32_t old_port = lookupPort("http"); + codec_client_ = makeHttpConnection(old_port); + auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + codec_client_->sendData(*request_encoder_, 1024u, false); + + ASSERT_TRUE(quic_connection_->waitForHandshakeDone()); + auto old_self_addr = quic_connection_->self_address(); + quic_connection_->OnPathDegradingDetected(); + ASSERT_TRUE(quic_connection_->waitForPathResponse()); + auto self_addr = quic_connection_->self_address(); + // The path validation will fail and thus client self address will not change. + EXPECT_EQ(old_self_addr, self_addr); + + // Send the rest data. + codec_client_->sendData(*request_encoder_, 1024u, true); + waitForNextUpstreamRequest(0, TestUtility::DefaultTimeout); + // Send response headers, and end_stream if there is no response body. + const Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + size_t response_size{5u}; + upstream_request_->encodeHeaders(response_headers, false); + upstream_request_->encodeData(response_size, true); + ASSERT_TRUE(response->waitForEndStream()); + verifyResponse(std::move(response), "200", response_headers, std::string(response_size, 'a')); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(1024u * 2, upstream_request_->bodyLength()); +} + TEST_P(QuicHttpIntegrationTest, AdminDrainDrainsListeners) { testAdminDrain(Http::CodecType::HTTP1); } @@ -446,6 +693,100 @@ TEST_P(QuicHttpIntegrationTest, ResetRequestWithInvalidCharacter) { ASSERT_TRUE(response->waitForReset()); } +TEST_P(QuicHttpIntegrationTest, Http3ClientKeepalive) { + initialize(); + + constexpr uint64_t max_interval_sec = 5; + constexpr uint64_t initial_interval_sec = 1; + // Set connection idle network timeout to be a little larger than max interval. + dynamic_cast(*quic_connection_persistent_info_) + .quic_config_.SetIdleNetworkTimeout(quic::QuicTime::Delta::FromSeconds(max_interval_sec + 2)); + client_quic_options_.mutable_connection_keepalive()->mutable_max_interval()->set_seconds( + max_interval_sec); + client_quic_options_.mutable_connection_keepalive()->mutable_initial_interval()->set_seconds( + initial_interval_sec); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(); + + // Wait for 10s before sending back response. If keepalive is disabled, the + // connection would have idle timed out. 
+ Event::TimerPtr timer(dispatcher_->createTimer([this]() -> void { dispatcher_->exit(); })); + timer->enableTimer(std::chrono::seconds(10)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"set-cookie", "foo"}, + {"set-cookie", "bar"}}, + true); + EXPECT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + // First 6 PING frames should be sent every 1s, and the following ones less frequently. + EXPECT_LE(quic_connection_->GetStats().ping_frames_sent, 8u); +} + +TEST_P(QuicHttpIntegrationTest, Http3ClientKeepaliveDisabled) { + initialize(); + + constexpr uint64_t max_interval_sec = 0; + constexpr uint64_t initial_interval_sec = 1; + // Set connection idle network timeout to be a little larger than max interval. + dynamic_cast(*quic_connection_persistent_info_) + .quic_config_.SetIdleNetworkTimeout(quic::QuicTime::Delta::FromSeconds(5)); + client_quic_options_.mutable_connection_keepalive()->mutable_max_interval()->set_seconds( + max_interval_sec); + client_quic_options_.mutable_connection_keepalive()->mutable_initial_interval()->set_seconds( + initial_interval_sec); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(); + + // As keepalive is disabled, the connection will timeout after 5s. + EXPECT_TRUE(response->waitForReset()); + EXPECT_EQ(quic_connection_->GetStats().ping_frames_sent, 0u); +} + +TEST_P(QuicHttpIntegrationTest, Http3DownstreamKeepalive) { + constexpr uint64_t max_interval_sec = 5; + constexpr uint64_t initial_interval_sec = 1; + config_helper_.addConfigModifier( + [=](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + auto* keepalive_options = hcm.mutable_http3_protocol_options() + ->mutable_quic_protocol_options() + ->mutable_connection_keepalive(); + keepalive_options->mutable_initial_interval()->set_seconds(initial_interval_sec); + keepalive_options->mutable_max_interval()->set_seconds(max_interval_sec); + }); + // Set connection idle network timeout to be a little larger than max interval. + config_helper_.addConfigModifier([=](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + bootstrap.mutable_static_resources() + ->mutable_listeners(0) + ->mutable_udp_listener_config() + ->mutable_quic_options() + ->mutable_idle_timeout() + ->set_seconds(max_interval_sec + 2); + }); + initialize(); + + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(); + + // Wait for 10s before sending back response. If keepalive is disabled, the + // connection would have idle timed out. 
+ Event::TimerPtr timer(dispatcher_->createTimer([this]() -> void { dispatcher_->exit(); })); + timer->enableTimer(std::chrono::seconds(10)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"set-cookie", "foo"}, + {"set-cookie", "bar"}}, + true); + EXPECT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); +} + class QuicInplaceLdsIntegrationTest : public QuicHttpIntegrationTest { public: void inplaceInitialize(bool add_default_filter_chain = false) { diff --git a/test/integration/redirect_integration_test.cc b/test/integration/redirect_integration_test.cc index a88b83a1917b..805ca4e882c8 100644 --- a/test/integration/redirect_integration_test.cc +++ b/test/integration/redirect_integration_test.cc @@ -187,6 +187,70 @@ TEST_P(RedirectIntegrationTest, BasicInternalRedirect) { EXPECT_THAT(waitForAccessLog(access_log_name_, 1), HasSubstr("200 via_upstream -\n")); } +TEST_P(RedirectIntegrationTest, BasicInternalRedirectDownstreamBytesCount) { + if (upstreamProtocol() != Http::CodecType::HTTP2) { + return; + } + useAccessLog("%DOWNSTREAM_WIRE_BYTES_SENT% %DOWNSTREAM_WIRE_BYTES_RECEIVED% " + "%DOWNSTREAM_HEADER_BYTES_SENT% %DOWNSTREAM_HEADER_BYTES_RECEIVED%"); + // Validate that header sanitization is only called once. + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.set_via("via_value"); }); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setHost("handle.internal.redirect"); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(redirect_response_, true); + + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(default_response_headers_, true); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + expectDownstreamBytesSentAndReceived(BytesCountExpectation(0, 63, 0, 31), + BytesCountExpectation(0, 42, 0, 42), 0); + expectDownstreamBytesSentAndReceived(BytesCountExpectation(140, 63, 121, 31), + BytesCountExpectation(77, 42, 77, 42), 1); +} + +TEST_P(RedirectIntegrationTest, BasicInternalRedirectUpstreamBytesCount) { + if (downstreamProtocol() != Http::CodecType::HTTP2) { + return; + } + useAccessLog("%UPSTREAM_WIRE_BYTES_SENT% %UPSTREAM_WIRE_BYTES_RECEIVED% " + "%UPSTREAM_HEADER_BYTES_SENT% %UPSTREAM_HEADER_BYTES_RECEIVED%"); + // Validate that header sanitization is only called once. 
+ config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.set_via("via_value"); }); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setHost("handle.internal.redirect"); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(redirect_response_, true); + + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(default_response_headers_, true); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + expectUpstreamBytesSentAndReceived(BytesCountExpectation(195, 110, 164, 85), + BytesCountExpectation(137, 64, 137, 64), 0); + expectUpstreamBytesSentAndReceived(BytesCountExpectation(244, 38, 219, 18), + BytesCountExpectation(85, 10, 85, 10), 1); +} + TEST_P(RedirectIntegrationTest, InternalRedirectStripsUriFragment) { // Validate that header sanitization is only called once. config_helper_.addConfigModifier( diff --git a/test/integration/rtds_integration_test.cc b/test/integration/rtds_integration_test.cc index 98e4489abf58..18a677e349f5 100644 --- a/test/integration/rtds_integration_test.cc +++ b/test/integration/rtds_integration_test.cc @@ -85,7 +85,14 @@ class RtdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public H RtdsIntegrationTest() : HttpIntegrationTest( Http::CodecType::HTTP2, ipVersion(), - tdsBootstrapConfig(sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { + tdsBootstrapConfig(sotwOrDelta() == Grpc::SotwOrDelta::Sotw || + sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw + ? "GRPC" + : "DELTA_GRPC")) { + if (sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw || + sotwOrDelta() == Grpc::SotwOrDelta::UnifiedDelta) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.unified_mux", "true"); + } use_lds_ = false; create_xds_upstream_ = true; sotw_or_delta_ = sotwOrDelta(); diff --git a/test/integration/scoped_rds_integration_test.cc b/test/integration/scoped_rds_integration_test.cc index b76a18cda3b3..f114338e4358 100644 --- a/test/integration/scoped_rds_integration_test.cc +++ b/test/integration/scoped_rds_integration_test.cc @@ -14,12 +14,160 @@ #include "test/test_common/printers.h" #include "test/test_common/resources.h" +#include "absl/strings/str_cat.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace Envoy { namespace { +class InlineScopedRoutesIntegrationTest : public HttpIntegrationTest, public testing::Test { +protected: + InlineScopedRoutesIntegrationTest() + : HttpIntegrationTest(Http::CodecType::HTTP1, Network::Address::IpVersion::v4) {} + + void setScopedRoutesConfig(absl::string_view config_yaml) { + config_helper_.addConfigModifier( + [config_yaml]( + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes* + scoped_routes = hcm.mutable_scoped_routes(); + const std::string scoped_routes_yaml = absl::StrCat(R"EOF( +name: foo-scoped-routes +scope_key_builder: + fragments: + - header_value_extractor: + name: Addr + element_separator: ; + element: + key: x-foo-key + separator: = +)EOF", + config_yaml); + TestUtility::loadFromYaml(scoped_routes_yaml, *scoped_routes); + }); + } +}; + +TEST_F(InlineScopedRoutesIntegrationTest, NoScopeFound) { + absl::string_view config_yaml = R"EOF( 
+scoped_route_configurations_list: + scoped_route_configurations: + - name: foo-scope + route_configuration: + name: foo + virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: cluster_0 } + key: + fragments: { string_key: foo } +)EOF"; + setScopedRoutesConfig(config_yaml); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":authority", "host"}, + {":scheme", "http"}, + // "xyz-route" is not a configured scope key. + {"Addr", "x-foo-key=xyz-route"}}); + ASSERT_TRUE(response->waitForEndStream()); + verifyResponse(std::move(response), "404", Http::TestResponseHeaderMapImpl{}, ""); + cleanupUpstreamAndDownstream(); +} + +TEST_F(InlineScopedRoutesIntegrationTest, ScopeWithSingleRouteConfiguration) { + absl::string_view config_yaml = R"EOF( +scoped_route_configurations_list: + scoped_route_configurations: + - name: foo-scope + route_configuration: + name: foo + virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: cluster_0 } + key: + fragments: { string_key: foo } +)EOF"; + setScopedRoutesConfig(config_yaml); + initialize(); + + sendRequestAndVerifyResponse( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":authority", "host"}, + {":scheme", "http"}, + {"Addr", "x-foo-key=foo"}}, + /*request_size=*/0, Http::TestResponseHeaderMapImpl{{":status", "200"}, {"service", "foo"}}, + /*response_size=*/0, + /*backend_idx=*/0); +} + +TEST_F(InlineScopedRoutesIntegrationTest, ScopeWithMultipleRouteConfigurations) { + absl::string_view config_yaml = R"EOF( +scoped_route_configurations_list: + scoped_route_configurations: + - name: foo-scope + route_configuration: + name: foo + virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: cluster_0 } + response_headers_to_add: + header: { key: route-name, value: foo } + key: + fragments: { string_key: foo } + - name: baz-scope + route_configuration: + name: baz + virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: cluster_0 } + response_headers_to_add: + header: { key: route-name, value: baz } + key: + fragments: { string_key: baz } + +)EOF"; + setScopedRoutesConfig(config_yaml); + initialize(); + + sendRequestAndVerifyResponse( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":authority", "host"}, + {":scheme", "http"}, + {"Addr", "x-foo-key=baz"}}, + /*request_size=*/0, Http::TestResponseHeaderMapImpl{{":status", "200"}}, + /*response_size=*/0, + /*backend_idx=*/0, Http::TestResponseHeaderMapImpl{{"route-name", "baz"}}); + sendRequestAndVerifyResponse( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":authority", "host"}, + {":scheme", "http"}, + {"Addr", "x-foo-key=foo"}}, + /*request_size=*/0, Http::TestResponseHeaderMapImpl{{":status", "200"}}, + /*response_size=*/0, + /*backend_idx=*/0, Http::TestResponseHeaderMapImpl{{"route-name", "foo"}}); + ; +} + class ScopedRdsIntegrationTest : public HttpIntegrationTest, public Grpc::DeltaSotwIntegrationParamTest { protected: @@ -29,7 +177,12 @@ class ScopedRdsIntegrationTest : public HttpIntegrationTest, absl::flat_hash_map stream_by_resource_name_; }; - ScopedRdsIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP1, ipVersion()) {} + ScopedRdsIntegrationTest() : 
HttpIntegrationTest(Http::CodecType::HTTP1, ipVersion()) { + if (sotwOrDelta() == Grpc::SotwOrDelta::UnifiedSotw || + sotwOrDelta() == Grpc::SotwOrDelta::UnifiedDelta) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.unified_mux", "true"); + } + } ~ScopedRdsIntegrationTest() override { resetConnections(); } @@ -249,7 +402,10 @@ class ScopedRdsIntegrationTest : public HttpIntegrationTest, response); } - bool isDelta() { return sotwOrDelta() == Grpc::SotwOrDelta::Delta; } + bool isDelta() { + return sotwOrDelta() == Grpc::SotwOrDelta::Delta || + sotwOrDelta() == Grpc::SotwOrDelta::UnifiedDelta; + } const std::string srds_config_name_{"foo-scoped-routes"}; FakeUpstreamInfo scoped_rds_upstream_info_; diff --git a/test/integration/sds_static_integration_test.cc b/test/integration/sds_static_integration_test.cc index 47d19d59fa54..ac712f715602 100644 --- a/test/integration/sds_static_integration_test.cc +++ b/test/integration/sds_static_integration_test.cc @@ -67,7 +67,7 @@ class SdsStaticDownstreamIntegrationTest tls_certificate->mutable_private_key()->set_filename( TestEnvironment::runfilesPath("test/config/integration/certs/serverkey.pem")); }); - ASSERT(Thread::MainThread::isMainThread()); + ASSERT(Thread::MainThread::isMainOrTestThread()); HttpIntegrationTest::initialize(); registerTestServerPorts({"http"}); diff --git a/test/integration/server.h b/test/integration/server.h index 5144f2203646..880b899d2f3a 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -478,6 +478,11 @@ class IntegrationTestServer : public Logger::Loggable, notifyingStatsAllocator().waitForCounterExists(name); } + // TODO(#17956): Add Gauge type to NotifyingAllocator and adopt it in this method. + void waitForGaugeDestroyed(const std::string& name) override { + ASSERT_TRUE(TestUtility::waitForGaugeDestroyed(statStore(), name, time_system_)); + } + void waitUntilHistogramHasSamples( const std::string& name, std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) override { @@ -497,6 +502,10 @@ class IntegrationTestServer : public Logger::Loggable, return TestUtility::findGauge(statStore(), name); } + Stats::ParentHistogramSharedPtr histogram(const std::string& name) { + return TestUtility::findHistogram(statStore(), name); + } + std::vector counters() override { return statStore().counters(); } std::vector gauges() override { return statStore().gauges(); } diff --git a/test/integration/server_stats.h b/test/integration/server_stats.h index 66cb7e07e7d2..d4520d3456db 100644 --- a/test/integration/server_stats.h +++ b/test/integration/server_stats.h @@ -66,6 +66,12 @@ class IntegrationTestServerStats { waitForGaugeEq(const std::string& name, uint64_t value, std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) PURE; + /** + * Wait for a gauge to be destroyed. Note that MockStatStore does not destroy stat. + * @param name gauge name. + */ + virtual void waitForGaugeDestroyed(const std::string& name) PURE; + /** * Counter lookup. This is not thread safe, since we don't get a consistent * snapshot, uses counters() instead for this behavior. 
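The server.h/server_stats.h hunk above introduces waitForGaugeDestroyed, whose underlying TestUtility::waitForGaugeDestroyed implementation is not shown in this patch and which, per the new comment, is only meaningful against a stats allocator that actually destroys stats (the mock store does not). As a rough illustration of the usual pattern, repeatedly look the stat up and stop once the lookup comes back empty or a deadline passes, here is a self-contained sketch using standard-library types only. The function names, stat name, polling interval, and wall-clock timing are illustrative assumptions; Envoy's real helper runs against its Stats::Store with the test time system.

#include <chrono>
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <thread>
#include <unordered_map>

// Illustrative only: a generic "wait until a named stat disappears" helper in the
// spirit of waitForGaugeDestroyed. It polls a caller-supplied lookup and gives up
// after a timeout.
bool waitForStatDestroyed(
    const std::function<std::optional<std::uint64_t>(const std::string&)>& lookup,
    const std::string& name,
    std::chrono::milliseconds timeout = std::chrono::seconds(5),
    std::chrono::milliseconds poll_interval = std::chrono::milliseconds(10)) {
  const auto deadline = std::chrono::steady_clock::now() + timeout;
  while (std::chrono::steady_clock::now() < deadline) {
    if (!lookup(name).has_value()) {
      return true; // The stat has been destroyed (the lookup no longer finds it).
    }
    std::this_thread::sleep_for(poll_interval);
  }
  return false; // Timed out while the stat still existed.
}

int main() {
  // A caller supplies the lookup as a lambda over whatever stats container the
  // test uses; here an in-memory map stands in for the store.
  std::unordered_map<std::string, std::uint64_t> gauges{
      {"listener.example.downstream_cx_active", 1}};
  auto lookup = [&gauges](const std::string& n) -> std::optional<std::uint64_t> {
    auto it = gauges.find(n);
    if (it == gauges.end()) {
      return std::nullopt;
    }
    return it->second;
  };
  gauges.erase("listener.example.downstream_cx_active"); // Simulate the gauge being destroyed.
  return waitForStatDestroyed(lookup, "listener.example.downstream_cx_active") ? 0 : 1;
}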
diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index ffe0dcbc742c..14b2b8f61bf7 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -270,6 +270,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSize) { // 2020/10/02 13251 39326 switch to google tcmalloc // 2021/08/15 17290 40349 add all host map to priority set for fast host // searching + // 2021/08/18 13176 40577 40700 Support slow start mode // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -290,7 +291,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSize) { // https://github.com/envoyproxy/envoy/issues/12209 // EXPECT_MEMORY_EQ(m_per_cluster, 37061); } - EXPECT_MEMORY_LE(m_per_cluster, 40350); // Round up to allow platform variations. + EXPECT_MEMORY_LE(m_per_cluster, 40700); // Round up to allow platform variations. } TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { diff --git a/test/integration/tcp_dump.cc b/test/integration/tcp_dump.cc index bd31e60606ee..41abde689204 100644 --- a/test/integration/tcp_dump.cc +++ b/test/integration/tcp_dump.cc @@ -61,7 +61,7 @@ TcpDump::TcpDump(const std::string& path, const std::string& iface, break; } // Give 50ms sleep. - ::usleep(50000); + ::usleep(50000); // NO_CHECK_FORMAT(real_time) } #endif } diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index ea3d79afd589..b98c29034882 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -488,39 +488,83 @@ class TcpTunnelingIntegrationTest : public HttpProtocolIntegrationTest { }); HttpProtocolIntegrationTest::SetUp(); } + + void setUpConnection(FakeHttpConnectionPtr& fake_upstream_connection) { + // Start a connection, and verify the upgrade headers are received upstream. + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); + if (!fake_upstream_connection) { + ASSERT_TRUE( + fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection)); + } + ASSERT_TRUE(fake_upstream_connection->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + + // Send upgrade headers downstream, fully establishing the connection. + upstream_request_->encodeHeaders(default_response_headers_, false); + } + + void sendBidiData(FakeHttpConnectionPtr& fake_upstream_connection, bool send_goaway = false) { + // Send some data from downstream to upstream, and make sure it goes through. + ASSERT_TRUE(tcp_client_->write("hello", false)); + ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); + + if (send_goaway) { + fake_upstream_connection->encodeGoAway(); + } + // Send data from upstream to downstream. + upstream_request_->encodeData(12, false); + ASSERT_TRUE(tcp_client_->waitForData(12)); + } + + void closeConnection(FakeHttpConnectionPtr& fake_upstream_connection) { + // Now send more data and close the TCP client. This should be treated as half close, so the + // data should go through. 
+ ASSERT_TRUE(tcp_client_->write("hello", false)); + tcp_client_->close(); + ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); + if (upstreamProtocol() == Http::CodecType::HTTP1) { + ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); + } else { + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + // If the upstream now sends 'end stream' the connection is fully closed. + upstream_request_->encodeData(0, true); + } + } + + IntegrationTcpClientPtr tcp_client_; }; TEST_P(TcpTunnelingIntegrationTest, Basic) { initialize(); - // Start a connection, and verify the upgrade headers are received upstream. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - - // Send upgrade headers downstream, fully establishing the connection. - upstream_request_->encodeHeaders(default_response_headers_, false); + setUpConnection(fake_upstream_connection_); + sendBidiData(fake_upstream_connection_); + closeConnection(fake_upstream_connection_); +} - // Send some data from downstream to upstream, and make sure it goes through. - ASSERT_TRUE(tcp_client->write("hello", false)); - ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); +TEST_P(TcpTunnelingIntegrationTest, SendDataUpstreamAfterUpstreamClose) { + if (upstreamProtocol() == Http::CodecType::HTTP1) { + // HTTP/1.1 can't frame with FIN bits. + return; + } + initialize(); - // Send data from upstream to downstream. - upstream_request_->encodeData(12, false); - ASSERT_TRUE(tcp_client->waitForData(12)); + setUpConnection(fake_upstream_connection_); + sendBidiData(fake_upstream_connection_); + // Close upstream. + upstream_request_->encodeData(2, true); + tcp_client_->waitForHalfClose(); - // Now send more data and close the TCP client. This should be treated as half close, so the data - // should go through. - ASSERT_TRUE(tcp_client->write("hello", false)); - tcp_client->close(); + // Now send data upstream. + ASSERT_TRUE(tcp_client_->write("hello", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); + + // Finally close and clean up. + tcp_client_->close(); if (upstreamProtocol() == Http::CodecType::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } else { ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); - // If the upstream now sends 'end stream' the connection is fully closed. - upstream_request_->encodeData(0, true); } } @@ -548,7 +592,7 @@ TEST_P(TcpTunnelingIntegrationTest, BasicUsePost) { initialize(); // Start a connection, and verify the upgrade headers are received upstream. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); @@ -557,33 +601,39 @@ TEST_P(TcpTunnelingIntegrationTest, BasicUsePost) { // Send upgrade headers downstream, fully establishing the connection. upstream_request_->encodeHeaders(default_response_headers_, false); - // Send some data from downstream to upstream, and make sure it goes through. 
- ASSERT_TRUE(tcp_client->write("hello", false)); - ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); - - // Send data from upstream to downstream. - upstream_request_->encodeData(12, false); - ASSERT_TRUE(tcp_client->waitForData(12)); + sendBidiData(fake_upstream_connection_); + closeConnection(fake_upstream_connection_); +} - // Now send more data and close the TCP client. This should be treated as half close, so the data - // should go through. - ASSERT_TRUE(tcp_client->write("hello", false)); - tcp_client->close(); - ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); +TEST_P(TcpTunnelingIntegrationTest, Goaway) { if (upstreamProtocol() == Http::CodecType::HTTP1) { - ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - } else { - ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); - // If the upstream now sends 'end stream' the connection is fully closed. - upstream_request_->encodeData(0, true); + return; } + initialize(); + + // Send bidirectional data, including a goaway. + // This should result in the first connection being torn down. + setUpConnection(fake_upstream_connection_); + sendBidiData(fake_upstream_connection_, true); + closeConnection(fake_upstream_connection_); + test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_destroy", 1); + + // Make sure a subsequent connection can be established successfully. + FakeHttpConnectionPtr fake_upstream_connection; + setUpConnection(fake_upstream_connection); + sendBidiData(fake_upstream_connection); + closeConnection(fake_upstream_connection_); + + // Make sure the last stream is finished before doing test teardown. + fake_upstream_connection->encodeGoAway(); + test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_destroy", 2); } TEST_P(TcpTunnelingIntegrationTest, InvalidResponseHeaders) { initialize(); // Start a connection, and verify the upgrade headers are received upstream. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); @@ -600,23 +650,15 @@ TEST_P(TcpTunnelingIntegrationTest, InvalidResponseHeaders) { // The connection should be fully closed, but the client has no way of knowing // that. Ensure the FIN is read and clean up state. - tcp_client->waitForHalfClose(); - tcp_client->close(); + tcp_client_->waitForHalfClose(); + tcp_client_->close(); } TEST_P(TcpTunnelingIntegrationTest, CloseUpstreamFirst) { initialize(); - // Establish a connection. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - upstream_request_->encodeHeaders(default_response_headers_, false); - - // Send data in both directions. - ASSERT_TRUE(tcp_client->write("hello", false)); - ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); + setUpConnection(fake_upstream_connection_); + sendBidiData(fake_upstream_connection_); // Send data from upstream to downstream with an end stream and make sure the data is received // before the connection is half-closed. 
@@ -624,19 +666,19 @@ TEST_P(TcpTunnelingIntegrationTest, CloseUpstreamFirst) { if (upstreamProtocol() == Http::CodecType::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->close()); } - ASSERT_TRUE(tcp_client->waitForData(12)); - tcp_client->waitForHalfClose(); + ASSERT_TRUE(tcp_client_->waitForData(12)); + tcp_client_->waitForHalfClose(); if (upstreamProtocol() == Http::CodecType::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - tcp_client->close(); + tcp_client_->close(); } else { // Attempt to send data upstream. // should go through. - ASSERT_TRUE(tcp_client->write("hello", false)); + ASSERT_TRUE(tcp_client_->write("hello", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); - ASSERT_TRUE(tcp_client->write("hello", true)); + ASSERT_TRUE(tcp_client_->write("hello", true)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); } @@ -649,16 +691,11 @@ TEST_P(TcpTunnelingIntegrationTest, ResetStreamTest) { enableHalfClose(false); initialize(); - // Establish a connection. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - upstream_request_->encodeHeaders(default_response_headers_, false); + setUpConnection(fake_upstream_connection_); // Reset the stream. upstream_request_->encodeResetStream(); - tcp_client->waitForDisconnect(); + tcp_client_->waitForDisconnect(); } TEST_P(TcpTunnelingIntegrationTest, TestIdletimeoutWithLargeOutstandingData) { @@ -681,20 +718,16 @@ TEST_P(TcpTunnelingIntegrationTest, TestIdletimeoutWithLargeOutstandingData) { initialize(); - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - upstream_request_->encodeHeaders(default_response_headers_, false); + setUpConnection(fake_upstream_connection_); std::string data(1024 * 16, 'a'); - ASSERT_TRUE(tcp_client->write(data)); + ASSERT_TRUE(tcp_client_->write(data)); upstream_request_->encodeData(data, false); - tcp_client->waitForDisconnect(); + tcp_client_->waitForDisconnect(); if (upstreamProtocol() == Http::CodecType::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - tcp_client->close(); + tcp_client_->close(); } else { ASSERT_TRUE(upstream_request_->waitForReset()); } @@ -707,36 +740,32 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyDownstreamFlush) { config_helper_.setBufferLimits(size / 4, size / 4); initialize(); - std::string data(size, 'a'); - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - upstream_request_->encodeHeaders(default_response_headers_, false); + setUpConnection(fake_upstream_connection_); - tcp_client->readDisable(true); + tcp_client_->readDisable(true); + std::string data(size, 'a'); if (upstreamProtocol() == 
Http::CodecType::HTTP1) { - ASSERT_TRUE(tcp_client->write("hello", false)); + ASSERT_TRUE(tcp_client_->write("hello", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); upstream_request_->encodeData(data, true); ASSERT_TRUE(fake_upstream_connection_->close()); } else { - ASSERT_TRUE(tcp_client->write("", true)); + ASSERT_TRUE(tcp_client_->write("", true)); // This ensures that readDisable(true) has been run on its thread - // before tcp_client starts writing. + // before tcp_client_ starts writing. ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); upstream_request_->encodeData(data, true); } test_server_->waitForCounterGe("cluster.cluster_0.upstream_flow_control_paused_reading_total", 1); - tcp_client->readDisable(false); - tcp_client->waitForData(data); - tcp_client->waitForHalfClose(); + tcp_client_->readDisable(false); + tcp_client_->waitForData(data); + tcp_client_->waitForHalfClose(); if (upstreamProtocol() == Http::CodecType::HTTP1) { - tcp_client->close(); + tcp_client_->close(); } } @@ -754,22 +783,19 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) { config_helper_.setBufferLimits(size, size); initialize(); - std::string data(size, 'a'); - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - upstream_request_->encodeHeaders(default_response_headers_, false); + setUpConnection(fake_upstream_connection_); + upstream_request_->readDisable(true); upstream_request_->encodeData("hello", false); // This ensures that fake_upstream_connection->readDisable has been run on its thread - // before tcp_client starts writing. - ASSERT_TRUE(tcp_client->waitForData(5)); + // before tcp_client_ starts writing. + ASSERT_TRUE(tcp_client_->waitForData(5)); - ASSERT_TRUE(tcp_client->write(data, true)); + std::string data(size, 'a'); + ASSERT_TRUE(tcp_client_->write(data, true)); if (upstreamProtocol() == Http::CodecType::HTTP1) { - tcp_client->close(); + tcp_client_->close(); upstream_request_->readDisable(false); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, size)); @@ -782,7 +808,7 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) { ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, size)); ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); upstream_request_->encodeData("world", true); - tcp_client->waitForHalfClose(); + tcp_client_->waitForHalfClose(); } } @@ -793,42 +819,37 @@ TEST_P(TcpTunnelingIntegrationTest, ConnectionReuse) { } initialize(); - // Establish a connection. - IntegrationTcpClientPtr tcp_client1 = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - upstream_request_->encodeHeaders(default_response_headers_, false); + setUpConnection(fake_upstream_connection_); // Send data in both directions. 
- ASSERT_TRUE(tcp_client1->write("hello1", false)); + ASSERT_TRUE(tcp_client_->write("hello1", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello1")); // Send data from upstream to downstream with an end stream and make sure the data is received // before the connection is half-closed. upstream_request_->encodeData("world1", true); - tcp_client1->waitForData("world1"); - tcp_client1->waitForHalfClose(); - tcp_client1->close(); + tcp_client_->waitForData("world1"); + tcp_client_->waitForHalfClose(); + tcp_client_->close(); ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); // Establish a new connection. - IntegrationTcpClientPtr tcp_client2 = makeTcpConnection(lookupPort("tcp_proxy")); + IntegrationTcpClientPtr tcp_client_2 = makeTcpConnection(lookupPort("tcp_proxy")); // The new CONNECT stream is established in the existing h2 connection. ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); upstream_request_->encodeHeaders(default_response_headers_, false); - ASSERT_TRUE(tcp_client2->write("hello2", false)); + ASSERT_TRUE(tcp_client_2->write("hello2", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello2")); // Send data from upstream to downstream with an end stream and make sure the data is received // before the connection is half-closed. upstream_request_->encodeData("world2", true); - tcp_client2->waitForData("world2"); - tcp_client2->waitForHalfClose(); - tcp_client2->close(); + tcp_client_2->waitForData("world2"); + tcp_client_2->waitForHalfClose(); + tcp_client_2->close(); ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); } @@ -839,36 +860,31 @@ TEST_P(TcpTunnelingIntegrationTest, H1NoConnectionReuse) { } initialize(); - // Establish a connection. - IntegrationTcpClientPtr tcp_client1 = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - upstream_request_->encodeHeaders(default_response_headers_, false); + setUpConnection(fake_upstream_connection_); // Send data in both directions. - ASSERT_TRUE(tcp_client1->write("hello1", false)); + ASSERT_TRUE(tcp_client_->write("hello1", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello1")); // Send data from upstream to downstream and close the connection // from downstream. upstream_request_->encodeData("world1", false); - tcp_client1->waitForData("world1"); - tcp_client1->close(); + tcp_client_->waitForData("world1"); + tcp_client_->close(); ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); // Establish a new connection. 
- IntegrationTcpClientPtr tcp_client2 = makeTcpConnection(lookupPort("tcp_proxy")); + IntegrationTcpClientPtr tcp_client_2 = makeTcpConnection(lookupPort("tcp_proxy")); // A new connection is established ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); upstream_request_->encodeHeaders(default_response_headers_, false); - ASSERT_TRUE(tcp_client2->write("hello1", false)); + ASSERT_TRUE(tcp_client_2->write("hello1", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello1")); - tcp_client2->close(); + tcp_client_2->close(); ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } @@ -881,41 +897,41 @@ TEST_P(TcpTunnelingIntegrationTest, H1UpstreamCloseNoConnectionReuse) { initialize(); // Establish a connection. - IntegrationTcpClientPtr tcp_client1 = makeTcpConnection(lookupPort("tcp_proxy")); + IntegrationTcpClientPtr tcp_client_1 = makeTcpConnection(lookupPort("tcp_proxy")); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); upstream_request_->encodeHeaders(default_response_headers_, false); // Send data in both directions. - ASSERT_TRUE(tcp_client1->write("hello1", false)); + ASSERT_TRUE(tcp_client_1->write("hello1", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello1")); // Send data from upstream to downstream and close the connection // from the upstream. upstream_request_->encodeData("world1", false); - tcp_client1->waitForData("world1"); + tcp_client_1->waitForData("world1"); ASSERT_TRUE(fake_upstream_connection_->close()); ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - tcp_client1->waitForHalfClose(); - tcp_client1->close(); + tcp_client_1->waitForHalfClose(); + tcp_client_1->close(); // Establish a new connection. - IntegrationTcpClientPtr tcp_client2 = makeTcpConnection(lookupPort("tcp_proxy")); + IntegrationTcpClientPtr tcp_client_2 = makeTcpConnection(lookupPort("tcp_proxy")); // A new connection is established ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); upstream_request_->encodeHeaders(default_response_headers_, false); - ASSERT_TRUE(tcp_client2->write("hello2", false)); + ASSERT_TRUE(tcp_client_2->write("hello2", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello2")); ASSERT_TRUE(fake_upstream_connection_->close()); ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - tcp_client2->waitForHalfClose(); - tcp_client2->close(); + tcp_client_2->waitForHalfClose(); + tcp_client_2->close(); } TEST_P(TcpTunnelingIntegrationTest, 2xxStatusCodeValidHttp1) { @@ -925,7 +941,7 @@ TEST_P(TcpTunnelingIntegrationTest, 2xxStatusCodeValidHttp1) { initialize(); // Start a connection, and verify the upgrade headers are received upstream. 
- IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); @@ -935,16 +951,10 @@ TEST_P(TcpTunnelingIntegrationTest, 2xxStatusCodeValidHttp1) { default_response_headers_.setStatus(enumToInt(Http::Code::Accepted)); upstream_request_->encodeHeaders(default_response_headers_, false); - // Send some data from downstream to upstream, and make sure it goes through. - ASSERT_TRUE(tcp_client->write("hello", false)); - ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); - - // Send data from upstream to downstream. - upstream_request_->encodeData(12, false); - ASSERT_TRUE(tcp_client->waitForData(12)); + sendBidiData(fake_upstream_connection_); // Close the downstream connection and wait for upstream disconnect - tcp_client->close(); + tcp_client_->close(); ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } @@ -955,7 +965,7 @@ TEST_P(TcpTunnelingIntegrationTest, ContentLengthHeaderIgnoredHttp1) { initialize(); // Start a connection, and verify the upgrade headers are received upstream. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); @@ -968,11 +978,11 @@ TEST_P(TcpTunnelingIntegrationTest, ContentLengthHeaderIgnoredHttp1) { // Send data from upstream to downstream. upstream_request_->encodeData(12, false); - ASSERT_TRUE(tcp_client->waitForData(12)); + ASSERT_TRUE(tcp_client_->waitForData(12)); // Now send some data and close the TCP client. - ASSERT_TRUE(tcp_client->write("hello", false)); - tcp_client->close(); + ASSERT_TRUE(tcp_client_->write("hello", false)); + tcp_client_->close(); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } @@ -984,7 +994,7 @@ TEST_P(TcpTunnelingIntegrationTest, TransferEncodingHeaderIgnoredHttp1) { initialize(); // Start a connection, and verify the upgrade headers are received upstream. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); // Using raw connection to be able to set Transfer-encoding header. FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); @@ -998,24 +1008,24 @@ TEST_P(TcpTunnelingIntegrationTest, TransferEncodingHeaderIgnoredHttp1) { fake_upstream_connection->write("HTTP/1.1 200 OK\r\nTransfer-encoding: chunked\r\n\r\n")); // Now send some data and close the TCP client. - ASSERT_TRUE(tcp_client->write("hello")); + ASSERT_TRUE(tcp_client_->write("hello")); ASSERT_TRUE( fake_upstream_connection->waitForData(FakeRawConnection::waitForInexactMatch("hello"))); // Close connections. 
ASSERT_TRUE(fake_upstream_connection->close()); ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); - tcp_client->close(); + tcp_client_->close(); } TEST_P(TcpTunnelingIntegrationTest, DeferTransmitDataUntilSuccessConnectResponseIsReceived) { initialize(); // Start a connection, and verify the upgrade headers are received upstream. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); // Send some data straight away. - ASSERT_TRUE(tcp_client->write("hello", false)); + ASSERT_TRUE(tcp_client_->write("hello", false)); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); @@ -1028,7 +1038,7 @@ TEST_P(TcpTunnelingIntegrationTest, DeferTransmitDataUntilSuccessConnectResponse ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); - tcp_client->close(); + tcp_client_->close(); if (upstreamProtocol() == Http::CodecType::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } else { @@ -1042,10 +1052,10 @@ TEST_P(TcpTunnelingIntegrationTest, NoDataTransmittedIfConnectFailureResponseIsR initialize(); // Start a connection, and verify the upgrade headers are received upstream. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); // Send some data straight away. - ASSERT_TRUE(tcp_client->write("hello", false)); + ASSERT_TRUE(tcp_client_->write("hello", false)); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); @@ -1057,7 +1067,7 @@ TEST_P(TcpTunnelingIntegrationTest, NoDataTransmittedIfConnectFailureResponseIsR // Wait a bit, no data should go through. ASSERT_FALSE(upstream_request_->waitForData(*dispatcher_, 1, std::chrono::milliseconds(100))); - tcp_client->close(); + tcp_client_->close(); if (upstreamProtocol() == Http::CodecType::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } else { @@ -1069,15 +1079,15 @@ TEST_P(TcpTunnelingIntegrationTest, UpstreamDisconnectBeforeResponseReceived) { initialize(); // Start a connection, and verify the upgrade headers are received upstream. 
- IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); ASSERT_TRUE(fake_upstream_connection_->close()); - tcp_client->waitForHalfClose(); - tcp_client->close(); + tcp_client_->waitForHalfClose(); + tcp_client_->close(); } INSTANTIATE_TEST_SUITE_P(IpAndHttpVersions, TcpTunnelingIntegrationTest, diff --git a/test/integration/typed_metadata_integration_test.cc b/test/integration/typed_metadata_integration_test.cc new file mode 100644 index 000000000000..b5aac630c186 --- /dev/null +++ b/test/integration/typed_metadata_integration_test.cc @@ -0,0 +1,46 @@ +#include "source/common/protobuf/protobuf.h" + +#include "test/integration/http_protocol_integration.h" +#include "test/integration/integration.h" +#include "test/integration/utility.h" +#include "test/server/utility.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { + +using ListenerTypedMetadataIntegrationTest = ::Envoy::HttpProtocolIntegrationTest; + +INSTANTIATE_TEST_SUITE_P(Protocols, ListenerTypedMetadataIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()), + HttpProtocolIntegrationTest::protocolTestParamsToString); + +TEST_P(ListenerTypedMetadataIntegrationTest, Hello) { + // Add some typed metadata to the listener. + ProtobufWkt::StringValue value; + value.set_value("hello world"); + ProtobufWkt::Any packed_value; + packed_value.PackFrom(value); + config_helper_.addListenerTypedMetadata("test.listener.typed.metadata", packed_value); + + // Add the filter that reads the listener typed metadata. + config_helper_.addFilter(R"EOF({ + name: listener-typed-metadata-filter, + typed_config: { + "@type": type.googleapis.com/google.protobuf.Empty + } + })EOF"); + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Main assertion on parsing the typed metadata is in the filter. + // Here we just ensure the filter was created (so we know those assertions ran). 
+ auto response = codec_client_->makeRequestWithBody(default_request_headers_, 10); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +} // namespace Envoy diff --git a/test/integration/version_integration_test.cc b/test/integration/version_integration_test.cc index a4362b80e559..77cbe5f3e2bb 100644 --- a/test/integration/version_integration_test.cc +++ b/test/integration/version_integration_test.cc @@ -29,7 +29,7 @@ TEST_P(VersionIntegrationTest, IpTaggingV3StaticTypedStructConfig) { config_helper_.prependFilter(absl::StrCat(R"EOF( name: ip_tagging typed_config: - "@type": type.googleapis.com/udpa.type.v1.TypedStruct + "@type": type.googleapis.com/xds.type.v3.TypedStruct type_url: type.googleapis.com/envoy.extensions.filters.http.ip_tagging.v3.IPTagging value: )EOF", diff --git a/test/integration/vhds_integration_test.cc b/test/integration/vhds_integration_test.cc index 8edd51e96e8b..6ccf84a38152 100644 --- a/test/integration/vhds_integration_test.cc +++ b/test/integration/vhds_integration_test.cc @@ -154,10 +154,13 @@ domains: [{}] )EOF"; class VhdsInitializationTest : public HttpIntegrationTest, - public Grpc::GrpcClientIntegrationParamTest { + public Grpc::UnifiedOrLegacyMuxIntegrationParamTest { public: VhdsInitializationTest() : HttpIntegrationTest(Http::CodecType::HTTP2, ipVersion(), config()) { use_lds_ = false; + if (isUnified()) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.unified_mux", "true"); + } } void TearDown() override { cleanUpXdsConnection(); } @@ -211,7 +214,7 @@ class VhdsInitializationTest : public HttpIntegrationTest, }; INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, VhdsInitializationTest, - GRPC_CLIENT_INTEGRATION_PARAMS); + UNIFIED_LEGACY_GRPC_CLIENT_INTEGRATION_PARAMS); // tests a scenario when: // - RouteConfiguration without VHDS is received @@ -250,10 +253,13 @@ TEST_P(VhdsInitializationTest, InitializeVhdsAfterRdsHasBeenInitialized) { } class VhdsIntegrationTest : public HttpIntegrationTest, - public Grpc::GrpcClientIntegrationParamTest { + public Grpc::UnifiedOrLegacyMuxIntegrationParamTest { public: VhdsIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP2, ipVersion(), config()) { use_lds_ = false; + if (isUnified()) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.unified_mux", "true"); + } } void TearDown() override { cleanUpXdsConnection(); } @@ -397,7 +403,8 @@ class VhdsIntegrationTest : public HttpIntegrationTest, bool use_rds_with_vhosts{false}; }; -INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, VhdsIntegrationTest, GRPC_CLIENT_INTEGRATION_PARAMS); +INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, VhdsIntegrationTest, + UNIFIED_LEGACY_GRPC_CLIENT_INTEGRATION_PARAMS); TEST_P(VhdsIntegrationTest, RdsUpdateWithoutVHDSChangesDoesNotRestartVHDS) { testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/", "host"); diff --git a/test/integration/xds_integration_test.cc b/test/integration/xds_integration_test.cc index 7a11e6a15647..b225c4e1f527 100644 --- a/test/integration/xds_integration_test.cc +++ b/test/integration/xds_integration_test.cc @@ -301,6 +301,9 @@ class LdsInplaceUpdateHttpIntegrationTest std::string tls_inspector_config = ConfigHelper::tlsInspectorFilter(); config_helper_.addListenerFilter(tls_inspector_config); config_helper_.addSslConfig(); + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { 
hcm.mutable_stat_prefix()->assign("hcm0"); }); config_helper_.addConfigModifier([this, add_default_filter_chain]( envoy::config::bootstrap::v3::Bootstrap& bootstrap) { if (!use_default_balancer_) { @@ -335,6 +338,7 @@ class LdsInplaceUpdateHttpIntegrationTest ->mutable_routes(0) ->mutable_route() ->set_cluster("cluster_1"); + hcm_config.mutable_stat_prefix()->assign("hcm1"); config_blob->PackFrom(hcm_config); bootstrap.mutable_static_resources()->mutable_clusters()->Add()->MergeFrom( *bootstrap.mutable_static_resources()->mutable_clusters(0)); @@ -381,7 +385,7 @@ class LdsInplaceUpdateHttpIntegrationTest } } - void expectConnenctionServed(std::string alpn = "alpn0") { + void expectConnectionServed(std::string alpn = "alpn0") { auto codec_client_after_config_update = createHttpCodec(alpn); expectResponseHeaderConnectionClose(*codec_client_after_config_update, false); codec_client_after_config_update->close(); @@ -395,7 +399,7 @@ class LdsInplaceUpdateHttpIntegrationTest }; // Verify that http response on filter chain 1 and default filter chain have "Connection: close" -// header when these 2 filter chains are deleted during the listener update. +// header when these 2 filter chains are deleted during the listener update. TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) { inplaceInitialize(/*add_default_filter_chain=*/true); @@ -403,12 +407,6 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) { auto codec_client_0 = createHttpCodec("alpn0"); auto codec_client_default = createHttpCodec("alpndefault"); - Cleanup cleanup([c1 = codec_client_1.get(), c0 = codec_client_0.get(), - c_default = codec_client_default.get()]() { - c1->close(); - c0->close(); - c_default->close(); - }); ConfigHelper new_config_helper( version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(config_helper_.bootstrap())); new_config_helper.addConfigModifier( @@ -422,12 +420,20 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) { test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 1); + test_server_->waitForGaugeGe("http.hcm0.downstream_cx_active", 1); + test_server_->waitForGaugeGe("http.hcm1.downstream_cx_active", 1); + expectResponseHeaderConnectionClose(*codec_client_1, true); expectResponseHeaderConnectionClose(*codec_client_default, true); test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 0); expectResponseHeaderConnectionClose(*codec_client_0, false); - expectConnenctionServed(); + expectConnectionServed(); + + codec_client_1->close(); + test_server_->waitForGaugeDestroyed("http.hcm1.downstream_cx_active"); + codec_client_0->close(); + codec_client_default->close(); } // Verify that http clients of filter chain 0 survives if new listener config adds new filter @@ -438,15 +444,19 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigAddingFilterChain) { auto codec_client_0 = createHttpCodec("alpn0"); Cleanup cleanup0([c0 = codec_client_0.get()]() { c0->close(); }); + test_server_->waitForGaugeGe("http.hcm0.downstream_cx_active", 1); + ConfigHelper new_config_helper( version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(config_helper_.bootstrap())); new_config_helper.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); - 
listener->mutable_filter_chains()->Add()->MergeFrom(*listener->mutable_filter_chains(1)); + // Note that HCM2 copies the stats prefix from HCM0 + listener->mutable_filter_chains()->Add()->MergeFrom(*listener->mutable_filter_chains(0)); *listener->mutable_filter_chains(2) ->mutable_filter_chain_match() ->mutable_application_protocols(0) = "alpn2"; + auto default_filter_chain = bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_default_filter_chain(); default_filter_chain->MergeFrom(*listener->mutable_filter_chains(1)); @@ -458,6 +468,9 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigAddingFilterChain) { auto codec_client_2 = createHttpCodec("alpn2"); auto codec_client_default = createHttpCodec("alpndefault"); + // 1 connection from filter chain 0 and 1 connection from filter chain 2. + test_server_->waitForGaugeGe("http.hcm0.downstream_cx_active", 2); + Cleanup cleanup2([c2 = codec_client_2.get(), c_default = codec_client_default.get()]() { c2->close(); c_default->close(); @@ -465,7 +478,7 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigAddingFilterChain) { expectResponseHeaderConnectionClose(*codec_client_2, false); expectResponseHeaderConnectionClose(*codec_client_default, false); expectResponseHeaderConnectionClose(*codec_client_0, false); - expectConnenctionServed(); + expectConnectionServed(); } // Verify that http clients of default filter chain is drained and recreated if the default filter @@ -493,7 +506,7 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigUpdatingDefaultFilterCha Cleanup cleanup2([c_default_v3 = codec_client_default_v3.get()]() { c_default_v3->close(); }); expectResponseHeaderConnectionClose(*codec_client_default, true); expectResponseHeaderConnectionClose(*codec_client_default_v3, false); - expectConnenctionServed(); + expectConnectionServed(); } // Verify that balancer is inherited. Test only default balancer because ExactConnectionBalancer @@ -515,7 +528,7 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, OverlappingFilterChainServesNewConne new_config_helper.setLds("1"); test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); expectResponseHeaderConnectionClose(*codec_client_0, false); - expectConnenctionServed(); + expectConnectionServed(); } // Verify default filter chain update is filter chain only update. @@ -597,6 +610,84 @@ TEST_P(LdsIntegrationTest, NewListenerWithBadPostListenSocketOption) { test_server_->waitForCounterGe("listener_manager.listener_create_failure", 1); } +// Verify the grpc cached logger is available after the initial logger filter is destroyed. +// Regression test for https://github.com/envoyproxy/envoy/issues/18066 +TEST_P(LdsIntegrationTest, GrpcLoggerSurvivesAfterReloadConfig) { + autonomous_upstream_ = true; + // The grpc access logger connection never closes. It's ok to see an incomplete logging stream. 
+  autonomous_allow_incomplete_streams_ = true;
+
+  const std::string grpc_logger_string = R"EOF(
+    name: grpc_accesslog
+    typed_config:
+      "@type": type.googleapis.com/envoy.extensions.access_loggers.grpc.v3.HttpGrpcAccessLogConfig
+      common_config:
+        log_name: bar
+        transport_api_version: V3
+        grpc_service:
+          envoy_grpc:
+            cluster_name: cluster_0
+  )EOF";
+
+  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) {
+    auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);
+    listener->set_stat_prefix("listener_0");
+  });
+  config_helper_.addConfigModifier(
+      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+              hcm) { TestUtility::loadFromYaml(grpc_logger_string, *hcm.add_access_log()); });
+  initialize();
+  // Given we're using LDS in this test, initialize() will not complete until
+  // the initial LDS file has loaded.
+  EXPECT_EQ(1, test_server_->counter("listener_manager.lds.update_success")->value());
+
+  // HTTP/1.1 is allowed and the connection is kept open until the listener update.
+  std::string response;
+  auto connection =
+      createConnectionDriver(lookupPort("http"), "GET / HTTP/1.1\r\nHost: host\r\n\r\n",
+                             [&response, &dispatcher = *dispatcher_](
+                                 Network::ClientConnection&, const Buffer::Instance& data) -> void {
+                               response.append(data.toString());
+                               if (response.find("\r\n\r\n") != std::string::npos) {
+                                 dispatcher.exit();
+                               }
+                             });
+  connection->run();
+  EXPECT_TRUE(response.find("HTTP/1.1 200") == 0);
+
+  test_server_->waitForCounterEq("access_logs.grpc_access_log.logs_written", 1);
+
+  // Create a new config with HTTP/1.0 proxying. The goal is to trigger a listener update.
+  ConfigHelper new_config_helper(
+      version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(config_helper_.bootstrap()));
+  new_config_helper.addConfigModifier(
+      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+              hcm) {
+        hcm.mutable_http_protocol_options()->set_accept_http_10(true);
+        hcm.mutable_http_protocol_options()->set_default_host_for_http_10("default.com");
+      });
+
+  // Create an LDS response with the new config, and reload config.
+  new_config_helper.setLds("1");
+  test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1);
+  test_server_->waitForCounterEq("listener_manager.lds.update_success", 2);
+
+  // Wait until the HTTP/1.1 connection is destroyed due to the listener update. This indicates
+  // that the listener has started draining.
+  test_server_->waitForGaugeEq("listener.listener_0.downstream_cx_active", 0);
+  // Wait until all the draining filter chains are gone. This indicates that the old listener and
+  // filter chains have been destroyed.
+  test_server_->waitForGaugeEq("listener_manager.total_filter_chains_draining", 0);
+
+  // Verify that the new listener config is applied.
+  std::string response2;
+  sendRawHttpAndWaitForResponse(lookupPort("http"), "GET / HTTP/1.0\r\n\r\n", &response2, true);
+  EXPECT_THAT(response2, HasSubstr("HTTP/1.0 200 OK\r\n"));
+
+  // Verify that the grpc access logger is available after the listener update.
+  test_server_->waitForCounterEq("access_logs.grpc_access_log.logs_written", 2);
+}
+
+// Sample test making sure our config framework informs on listener failure.
TEST_P(LdsIntegrationTest, FailConfigLoad) { config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { diff --git a/test/mocks/config/mocks.h b/test/mocks/config/mocks.h index ffcfdc796831..b85e34b14c36 100644 --- a/test/mocks/config/mocks.h +++ b/test/mocks/config/mocks.h @@ -52,6 +52,10 @@ class MockUntypedConfigUpdateCallbacks : public UntypedConfigUpdateCallbacks { MOCK_METHOD(void, onConfigUpdate, (const Protobuf::RepeatedPtrField& resources, const std::string& version_info)); + + MOCK_METHOD(void, onConfigUpdate, + (const std::vector& resources, const std::string& version_info)); + MOCK_METHOD( void, onConfigUpdate, (const Protobuf::RepeatedPtrField& added_resources, diff --git a/test/mocks/event/mocks.h b/test/mocks/event/mocks.h index fe7bb325436a..8d91e3558291 100644 --- a/test/mocks/event/mocks.h +++ b/test/mocks/event/mocks.h @@ -133,9 +133,6 @@ class MockDispatcher : public Dispatcher { Network::Address::InstanceConstSharedPtr source_address, Network::TransportSocketPtr& transport_socket, const Network::ConnectionSocket::OptionsSharedPtr& options)); - MOCK_METHOD(Network::DnsResolverSharedPtr, createDnsResolver, - (const std::vector& resolvers, - const envoy::config::core::v3::DnsResolverOptions& dns_resolver_options)); MOCK_METHOD(FileEvent*, createFileEvent_, (os_fd_t fd, FileReadyCb cb, FileTriggerType trigger, uint32_t events)); MOCK_METHOD(Filesystem::Watcher*, createFilesystemWatcher_, ()); diff --git a/test/mocks/event/wrapped_dispatcher.h b/test/mocks/event/wrapped_dispatcher.h index de0949358f5d..c36705cd457b 100644 --- a/test/mocks/event/wrapped_dispatcher.h +++ b/test/mocks/event/wrapped_dispatcher.h @@ -50,12 +50,6 @@ class WrappedDispatcher : public Dispatcher { std::move(transport_socket), options); } - Network::DnsResolverSharedPtr createDnsResolver( - const std::vector& resolvers, - const envoy::config::core::v3::DnsResolverOptions& dns_resolver_options) override { - return impl_.createDnsResolver(resolvers, dns_resolver_options); - } - FileEventPtr createFileEvent(os_fd_t fd, FileReadyCb cb, FileTriggerType trigger, uint32_t events) override { return impl_.createFileEvent(fd, cb, trigger, events); diff --git a/test/mocks/http/alternate_protocols_cache.h b/test/mocks/http/alternate_protocols_cache.h index e1e0a4558c83..eab348120845 100644 --- a/test/mocks/http/alternate_protocols_cache.h +++ b/test/mocks/http/alternate_protocols_cache.h @@ -22,7 +22,8 @@ class MockAlternateProtocolsCacheManager : public AlternateProtocolsCacheManager ~MockAlternateProtocolsCacheManager() override; MOCK_METHOD(AlternateProtocolsCacheSharedPtr, getCache, - (const envoy::config::core::v3::AlternateProtocolsCacheOptions& config)); + (const envoy::config::core::v3::AlternateProtocolsCacheOptions& config, + Event::Dispatcher& dispatcher)); }; class MockAlternateProtocolsCacheManagerFactory : public AlternateProtocolsCacheManagerFactory { diff --git a/test/mocks/http/mocks.h b/test/mocks/http/mocks.h index 83cf041e0702..11db5041411a 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -516,6 +516,7 @@ class MockFilterChainFactoryCallbacks : public Http::FilterChainFactoryCallbacks (Http::StreamFilterSharedPtr filter, Matcher::MatchTreeSharedPtr match_tree)); MOCK_METHOD(void, addAccessLogHandler, (AccessLog::InstanceSharedPtr handler)); + MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); }; class MockDownstreamWatermarkCallbacks : public DownstreamWatermarkCallbacks { diff --git a/test/mocks/http/stream.h 
b/test/mocks/http/stream.h index f456ade6fc96..9a0abc4c58df 100644 --- a/test/mocks/http/stream.h +++ b/test/mocks/http/stream.h @@ -38,6 +38,10 @@ class MockStream : public Stream { callback->onBelowWriteBufferLowWatermark(); } } + + const StreamInfo::BytesMeterSharedPtr& bytesMeter() override { return bytes_meter_; } + + StreamInfo::BytesMeterSharedPtr bytes_meter_{std::make_shared()}; }; } // namespace Http diff --git a/test/mocks/http/stream_decoder.h b/test/mocks/http/stream_decoder.h index 84017eceb6cd..3e930a629cce 100644 --- a/test/mocks/http/stream_decoder.h +++ b/test/mocks/http/stream_decoder.h @@ -21,7 +21,7 @@ class MockRequestDecoder : public RequestDecoder { const std::function& modify_headers, const absl::optional grpc_status, absl::string_view details)); - MOCK_METHOD(const StreamInfo::StreamInfo&, streamInfo, (), (const)); + MOCK_METHOD(StreamInfo::StreamInfo&, streamInfo, ()); void decodeHeaders(RequestHeaderMapPtr&& headers, bool end_stream) override { decodeHeaders_(headers, end_stream); diff --git a/test/mocks/network/BUILD b/test/mocks/network/BUILD index 3e3757f1ea2f..304cb8d8c44f 100644 --- a/test/mocks/network/BUILD +++ b/test/mocks/network/BUILD @@ -59,6 +59,7 @@ envoy_cc_mock( "//source/common/network:address_lib", "//source/common/network:socket_interface_lib", "//source/common/network:utility_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", "//source/common/stats:isolated_store_lib", "//test/mocks/event:event_mocks", "//test/mocks/stream_info:stream_info_mocks", diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index 22ea2a6d1f66..f22effb4e804 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -18,6 +18,7 @@ #include "envoy/network/transport_socket.h" #include "envoy/stats/scope.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/network/filter_manager_impl.h" #include "source/common/network/socket_interface.h" #include "source/common/network/socket_interface_impl.h" @@ -64,6 +65,22 @@ class MockDnsResolver : public DnsResolver { testing::NiceMock active_query_; }; +class MockDnsResolverFactory : public DnsResolverFactory { +public: + MockDnsResolverFactory() = default; + ~MockDnsResolverFactory() override = default; + + MOCK_METHOD(DnsResolverSharedPtr, createDnsResolver, + (Event::Dispatcher & dispatcher, Api::Api& api, + const envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config), + (const, override)); + std::string name() const override { return std::string(CaresDnsResolver); }; + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return ProtobufTypes::MessagePtr{ + new envoy::extensions::network::dns_resolver::cares::v3::CaresDnsResolverConfig()}; + } +}; + class MockAddressResolver : public Address::Resolver { public: MockAddressResolver(); @@ -289,6 +306,7 @@ class MockSocketOption : public Socket::Option { MOCK_METHOD(void, hashKey, (std::vector&), (const)); MOCK_METHOD(absl::optional, getOptionDetails, (const Socket&, envoy::config::core::v3::SocketOption::SocketState state), (const)); + MOCK_METHOD(bool, isSupported, (), (const)); }; class MockConnectionSocket : public ConnectionSocket { @@ -577,7 +595,8 @@ class MockUdpListenerReadFilter : public UdpListenerReadFilter { MockUdpListenerReadFilter(UdpReadFilterCallbacks& callbacks); ~MockUdpListenerReadFilter() override; - MOCK_METHOD(void, onData, (UdpRecvData&)); + MOCK_METHOD(Network::FilterStatus, onData, (UdpRecvData&)); + MOCK_METHOD(Network::FilterStatus, 
onReceiveError, (Api::IoError::IoErrorCode)); }; class MockUdpListenerFilterManager : public UdpListenerFilterManager { diff --git a/test/mocks/protobuf/mocks.h b/test/mocks/protobuf/mocks.h index 3e61b31fed12..7a54e2c18d61 100644 --- a/test/mocks/protobuf/mocks.h +++ b/test/mocks/protobuf/mocks.h @@ -14,9 +14,9 @@ class MockValidationVisitor : public ValidationVisitor { MOCK_METHOD(void, onUnknownField, (absl::string_view)); MOCK_METHOD(void, onDeprecatedField, (absl::string_view, bool)); + MOCK_METHOD(void, onWorkInProgress, (absl::string_view)); bool skipValidation() override { return skip_validation_; } - void setSkipValidation(bool s) { skip_validation_ = s; } private: diff --git a/test/mocks/server/BUILD b/test/mocks/server/BUILD index 669071ba8c8d..9e0fca173f42 100644 --- a/test/mocks/server/BUILD +++ b/test/mocks/server/BUILD @@ -244,6 +244,7 @@ envoy_cc_mock( "//source/common/secret:secret_manager_impl_lib", "//test/mocks/api:api_mocks", "//test/mocks/server:config_tracker_mocks", + "//test/mocks/server:options_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/upstream:cluster_manager_mocks", ], diff --git a/test/mocks/server/factory_context.cc b/test/mocks/server/factory_context.cc index 70d440be4a10..40b9648dda9f 100644 --- a/test/mocks/server/factory_context.cc +++ b/test/mocks/server/factory_context.cc @@ -29,6 +29,7 @@ MockFactoryContext::MockFactoryContext() ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_)); + ON_CALL(*this, serverScope()).WillByDefault(ReturnRef(scope_)); ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); diff --git a/test/mocks/server/factory_context.h b/test/mocks/server/factory_context.h index 53092aa382b4..79797209d211 100644 --- a/test/mocks/server/factory_context.h +++ b/test/mocks/server/factory_context.h @@ -33,6 +33,7 @@ class MockFactoryContext : public virtual FactoryContext { MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); MOCK_METHOD(Stats::Scope&, scope, ()); + MOCK_METHOD(Stats::Scope&, serverScope, ()); MOCK_METHOD(Singleton::Manager&, singletonManager, ()); MOCK_METHOD(OverloadManager&, overloadManager, ()); MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); @@ -41,6 +42,7 @@ class MockFactoryContext : public virtual FactoryContext { MOCK_METHOD(bool, isQuicListener, (), (const)); MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); MOCK_METHOD(const envoy::config::core::v3::Metadata&, listenerMetadata, (), (const)); + MOCK_METHOD(const Envoy::Config::TypedMetadata&, listenerTypedMetadata, (), (const)); MOCK_METHOD(envoy::config::core::v3::TrafficDirection, direction, (), (const)); MOCK_METHOD(TimeSource&, timeSource, ()); Event::TestTimeSystem& timeSystem() { return time_system_; } diff --git a/test/mocks/server/instance.cc b/test/mocks/server/instance.cc index 3ea0f9ea53ad..2351629bc4a8 100644 --- a/test/mocks/server/instance.cc +++ b/test/mocks/server/instance.cc @@ -66,6 +66,7 @@ MockServerFactoryContext::MockServerFactoryContext() ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_)); + ON_CALL(*this, 
serverScope()).WillByDefault(ReturnRef(scope_)); ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); diff --git a/test/mocks/server/instance.h b/test/mocks/server/instance.h index 51858c0ba7f1..de4f51099823 100644 --- a/test/mocks/server/instance.h +++ b/test/mocks/server/instance.h @@ -155,6 +155,7 @@ class MockServerFactoryContext : public virtual ServerFactoryContext { MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); MOCK_METHOD(Stats::Scope&, scope, ()); + MOCK_METHOD(Stats::Scope&, serverScope, ()); MOCK_METHOD(Singleton::Manager&, singletonManager, ()); MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); MOCK_METHOD(Server::Admin&, admin, ()); diff --git a/test/mocks/server/listener_factory_context.cc b/test/mocks/server/listener_factory_context.cc index fd8c0e047d13..a604a4bec5a6 100644 --- a/test/mocks/server/listener_factory_context.cc +++ b/test/mocks/server/listener_factory_context.cc @@ -28,6 +28,7 @@ MockListenerFactoryContext::MockListenerFactoryContext() ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_)); + ON_CALL(*this, serverScope()).WillByDefault(ReturnRef(scope_)); ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); diff --git a/test/mocks/server/listener_factory_context.h b/test/mocks/server/listener_factory_context.h index c78138e477de..0047446ba774 100644 --- a/test/mocks/server/listener_factory_context.h +++ b/test/mocks/server/listener_factory_context.h @@ -35,6 +35,7 @@ class MockListenerFactoryContext : public ListenerFactoryContext { MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); MOCK_METHOD(Stats::Scope&, scope, ()); + MOCK_METHOD(Stats::Scope&, serverScope, ()); MOCK_METHOD(Singleton::Manager&, singletonManager, ()); MOCK_METHOD(OverloadManager&, overloadManager, ()); MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); @@ -43,6 +44,7 @@ class MockListenerFactoryContext : public ListenerFactoryContext { MOCK_METHOD(bool, isQuicListener, (), (const)); MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); MOCK_METHOD(const envoy::config::core::v3::Metadata&, listenerMetadata, (), (const)); + MOCK_METHOD(const Envoy::Config::TypedMetadata&, listenerTypedMetadata, (), (const)); MOCK_METHOD(envoy::config::core::v3::TrafficDirection, direction, (), (const)); MOCK_METHOD(TimeSource&, timeSource, ()); Event::TestTimeSystem& timeSystem() { return time_system_; } diff --git a/test/mocks/server/transport_socket_factory_context.cc b/test/mocks/server/transport_socket_factory_context.cc index 88ea41bd20fa..19be74677361 100644 --- a/test/mocks/server/transport_socket_factory_context.cc +++ b/test/mocks/server/transport_socket_factory_context.cc @@ -19,6 +19,7 @@ MockTransportSocketFactoryContext::MockTransportSocketFactoryContext() .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); ON_CALL(*this, sslContextManager()).WillByDefault(ReturnRef(context_manager_)); ON_CALL(*this, scope()).WillByDefault(ReturnRef(store_)); + ON_CALL(*this, 
options()).WillByDefault(ReturnRef(options_)); } MockTransportSocketFactoryContext::~MockTransportSocketFactoryContext() = default; diff --git a/test/mocks/server/transport_socket_factory_context.h b/test/mocks/server/transport_socket_factory_context.h index fe3190962625..75389e159b59 100644 --- a/test/mocks/server/transport_socket_factory_context.h +++ b/test/mocks/server/transport_socket_factory_context.h @@ -5,6 +5,7 @@ #include "source/common/secret/secret_manager_impl.h" #include "test/mocks/api/mocks.h" +#include "test/mocks/server/options.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/stats/mocks.h" #include "test/mocks/upstream/cluster_manager.h" @@ -42,6 +43,7 @@ class MockTransportSocketFactoryContext : public TransportSocketFactoryContext { testing::NiceMock config_tracker_; testing::NiceMock context_manager_; testing::NiceMock store_; + testing::NiceMock options_; std::unique_ptr secret_manager_; }; } // namespace Configuration diff --git a/test/mocks/stats/mocks.cc b/test/mocks/stats/mocks.cc index 66ed44834fc1..7eba3fc31cda 100644 --- a/test/mocks/stats/mocks.cc +++ b/test/mocks/stats/mocks.cc @@ -72,6 +72,7 @@ MockSink::~MockSink() = default; MockStore::MockStore() { ON_CALL(*this, counter(_)).WillByDefault(ReturnRef(counter_)); + ON_CALL(*this, gauge(_, _)).WillByDefault(ReturnRef(gauge_)); ON_CALL(*this, histogram(_, _)) .WillByDefault(Invoke([this](const std::string& name, Histogram::Unit unit) -> Histogram& { auto* histogram = new NiceMock(); // symbol_table_); diff --git a/test/mocks/stats/mocks.h b/test/mocks/stats/mocks.h index ba81b5922f30..3043a2c35fbb 100644 --- a/test/mocks/stats/mocks.h +++ b/test/mocks/stats/mocks.h @@ -321,6 +321,7 @@ class MockStore : public TestUtil::TestStore { TestUtil::TestSymbolTable symbol_table_; testing::NiceMock counter_; + testing::NiceMock gauge_; std::vector> histograms_; }; diff --git a/test/mocks/stream_info/mocks.cc b/test/mocks/stream_info/mocks.cc index 49bdf235a5ae..2eaad512dcd7 100644 --- a/test/mocks/stream_info/mocks.cc +++ b/test/mocks/stream_info/mocks.cc @@ -130,6 +130,16 @@ MockStreamInfo::MockStreamInfo() attempt_count_ = attempt_count; })); ON_CALL(*this, attemptCount()).WillByDefault(Invoke([this]() { return attempt_count_; })); + ON_CALL(*this, getUpstreamBytesMeter()).WillByDefault(ReturnPointee(&upstream_bytes_meter_)); + ON_CALL(*this, getDownstreamBytesMeter()).WillByDefault(ReturnPointee(&downstream_bytes_meter_)); + ON_CALL(*this, setUpstreamBytesMeter(_)) + .WillByDefault(Invoke([this](const BytesMeterSharedPtr& upstream_bytes_meter) { + upstream_bytes_meter_ = upstream_bytes_meter; + })); + ON_CALL(*this, setDownstreamBytesMeter(_)) + .WillByDefault(Invoke([this](const BytesMeterSharedPtr& downstream_bytes_meter) { + downstream_bytes_meter_ = downstream_bytes_meter; + })); } MockStreamInfo::~MockStreamInfo() = default; diff --git a/test/mocks/stream_info/mocks.h b/test/mocks/stream_info/mocks.h index 2d296f9697fb..be4c47a77ead 100644 --- a/test/mocks/stream_info/mocks.h +++ b/test/mocks/stream_info/mocks.h @@ -48,6 +48,8 @@ class MockStreamInfo : public StreamInfo { MOCK_METHOD(absl::optional, requestComplete, (), (const)); MOCK_METHOD(void, addBytesReceived, (uint64_t)); MOCK_METHOD(uint64_t, bytesReceived, (), (const)); + MOCK_METHOD(void, addWireBytesReceived, (uint64_t)); + MOCK_METHOD(uint64_t, wireBytesReceived, (), (const)); MOCK_METHOD(void, setRouteName, (absl::string_view route_name)); MOCK_METHOD(const std::string&, getRouteName, (), (const)); MOCK_METHOD(absl::optional, protocol, (), 
(const)); @@ -57,6 +59,8 @@ class MockStreamInfo : public StreamInfo { MOCK_METHOD(const absl::optional&, connectionTerminationDetails, (), (const)); MOCK_METHOD(void, addBytesSent, (uint64_t)); MOCK_METHOD(uint64_t, bytesSent, (), (const)); + MOCK_METHOD(void, addWireBytesSent, (uint64_t)); + MOCK_METHOD(uint64_t, wireBytesSent, (), (const)); MOCK_METHOD(bool, hasResponseFlag, (ResponseFlag), (const)); MOCK_METHOD(bool, hasAnyResponseFlag, (), (const)); MOCK_METHOD(uint64_t, responseFlags, (), (const)); @@ -98,7 +102,10 @@ class MockStreamInfo : public StreamInfo { MOCK_METHOD(absl::optional, upstreamConnectionId, (), (const)); MOCK_METHOD(void, setAttemptCount, (uint32_t), ()); MOCK_METHOD(absl::optional, attemptCount, (), (const)); - + MOCK_METHOD(const BytesMeterSharedPtr&, getUpstreamBytesMeter, (), (const)); + MOCK_METHOD(const BytesMeterSharedPtr&, getDownstreamBytesMeter, (), (const)); + MOCK_METHOD(void, setUpstreamBytesMeter, (const BytesMeterSharedPtr&)); + MOCK_METHOD(void, setDownstreamBytesMeter, (const BytesMeterSharedPtr&)); std::shared_ptr> host_{ new testing::NiceMock()}; Envoy::Event::SimulatedTimeSystem ts_; @@ -125,6 +132,8 @@ class MockStreamInfo : public StreamInfo { uint64_t bytes_sent_{}; Network::Address::InstanceConstSharedPtr upstream_local_address_; std::shared_ptr downstream_connection_info_provider_; + BytesMeterSharedPtr upstream_bytes_meter_; + BytesMeterSharedPtr downstream_bytes_meter_; Ssl::ConnectionInfoConstSharedPtr downstream_connection_info_; Ssl::ConnectionInfoConstSharedPtr upstream_connection_info_; std::string route_name_; diff --git a/test/mocks/upstream/BUILD b/test/mocks/upstream/BUILD index e6c41713f545..85eadc255f91 100644 --- a/test/mocks/upstream/BUILD +++ b/test/mocks/upstream/BUILD @@ -93,6 +93,7 @@ envoy_cc_mock( ":thread_aware_load_balancer_mocks", ":thread_local_cluster_mocks", ":transport_socket_match_mocks", + ":typed_load_balancer_factory_mocks", "//envoy/http:async_client_interface", "//envoy/upstream:cluster_factory_interface", "//envoy/upstream:cluster_manager_interface", @@ -204,6 +205,15 @@ envoy_cc_mock( ], ) +envoy_cc_mock( + name = "typed_load_balancer_factory_mocks", + srcs = ["typed_load_balancer_factory.cc"], + hdrs = ["typed_load_balancer_factory.h"], + deps = [ + "//envoy/upstream:load_balancer_interface", + ], +) + envoy_cc_mock( name = "thread_local_cluster_mocks", srcs = ["thread_local_cluster.cc"], diff --git a/test/mocks/upstream/cluster_info.cc b/test/mocks/upstream/cluster_info.cc index c24c81d95bc2..75b0f629e68f 100644 --- a/test/mocks/upstream/cluster_info.cc +++ b/test/mocks/upstream/cluster_info.cc @@ -99,6 +99,7 @@ MockClusterInfo::MockClusterInfo() ON_CALL(*this, lbType()).WillByDefault(ReturnPointee(&lb_type_)); ON_CALL(*this, sourceAddress()).WillByDefault(ReturnRef(source_address_)); ON_CALL(*this, lbSubsetInfo()).WillByDefault(ReturnRef(lb_subset_)); + ON_CALL(*this, lbRoundRobinConfig()).WillByDefault(ReturnRef(lb_round_robin_config_)); ON_CALL(*this, lbRingHashConfig()).WillByDefault(ReturnRef(lb_ring_hash_config_)); ON_CALL(*this, lbMaglevConfig()).WillByDefault(ReturnRef(lb_maglev_config_)); ON_CALL(*this, lbOriginalDstConfig()).WillByDefault(ReturnRef(lb_original_dst_config_)); diff --git a/test/mocks/upstream/cluster_info.h b/test/mocks/upstream/cluster_info.h index 5e5415f88472..afd3f405c57d 100644 --- a/test/mocks/upstream/cluster_info.h +++ b/test/mocks/upstream/cluster_info.h @@ -90,6 +90,7 @@ class MockClusterInfo : public ClusterInfo { MOCK_METHOD(bool, addedViaApi, (), (const)); 
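A note on the mock defaults added above: MockStreamInfo wires its new byte-meter accessors with ON_CALL(...).WillByDefault(ReturnPointee(&member_)) plus an Invoke'd setter, so the getter re-reads the member on every call and reflects whatever the setter (or the test) stored later — unlike Return(member_), which would snapshot the value at the time the default action was installed. The sketch below is illustrative only and not part of the patch; Meter/MockInfo are made-up stand-ins for the BytesMeter types.

#include <memory>

#include "gmock/gmock.h"
#include "gtest/gtest.h"

namespace {

// Hypothetical stand-ins for StreamInfo::BytesMeter, used only for this illustration.
struct Meter {};
using MeterSharedPtr = std::shared_ptr<Meter>;

class Info {
public:
  virtual ~Info() = default;
  virtual const MeterSharedPtr& upstreamMeter() const = 0;
  virtual void setUpstreamMeter(const MeterSharedPtr& meter) = 0;
};

class MockInfo : public Info {
public:
  MockInfo() {
    // Same pattern as the MockStreamInfo defaults above: the getter dereferences the member at
    // call time, and the setter writes through to that member.
    ON_CALL(*this, upstreamMeter()).WillByDefault(testing::ReturnPointee(&meter_));
    ON_CALL(*this, setUpstreamMeter(testing::_))
        .WillByDefault(testing::Invoke([this](const MeterSharedPtr& meter) { meter_ = meter; }));
  }
  MOCK_METHOD(const MeterSharedPtr&, upstreamMeter, (), (const));
  MOCK_METHOD(void, setUpstreamMeter, (const MeterSharedPtr&));
  MeterSharedPtr meter_;
};

TEST(MockInfoSketch, GetterTracksSetter) {
  testing::NiceMock<MockInfo> info;
  EXPECT_EQ(nullptr, info.upstreamMeter());
  auto meter = std::make_shared<Meter>();
  info.setUpstreamMeter(meter);
  EXPECT_EQ(meter, info.upstreamMeter()); // sees the value stored after the default was set up
}

} // namespace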
MOCK_METHOD(std::chrono::milliseconds, connectTimeout, (), (const)); MOCK_METHOD(const absl::optional, idleTimeout, (), (const)); + MOCK_METHOD(const absl::optional, maxConnectionDuration, (), (const)); MOCK_METHOD(const absl::optional, maxStreamDuration, (), (const)); MOCK_METHOD(const absl::optional, grpcTimeoutHeaderMax, (), (const)); MOCK_METHOD(const absl::optional, grpcTimeoutHeaderOffset, (), @@ -117,6 +118,8 @@ class MockClusterInfo : public ClusterInfo { lbRingHashConfig, (), (const)); MOCK_METHOD(const absl::optional&, lbMaglevConfig, (), (const)); + MOCK_METHOD(const absl::optional&, + lbRoundRobinConfig, (), (const)); MOCK_METHOD(const absl::optional&, lbLeastRequestConfig, (), (const)); MOCK_METHOD(const absl::optional&, @@ -194,6 +197,7 @@ class MockClusterInfo : public ClusterInfo { upstream_http_protocol_options_; absl::optional alternate_protocols_cache_options_; + absl::optional lb_round_robin_config_; absl::optional lb_ring_hash_config_; absl::optional lb_maglev_config_; absl::optional lb_original_dst_config_; @@ -215,5 +219,11 @@ class MockIdleTimeEnabledClusterInfo : public MockClusterInfo { ~MockIdleTimeEnabledClusterInfo() override; }; +class MockMaxConnectionDurationEnabledClusterInfo : public MockClusterInfo { +public: + MockMaxConnectionDurationEnabledClusterInfo(); + ~MockMaxConnectionDurationEnabledClusterInfo() override; +}; + } // namespace Upstream } // namespace Envoy diff --git a/test/mocks/upstream/load_balancer.h b/test/mocks/upstream/load_balancer.h index 356782e914bb..5edbd0d06b73 100644 --- a/test/mocks/upstream/load_balancer.h +++ b/test/mocks/upstream/load_balancer.h @@ -17,6 +17,11 @@ class MockLoadBalancer : public LoadBalancer { // Upstream::LoadBalancer MOCK_METHOD(HostConstSharedPtr, chooseHost, (LoadBalancerContext * context)); MOCK_METHOD(HostConstSharedPtr, peekAnotherHost, (LoadBalancerContext * context)); + MOCK_METHOD(absl::optional, selectExistingConnection, + (Upstream::LoadBalancerContext * context, const Upstream::Host& host, + std::vector& hash_key)); + MOCK_METHOD(OptRef, lifetimeCallbacks, + ()); std::shared_ptr host_{new MockHost()}; }; diff --git a/test/mocks/upstream/typed_load_balancer_factory.cc b/test/mocks/upstream/typed_load_balancer_factory.cc new file mode 100644 index 000000000000..7fff528012b0 --- /dev/null +++ b/test/mocks/upstream/typed_load_balancer_factory.cc @@ -0,0 +1,10 @@ +#include "typed_load_balancer_factory.h" + +namespace Envoy { +namespace Upstream { +MockTypedLoadBalancerFactory::MockTypedLoadBalancerFactory() = default; + +MockTypedLoadBalancerFactory::~MockTypedLoadBalancerFactory() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/typed_load_balancer_factory.h b/test/mocks/upstream/typed_load_balancer_factory.h new file mode 100644 index 000000000000..33f6849597af --- /dev/null +++ b/test/mocks/upstream/typed_load_balancer_factory.h @@ -0,0 +1,23 @@ +#pragma once + +#include "envoy/upstream/load_balancer.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockTypedLoadBalancerFactory : public TypedLoadBalancerFactory { +public: + MockTypedLoadBalancerFactory(); + ~MockTypedLoadBalancerFactory() override; + + // Upstream::TypedLoadBalancerFactory + MOCK_METHOD(std::string, name, (), (const)); + MOCK_METHOD(ThreadAwareLoadBalancerPtr, create, + (const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& stats_scope, + Runtime::Loader& runtime, Random::RandomGenerator& random, + const 
::envoy::config::cluster::v3::LoadBalancingPolicy_Policy& lb_policy)); +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 701be1208113..1a50e20be0ca 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -3,32 +3,35 @@ # directory:coverage_percent # for existing directories with low coverage. declare -a KNOWN_LOW_COVERAGE=( -"source/common:96.1" # Raise when QUIC coverage goes up +"source/common:95.9" # Raise when QUIC coverage goes up "source/common/api:79.8" "source/common/api/posix:78.5" "source/common/common/posix:92.7" -"source/common/config/xds_mux:94.5" +"source/common/config:96.5" "source/common/crypto:0.0" "source/common/event:94.1" # Emulated edge events guards don't report LCOV "source/common/filesystem/posix:95.5" -"source/common/http:96.5" +"source/common/http:96.3" +"source/common/http/http2:96.4" "source/common/json:90.1" -"source/common/matcher:94.2" -"source/common/network:94.8" # Flaky, `activateFileEvents`, `startSecureTransport` and `ioctl` do not always report LCOV +"source/common/matcher:94.0" +"source/common/network:94.4" # Flaky, `activateFileEvents`, `startSecureTransport` and `ioctl`, listener_socket do not always report LCOV +"source/common/network/dns_resolver:90.7" # A few lines of MacOS code not tested in linux scripts. Tested in MacOS scripts "source/common/protobuf:95.3" "source/common/quic:91.8" -"source/common/secret:96.3" +"source/common/router:96.5" +"source/common/secret:94.9" "source/common/signal:86.9" # Death tests don't report LCOV "source/common/singleton:95.7" "source/common/tcp:94.6" "source/common/thread:0.0" # Death tests don't report LCOV "source/common/tracing:96.1" -"source/common/upstream:96.5" +"source/common/upstream:96.1" "source/common/watchdog:58.6" # Death tests don't report LCOV "source/exe:92.6" -"source/extensions/common:96.0" +"source/extensions/common:95.8" "source/extensions/common/tap:94.2" -"source/extensions/common/wasm:95.3" # flaky: be careful adjusting +"source/extensions/common/wasm:95.2" # flaky: be careful adjusting "source/extensions/common/wasm/ext:92.0" "source/extensions/filters/common:96.1" "source/extensions/filters/common/expr:96.2" @@ -40,11 +43,12 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/filters/http/grpc_json_transcoder:94.7" "source/extensions/filters/http/ip_tagging:89.1" "source/extensions/filters/http/kill_request:95.3" # Death tests don't report LCOV +"source/extensions/filters/http/lua:96.4" "source/extensions/filters/http/wasm:95.8" -"source/extensions/filters/listener:96.2" -"source/extensions/filters/listener/http_inspector:95.9" +"source/extensions/filters/listener:95.9" +"source/extensions/filters/listener/http_inspector:95.8" "source/extensions/filters/listener/original_dst:93.3" -"source/extensions/filters/listener/tls_inspector:93.5" +"source/extensions/filters/listener/tls_inspector:92.3" "source/extensions/filters/network/common:96.0" "source/extensions/filters/network/common/redis:96.2" "source/extensions/filters/network/mongo_proxy:95.5" @@ -53,7 +57,7 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/filters/network/thrift_proxy/router:96.4" "source/extensions/filters/network/wasm:95.7" "source/extensions/filters/udp:96.4" -"source/extensions/filters/udp/dns_filter:96.2" +"source/extensions/filters/udp/dns_filter:96.1" "source/extensions/health_checkers:95.7" "source/extensions/health_checkers/redis:95.7" "source/extensions/io_socket:96.2" @@ -65,9 +69,9 @@ declare -a 
KNOWN_LOW_COVERAGE=( "source/extensions/tracers/opencensus:94.8" "source/extensions/tracers/xray:96.2" "source/extensions/tracers/zipkin:96.1" -"source/extensions/transport_sockets:95.4" -"source/extensions/transport_sockets/tls:94.6" -"source/extensions/transport_sockets/tls/cert_validator:96.0" +"source/extensions/transport_sockets:95.3" +"source/extensions/transport_sockets/tls:94.5" +"source/extensions/transport_sockets/tls/cert_validator:95.8" "source/extensions/transport_sockets/tls/ocsp:96.5" "source/extensions/transport_sockets/tls/private_key:77.8" "source/extensions/wasm_runtime/wamr:0.0" # Not enabled in coverage build @@ -77,7 +81,7 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/watchdog/profile_action:83.3" "source/server:93.5" # flaky: be careful adjusting. See https://github.com/envoyproxy/envoy/issues/15239 "source/server/admin:95.3" -"source/server/config_validation:76.7" +"source/server/config_validation:74.8" ) [[ -z "${SRCDIR}" ]] && SRCDIR="${PWD}" diff --git a/test/proto/BUILD b/test/proto/BUILD index f1ab09c62349..4d44e5d89569 100644 --- a/test/proto/BUILD +++ b/test/proto/BUILD @@ -49,6 +49,6 @@ envoy_proto_library( srcs = [":sensitive.proto"], deps = [ "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_github_cncf_udpa//udpa/type/v1:pkg", + "@com_github_cncf_udpa//xds/type/v3:pkg", ], ) diff --git a/test/proto/sensitive.proto b/test/proto/sensitive.proto index 9d7726143e0b..2047dc465f80 100644 --- a/test/proto/sensitive.proto +++ b/test/proto/sensitive.proto @@ -10,7 +10,7 @@ import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/sensitive.proto"; -import "udpa/type/v1/typed_struct.proto"; +import "xds/type/v3/typed_struct.proto"; message Sensitive { string sensitive_string = 1 [(udpa.annotations.sensitive) = true]; @@ -23,8 +23,8 @@ message Sensitive { repeated Sensitive sensitive_repeated_message = 8 [(udpa.annotations.sensitive) = true]; google.protobuf.Any sensitive_any = 9 [(udpa.annotations.sensitive) = true]; repeated google.protobuf.Any sensitive_repeated_any = 10 [(udpa.annotations.sensitive) = true]; - udpa.type.v1.TypedStruct sensitive_typed_struct = 11 [(udpa.annotations.sensitive) = true]; - repeated udpa.type.v1.TypedStruct sensitive_repeated_typed_struct = 12 + xds.type.v3.TypedStruct sensitive_typed_struct = 11 [(udpa.annotations.sensitive) = true]; + repeated xds.type.v3.TypedStruct sensitive_repeated_typed_struct = 12 [(udpa.annotations.sensitive) = true]; map sensitive_string_map = 13 [(udpa.annotations.sensitive) = true]; map sensitive_int_map = 14 [(udpa.annotations.sensitive) = true]; @@ -39,8 +39,8 @@ message Sensitive { repeated Sensitive insensitive_repeated_message = 108; google.protobuf.Any insensitive_any = 109; repeated google.protobuf.Any insensitive_repeated_any = 110; - udpa.type.v1.TypedStruct insensitive_typed_struct = 111; - repeated udpa.type.v1.TypedStruct insensitive_repeated_typed_struct = 112; + xds.type.v3.TypedStruct insensitive_typed_struct = 111; + repeated xds.type.v3.TypedStruct insensitive_repeated_typed_struct = 112; map insensitive_string_map = 113; map insensitive_int_map = 114; } diff --git a/test/server/BUILD b/test/server/BUILD index 952131ac020f..3195047b65ac 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -113,6 +113,20 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "active_udp_listener_test", + srcs = ["active_udp_listener_test.cc"], + deps = [ + "//source/common/network:listen_socket_lib", + "//source/common/network:socket_option_factory_lib", 
+ "//source/common/network:udp_packet_writer_handler_lib", + "//source/server:active_udp_listener", + "//test/mocks/network:network_mocks", + "//test/test_common:environment_lib", + "//test/test_common:network_utility_lib", + ], +) + envoy_cc_test( name = "drain_manager_impl_test", srcs = ["drain_manager_impl_test.cc"], diff --git a/test/server/active_udp_listener_test.cc b/test/server/active_udp_listener_test.cc new file mode 100644 index 000000000000..7337c0d2e555 --- /dev/null +++ b/test/server/active_udp_listener_test.cc @@ -0,0 +1,183 @@ +#include + +#include "envoy/network/filter.h" +#include "envoy/network/listener.h" + +#include "source/common/network/listen_socket_impl.h" +#include "source/common/network/socket_option_factory.h" +#include "source/common/network/udp_packet_writer_handler_impl.h" +#include "source/server/active_udp_listener.h" + +#include "test/mocks/network/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/network_utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::Invoke; +using testing::NiceMock; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Server { +namespace { + +class MockUdpConnectionHandler : public Network::UdpConnectionHandler, + public Network::MockConnectionHandler { +public: + MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Network::UdpListenerCallbacksOptRef, getUdpListenerCallbacks, + (uint64_t listener_tag)); +}; + +class ActiveUdpListenerTest : public testing::TestWithParam, + protected Logger::Loggable { +public: + ActiveUdpListenerTest() + : version_(GetParam()), local_address_(Network::Test::getCanonicalLoopbackAddress(version_)) { + } + + void SetUp() override { + ON_CALL(conn_handler_, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + EXPECT_CALL(conn_handler_, statPrefix()).WillRepeatedly(ReturnRef(listener_stat_prefix_)); + + listen_socket_ = + std::make_shared(local_address_, nullptr, /*bind*/ true); + listen_socket_->addOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions()); + listen_socket_->addOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions()); + ASSERT_TRUE(Network::Socket::applyOptions(listen_socket_->options(), *listen_socket_, + envoy::config::core::v3::SocketOption::STATE_BOUND)); + + ON_CALL(socket_factory_, getListenSocket(_)).WillByDefault(Return(listen_socket_)); + EXPECT_CALL(listener_config_, listenSocketFactory()).WillRepeatedly(ReturnRef(socket_factory_)); + + // Use UdpGsoBatchWriter to perform non-batched writes for the purpose of this test, if it is + // supported. 
+ EXPECT_CALL(listener_config_, udpListenerConfig()) + .WillRepeatedly(Return(Network::UdpListenerConfigOptRef(udp_listener_config_))); + EXPECT_CALL(listener_config_, listenerScope()).WillRepeatedly(ReturnRef(scope_)); + EXPECT_CALL(listener_config_, filterChainFactory()); + ON_CALL(udp_listener_config_, packetWriterFactory()) + .WillByDefault(ReturnRef(udp_packet_writer_factory_)); + ON_CALL(udp_packet_writer_factory_, createUdpPacketWriter(_, _)) + .WillByDefault(Invoke( + [&](Network::IoHandle& io_handle, Stats::Scope& scope) -> Network::UdpPacketWriterPtr { +#if UDP_GSO_BATCH_WRITER_COMPILETIME_SUPPORT + return std::make_unique(io_handle, scope); +#else + UNREFERENCED_PARAMETER(scope); + return std::make_unique(io_handle); +#endif + })); + + EXPECT_CALL(cb_.udp_listener_, onDestroy()); + } + + void setup() { + active_listener_ = + std::make_unique(0, 1, conn_handler_, dispatcher_, listener_config_); + } + + std::string listener_stat_prefix_{"listener_stat_prefix"}; + NiceMock dispatcher_{"test"}; + NiceMock conn_handler_; + Network::Address::IpVersion version_; + Network::Address::InstanceConstSharedPtr local_address_; + Network::SocketSharedPtr listen_socket_; + NiceMock socket_factory_; + Stats::IsolatedStoreImpl scope_; + NiceMock udp_listener_config_; + NiceMock udp_packet_writer_factory_; + Network::MockListenerConfig listener_config_; + std::unique_ptr active_listener_; + NiceMock cb_; +}; + +INSTANTIATE_TEST_SUITE_P(ActiveUdpListenerTests, ActiveUdpListenerTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(ActiveUdpListenerTest, MultipleFiltersOnData) { + setup(); + + auto* test_filter = new NiceMock(cb_); + EXPECT_CALL(*test_filter, onData(_)) + .WillOnce(Invoke([](Network::UdpRecvData&) -> Network::FilterStatus { + return Network::FilterStatus::Continue; + })); + auto* test_filter2 = new NiceMock(cb_); + EXPECT_CALL(*test_filter2, onData(_)) + .WillOnce(Invoke([](Network::UdpRecvData&) -> Network::FilterStatus { + return Network::FilterStatus::StopIteration; + })); + + active_listener_->addReadFilter(Network::UdpListenerReadFilterPtr{test_filter}); + active_listener_->addReadFilter(Network::UdpListenerReadFilterPtr{test_filter2}); + + Network::UdpRecvData data; + active_listener_->onDataWorker(std::move(data)); +} + +TEST_P(ActiveUdpListenerTest, MultipleFiltersOnDataStopIteration) { + setup(); + + auto* test_filter = new NiceMock(cb_); + EXPECT_CALL(*test_filter, onData(_)) + .WillOnce(Invoke([](Network::UdpRecvData&) -> Network::FilterStatus { + return Network::FilterStatus::StopIteration; + })); + auto* test_filter2 = new NiceMock(cb_); + EXPECT_CALL(*test_filter2, onData(_)).Times(0); + + active_listener_->addReadFilter(Network::UdpListenerReadFilterPtr{test_filter}); + active_listener_->addReadFilter(Network::UdpListenerReadFilterPtr{test_filter2}); + + Network::UdpRecvData data; + active_listener_->onDataWorker(std::move(data)); +} + +TEST_P(ActiveUdpListenerTest, MultipleFiltersOnReceiveError) { + setup(); + + auto* test_filter = new NiceMock(cb_); + EXPECT_CALL(*test_filter, onReceiveError(_)) + .WillOnce(Invoke([](Api::IoError::IoErrorCode) -> Network::FilterStatus { + return Network::FilterStatus::Continue; + })); + auto* test_filter2 = new NiceMock(cb_); + EXPECT_CALL(*test_filter2, onReceiveError(_)) + .WillOnce(Invoke([](Api::IoError::IoErrorCode) -> Network::FilterStatus { + return Network::FilterStatus::StopIteration; + })); + + 
active_listener_->addReadFilter(Network::UdpListenerReadFilterPtr{test_filter}); + active_listener_->addReadFilter(Network::UdpListenerReadFilterPtr{test_filter2}); + + Network::UdpRecvData data; + active_listener_->onReceiveError(Api::IoError::IoErrorCode::UnknownError); +} + +TEST_P(ActiveUdpListenerTest, MultipleFiltersOnReceiveErrorStopIteration) { + setup(); + + auto* test_filter = new NiceMock(cb_); + EXPECT_CALL(*test_filter, onReceiveError(_)) + .WillOnce(Invoke([](Api::IoError::IoErrorCode) -> Network::FilterStatus { + return Network::FilterStatus::StopIteration; + })); + auto* test_filter2 = new NiceMock(cb_); + EXPECT_CALL(*test_filter2, onReceiveError(_)).Times(0); + + active_listener_->addReadFilter(Network::UdpListenerReadFilterPtr{test_filter}); + active_listener_->addReadFilter(Network::UdpListenerReadFilterPtr{test_filter2}); + + Network::UdpRecvData data; + active_listener_->onReceiveError(Api::IoError::IoErrorCode::UnknownError); +} + +} // namespace +} // namespace Server +} // namespace Envoy diff --git a/test/server/admin/admin_test.cc b/test/server/admin/admin_test.cc index ff920e17b0eb..8a0fce046499 100644 --- a/test/server/admin/admin_test.cc +++ b/test/server/admin/admin_test.cc @@ -48,10 +48,14 @@ TEST_P(AdminInstanceTest, MutatesErrorWithGet) { TEST_P(AdminInstanceTest, Getters) { EXPECT_EQ(&admin_.mutableSocket(), &admin_.socket()); EXPECT_EQ(1, admin_.concurrency()); - EXPECT_EQ(false, admin_.preserveExternalRequestId()); + EXPECT_FALSE(admin_.preserveExternalRequestId()); EXPECT_EQ(nullptr, admin_.tracer()); - EXPECT_EQ(false, admin_.streamErrorOnInvalidHttpMessaging()); - EXPECT_EQ(false, admin_.schemeToSet().has_value()); + EXPECT_FALSE(admin_.streamErrorOnInvalidHttpMessaging()); + EXPECT_FALSE(admin_.schemeToSet().has_value()); + EXPECT_EQ(admin_.pathWithEscapedSlashesAction(), + envoy::extensions::filters::network::http_connection_manager::v3:: + HttpConnectionManager::KEEP_UNCHANGED); + EXPECT_NE(nullptr, admin_.scopedRouteConfigProvider()); } TEST_P(AdminInstanceTest, WriteAddressToFile) { diff --git a/test/server/admin/prometheus_stats_test.cc b/test/server/admin/prometheus_stats_test.cc index 92c40e7d386c..0c86261c5983 100644 --- a/test/server/admin/prometheus_stats_test.cc +++ b/test/server/admin/prometheus_stats_test.cc @@ -249,7 +249,8 @@ TEST_F(PrometheusStatsFormatterTest, HistogramWithNonDefaultBuckets) { HistogramWrapper h1_cumulative; h1_cumulative.setHistogramValues(std::vector(0)); Stats::ConstSupportedBuckets buckets{10, 20}; - Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram(), buckets); + Stats::HistogramStatisticsImpl h1_cumulative_statistics( + h1_cumulative.getHistogram(), Stats::Histogram::Unit::Unspecified, buckets); auto histogram = makeHistogram("histogram1", {}); ON_CALL(*histogram, cumulativeStatistics()).WillByDefault(ReturnRef(h1_cumulative_statistics)); @@ -273,6 +274,46 @@ envoy_histogram1_count{} 0 EXPECT_EQ(expected_output, response.toString()); } +// Test that scaled percents are emitted in the expected 0.0-1.0 range, and that the buckets +// apply to the final output range, not the internal scaled range. 
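As a quick cross-check of the arithmetic the test below encodes: samples for a Unit::Percent histogram are recorded pre-scaled by Stats::Histogram::PercentScale and divided back out when rendered, so recorded values of 0.25, 0.75 and 1.25 (times the scale factor) land in the cumulative buckets {0.5, 1.0, +Inf} with counts 1, 2 and 3. The standalone sketch below is illustrative only; the concrete percent_scale value is a stand-in (it cancels out), and its exact sum of 2.25 differs slightly from the expected envoy_histogram1_sum (~2.26) because the real histogram approximates stored samples by bin.

#include <cassert>
#include <vector>

int main() {
  const double percent_scale = 1000000.0; // stand-in for Stats::Histogram::PercentScale
  const std::vector<double> recorded = {0.25 * percent_scale, 0.75 * percent_scale,
                                        1.25 * percent_scale};

  // De-scale on output, as done for Unit::Percent, then count the cumulative buckets.
  int le_half = 0, le_one = 0, le_inf = 0;
  double sum = 0.0;
  for (double value : recorded) {
    const double emitted = value / percent_scale;
    sum += emitted;
    ++le_inf;
    if (emitted <= 1.0) {
      ++le_one;
    }
    if (emitted <= 0.5) {
      ++le_half;
    }
  }

  assert(le_half == 1 && le_one == 2 && le_inf == 3); // matches le="0.5", le="1" and le="+Inf"
  assert(sum == 2.25); // exact here; the test's ~2.26 additionally reflects bin approximation
  return 0;
}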
+TEST_F(PrometheusStatsFormatterTest, HistogramWithScaledPercent) { + Stats::CustomStatNamespacesImpl custom_namespaces; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(std::vector(0)); + Stats::ConstSupportedBuckets buckets{0.5, 1.0}; + + constexpr double scale_factor = Stats::Histogram::PercentScale; + h1_cumulative.setHistogramValuesWithCounts(std::vector>({ + {0.25 * scale_factor, 1}, + {0.75 * scale_factor, 1}, + {1.25 * scale_factor, 1}, + })); + + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram(), + Stats::Histogram::Unit::Percent, buckets); + + auto histogram = makeHistogram("histogram1", {}); + ON_CALL(*histogram, cumulativeStatistics()).WillByDefault(ReturnRef(h1_cumulative_statistics)); + + addHistogram(histogram); + + Buffer::OwnedImpl response; + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, absl::nullopt, custom_namespaces); + EXPECT_EQ(1UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram +envoy_histogram1_bucket{le="0.5"} 1 +envoy_histogram1_bucket{le="1"} 2 +envoy_histogram1_bucket{le="+Inf"} 3 +envoy_histogram1_sum{} 2.2599999999999997868371792719699 +envoy_histogram1_count{} 3 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + TEST_F(PrometheusStatsFormatterTest, HistogramWithHighCounts) { Stats::CustomStatNamespacesImpl custom_namespaces; HistogramWrapper h1_cumulative; diff --git a/test/server/config_validation/dispatcher_test.cc b/test/server/config_validation/dispatcher_test.cc index e72afe4cdc22..58bbea8c0ee0 100644 --- a/test/server/config_validation/dispatcher_test.cc +++ b/test/server/config_validation/dispatcher_test.cc @@ -60,22 +60,6 @@ TEST_P(ConfigValidation, CreateScaledTimer) { SUCCEED(); } -// Make sure that creating DnsResolver does not cause crash and each call to create -// DNS resolver returns the same shared_ptr. -TEST_F(ConfigValidation, SharedDnsResolver) { - std::vector resolvers; - auto dns_resolver_options = envoy::config::core::v3::DnsResolverOptions(); - - Network::DnsResolverSharedPtr dns1 = - dispatcher_->createDnsResolver(resolvers, dns_resolver_options); - long use_count = dns1.use_count(); - Network::DnsResolverSharedPtr dns2 = - dispatcher_->createDnsResolver(resolvers, dns_resolver_options); - - EXPECT_EQ(dns1.get(), dns2.get()); // Both point to the same instance. - EXPECT_EQ(use_count + 1, dns2.use_count()); // Each call causes ++ in use_count. 
-} - INSTANTIATE_TEST_SUITE_P(IpVersions, ConfigValidation, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); diff --git a/test/server/config_validation/xds_fuzz.cc b/test/server/config_validation/xds_fuzz.cc index 06bb9d1f387b..fb5535c6de08 100644 --- a/test/server/config_validation/xds_fuzz.cc +++ b/test/server/config_validation/xds_fuzz.cc @@ -54,7 +54,8 @@ void XdsFuzzTest::updateRoute( std::to_string(version_)); } -XdsFuzzTest::XdsFuzzTest(const test::server::config_validation::XdsTestCase& input) +XdsFuzzTest::XdsFuzzTest(const test::server::config_validation::XdsTestCase& input, + bool use_unified_mux) : HttpIntegrationTest( Http::CodecType::HTTP2, TestEnvironment::getIpVersionsForTest()[0], ConfigHelper::adsBootstrap(input.config().sotw_or_delta() == @@ -63,6 +64,9 @@ XdsFuzzTest::XdsFuzzTest(const test::server::config_validation::XdsTestCase& inp : "DELTA_GRPC")), verifier_(input.config().sotw_or_delta()), actions_(input.actions()), version_(1), ip_version_(TestEnvironment::getIpVersionsForTest()[0]) { + if (use_unified_mux) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.unified_mux", "true"); + } use_lds_ = false; create_xds_upstream_ = true; tls_xds_upstream_ = false; @@ -71,9 +75,9 @@ XdsFuzzTest::XdsFuzzTest(const test::server::config_validation::XdsTestCase& inp drain_time_ = std::chrono::seconds(60); if (input.config().sotw_or_delta() == test::server::config_validation::Config::SOTW) { - sotw_or_delta_ = Grpc::SotwOrDelta::Sotw; + sotw_or_delta_ = use_unified_mux ? Grpc::SotwOrDelta::UnifiedSotw : Grpc::SotwOrDelta::Sotw; } else { - sotw_or_delta_ = Grpc::SotwOrDelta::Delta; + sotw_or_delta_ = use_unified_mux ? Grpc::SotwOrDelta::UnifiedDelta : Grpc::SotwOrDelta::Delta; } } @@ -206,7 +210,8 @@ void XdsFuzzTest::addRoute(const std::string& route_name) { */ AssertionResult XdsFuzzTest::waitForAck(const std::string& expected_type_url, const std::string& expected_version) { - if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) { + if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw || + sotw_or_delta_ == Grpc::SotwOrDelta::UnifiedSotw) { envoy::service::discovery::v3::DiscoveryRequest discovery_request; do { VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, discovery_request)); @@ -237,8 +242,11 @@ void XdsFuzzTest::replay() { sendDiscoveryResponse(Config::TypeUrl::get().Cluster, {buildCluster("cluster_0")}, {buildCluster("cluster_0")}, {}, "0"); + // TODO (dmitri-d) legacy delta sends node with every DiscoveryRequest, other mux implementations + // follow set_node_on_first_message_only config flag EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", - {"cluster_0"}, {"cluster_0"}, {})); + {"cluster_0"}, {"cluster_0"}, {}, + sotw_or_delta_ == Grpc::SotwOrDelta::Delta)); sendDiscoveryResponse( Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment("cluster_0")}, {buildClusterLoadAssignment("cluster_0")}, {}, "0"); diff --git a/test/server/config_validation/xds_fuzz.h b/test/server/config_validation/xds_fuzz.h index 42c23959ccb7..1e9c4700bb77 100644 --- a/test/server/config_validation/xds_fuzz.h +++ b/test/server/config_validation/xds_fuzz.h @@ -22,7 +22,8 @@ namespace Envoy { class XdsFuzzTest : public HttpIntegrationTest { public: - XdsFuzzTest(const test::server::config_validation::XdsTestCase& input); + XdsFuzzTest(const test::server::config_validation::XdsTestCase& input, + bool use_unified_mux = false); envoy::config::cluster::v3::Cluster buildCluster(const 
std::string& name); diff --git a/test/server/config_validation/xds_fuzz_test.cc b/test/server/config_validation/xds_fuzz_test.cc index 231100d2972b..52c2d583e4ad 100644 --- a/test/server/config_validation/xds_fuzz_test.cc +++ b/test/server/config_validation/xds_fuzz_test.cc @@ -14,6 +14,8 @@ DEFINE_PROTO_FUZZER(const test::server::config_validation::XdsTestCase& input) { } XdsFuzzTest test(input); test.replay(); + XdsFuzzTest test_with_unified_mux(input, true); + test_with_unified_mux.replay(); } } // namespace Envoy diff --git a/test/server/configuration_impl_test.cc b/test/server/configuration_impl_test.cc index 8862f46df622..ee7dbc3e8365 100644 --- a/test/server/configuration_impl_test.cc +++ b/test/server/configuration_impl_test.cc @@ -26,7 +26,7 @@ #include "fmt/printf.h" #include "gmock/gmock.h" #include "gtest/gtest.h" -#include "udpa/type/v1/typed_struct.pb.h" +#include "xds/type/v3/typed_struct.pb.h" using testing::NiceMock; using testing::Return; @@ -402,7 +402,7 @@ TEST_F(ConfigurationImplTest, ConfigurationFailsWhenInvalidTracerSpecified) { "http": { "name": "invalid", "typed_config": { - "@type": "type.googleapis.com/udpa.type.v1.TypedStruct", + "@type": "type.googleapis.com/xds.type.v3.TypedStruct", "type_url": "type.googleapis.com/envoy.config.trace.v2.BlackHoleConfig", "value": { "collector_cluster": "cluster_0", @@ -583,7 +583,7 @@ TEST_F(ConfigurationImplTest, StatsSinkWithNoType) { auto bootstrap = Upstream::parseBootstrapFromV3Json(json); auto& sink = *bootstrap.mutable_stats_sinks()->Add(); - udpa::type::v1::TypedStruct typed_struct; + xds::type::v3::TypedStruct typed_struct; auto untyped_struct = typed_struct.mutable_value(); (*untyped_struct->mutable_fields())["foo"].set_string_value("bar"); sink.mutable_typed_config()->PackFrom(typed_struct); diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index 5de495694bfc..672f24e90164 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -172,8 +172,8 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::LoggableaddOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), - EnvoyException, "Only 1 UDP listener filter per listener supported"); -} - TEST_F(ListenerManagerImplWithRealFiltersTest, BadFilterConfig) { const std::string yaml = R"EOF( address: @@ -5034,6 +5018,101 @@ traffic_direction: INBOUND EXPECT_CALL(*listener_foo_update1, onDestroy()); } +TEST_F(ListenerManagerImplTest, ListenSocketFactoryIsClonedFromListenerDrainingFilterChain) { + InSequence s; + + EXPECT_CALL(*worker_, start(_, _)); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); + + // Add foo listener. + const std::string listener_foo_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: [] + )EOF"; + + ListenerHandle* listener_foo = expectListenerCreate(true, true); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); + EXPECT_CALL(listener_foo->target_, initialize()); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); + EXPECT_CALL(*worker_, addListener(_, _, _)); + listener_foo->target_.ready(); + worker_->callAddCompletion(); + EXPECT_EQ(1UL, manager_->listeners().size()); + + // Update foo into warming. 
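// Note on the update that follows: the two listener configs differ only in their filter chains
// (the update adds a filter_chain_match on destination port 1234), so this exercises the
// in-place, filter-chain-only update path that the expectations below verify: the listen socket
// is duplicate()d rather than re-created, listener_manager.listener_in_place_updated is
// incremented, and the superseded filter chains are drained on the drainTime() timer instead of
// the whole listener being torn down and rebuilt.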
+ const std::string listener_foo_update1_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: + filter_chain_match: + destination_port: 1234 + )EOF"; + + ListenerHandle* listener_foo_update1 = expectListenerOverridden(true, listener_foo); + EXPECT_CALL(*listener_factory_.socket_, duplicate()); + EXPECT_CALL(listener_foo_update1->target_, initialize()); + EXPECT_TRUE( + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); + EXPECT_EQ(1UL, manager_->listeners().size()); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + checkStats(__LINE__, 1, 1, 0, 1, 1, 0, 0); + + // The warmed up starts the drain timer. + EXPECT_CALL(*worker_, addListener(_, _, _)); + EXPECT_CALL(server_.options_, drainTime()).WillOnce(Return(std::chrono::seconds(600))); + Event::MockTimer* filter_chain_drain_timer = new Event::MockTimer(&server_.dispatcher_); + EXPECT_CALL(*filter_chain_drain_timer, enableTimer(std::chrono::milliseconds(600000), _)); + listener_foo_update1->target_.ready(); + checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 1); + EXPECT_CALL(*worker_, removeFilterChains(_, _, _)); + filter_chain_drain_timer->invokeCallback(); + + // Stop the active listener listener_foo_update1. + std::function stop_completion; + EXPECT_CALL(*worker_, stopListener(_, _)) + .WillOnce(Invoke( + [&stop_completion](Network::ListenerConfig&, std::function completion) -> void { + ASSERT_TRUE(completion != nullptr); + stop_completion = std::move(completion); + })); + EXPECT_CALL(*listener_foo_update1->drain_manager_, startDrainSequence(_)); + EXPECT_TRUE(manager_->removeListener("foo")); + + EXPECT_CALL(*worker_, removeListener(_, _)); + listener_foo_update1->drain_manager_->drain_sequence_completion_(); + + EXPECT_CALL(*listener_foo_update1, onDestroy()); + worker_->callRemovalCompletion(); + + // The snapshot of the listener manager is + // 1) listener_foo is draining filter chain. The listen socket is open. + // 2) No listen is active. Note that listener_foo_update1 is stopped. + // + // The next step is to add a listener on the same socket address and the listen socket of + // listener_foo will be duplicated. 
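For context on why the socket is duplicated rather than re-opened — this is an illustration, not the patch's implementation — duplicating the listen socket amounts to dup()ing the already-bound, listening file descriptor: both descriptors refer to the same open socket, so the new listener keeps the address without a second bind() and the port is never released in between. That is why the expectations below require duplicate() and expect no fresh createListenSocket() call.

#include <unistd.h>

// Hypothetical helper: the returned descriptor shares the same bound/listening socket as
// listen_fd, so closing either one does not release the address while the other stays open.
int duplicate_listen_fd(int listen_fd) { return ::dup(listen_fd); }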
+ auto listener_foo_expect_reuse_socket = expectListenerCreate(true, true); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, _, _)).Times(0); + EXPECT_CALL(*listener_factory_.socket_, duplicate()); + EXPECT_CALL(listener_foo_expect_reuse_socket->target_, initialize()); + EXPECT_TRUE( + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version1", true)); + + EXPECT_CALL(*listener_foo, onDestroy()); + EXPECT_CALL(*listener_foo_expect_reuse_socket, onDestroy()); +} + TEST(ListenerMessageUtilTest, ListenerMessageSameAreEquivalent) { envoy::config::listener::v3::Listener listener1; envoy::config::listener::v3::Listener listener2; diff --git a/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml b/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml index d249a8dbb745..e79bd34b52e7 100644 --- a/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml +++ b/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml @@ -10,8 +10,3 @@ tracing: collector_cluster: zipkin collector_endpoint: "/api/v1/spans" collector_endpoint_version: HTTP_JSON -layered_runtime: - layers: - - name: static_layer - static_layer: - envoy.test_only.broken_in_production.enable_deprecated_v2_api: true diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc index be0dc2e76959..e19a5ebb8af2 100644 --- a/test/test_common/environment.cc +++ b/test/test_common/environment.cc @@ -200,11 +200,7 @@ void TestEnvironment::initializeTestMain(char* program_name) { RELEASE_ASSERT(WSAStartup(version_requested, &wsa_data) == 0, ""); #endif -#ifdef __APPLE__ - UNREFERENCED_PARAMETER(program_name); -#else absl::InitializeSymbolizer(program_name); -#endif #ifdef ENVOY_HANDLE_SIGNALS // Enabled by default. Control with "bazel --define=signal_trace=disabled" diff --git a/test/test_common/resources.h b/test/test_common/resources.h index 9dcc8ec54b6b..323bbd9971a3 100644 --- a/test/test_common/resources.h +++ b/test/test_common/resources.h @@ -16,6 +16,7 @@ class TypeUrlValues { const std::string Cluster{"type.googleapis.com/envoy.config.cluster.v3.Cluster"}; const std::string ClusterLoadAssignment{ "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment"}; + const std::string LbEndpoint{"type.googleapis.com/envoy.config.endpoint.v3.LbEndpoint"}; const std::string Secret{"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret"}; const std::string RouteConfiguration{ "type.googleapis.com/envoy.config.route.v3.RouteConfiguration"}; diff --git a/test/test_common/simulated_time_system.cc b/test/test_common/simulated_time_system.cc index de3b4e23db0f..d2ec6d84fc8b 100644 --- a/test/test_common/simulated_time_system.cc +++ b/test/test_common/simulated_time_system.cc @@ -330,8 +330,16 @@ void SimulatedTimeSystemHelper::Alarm::Alarm::disableTimer() { simulated_scheduler_.disableAlarm(*this); } +void SimulatedTimeSystemHelper::maybeLogTimerWarning() { + if (++warning_logged_ == 1) { + ENVOY_LOG_MISC(warn, "Simulated timer enabled. 
Use advanceTimeWait or " + "advanceTimeAsync functions to ensure it is called."); + } +} + void SimulatedTimeSystemHelper::Alarm::Alarm::enableHRTimer( const std::chrono::microseconds duration, const ScopeTrackedObject* /*scope*/) { + time_system_.maybeLogTimerWarning(); simulated_scheduler_.enableAlarm(*this, duration); } diff --git a/test/test_common/simulated_time_system.h b/test/test_common/simulated_time_system.h index 16c09c67299f..c6dd4405b7e6 100644 --- a/test/test_common/simulated_time_system.h +++ b/test/test_common/simulated_time_system.h @@ -96,6 +96,8 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { } void waitForNoPendingLockHeld() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void maybeLogTimerWarning(); + RealTimeSource real_time_source_; // Used to initialize monotonic_time_ and system_time_; MonotonicTime monotonic_time_ ABSL_GUARDED_BY(mutex_); SystemTime system_time_ ABSL_GUARDED_BY(mutex_); @@ -103,6 +105,7 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { std::set schedulers_ ABSL_GUARDED_BY(mutex_); mutable absl::Mutex mutex_; uint32_t pending_updates_ ABSL_GUARDED_BY(mutex_); + std::atomic warning_logged_{}; }; // Represents a simulated time system, where time is advanced by calling diff --git a/test/test_common/utility.cc b/test/test_common/utility.cc index fe0db0bcf7ee..865d84f3aa05 100644 --- a/test/test_common/utility.cc +++ b/test/test_common/utility.cc @@ -168,6 +168,11 @@ Stats::TextReadoutSharedPtr TestUtility::findTextReadout(Stats::Store& store, return findByName(store.textReadouts(), name); } +Stats::ParentHistogramSharedPtr TestUtility::findHistogram(Stats::Store& store, + const std::string& name) { + return findByName(store.histograms(), name); +} + AssertionResult TestUtility::waitForCounterEq(Stats::Store& store, const std::string& name, uint64_t value, Event::TestTimeSystem& time_system, std::chrono::milliseconds timeout, @@ -238,6 +243,14 @@ AssertionResult TestUtility::waitForGaugeEq(Stats::Store& store, const std::stri return AssertionSuccess(); } +AssertionResult TestUtility::waitForGaugeDestroyed(Stats::Store& store, const std::string& name, + Event::TestTimeSystem& time_system) { + while (findGauge(store, name) != nullptr) { + time_system.advanceTimeWait(std::chrono::milliseconds(10)); + } + return AssertionSuccess(); +} + AssertionResult TestUtility::waitUntilHistogramHasSamples(Stats::Store& store, const std::string& name, Event::TestTimeSystem& time_system, diff --git a/test/test_common/utility.h b/test/test_common/utility.h index c355b5064339..2a2493fa2d4a 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -215,6 +215,15 @@ class TestUtility { */ static Stats::GaugeSharedPtr findGauge(Stats::Store& store, const std::string& name); + /** + * Find a histogram in a stats store. + * @param store supplies the stats store. + * @param name supplies the name to search for. + * @return Stats::ParentHistogramSharedPtr the histogram or nullptr if there is none. + */ + static Stats::ParentHistogramSharedPtr findHistogram(Stats::Store& store, + const std::string& name); + /** * Wait for a counter to == a given value. * @param store supplies the stats store. @@ -277,6 +286,17 @@ class TestUtility { Event::TestTimeSystem& time_system, std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()); + /** + * Wait for a gauge to be destroyed. + * @param store supplies the stats store. + * @param name gauge name. + * @param time_system the time system to use for waiting. 
+ * @return AssertionSuccess() if the gauge is destroyed within a fixed timeout, else + * AssertionFailure(). + */ + static AssertionResult waitForGaugeDestroyed(Stats::Store& store, const std::string& name, + Event::TestTimeSystem& time_system); + /** * Wait for a histogram to have samples. * @param store supplies the stats store. @@ -314,7 +334,7 @@ class TestUtility { */ static std::list makeDnsResponse(const std::list& addresses, - std::chrono::seconds = std::chrono::seconds(0)); + std::chrono::seconds = std::chrono::seconds(6)); /** * List files in a given directory path diff --git a/test/test_listener.cc b/test/test_listener.cc index 12dba6668f81..89c8c9251e5a 100644 --- a/test/test_listener.cc +++ b/test/test_listener.cc @@ -14,6 +14,9 @@ void TestListener::OnTestEnd(const ::testing::TestInfo& test_info) { "]: Active singletons exist. Something is leaking. Consider " "commenting out this assert and letting the heap checker run:\n", active_singletons)); + RELEASE_ASSERT(!Thread::MainThread::isMainThreadActive(), + absl::StrCat("MainThreadLeak: [", test_info.test_suite_name(), ".", + test_info.name(), "] test exited before main thread shut down")); } } // namespace Envoy diff --git a/test/test_runner.cc b/test/test_runner.cc index c910d4fb505a..68c0b2dc6d4b 100644 --- a/test/test_runner.cc +++ b/test/test_runner.cc @@ -73,6 +73,8 @@ class RuntimeManagingListener : public ::testing::EmptyTestEventListener { } // namespace int TestRunner::RunTests(int argc, char** argv) { + Thread::TestThread test_thread; + ::testing::InitGoogleMock(&argc, argv); // We hold on to process_wide to provide RAII cleanup of process-wide // state. diff --git a/test/tools/router_check/router_check.cc b/test/tools/router_check/router_check.cc index bba8b036b229..8ac19702134c 100644 --- a/test/tools/router_check/router_check.cc +++ b/test/tools/router_check/router_check.cc @@ -2,12 +2,14 @@ #include #include +#include "source/common/common/thread.h" #include "source/exe/platform_impl.h" #include "test/test_common/test_runtime.h" #include "test/tools/router_check/router.h" int main(int argc, char* argv[]) { + Envoy::Thread::TestThread test_thread; Envoy::Options options(argc, argv); const bool enforce_coverage = options.failUnder() != 0.0; diff --git a/test/tools/wee8_compile/BUILD b/test/tools/wee8_compile/BUILD index 0f9fa2f4cf55..d35416c42783 100644 --- a/test/tools/wee8_compile/BUILD +++ b/test/tools/wee8_compile/BUILD @@ -17,5 +17,9 @@ envoy_cc_binary( envoy_cc_library( name = "wee8_compile_lib", srcs = ["wee8_compile.cc"], + copts = [ + "-Wno-non-virtual-dtor", + "-Wno-unused-parameter", + ], external_deps = ["wee8"], ) diff --git a/tools/base/requirements.in b/tools/base/requirements.in index cec88af56453..e76c666f3f84 100644 --- a/tools/base/requirements.in +++ b/tools/base/requirements.in @@ -18,6 +18,7 @@ frozendict gitpython jinja2 pep8-naming +ply pygithub pyreadline pytest diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt index da6990801d30..1aa354990eb5 100644 --- a/tools/base/requirements.txt +++ b/tools/base/requirements.txt @@ -26,12 +26,14 @@ aio.stream==0.0.2 \ # via envoy.github.release aio.subprocess==0.0.4 \ --hash=sha256:fd504a7c02423c40fde19ad87b62932b9eaa091f5a22d26b89b452059a728750 - # via envoy.code-format.python-check + # via + # -r requirements.in + # envoy.code-format.python-check aio.tasks==0.0.4 \ --hash=sha256:9abd4b0881edb292c4f91a2f63b1dea7a9829a4bd4e8440225a1a412a90461fc # via + # -r requirements.in # envoy.code-format.python-check - # via -r 
tools/base/requirements.in # envoy.github.abstract # envoy.github.release aiodocker==0.21.0 \ @@ -179,59 +181,42 @@ coloredlogs==15.0.1 \ # via # -r requirements.in # envoy.base.runner -coverage==5.5 \ - --hash=sha256:004d1880bed2d97151facef49f08e255a20ceb6f9432df75f4eef018fdd5a78c \ - --hash=sha256:01d84219b5cdbfc8122223b39a954820929497a1cb1422824bb86b07b74594b6 \ - --hash=sha256:040af6c32813fa3eae5305d53f18875bedd079960822ef8ec067a66dd8afcd45 \ - --hash=sha256:06191eb60f8d8a5bc046f3799f8a07a2d7aefb9504b0209aff0b47298333302a \ - --hash=sha256:13034c4409db851670bc9acd836243aeee299949bd5673e11844befcb0149f03 \ - --hash=sha256:13c4ee887eca0f4c5a247b75398d4114c37882658300e153113dafb1d76de529 \ - --hash=sha256:184a47bbe0aa6400ed2d41d8e9ed868b8205046518c52464fde713ea06e3a74a \ - --hash=sha256:18ba8bbede96a2c3dde7b868de9dcbd55670690af0988713f0603f037848418a \ - --hash=sha256:1aa846f56c3d49205c952d8318e76ccc2ae23303351d9270ab220004c580cfe2 \ - --hash=sha256:217658ec7187497e3f3ebd901afdca1af062b42cfe3e0dafea4cced3983739f6 \ - --hash=sha256:24d4a7de75446be83244eabbff746d66b9240ae020ced65d060815fac3423759 \ - --hash=sha256:2910f4d36a6a9b4214bb7038d537f015346f413a975d57ca6b43bf23d6563b53 \ - --hash=sha256:2949cad1c5208b8298d5686d5a85b66aae46d73eec2c3e08c817dd3513e5848a \ - --hash=sha256:2a3859cb82dcbda1cfd3e6f71c27081d18aa251d20a17d87d26d4cd216fb0af4 \ - --hash=sha256:2cafbbb3af0733db200c9b5f798d18953b1a304d3f86a938367de1567f4b5bff \ - --hash=sha256:2e0d881ad471768bf6e6c2bf905d183543f10098e3b3640fc029509530091502 \ - --hash=sha256:30c77c1dc9f253283e34c27935fded5015f7d1abe83bc7821680ac444eaf7793 \ - --hash=sha256:3487286bc29a5aa4b93a072e9592f22254291ce96a9fbc5251f566b6b7343cdb \ - --hash=sha256:372da284cfd642d8e08ef606917846fa2ee350f64994bebfbd3afb0040436905 \ - --hash=sha256:41179b8a845742d1eb60449bdb2992196e211341818565abded11cfa90efb821 \ - --hash=sha256:44d654437b8ddd9eee7d1eaee28b7219bec228520ff809af170488fd2fed3e2b \ - --hash=sha256:4a7697d8cb0f27399b0e393c0b90f0f1e40c82023ea4d45d22bce7032a5d7b81 \ - --hash=sha256:51cb9476a3987c8967ebab3f0fe144819781fca264f57f89760037a2ea191cb0 \ - --hash=sha256:52596d3d0e8bdf3af43db3e9ba8dcdaac724ba7b5ca3f6358529d56f7a166f8b \ - --hash=sha256:53194af30d5bad77fcba80e23a1441c71abfb3e01192034f8246e0d8f99528f3 \ - --hash=sha256:5fec2d43a2cc6965edc0bb9e83e1e4b557f76f843a77a2496cbe719583ce8184 \ - --hash=sha256:6c90e11318f0d3c436a42409f2749ee1a115cd8b067d7f14c148f1ce5574d701 \ - --hash=sha256:74d881fc777ebb11c63736622b60cb9e4aee5cace591ce274fb69e582a12a61a \ - --hash=sha256:7501140f755b725495941b43347ba8a2777407fc7f250d4f5a7d2a1050ba8e82 \ - --hash=sha256:796c9c3c79747146ebd278dbe1e5c5c05dd6b10cc3bcb8389dfdf844f3ead638 \ - --hash=sha256:869a64f53488f40fa5b5b9dcb9e9b2962a66a87dab37790f3fcfb5144b996ef5 \ - --hash=sha256:8963a499849a1fc54b35b1c9f162f4108017b2e6db2c46c1bed93a72262ed083 \ - --hash=sha256:8d0a0725ad7c1a0bcd8d1b437e191107d457e2ec1084b9f190630a4fb1af78e6 \ - --hash=sha256:900fbf7759501bc7807fd6638c947d7a831fc9fdf742dc10f02956ff7220fa90 \ - --hash=sha256:92b017ce34b68a7d67bd6d117e6d443a9bf63a2ecf8567bb3d8c6c7bc5014465 \ - --hash=sha256:970284a88b99673ccb2e4e334cfb38a10aab7cd44f7457564d11898a74b62d0a \ - --hash=sha256:972c85d205b51e30e59525694670de6a8a89691186012535f9d7dbaa230e42c3 \ - --hash=sha256:9a1ef3b66e38ef8618ce5fdc7bea3d9f45f3624e2a66295eea5e57966c85909e \ - --hash=sha256:af0e781009aaf59e25c5a678122391cb0f345ac0ec272c7961dc5455e1c40066 \ - --hash=sha256:b6d534e4b2ab35c9f93f46229363e17f63c53ad01330df9f2d6bd1187e5eaacf \ - 
--hash=sha256:b7895207b4c843c76a25ab8c1e866261bcfe27bfaa20c192de5190121770672b \ - --hash=sha256:c0891a6a97b09c1f3e073a890514d5012eb256845c451bd48f7968ef939bf4ae \ - --hash=sha256:c2723d347ab06e7ddad1a58b2a821218239249a9e4365eaff6649d31180c1669 \ - --hash=sha256:d1f8bf7b90ba55699b3a5e44930e93ff0189aa27186e96071fac7dd0d06a1873 \ - --hash=sha256:d1f9ce122f83b2305592c11d64f181b87153fc2c2bbd3bb4a3dde8303cfb1a6b \ - --hash=sha256:d314ed732c25d29775e84a960c3c60808b682c08d86602ec2c3008e1202e3bb6 \ - --hash=sha256:d636598c8305e1f90b439dbf4f66437de4a5e3c31fdf47ad29542478c8508bbb \ - --hash=sha256:deee1077aae10d8fa88cb02c845cfba9b62c55e1183f52f6ae6a2df6a2187160 \ - --hash=sha256:ebe78fe9a0e874362175b02371bdfbee64d8edc42a044253ddf4ee7d3c15212c \ - --hash=sha256:f030f8873312a16414c0d8e1a1ddff2d3235655a2174e3648b4fa66b3f2f1079 \ - --hash=sha256:f0b278ce10936db1a37e6954e15a3730bea96a0997c26d7fee88e6c396c2086d \ - --hash=sha256:f11642dddbb0253cc8853254301b51390ba0081750a8ac03f20ea8103f0c56b6 +coverage[toml]==6.0 \ + --hash=sha256:08fd55d2e00dac4c18a2fa26281076035ec86e764acdc198b9185ce749ada58f \ + --hash=sha256:11ce082eb0f7c2bbfe96f6c8bcc3a339daac57de4dc0f3186069ec5c58da911c \ + --hash=sha256:17983f6ccc47f4864fd16d20ff677782b23d1207bf222d10e4d676e4636b0872 \ + --hash=sha256:25df2bc53a954ba2ccf230fa274d1de341f6aa633d857d75e5731365f7181749 \ + --hash=sha256:274a612f67f931307706b60700f1e4cf80e1d79dff6c282fc9301e4565e78724 \ + --hash=sha256:3dfb23cc180b674a11a559183dff9655beb9da03088f3fe3c4f3a6d200c86f05 \ + --hash=sha256:43bada49697a62ffa0283c7f01bbc76aac562c37d4bb6c45d56dd008d841194e \ + --hash=sha256:4865dc4a7a566147cbdc2b2f033a6cccc99a7dcc89995137765c384f6c73110b \ + --hash=sha256:581fddd2f883379bd5af51da9233e0396b6519f3d3eeae4fb88867473be6d56e \ + --hash=sha256:5c191e01b23e760338f19d8ba2470c0dad44c8b45e41ac043b2db84efc62f695 \ + --hash=sha256:6e216e4021c934246c308fd3e0d739d9fa8a3f4ea414f584ab90ef9c1592f282 \ + --hash=sha256:72f8c99f1527c5a8ee77c890ea810e26b39fd0b4c2dffc062e20a05b2cca60ef \ + --hash=sha256:7593a49300489d064ebb6c58539f52cbbc4a2e6a4385de5e92cae1563f88a425 \ + --hash=sha256:7844a8c6a0fee401edbf578713c2473e020759267c40261b294036f9d3eb6a2d \ + --hash=sha256:7af2f8e7bb54ace984de790e897f858e88068d8fbc46c9490b7c19c59cf51822 \ + --hash=sha256:7dbda34e8e26bd86606ba8a9c13ccb114802e01758a3d0a75652ffc59a573220 \ + --hash=sha256:82b58d37c47d93a171be9b5744bcc96a0012cbf53d5622b29a49e6be2097edd7 \ + --hash=sha256:8305e14112efb74d0b5fec4df6e41cafde615c2392a7e51c84013cafe945842c \ + --hash=sha256:8426fec5ad5a6e8217921716b504e9b6e1166dc147e8443b4855e329db686282 \ + --hash=sha256:88f1810eb942e7063d051d87aaaa113eb5fd5a7fd2cda03a972de57695b8bb1a \ + --hash=sha256:8da0c4a26a831b392deaba5fdd0cd7838d173b47ce2ec3d0f37be630cb09ef6e \ + --hash=sha256:a9dbfcbc56d8de5580483cf2caff6a59c64d3e88836cbe5fb5c20c05c29a8808 \ + --hash=sha256:aa5d4d43fa18cc9d0c6e02a83de0b9729b5451a9066574bd276481474f0a53ab \ + --hash=sha256:adb0f4c3c8ba8104378518a1954cbf3d891a22c13fd0e0bf135391835f44f288 \ + --hash=sha256:b4ee5815c776dfa3958ba71c7cd4cdd8eb40d79358a18352feb19562fe4408c4 \ + --hash=sha256:b5dd5ae0a9cd55d71f1335c331e9625382239b8cede818fb62d8d2702336dbf8 \ + --hash=sha256:b78dd3eeb8f5ff26d2113c41836bac04a9ea91be54c346826b54a373133c8c53 \ + --hash=sha256:bea681309bdd88dd1283a8ba834632c43da376d9bce05820826090aad80c0126 \ + --hash=sha256:befb5ffa9faabef6dadc42622c73de168001425258f0b7e402a2934574e7a04b \ + --hash=sha256:d795a2c92fe8cb31f6e9cd627ee4f39b64eb66bf47d89d8fcf7cb3d17031c887 \ + 
--hash=sha256:d82cbef1220703ce56822be7fbddb40736fc1a928ac893472df8aff7421ae0aa \ + --hash=sha256:e63490e8a6675cee7a71393ee074586f7eeaf0e9341afd006c5d6f7eec7c16d7 \ + --hash=sha256:e735ab8547d8a1fe8e58dd765d6f27ac539b395f52160d767b7189f379f9be7a \ + --hash=sha256:fa816e97cfe1f691423078dffa39a18106c176f28008db017b3ce3e947c34aa5 \ + --hash=sha256:fff04bfefb879edcf616f1ce5ea6f4a693b5976bdc5e163f8464f349c25b59f0 # via # -r requirements.in # pytest-cov @@ -274,6 +259,7 @@ envoy.abstract.command==0.0.3 \ envoy.base.checker==0.0.2 \ --hash=sha256:2ac81efa20fd01fff644ff7dc7fadeac1c3e4dbb6210881ac7a7919ec0e048d8 # via + # -r requirements.in # envoy.code-format.python-check # envoy.dependency.pip-check # envoy.distribution.distrotest @@ -281,7 +267,7 @@ envoy.base.checker==0.0.2 \ envoy.base.runner==0.0.4 \ --hash=sha256:4eeb2b661f1f0c402df4425852be554a8a83ef5d338bfae69ddcb9b90755379e # via - # -r tools/base/requirements.in + # -r requirements.in # envoy.base.checker # envoy.distribution.release # envoy.docs.sphinx-runner @@ -333,18 +319,18 @@ envoy.gpg.identity==0.0.2 \ envoy.gpg.sign==0.0.3 \ --hash=sha256:31667931f5d7ff05fd809b89748f277511486311c777652af4cb8889bd641049 # via -r requirements.in -flake8-polyfill==1.0.2 \ - --hash=sha256:12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9 \ - --hash=sha256:e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda - # via pep8-naming -flake8==3.9.2 \ - --hash=sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b \ - --hash=sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907 +flake8==4.0.1 \ + --hash=sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d \ + --hash=sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d # via # -r requirements.in # envoy.code-format.python-check # flake8-polyfill # pep8-naming +flake8-polyfill==1.0.2 \ + --hash=sha256:12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9 \ + --hash=sha256:e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda + # via pep8-naming frozendict==2.0.6 \ --hash=sha256:3f00de72805cf4c9e81b334f3f04809278b967d2fed84552313a0fcce511beb1 \ --hash=sha256:5d3f75832c35d4df041f0e19c268964cbef29c1eb34cd3517cf883f1c2d089b9 @@ -361,9 +347,9 @@ gitdb==4.0.7 \ --hash=sha256:6c4cc71933456991da20917998acbe6cf4fb41eeaab7d6d67fbc05ecd4c865b0 \ --hash=sha256:96bf5c08b157a666fec41129e6d327235284cca4c81e92109260f353ba138005 # via gitpython -gitpython==3.1.18 \ - --hash=sha256:b838a895977b45ab6f0cc926a9045c8d1c44e2b653c1fcc39fe91f42c6e8f05b \ - --hash=sha256:fce760879cd2aebd2991b3542876dc5c4a909b30c9d69dfc488e504a8db37ee8 +gitpython==3.1.24 \ + --hash=sha256:dc0a7f2f697657acc8d7f89033e8b1ea94dd90356b2983bca89dc8d2ab3cc647 \ + --hash=sha256:df83fdf5e684fef7c6ee2c02fc68a5ceb7e7e759d08b694088d0cacb4eba59e5 # via -r requirements.in humanfriendly==9.2 \ --hash=sha256:332da98c24cc150efcc91b5508b19115209272bfdf4b0764a56795932f854271 \ @@ -383,9 +369,9 @@ iniconfig==1.1.1 \ --hash=sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3 \ --hash=sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32 # via pytest -jinja2==3.0.1 \ - --hash=sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4 \ - --hash=sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4 +jinja2==3.0.2 \ + --hash=sha256:827a0e32839ab1600d4eb1c4c33ec5a8edfbc5cb42dafa13b81f182f97784b45 \ + --hash=sha256:8569982d3f0889eed11dd620c706d39b60c36d6d25843961f33f77fb6bc6b20c # via # -r 
requirements.in # sphinx @@ -507,21 +493,25 @@ pluggy==1.0.0 \ --hash=sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159 \ --hash=sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3 # via pytest +ply==3.11 \ + --hash=sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3 \ + --hash=sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce + # via -r requirements.in py==1.10.0 \ --hash=sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3 \ --hash=sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a # via pytest -pycodestyle==2.7.0 \ - --hash=sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068 \ - --hash=sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef +pycodestyle==2.8.0 \ + --hash=sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20 \ + --hash=sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f # via flake8 pycparser==2.20 \ --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \ --hash=sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705 # via cffi -pyflakes==2.3.1 \ - --hash=sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3 \ - --hash=sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db +pyflakes==2.4.0 \ + --hash=sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c \ + --hash=sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e # via flake8 pygithub==1.55 \ --hash=sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283 \ @@ -566,17 +556,6 @@ pyparsing==2.4.7 \ pyreadline==2.1 \ --hash=sha256:4530592fc2e85b25b1a9f79664433da09237c1a270e4d78ea5aa3a2c7229e2d1 # via -r requirements.in -pytest-asyncio==0.15.1 \ - --hash=sha256:2564ceb9612bbd560d19ca4b41347b54e7835c2f792c504f698e05395ed63f6f \ - --hash=sha256:3042bcdf1c5d978f6b74d96a151c4cfb9dcece65006198389ccd7e6c60eb1eea - # via -r requirements.in -pytest-cov==2.12.1 \ - --hash=sha256:261bb9e47e65bd099c89c3edf92972865210c36813f80ede5277dceb77a4a62a \ - --hash=sha256:261ceeb8c227b726249b376b8526b600f38667ee314f910353fa318caa01f4d7 - # via -r requirements.in -pytest-patches==0.0.3 \ - --hash=sha256:6f8cdc8641c708c4812f58ae48d410f373a6fd16cd6cc4dc4d3fb8951df9c92a - # via -r requirements.in pytest==6.2.5 \ --hash=sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89 \ --hash=sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134 @@ -585,6 +564,17 @@ pytest==6.2.5 \ # pytest-asyncio # pytest-cov # pytest-patches +pytest-asyncio==0.16.0 \ + --hash=sha256:5f2a21273c47b331ae6aa5b36087047b4899e40f03f18397c0e65fa5cca54e9b \ + --hash=sha256:7496c5977ce88c34379df64a66459fe395cd05543f0a2f837016e7144391fcfb + # via -r requirements.in +pytest-cov==3.0.0 \ + --hash=sha256:578d5d15ac4a25e5f961c938b85a05b09fdaae9deef3bb6de9a6e766622ca7a6 \ + --hash=sha256:e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470 + # via -r requirements.in +pytest-patches==0.0.3 \ + --hash=sha256:6f8cdc8641c708c4812f58ae48d410f373a6fd16cd6cc4dc4d3fb8951df9c92a + # via -r requirements.in python-gnupg==0.4.7 \ --hash=sha256:2061f56b1942c29b92727bf9aecbd3cea3893acc9cccbdc7eb4604285efe4ac7 \ --hash=sha256:3ff5b1bf5e397de6e1fe41a7c0f403dad4e242ac92b345f440eaecfb72a7ebae @@ -593,36 +583,40 @@ pytz==2021.1 \ --hash=sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da \ 
--hash=sha256:eb10ce3e7736052ed3623d49975ce333bcd712c7bb19a58b9e2089d4057d0798 # via babel -pyyaml==5.4.1 \ - --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \ - --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \ - --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \ - --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \ - --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \ - --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \ - --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \ - --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \ - --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \ - --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \ - --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \ - --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \ - --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \ - --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \ - --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \ - --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \ - --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \ - --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \ - --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \ - --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \ - --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \ - --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \ - --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \ - --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \ - --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \ - --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \ - --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \ - --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ - --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 +pyyaml==6.0 \ + --hash=sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293 \ + --hash=sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b \ + --hash=sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57 \ + --hash=sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b \ + --hash=sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4 \ + --hash=sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07 \ + --hash=sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba \ + --hash=sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9 \ + --hash=sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287 \ + --hash=sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513 \ + --hash=sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0 \ + --hash=sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0 \ + 
--hash=sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92 \ + --hash=sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f \ + --hash=sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2 \ + --hash=sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc \ + --hash=sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c \ + --hash=sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86 \ + --hash=sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4 \ + --hash=sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c \ + --hash=sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34 \ + --hash=sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b \ + --hash=sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c \ + --hash=sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb \ + --hash=sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737 \ + --hash=sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3 \ + --hash=sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d \ + --hash=sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53 \ + --hash=sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78 \ + --hash=sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803 \ + --hash=sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a \ + --hash=sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174 \ + --hash=sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5 # via # -r requirements.in # envoy.base.utils @@ -641,7 +635,7 @@ six==1.16.0 \ slackclient==2.9.3 \ --hash=sha256:07ec8fa76f6aa64852210ae235ff9e637ba78124e06c0b07a7eeea4abb955965 \ --hash=sha256:2d68d668c02f4038299897e5c4723ab85dd40a3548354924b24f333a435856f8 - # via -r tools/base/requirements.in + # via -r requirements.in smmap==4.0.0 \ --hash=sha256:7e65386bd122d45405ddf795637b7f7d2b532e7e401d46bbe3fb49b9986d5182 \ --hash=sha256:a9a7479e4c572e2e775c404dcd3080c8dc49f39918c2cf74913d30c4c478e3c2 @@ -655,6 +649,7 @@ sphinx==4.2.0 \ --hash=sha256:98a535c62a4fcfcc362528592f69b26f7caec587d32cd55688db580be0287ae0 # via # -r requirements.in + # envoy.docs.sphinx-runner # sphinx-copybutton # sphinx-rtd-theme # sphinx-tabs @@ -663,15 +658,21 @@ sphinx==4.2.0 \ sphinx-copybutton==0.4.0 \ --hash=sha256:4340d33c169dac6dd82dce2c83333412aa786a42dd01a81a8decac3b130dc8b0 \ --hash=sha256:8daed13a87afd5013c3a9af3575cc4d5bec052075ccd3db243f895c07a689386 - # via -r requirements.in + # via + # -r requirements.in + # envoy.docs.sphinx-runner sphinx-rtd-theme==1.0.0 \ --hash=sha256:4d35a56f4508cfee4c4fb604373ede6feae2a306731d533f409ef5c3496fdbd8 \ --hash=sha256:eec6d497e4c2195fa0e8b2016b337532b8a699a68bcb22a512870e16925c6a5c - # via -r requirements.in + # via + # -r requirements.in + # envoy.docs.sphinx-runner sphinx-tabs==3.2.0 \ --hash=sha256:1e1b1846c80137bd81a78e4a69b02664b98b1e1da361beb30600b939dfc75065 \ --hash=sha256:33137914ed9b276e6a686d7a337310ee77b1dae316fdcbce60476913a152e0a4 - # via -r requirements.in + # via + # -r requirements.in + # envoy.docs.sphinx-runner sphinxcontrib-applehelp==1.0.2 \ --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \ --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58 @@ -684,9 +685,9 @@ 
sphinxcontrib-htmlhelp==2.0.0 \ --hash=sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07 \ --hash=sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2 # via sphinx -sphinxcontrib-httpdomain==1.7.0 \ - --hash=sha256:1fb5375007d70bf180cdd1c79e741082be7aa2d37ba99efe561e1c2e3f38191e \ - --hash=sha256:ac40b4fba58c76b073b03931c7b8ead611066a6aebccafb34dc19694f4eb6335 +sphinxcontrib-httpdomain==1.8.0 \ + --hash=sha256:2059cfabd0cca8fcc3455cc8ffad92f0915a7d3bb03bfddba078a6a0f35beec5 \ + --hash=sha256:a3396d6350728d574f52458b400f0ac848f8b6913bd41fed95d391d3ffbbade3 # via # -r requirements.in # envoy.docs.sphinx-runner @@ -714,9 +715,11 @@ sphinxext-rediraffe==0.2.7 \ toml==0.10.2 \ --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \ --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f - # via - # pytest - # pytest-cov + # via pytest +tomli==1.2.1 \ + --hash=sha256:8dd0e9524d6f386271a36b41dbf6c57d8e32fd96fd22b6584679dc569d20899f \ + --hash=sha256:a5b75cb6f3968abb47af1b40c1819dc519ea82bcc065776a866e8d74c5ca9442 + # via coverage trycast==0.3.0 \ --hash=sha256:1b7b4c0d4b0d674770a53f34a762e52a6cd6879eb251ab21625602699920080d \ --hash=sha256:687185b812e8d1c45f2ba841e8de7bdcdee0695dcf3464f206800505d4c65f26 @@ -728,6 +731,7 @@ typing-extensions==3.10.0.2 \ # via # aiodocker # aiohttp + # gitpython uritemplate==3.0.1 \ --hash=sha256:07620c3f3f8eed1f12600845892b0e036a2420acf513c53f7de0abd911a5894f \ --hash=sha256:5af8ad10cec94f215e3f48112de2022e1d5a37ed427fbd88652fa908f2ab7cae @@ -794,9 +798,9 @@ yarl==1.6.3 \ # via aiohttp # The following packages are considered to be unsafe in a requirements file: -setuptools==58.0.4 \ - --hash=sha256:69cc739bc2662098a68a9bc575cd974a57969e70c1d58ade89d104ab73d79770 \ - --hash=sha256:f10059f0152e0b7fb6b2edd77bcb1ecd4c9ed7048a826eb2d79f72fd2e6e237b +setuptools==58.2.0 \ + --hash=sha256:2551203ae6955b9876741a26ab3e767bb3242dafe86a32a749ea0d78b6792f11 \ + --hash=sha256:2c55bdb85d5bb460bd2e3b12052b677879cffcf46c0c688f2e5bf51d36001145 # via # -r requirements.in # sphinx diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index d01897fc338a..b1c9853ec809 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -86,9 +86,9 @@ # Histogram names which are allowed to be suffixed with the unit symbol, all of the pre-existing # ones were grandfathered as part of PR #8484 for backwards compatibility. 
HISTOGRAM_WITH_SI_SUFFIX_ALLOWLIST = ( - "downstream_cx_length_ms", "downstream_cx_length_ms", "initialization_time_ms", - "loop_duration_us", "poll_delay_us", "request_time_ms", "upstream_cx_connect_ms", - "upstream_cx_length_ms") + "cx_rtt_us", "cx_rtt_variance_us", "downstream_cx_length_ms", "downstream_cx_length_ms", + "initialization_time_ms", "loop_duration_us", "poll_delay_us", "request_time_ms", + "upstream_cx_connect_ms", "upstream_cx_length_ms") # Files in these paths can use std::regex STD_REGEX_ALLOWLIST = ( @@ -266,7 +266,6 @@ UNSORTED_FLAGS = { "envoy.reloadable_features.activate_timers_next_event_loop", - "envoy.reloadable_features.check_ocsp_policy", "envoy.reloadable_features.grpc_json_transcoder_adhere_to_buffer_limits", "envoy.reloadable_features.upstream_http2_flood_checks", "envoy.reloadable_features.header_map_correctly_coalesce_cookies", @@ -531,7 +530,8 @@ def check_runtime_flags(self, file_path, error_messages): continue if "}" in line: break - + if "//" in line: + continue match = FLAG_REGEX.match(line) if not match: error_messages.append("%s does not look like a reloadable flag" % line) @@ -682,9 +682,11 @@ def check_source_line(self, line, file_path, report_error): if "RealTimeSource" in line or \ ("RealTimeSystem" in line and not "TestRealTimeSystem" in line) or \ "std::chrono::system_clock::now" in line or "std::chrono::steady_clock::now" in line or \ - "std::this_thread::sleep_for" in line or self.has_cond_var_wait_for(line): + "std::this_thread::sleep_for" in line or self.has_cond_var_wait_for(line) or \ + " usleep(" in line or "::usleep(" in line: report_error( - "Don't reference real-world time sources from production code; use injection") + "Don't reference real-world time sources; use TimeSystem::advanceTime(Wait|Async)" + ) duration_arg = DURATION_VALUE_REGEX.search(line) if duration_arg and duration_arg.group(1) != "0" and duration_arg.group(1) != "0.0": # Matching duration(int-const or float-const) other than zero @@ -1172,7 +1174,7 @@ def owned_directories(error_messages): owned = [] maintainers = [ '@mattklein123', '@htuch', '@alyssawilk', '@zuercher', '@lizan', '@snowp', '@asraa', - '@yanavlasov', '@junr03', '@dio', '@jmarantz', '@antoniovicente' + '@yanavlasov', '@junr03', '@dio', '@jmarantz', '@antoniovicente', '@ggreenway' ] try: diff --git a/tools/code_format/check_format_test_helper.py b/tools/code_format/check_format_test_helper.py index 9353ec65256e..f3007eb9fc10 100755 --- a/tools/code_format/check_format_test_helper.py +++ b/tools/code_format/check_format_test_helper.py @@ -163,7 +163,7 @@ def run_checks(): errors += check_unfixable_error("shared_mutex.cc", "shared_mutex") errors += check_unfixable_error("shared_mutex.cc", "shared_mutex") real_time_inject_error = ( - "Don't reference real-world time sources from production code; use injection") + "Don't reference real-world time sources; use TimeSystem::advanceTime(Wait|Async)") errors += check_unfixable_error("real_time_source.cc", real_time_inject_error) errors += check_unfixable_error("real_time_system.cc", real_time_inject_error) errors += check_unfixable_error( diff --git a/tools/code_format/requirements.txt b/tools/code_format/requirements.txt index bb703224ca3f..a383645319d7 100644 --- a/tools/code_format/requirements.txt +++ b/tools/code_format/requirements.txt @@ -8,9 +8,9 @@ flake8-polyfill==1.0.2 \ --hash=sha256:12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9 \ --hash=sha256:e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda # via pep8-naming 
-flake8==3.9.2 \ - --hash=sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b \ - --hash=sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907 +flake8==4.0.1 \ + --hash=sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d \ + --hash=sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d # via # -r tools/code_format/requirements.txt # flake8-polyfill @@ -22,13 +22,13 @@ pep8-naming==0.12.1 \ --hash=sha256:4a8daeaeb33cfcde779309fc0c9c0a68a3bbe2ad8a8308b763c5068f86eb9f37 \ --hash=sha256:bb2455947757d162aa4cad55dba4ce029005cd1692f2899a21d51d8630ca7841 # via -r tools/code_format/requirements.txt -pycodestyle==2.7.0 \ - --hash=sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068 \ - --hash=sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef +pycodestyle==2.8.0 \ + --hash=sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20 \ + --hash=sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f # via flake8 -pyflakes==2.3.1 \ - --hash=sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3 \ - --hash=sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db +pyflakes==2.4.0 \ + --hash=sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e \ + --hash=sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c # via flake8 yapf==0.31.0 \ --hash=sha256:408fb9a2b254c302f49db83c59f9aa0b4b0fd0ec25be3a5c51181327922ff63d \ diff --git a/tools/dependency/cve_scan.py b/tools/dependency/cve_scan.py index 5cdee9a90af3..af4e70aa7cd0 100755 --- a/tools/dependency/cve_scan.py +++ b/tools/dependency/cve_scan.py @@ -72,9 +72,25 @@ # See https://nvd.nist.gov/vuln/detail/CVE-2021-22940 'CVE-2021-22918', 'CVE-2021-22921', + 'CVE-2021-22930', 'CVE-2021-22931', 'CVE-2021-22939', 'CVE-2021-22940', + # + # Currently, cvescan does not respect/understand versions (see #18354). + # + # The following CVEs target versions that are not currently used in the Envoy repo. + # + # libcurl + "CVE-2021-22945", + # + # kafka + 'CVE-2021-38153', + # + # wasmtime + "CVE-2021-39216", + "CVE-2021-39218", + "CVE-2021-39219", ]) # Subset of CVE fields that are useful below. diff --git a/tools/dependency/release_dates.py b/tools/dependency/release_dates.py index 10733549ae77..aed102107189 100644 --- a/tools/dependency/release_dates.py +++ b/tools/dependency/release_dates.py @@ -13,34 +13,56 @@ import os import sys +import argparse +import string + +import pytz import github import exports import utils - from colorama import Fore, Style from packaging import version +# Tag issues created with these labels. +LABELS = ['dependencies', 'area/build', 'no stalebot'] +GITHUB_REPO_LOCATION = "envoyproxy/envoy" + +BODY_TPL = """ +Package Name: ${dep} +Current Version: ${metadata_version}@${release_date} +Available Version: ${tag_name}@${created_at} +Upstream releases: https://github.com/${package_name}/releases +""" + +CLOSING_TPL = """ +New version is available for this package +New Version: ${tag_name}@${created_at} +Upstream releases: https://github.com/${full_name}/releases +New Issue Link: https://github.com/${repo_location}/issues/${number} +""" + # Thrown on errors related to release date or version. class ReleaseDateVersionError(Exception): pass +# Errors that happen during issue creation. +class DependencyUpdateError(Exception): + pass + + # Format a datetime object as UTC YYYY-MM-DD. 
 def format_utc_date(date):
-    # We only handle naive datetime objects right now, which is what PyGithub
-    # appears to be handing us.
-    if date.tzinfo is not None:
-        raise ReleaseDateVersionError(
-            "Expected UTC date without timezone information. Received timezone information")
+    date = date.replace(tzinfo=pytz.UTC)
     return date.date().isoformat()


 # Obtain latest release version and compare against metadata version, warn on
 # mismatch.
-def verify_and_print_latest_release(dep, repo, metadata_version, release_date):
+def verify_and_print_latest_release(dep, repo, metadata_version, release_date, create_issue=False):
     try:
         latest_release = repo.get_latest_release()
     except github.GithubException as err:
@@ -51,6 +73,122 @@ def verify_and_print_latest_release(dep, repo, metadata_version, release_date):
         print(
             f'{Fore.YELLOW}*WARNING* {dep} has a newer release than {metadata_version}@<{release_date}>: '
             f'{latest_release.tag_name}@<{latest_release.created_at}>{Style.RESET_ALL}')
+        # Only create issues when the create_issue flag is set, so this runs on the GitHub
+        # Action schedule and does not bloat CI on every push.
+        if create_issue:
+            create_issues(dep, repo, metadata_version, release_date, latest_release)
+
+
+def is_sha(text):
+    if len(text) != 40:
+        return False
+    try:
+        int(text, 16)
+    except ValueError:
+        return False
+    return True
+
+
+# Create an issue for a stale dependency.
+def create_issues(dep, package_repo, metadata_version, release_date, latest_release):
+    """Create a GitHub issue for a stale dependency.
+
+    Args:
+        dep: name of the dependency
+        package_repo: upstream GitHub repository of the package
+        metadata_version: currently pinned version
+        release_date: release date of the currently pinned version
+        latest_release: latest upstream release (tag name and date)
+    """
+    access_token = os.getenv('GITHUB_TOKEN')
+    git = github.Github(access_token)
+    repo = git.get_repo(GITHUB_REPO_LOCATION)
+    # Confirm that the labels in LABELS exist in the repository.
+    labels = []
+    for label in repo.get_labels():
+        if label.name in LABELS:
+            labels.append(label.name)
+    if len(labels) != len(LABELS):
+        raise DependencyUpdateError('Unknown labels (expected %s, got %s)' % (LABELS, labels))
+    # Truncate metadata_version to 7 characters if it is a SHA hash.
+    if is_sha(metadata_version):
+        metadata_version = metadata_version[0:7]
+    title = f'Newer release available `{dep}`: {latest_release.tag_name} (current: {metadata_version})'
+    # Build the issue body; open issues for older versions are closed further below.
+    body = string.Template(BODY_TPL).substitute(
+        dep=dep,
+        metadata_version=metadata_version,
+        release_date=release_date,
+        tag_name=latest_release.tag_name,
+        created_at=latest_release.created_at,
+        package_name=package_repo.full_name)
+    if issues_exist(title, git):
+        print("Issue with %s already exists" % title)
+        print(' >> Issue already exists, not posting!')
+        return
+    print('Creating issues...')
+    try:
+        issue_created = repo.create_issue(title, body=body, labels=LABELS)
+        latest_release.latest_issue_number = issue_created.number
+    except github.GithubException as e:
+        print(f'Unable to create issue, received error: {e}')
+        raise
+    search_old_version_open_issue_exist(title, git, package_repo, latest_release)
+
+
+# Check whether an issue with this title already exists.
+def issues_exist(title, git):
+    # Search on the title minus the '(current: ...)' suffix.
+    title_search = title[0:title.index("(") - 1]
+    query = f'repo:{GITHUB_REPO_LOCATION} {title_search} in:title'
+    try:
+        issues = git.search_issues(query)
+    except github.GithubException as e:
+        print(f'There was a problem searching for issue title: {title}, received {e}')
+        raise
+    return issues.totalCount > 0
+
+
+# Search open issues by title and close stale ones when a newer package version is available.
+def search_old_version_open_issue_exist(title, git, package_repo, latest_release):
+    # Search only on the 'Newer release available `{dep}`' prefix shared by every issue for this dependency.
+    title_search = title[0:title.index(":")]
+    query = f'repo:{GITHUB_REPO_LOCATION} {title_search} in:title is:open'
+    # There may be more than one matching issue. If an issue's title already names the latest
+    # release, the right issue is open and nothing needs to be done; otherwise the issue refers
+    # to an older version and is closed.
+    issues = git.search_issues(query)
+    for issue in issues:
+        issue_version = get_package_version_from_issue(issue.title)
+        if issue_version != latest_release.tag_name:
+            close_old_issue(git, issue.number, latest_release, package_repo)
+
+
+def get_package_version_from_issue(issue_title):
+    # The version sits between ':' and '(current: ...)' in titles created by the GitHub Action.
+    return issue_title.split(":")[1].split("(")[0].strip()
+
+
+def close_old_issue(git, issue_number, latest_release, package_repo):
+    repo = git.get_repo(GITHUB_REPO_LOCATION)
+    closing_comment = string.Template(CLOSING_TPL)
+    try:
+        issue = repo.get_issue(number=issue_number)
+        print('Publishing closing comment...')
+        issue.create_comment(
+            closing_comment.substitute(
+                tag_name=latest_release.tag_name,
+                created_at=latest_release.created_at,
+                full_name=package_repo.full_name,
+                repo_location=GITHUB_REPO_LOCATION,
+                number=latest_release.latest_issue_number))
+        print('Closing this issue as a newer package version is available')
+        issue.edit(state='closed')
+    except github.GithubException as e:
+        print(f'There was a problem publishing the comment or closing this issue: {e}')
+        raise
+    return


 # Print GitHub release date, throw ReleaseDateVersionError on mismatch with metadata release date.
@@ -106,7 +244,7 @@ def get_untagged_release_date(repo, metadata_version, github_release): # Verify release dates in metadata against GitHub API. -def verify_and_print_release_dates(repository_locations, github_instance): +def verify_and_print_release_dates(repository_locations, github_instance, create_issue=False): for dep, metadata in sorted(repository_locations.items()): release_date = None # Obtain release information from GitHub API. @@ -122,7 +260,8 @@ def verify_and_print_release_dates(repository_locations, github_instance): release_date = get_untagged_release_date(repo, metadata['version'], github_release) if release_date: # Check whether there is a more recent version and warn if necessary. - verify_and_print_latest_release(dep, repo, github_release.version, release_date) + verify_and_print_latest_release( + dep, repo, github_release.version, release_date, create_issue) # Verify that the release date in metadata and GitHub correspond, # otherwise throw ReleaseDateVersionError. verify_and_print_release_date(dep, release_date, metadata['release_date']) @@ -132,19 +271,23 @@ def verify_and_print_release_dates(repository_locations, github_instance): if __name__ == '__main__': - if len(sys.argv) != 2: - print('Usage: %s ' % sys.argv[0]) - sys.exit(1) + # parsing location and github_action flag with argparse + parser = argparse.ArgumentParser() + parser.add_argument('location', type=str) + parser.add_argument('--create_issues', action='store_true') + args = parser.parse_args() access_token = os.getenv('GITHUB_TOKEN') if not access_token: print('Missing GITHUB_TOKEN') sys.exit(1) - path = sys.argv[1] + path = args.location + create_issue = args.create_issues spec_loader = exports.repository_locations_utils.load_repository_locations_spec path_module = exports.load_module('repository_locations', path) try: verify_and_print_release_dates( - spec_loader(path_module.REPOSITORY_LOCATIONS_SPEC), github.Github(access_token)) + spec_loader(path_module.REPOSITORY_LOCATIONS_SPEC), github.Github(access_token), + create_issue) except ReleaseDateVersionError as e: print( f'{Fore.RED}An error occurred while processing {path}, please verify the correctness of the ' diff --git a/tools/dependency/release_dates.sh b/tools/dependency/release_dates.sh index de12f53e4512..51b48039a3ce 100755 --- a/tools/dependency/release_dates.sh +++ b/tools/dependency/release_dates.sh @@ -7,4 +7,4 @@ set -e # TODO(phlax): move this job to bazel and remove this export API_PATH=api/ -PYTHONPATH=. python_venv release_dates "$1" +PYTHONPATH=. 
python_venv release_dates "$@" diff --git a/tools/dependency/requirements.txt b/tools/dependency/requirements.txt index f741cb209c9f..b155ec330df6 100644 --- a/tools/dependency/requirements.txt +++ b/tools/dependency/requirements.txt @@ -2,99 +2,96 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile --generate-hashes tools/dependency/requirements.txt +# pip-compile --allow-unsafe --generate-hashes requirements.in # -certifi==2021.5.30 \ - --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \ - --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 - # via - # -r tools/dependency/requirements.txt - # requests -cffi==1.14.5 \ - --hash=sha256:005a36f41773e148deac64b08f233873a4d0c18b053d37da83f6af4d9087b813 \ - --hash=sha256:0857f0ae312d855239a55c81ef453ee8fd24136eaba8e87a2eceba644c0d4c06 \ - --hash=sha256:1071534bbbf8cbb31b498d5d9db0f274f2f7a865adca4ae429e147ba40f73dea \ - --hash=sha256:158d0d15119b4b7ff6b926536763dc0714313aa59e320ddf787502c70c4d4bee \ - --hash=sha256:1f436816fc868b098b0d63b8920de7d208c90a67212546d02f84fe78a9c26396 \ - --hash=sha256:2894f2df484ff56d717bead0a5c2abb6b9d2bf26d6960c4604d5c48bbc30ee73 \ - --hash=sha256:29314480e958fd8aab22e4a58b355b629c59bf5f2ac2492b61e3dc06d8c7a315 \ - --hash=sha256:34eff4b97f3d982fb93e2831e6750127d1355a923ebaeeb565407b3d2f8d41a1 \ - --hash=sha256:35f27e6eb43380fa080dccf676dece30bef72e4a67617ffda586641cd4508d49 \ - --hash=sha256:3d3dd4c9e559eb172ecf00a2a7517e97d1e96de2a5e610bd9b68cea3925b4892 \ - --hash=sha256:43e0b9d9e2c9e5d152946b9c5fe062c151614b262fda2e7b201204de0b99e482 \ - --hash=sha256:48e1c69bbacfc3d932221851b39d49e81567a4d4aac3b21258d9c24578280058 \ - --hash=sha256:51182f8927c5af975fece87b1b369f722c570fe169f9880764b1ee3bca8347b5 \ - --hash=sha256:58e3f59d583d413809d60779492342801d6e82fefb89c86a38e040c16883be53 \ - --hash=sha256:5de7970188bb46b7bf9858eb6890aad302577a5f6f75091fd7cdd3ef13ef3045 \ - --hash=sha256:65fa59693c62cf06e45ddbb822165394a288edce9e276647f0046e1ec26920f3 \ - --hash=sha256:69e395c24fc60aad6bb4fa7e583698ea6cc684648e1ffb7fe85e3c1ca131a7d5 \ - --hash=sha256:6c97d7350133666fbb5cf4abdc1178c812cb205dc6f41d174a7b0f18fb93337e \ - --hash=sha256:6e4714cc64f474e4d6e37cfff31a814b509a35cb17de4fb1999907575684479c \ - --hash=sha256:72d8d3ef52c208ee1c7b2e341f7d71c6fd3157138abf1a95166e6165dd5d4369 \ - --hash=sha256:8ae6299f6c68de06f136f1f9e69458eae58f1dacf10af5c17353eae03aa0d827 \ - --hash=sha256:8b198cec6c72df5289c05b05b8b0969819783f9418e0409865dac47288d2a053 \ - --hash=sha256:99cd03ae7988a93dd00bcd9d0b75e1f6c426063d6f03d2f90b89e29b25b82dfa \ - --hash=sha256:9cf8022fb8d07a97c178b02327b284521c7708d7c71a9c9c355c178ac4bbd3d4 \ - --hash=sha256:9de2e279153a443c656f2defd67769e6d1e4163952b3c622dcea5b08a6405322 \ - --hash=sha256:9e93e79c2551ff263400e1e4be085a1210e12073a31c2011dbbda14bda0c6132 \ - --hash=sha256:9ff227395193126d82e60319a673a037d5de84633f11279e336f9c0f189ecc62 \ - --hash=sha256:a465da611f6fa124963b91bf432d960a555563efe4ed1cc403ba5077b15370aa \ - --hash=sha256:ad17025d226ee5beec591b52800c11680fca3df50b8b29fe51d882576e039ee0 \ - --hash=sha256:afb29c1ba2e5a3736f1c301d9d0abe3ec8b86957d04ddfa9d7a6a42b9367e396 \ - --hash=sha256:b85eb46a81787c50650f2392b9b4ef23e1f126313b9e0e9013b35c15e4288e2e \ - --hash=sha256:bb89f306e5da99f4d922728ddcd6f7fcebb3241fc40edebcb7284d7514741991 \ - --hash=sha256:cbde590d4faaa07c72bf979734738f328d239913ba3e043b1e98fe9a39f8b2b6 \ - --hash=sha256:cd2868886d547469123fadc46eac7ea5253ea7fcb139f12e1dfc2bbd406427d1 \ - 
--hash=sha256:d42b11d692e11b6634f7613ad8df5d6d5f8875f5d48939520d351007b3c13406 \ - --hash=sha256:f2d45f97ab6bb54753eab54fffe75aaf3de4ff2341c9daee1987ee1837636f1d \ - --hash=sha256:fd78e5fee591709f32ef6edb9a015b4aa1a5022598e36227500c8f4e02328d9c - # via - # -r tools/dependency/requirements.txt - # pynacl -chardet==4.0.0 \ - --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ - --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 - # via -r tools/dependency/requirements.txt -charset-normalizer==2.0.5 \ - --hash=sha256:fa471a601dfea0f492e4f4fca035cd82155e65dc45c9b83bf4322dfab63755dd \ - --hash=sha256:7098e7e862f6370a2a8d1a6398cd359815c45d12626267652c3f13dec58e2367 +certifi==2021.10.8 \ + --hash=sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569 \ + --hash=sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872 + # via requests +cffi==1.15.0 \ + --hash=sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962 \ + --hash=sha256:23cfe892bd5dd8941608f93348c0737e369e51c100d03718f108bf1add7bd6d0 \ + --hash=sha256:41d45de54cd277a7878919867c0f08b0cf817605e4eb94093e7516505d3c8d14 \ + --hash=sha256:14cd121ea63ecdae71efa69c15c5543a4b5fbcd0bbe2aad864baca0063cecf27 \ + --hash=sha256:d4d692a89c5cf08a8557fdeb329b82e7bf609aadfaed6c0d79f5a449a3c7c023 \ + --hash=sha256:4a306fa632e8f0928956a41fa8e1d6243c71e7eb59ffbd165fc0b41e316b2474 \ + --hash=sha256:e7022a66d9b55e93e1a845d8c9eba2a1bebd4966cd8bfc25d9cd07d515b33fa6 \ + --hash=sha256:0104fb5ae2391d46a4cb082abdd5c69ea4eab79d8d44eaaf79f1b1fd806ee4c2 \ + --hash=sha256:91ec59c33514b7c7559a6acda53bbfe1b283949c34fe7440bcf917f96ac0723e \ + --hash=sha256:f5c7150ad32ba43a07c4479f40241756145a1f03b43480e058cfd862bf5041c7 \ + --hash=sha256:00c878c90cb53ccfaae6b8bc18ad05d2036553e6d9d1d9dbcf323bbe83854ca3 \ + --hash=sha256:abb9a20a72ac4e0fdb50dae135ba5e77880518e742077ced47eb1499e29a443c \ + --hash=sha256:a5263e363c27b653a90078143adb3d076c1a748ec9ecc78ea2fb916f9b861962 \ + --hash=sha256:f54a64f8b0c8ff0b64d18aa76675262e1700f3995182267998c31ae974fbc382 \ + --hash=sha256:c21c9e3896c23007803a875460fb786118f0cdd4434359577ea25eb556e34c55 \ + --hash=sha256:5e069f72d497312b24fcc02073d70cb989045d1c91cbd53979366077959933e0 \ + --hash=sha256:64d4ec9f448dfe041705426000cc13e34e6e5bb13736e9fd62e34a0b0c41566e \ + --hash=sha256:2756c88cbb94231c7a147402476be2c4df2f6078099a6f4a480d239a8817ae39 \ + --hash=sha256:3b96a311ac60a3f6be21d2572e46ce67f09abcf4d09344c49274eb9e0bf345fc \ + --hash=sha256:75e4024375654472cc27e91cbe9eaa08567f7fbdf822638be2814ce059f58032 \ + --hash=sha256:59888172256cac5629e60e72e86598027aca6bf01fa2465bdb676d37636573e8 \ + --hash=sha256:27c219baf94952ae9d50ec19651a687b826792055353d07648a5695413e0c605 \ + --hash=sha256:4958391dbd6249d7ad855b9ca88fae690783a6be9e86df65865058ed81fc860e \ + --hash=sha256:f6f824dc3bce0edab5f427efcfb1d63ee75b6fcb7282900ccaf925be84efb0fc \ + --hash=sha256:06c48159c1abed75c2e721b1715c379fa3200c7784271b3c46df01383b593636 \ + --hash=sha256:c2051981a968d7de9dd2d7b87bcb9c939c74a34626a6e2f8181455dd49ed69e4 \ + --hash=sha256:fd8a250edc26254fe5b33be00402e6d287f562b6a5b2152dec302fa15bb3e997 \ + --hash=sha256:91d77d2a782be4274da750752bb1650a97bfd8f291022b379bb8e01c66b4e96b \ + --hash=sha256:45db3a33139e9c8f7c09234b5784a5e33d31fd6907800b316decad50af323ff2 \ + --hash=sha256:263cc3d821c4ab2213cbe8cd8b355a7f72a8324577dc865ef98487c1aeee2bc7 \ + --hash=sha256:17771976e82e9f94976180f76468546834d22a7cc404b17c22df2a2c81db0c66 \ + 
--hash=sha256:3415c89f9204ee60cd09b235810be700e993e343a408693e80ce7f6a40108029 \ + --hash=sha256:4238e6dab5d6a8ba812de994bbb0a79bddbdf80994e4ce802b6f6f3142fcc880 \ + --hash=sha256:0808014eb713677ec1292301ea4c81ad277b6cdf2fdd90fd540af98c0b101d20 \ + --hash=sha256:57e9ac9ccc3101fac9d6014fba037473e4358ef4e89f8e181f8951a2c0162024 \ + --hash=sha256:8b6c2ea03845c9f501ed1313e78de148cd3f6cad741a75d43a29b43da27f2e1e \ + --hash=sha256:10dffb601ccfb65262a27233ac273d552ddc4d8ae1bf93b21c94b8511bffe728 \ + --hash=sha256:786902fb9ba7433aae840e0ed609f45c7bcd4e225ebb9c753aa39725bb3e6ad6 \ + --hash=sha256:da5db4e883f1ce37f55c667e5c0de439df76ac4cb55964655906306918e7363c \ + --hash=sha256:181dee03b1170ff1969489acf1c26533710231c58f95534e3edac87fff06c443 \ + --hash=sha256:45e8636704eacc432a206ac7345a5d3d2c62d95a507ec70d62f23cd91770482a \ + --hash=sha256:31fb708d9d7c3f49a60f04cf5b119aeefe5644daba1cd2a0fe389b674fd1de37 \ + --hash=sha256:6dc2737a3674b3e344847c8686cf29e500584ccad76204efea14f451d4cc669a \ + --hash=sha256:74fdfdbfdc48d3f47148976f49fab3251e550a8720bebc99bf1483f5bfb5db3e \ + --hash=sha256:ffaa5c925128e29efbde7301d8ecaf35c8c60ffbcd6a1ffd3a552177c8e5e796 \ + --hash=sha256:3f7d084648d77af029acb79a0ff49a0ad7e9d09057a9bf46596dac9514dc07df \ + --hash=sha256:ef1f279350da2c586a69d32fc8733092fd32cc8ac95139a00377841f59a3f8d8 \ + --hash=sha256:2a23af14f408d53d5e6cd4e3d9a24ff9e05906ad574822a10563efcef137979a \ + --hash=sha256:3773c4d81e6e818df2efbc7dd77325ca0dcb688116050fb2b3011218eda36139 \ + --hash=sha256:920f0d66a896c2d99f0adbb391f990a84091179542c205fa53ce5787aff87954 + # via pynacl +charset-normalizer==2.0.7 \ + --hash=sha256:f7af805c321bfa1ce6714c51f254e0d5bb5e5834039bc17db7ebe3a4cec9492b \ + --hash=sha256:e019de665e2bcf9c2b64e2e5aa025fa991da8720daa3c1138cadd2fd1856aed0 # via requests colorama==0.4.4 \ --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 - # via -r tools/dependency/requirements.txt + # via -r requirements.in deprecated==1.2.13 \ - --hash=sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d \ - --hash=sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d - # via - # -r tools/dependency/requirements.txt - # pygithub -idna==2.10 \ - --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ - --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 - # via - # -r tools/dependency/requirements.txt - # requests + --hash=sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d \ + --hash=sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d + # via pygithub +idna==3.3 \ + --hash=sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff \ + --hash=sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d + # via requests packaging==21.0 \ --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \ --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 - # via -r tools/dependency/requirements.txt + # via -r requirements.in pycparser==2.20 \ --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \ --hash=sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705 - # via - # -r tools/dependency/requirements.txt - # cffi + # via cffi pygithub==1.55 \ --hash=sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283 \ 
--hash=sha256:2caf0054ea079b71e539741ae56c5a95e073b81fa472ce222e81667381b9601b - # via -r tools/dependency/requirements.txt -pyjwt==2.1.0 \ - --hash=sha256:934d73fbba91b0483d3857d1aff50e96b2a892384ee2c17417ed3203f173fca1 \ - --hash=sha256:fba44e7898bbca160a2b2b501f492824fc8382485d3a6f11ba5d0c1937ce6130 - # via - # -r tools/dependency/requirements.txt - # pygithub + # via -r requirements.in +pyjwt==2.2.0 \ + --hash=sha256:b0ed5824c8ecc5362e540c65dc6247567db130c4226670bf7699aec92fb4dae1 \ + --hash=sha256:a0b9a3b4e5ca5517cac9f1a6e9cd30bf1aa80be74fcdf4e28eded582ecfcfbae + # via pygithub pynacl==1.4.0 \ --hash=sha256:06cbb4d9b2c4bd3c8dc0d267416aaed79906e7b33f114ddbf0911969794b1cc4 \ --hash=sha256:11335f09060af52c97137d4ac54285bcb7df0cef29014a1a4efe64ac065434c4 \ @@ -114,35 +111,70 @@ pynacl==1.4.0 \ --hash=sha256:d452a6746f0a7e11121e64625109bc4468fc3100452817001dbe018bb8b08514 \ --hash=sha256:ea6841bc3a76fa4942ce00f3bda7d436fda21e2d91602b9e21b7ca9ecab8f3ff \ --hash=sha256:f8851ab9041756003119368c1e6cd0b9c631f46d686b3904b18c0139f4419f80 - # via - # -r tools/dependency/requirements.txt - # pygithub + # via pygithub pyparsing==2.4.7 \ --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \ --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b - # via - # -r tools/dependency/requirements.txt - # packaging + # via packaging +pytz==2021.3 \ + --hash=sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c \ + --hash=sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326 + # via -r requirements.in requests==2.26.0 \ --hash=sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24 \ --hash=sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7 - # via - # -r tools/dependency/requirements.txt - # pygithub + # via pygithub six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 - # via - # -r tools/dependency/requirements.txt - # pynacl -urllib3==1.26.6 \ - --hash=sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4 \ - --hash=sha256:f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f - # via - # -r tools/dependency/requirements.txt - # requests -wrapt==1.12.1 \ - --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 - # via - # -r tools/dependency/requirements.txt - # deprecated + # via pynacl +urllib3==1.26.7 \ + --hash=sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844 \ + --hash=sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece + # via requests +wrapt==1.13.2 \ + --hash=sha256:3de7b4d3066cc610054e7aa2c005645e308df2f92be730aae3a47d42e910566a \ + --hash=sha256:8164069f775c698d15582bf6320a4f308c50d048c1c10cf7d7a341feaccf5df7 \ + --hash=sha256:9adee1891253670575028279de8365c3a02d3489a74a66d774c321472939a0b1 \ + --hash=sha256:a70d876c9aba12d3bd7f8f1b05b419322c6789beb717044eea2c8690d35cb91b \ + --hash=sha256:3f87042623530bcffea038f824b63084180513c21e2e977291a9a7e65a66f13b \ + --hash=sha256:e634136f700a21e1fcead0c137f433dde928979538c14907640607d43537d468 \ + --hash=sha256:3e33c138d1e3620b1e0cc6fd21e46c266393ed5dae0d595b7ed5a6b73ed57aa0 \ + --hash=sha256:283e402e5357e104ac1e3fba5791220648e9af6fb14ad7d9cc059091af2b31d2 \ + --hash=sha256:ccb34ce599cab7f36a4c90318697ead18312c67a9a76327b3f4f902af8f68ea1 \ + --hash=sha256:fbad5ba74c46517e6488149514b2e2348d40df88cd6b52a83855b7a8bf04723f \ 
+ --hash=sha256:724ed2bc9c91a2b9026e5adce310fa60c6e7c8760b03391445730b9789b9d108 \ + --hash=sha256:83f2793ec6f3ef513ad8d5b9586f5ee6081cad132e6eae2ecb7eac1cc3decae0 \ + --hash=sha256:0473d1558b93e314e84313cc611f6c86be779369f9d3734302bf185a4d2625b1 \ + --hash=sha256:15eee0e6fd07f48af2f66d0e6f2ff1916ffe9732d464d5e2390695296872cad9 \ + --hash=sha256:bc85d17d90201afd88e3d25421da805e4e135012b5d1f149e4de2981394b2a52 \ + --hash=sha256:c6ee5f8734820c21b9b8bf705e99faba87f21566d20626568eeb0d62cbeaf23c \ + --hash=sha256:53c6706a1bcfb6436f1625511b95b812798a6d2ccc51359cd791e33722b5ea32 \ + --hash=sha256:fbe6aebc9559fed7ea27de51c2bf5c25ba2a4156cf0017556f72883f2496ee9a \ + --hash=sha256:0582180566e7a13030f896c2f1ac6a56134ab5f3c3f4c5538086f758b1caf3f2 \ + --hash=sha256:bff0a59387a0a2951cb869251257b6553663329a1b5525b5226cab8c88dcbe7e \ + --hash=sha256:df3eae297a5f1594d1feb790338120f717dac1fa7d6feed7b411f87e0f2401c7 \ + --hash=sha256:1eb657ed84f4d3e6ad648483c8a80a0cf0a78922ef94caa87d327e2e1ad49b48 \ + --hash=sha256:a0cdedf681db878416c05e1831ec69691b0e6577ac7dca9d4f815632e3549580 \ + --hash=sha256:87ee3c73bdfb4367b26c57259995935501829f00c7b3eed373e2ad19ec21e4e4 \ + --hash=sha256:3e0d16eedc242d01a6f8cf0623e9cdc3b869329da3f97a15961d8864111d8cf0 \ + --hash=sha256:8318088860968c07e741537030b1abdd8908ee2c71fbe4facdaade624a09e006 \ + --hash=sha256:d90520616fce71c05dedeac3a0fe9991605f0acacd276e5f821842e454485a70 \ + --hash=sha256:22142afab65daffc95863d78effcbd31c19a8003eca73de59f321ee77f73cadb \ + --hash=sha256:d0d717e10f952df7ea41200c507cc7e24458f4c45b56c36ad418d2e79dacd1d4 \ + --hash=sha256:593cb049ce1c391e0288523b30426c4430b26e74c7e6f6e2844bd99ac7ecc831 \ + --hash=sha256:8860c8011a6961a651b1b9f46fdbc589ab63b0a50d645f7d92659618a3655867 \ + --hash=sha256:ada5e29e59e2feb710589ca1c79fd989b1dd94d27079dc1d199ec954a6ecc724 \ + --hash=sha256:fdede980273aeca591ad354608778365a3a310e0ecdd7a3587b38bc5be9b1808 \ + --hash=sha256:af9480de8e63c5f959a092047aaf3d7077422ded84695b3398f5d49254af3e90 \ + --hash=sha256:c65e623ea7556e39c4f0818200a046cbba7575a6b570ff36122c276fdd30ab0a \ + --hash=sha256:b20703356cae1799080d0ad15085dc3213c1ac3f45e95afb9f12769b98231528 \ + --hash=sha256:1c5c4cf188b5643a97e87e2110bbd4f5bc491d54a5b90633837b34d5df6a03fe \ + --hash=sha256:82223f72eba6f63eafca87a0f614495ae5aa0126fe54947e2b8c023969e9f2d7 \ + --hash=sha256:81a4cf257263b299263472d669692785f9c647e7dca01c18286b8f116dbf6b38 \ + --hash=sha256:728e2d9b7a99dd955d3426f237b940fc74017c4a39b125fec913f575619ddfe9 \ + --hash=sha256:7574de567dcd4858a2ffdf403088d6df8738b0e1eabea220553abf7c9048f59e \ + --hash=sha256:c7ac2c7a8e34bd06710605b21dd1f3576764443d68e069d2afba9b116014d072 \ + --hash=sha256:6e6d1a8eeef415d7fb29fe017de0e48f45e45efd2d1bfda28fc50b7b330859ef \ + --hash=sha256:dca56cc5963a5fd7c2aa8607017753f534ee514e09103a6c55d2db70b50e7447 + # via deprecated diff --git a/tools/dependency/utils.py b/tools/dependency/utils.py index e47107fe1463..1f4424140229 100644 --- a/tools/dependency/utils.py +++ b/tools/dependency/utils.py @@ -39,11 +39,11 @@ def get_github_release_from_urls(urls): if components[5] == 'archive': # Only support .tar.gz, .zip today. Figure out the release tag from this # filename. 
- if components[6].endswith('.tar.gz'): - github_version = components[6][:-len('.tar.gz')] + if components[-1].endswith('.tar.gz'): + github_version = components[-1][:-len('.tar.gz')] else: - assert (components[6].endswith('.zip')) - github_version = components[6][:-len('.zip')] + assert (components[-1].endswith('.zip')) + github_version = components[-1][:-len('.zip')] else: # Release tag is a path component. assert (components[5] == 'releases') diff --git a/tools/dependency/validate.py b/tools/dependency/validate.py index f35c0b97f0b1..91e75d9c5864 100755 --- a/tools/dependency/validate.py +++ b/tools/dependency/validate.py @@ -219,8 +219,7 @@ def validate_data_plane_core_deps(self): # probably have more precise tagging of dataplane/controlplane/other deps in # these paths. queried_dataplane_core_min_deps = self._build_graph.query_external_deps( - '//source/common/api/...', '//source/common/buffer/...', - '//source/common/chromium_url/...', '//source/common/crypto/...', + '//source/common/api/...', '//source/common/buffer/...', '//source/common/crypto/...', '//source/common/conn_pool/...', '//source/common/formatter/...', '//source/common/http/...', '//source/common/ssl/...', '//source/common/tcp/...', '//source/common/tcp_proxy/...', '//source/common/network/...') diff --git a/tools/deprecate_version/BUILD b/tools/deprecate_version/BUILD index abe06de4b7f8..40bb0e4c0927 100644 --- a/tools/deprecate_version/BUILD +++ b/tools/deprecate_version/BUILD @@ -7,6 +7,7 @@ py_binary( name = "deprecate_version", srcs = ["deprecate_version.py"], deps = [ + "@envoy_repo", requirement("gitpython"), requirement("pygithub"), ], diff --git a/tools/deprecate_version/deprecate_version.py b/tools/deprecate_version/deprecate_version.py index 69e275666f14..6d1681446297 100644 --- a/tools/deprecate_version/deprecate_version.py +++ b/tools/deprecate_version/deprecate_version.py @@ -23,6 +23,8 @@ import github from git import Repo +import envoy_repo + try: input = raw_input # Python 2 except NameError: @@ -126,7 +128,7 @@ def create_issues(access_token, runtime_and_pr): def get_runtime_and_pr(): """Returns a list of tuples of [runtime features to deprecate, PR, commit the feature was added] """ - repo = Repo(os.getcwd()) + repo = Repo(envoy_repo.PATH) # grep source code looking for reloadable features which are true to find the # PR they were added. 
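For context on the tools/dependency/utils.py hunk above: GitHub archive URLs come in a short form (.../archive/<tag>.tar.gz) and a longer form with extra path segments (.../archive/refs/tags/<tag>.tar.gz), so a fixed components[6] index can land on 'refs' rather than the tarball name, while components[-1] always points at the filename. A minimal Python sketch of that parsing logic; the URLs and the function name are illustrative, not taken from the repository:

# Editor's sketch of the archive-URL parsing fixed above (hypothetical URLs).
def github_version_from_archive_url(url):
    components = url.split('/')
    assert components[5] == 'archive'
    # The tarball or zip name is always the final path component, however many
    # segments (e.g. 'refs/tags') sit between 'archive' and the filename.
    if components[-1].endswith('.tar.gz'):
        return components[-1][:-len('.tar.gz')]
    assert components[-1].endswith('.zip')
    return components[-1][:-len('.zip')]


# Short form: components[6] and components[-1] agree ('v1.2.3.tar.gz').
print(github_version_from_archive_url('https://github.com/example/project/archive/v1.2.3.tar.gz'))
# Long form: components[6] is 'refs', but components[-1] still finds 'v1.2.3.tar.gz'.
print(github_version_from_archive_url('https://github.com/example/project/archive/refs/tags/v1.2.3.tar.gz'))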
diff --git a/tools/docs/generate_api_rst.py b/tools/docs/generate_api_rst.py index 670400140be1..9ee68cd6efba 100644 --- a/tools/docs/generate_api_rst.py +++ b/tools/docs/generate_api_rst.py @@ -31,11 +31,11 @@ def main(): # the contents of `proto_srcs` are the result of a bazel genquery, # containing bazel target rules, eg: # - # @envoy_api//envoy/watchdog/v3alpha:abort_action.proto + # @envoy_api//envoy/watchdog/v3:abort_action.proto # # this transforms them to a list with a "canonical" form of: # - # envoy/watchdog/v3alpha/abort_action.proto.rst + # envoy/watchdog/v3/abort_action.proto.rst # envoy_api_protos = [ f"{src.split('//')[1].replace(':', '/')}.rst" for src in f.read().split("\n") if src diff --git a/tools/extensions/extensions_check.py b/tools/extensions/extensions_check.py index 80c348ae63a9..c9c204050f38 100644 --- a/tools/extensions/extensions_check.py +++ b/tools/extensions/extensions_check.py @@ -55,7 +55,7 @@ "envoy.stats_sinks", "envoy.thrift_proxy.filters", "envoy.tracers", "envoy.sip_proxy.filters", "envoy.transport_sockets.downstream", "envoy.transport_sockets.upstream", "envoy.tls.cert_validator", "envoy.upstreams", "envoy.wasm.runtime", "envoy.common.key_value", - "envoy.rbac.matchers") + "envoy.network.dns_resolver", "envoy.rbac.matchers") EXTENSION_STATUS_VALUES = ( # This extension is stable and is expected to be production usable. diff --git a/tools/gen_compilation_database.py b/tools/gen_compilation_database.py index 8c13694b15d2..46d23fdef933 100755 --- a/tools/gen_compilation_database.py +++ b/tools/gen_compilation_database.py @@ -71,7 +71,10 @@ def modify_compile_command(target, args): if is_header(target["file"]): options += " -Wno-pragma-once-outside-header -Wno-unused-const-variable" options += " -Wno-unused-function" - if not target["file"].startswith("external/"): + # By treating external/envoy* as C++ files we are able to use this script from subrepos that + # depend on Envoy targets. + if not target["file"].startswith("external/") or target["file"].startswith( + "external/envoy"): # *.h file is treated as C header by default while our headers files are all C++17. options = "-x c++ -std=c++17 -fexceptions " + options diff --git a/tools/proto_format/proto_sync.py b/tools/proto_format/proto_sync.py index 435e9c7ef0bb..8d878309c698 100755 --- a/tools/proto_format/proto_sync.py +++ b/tools/proto_format/proto_sync.py @@ -180,6 +180,13 @@ def get_destination_path(src): package)) dst_path = pathlib.Path('contrib').joinpath(dst_path) + # Non-contrib can not use alpha. + if not 'contrib' in src: + if not 'v2alpha' in package and 'alpha' in package: + raise ProtoSyncError( + "package '{}' uses an alpha namespace. This is not allowed. Instead mark with " + "(xds.annotations.v3.file_status).work_in_progress or related annotation.".format( + package)) return dst_path diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index 78eb118d4b69..1057becbd100 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -38,7 +38,7 @@ ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Last documented v2 api version -ENVOY_LAST_V2_VERSION = "1.17.2" +ENVOY_LAST_V2_VERSION = "1.17" # Namespace prefix for Envoy top-level APIs. ENVOY_PREFIX = '.envoy.' 
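The tools/proto_format/proto_sync.py hunk above adds a guard that rejects alpha-versioned packages outside contrib while grandfathering legacy v2alpha packages. A small sketch of how that condition classifies a few inputs; the paths and package names below are hypothetical examples, not entries from the API tree:

# Editor's sketch of the alpha-namespace rule added to get_destination_path above
# (hypothetical paths and packages).
def alpha_namespace_allowed(src, package):
    # Only non-contrib protos are restricted; v2alpha packages are grandfathered.
    if 'contrib' not in src and 'v2alpha' not in package and 'alpha' in package:
        return False
    return True


assert alpha_namespace_allowed('envoy/watchdog/v3/abort_action.proto', 'envoy.watchdog.v3')
assert alpha_namespace_allowed('envoy/legacy/filter/v2alpha/filter.proto', 'envoy.legacy.filter.v2alpha')
assert not alpha_namespace_allowed('envoy/new/filter/v3alpha/filter.proto', 'envoy.new.filter.v3alpha')
assert alpha_namespace_allowed('contrib/envoy/extensions/bar/v3alpha/bar.proto', 'envoy.extensions.bar.v3alpha')
print('alpha-namespace rule behaves as described')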
diff --git a/tools/shell_utils.sh b/tools/shell_utils.sh index e32c3c95056b..4a8379bb67f8 100644 --- a/tools/shell_utils.sh +++ b/tools/shell_utils.sh @@ -25,5 +25,5 @@ python_venv() { pip3 install -r "${SCRIPT_DIR}"/requirements.txt shift - python3 "${SCRIPT_DIR}/${PY_NAME}.py" "$*" + python3 "${SCRIPT_DIR}/${PY_NAME}.py" "$@" } diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index f9d05beace12..88f04acd7cf8 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -31,6 +31,7 @@ CDN CDS CEL DSR +DSS EBADF ENOTCONN EPIPE @@ -128,6 +129,8 @@ FQDN FREEBIND FUZZER FUZZERS +dereferencing +dnsresolvers guarddog GC GCC @@ -366,6 +369,7 @@ UUID UUIDs VC VCHAR +VCL VH VHDS VLOG @@ -481,6 +485,7 @@ bursty bytecode bytestream bytestring +cacert cacheable cacheability callee @@ -497,6 +502,7 @@ canonicalizer canonicalizing cardinality casted +cfg charset checkin checksum @@ -633,6 +639,7 @@ evbuffer evbuffers evconnlistener evented +eventfd evwatch exe execlp @@ -852,6 +859,7 @@ namespaced namespaces namespacing nan +nanos natively ndk netblock @@ -1083,6 +1091,7 @@ sendto serializable serializer serv +servercert setenv setsockopt sig @@ -1300,3 +1309,4 @@ crlf ep suri transid +routable diff --git a/tools/type_whisperer/api_type_db.cc b/tools/type_whisperer/api_type_db.cc index 198a3ee4a477..20982870186f 100644 --- a/tools/type_whisperer/api_type_db.cc +++ b/tools/type_whisperer/api_type_db.cc @@ -83,8 +83,6 @@ absl::optional ApiTypeDb::getLatestTypeInformation(const std::s if (value->options().HasExtension(udpa::annotations::enum_value_migrate)) { result->renames_[value->name()] = value->options().GetExtension(udpa::annotations::enum_value_migrate).rename(); - } else if (value->options().deprecated()) { - result->renames_[value->name()] = "hidden_envoy_deprecated_" + value->name(); } } return result; @@ -98,8 +96,6 @@ absl::optional ApiTypeDb::getLatestTypeInformation(const std::s if (field->options().HasExtension(udpa::annotations::field_migrate)) { result->renames_[field->name()] = field->options().GetExtension(udpa::annotations::field_migrate).rename(); - } else if (field->options().deprecated()) { - result->renames_[field->name()] = "hidden_envoy_deprecated_" + field->name(); } } return result;
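A final note on the tools/shell_utils.sh change above: inside double quotes, "$*" joins every argument into a single word before handing it to python3, whereas "$@" preserves one argv entry per argument, which is what the new release_dates.py argparse interface needs in order to see --create_issues. A Python sketch of the difference; the path is a hypothetical placeholder:

# Editor's sketch (hypothetical path): argv shapes seen by release_dates.py.
#   python3 release_dates.py "$*"  ->  sys.argv[1:] == ['path/to/locations.bzl --create_issues']
#   python3 release_dates.py "$@"  ->  sys.argv[1:] == ['path/to/locations.bzl', '--create_issues']
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('location', type=str)
parser.add_argument('--create_issues', action='store_true')

# With the joined "$*" form the flag is silently swallowed into the positional value.
joined = parser.parse_args(['path/to/locations.bzl --create_issues'])
# With "$@" each argument stays separate and the flag is recognized.
forwarded = parser.parse_args(['path/to/locations.bzl', '--create_issues'])
print(joined.create_issues, forwarded.create_issues)  # False True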