diff --git a/.changelog/17155.txt b/.changelog/17155.txt deleted file mode 100644 index 03cec33e991af..0000000000000 --- a/.changelog/17155.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -config: Add new `tls.defaults.verify_server_hostname` configuration option. This specifies the default value for any interfaces that support the `verify_server_hostname` option. -``` diff --git a/.changelog/17160.txt b/.changelog/17160.txt deleted file mode 100644 index 666a6e8f252cc..0000000000000 --- a/.changelog/17160.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -Fix a bug that wrongly trims domains when there is an overlap with DC name. -``` diff --git a/.changelog/17481.txt b/.changelog/17481.txt deleted file mode 100644 index 89ad16998e836..0000000000000 --- a/.changelog/17481.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -tlsutil: Default setting of ServerName field in outgoing TLS configuration for checks now handled by crypto/tls. -``` diff --git a/.changelog/17593.txt b/.changelog/17593.txt deleted file mode 100644 index 1f84e75f57427..0000000000000 --- a/.changelog/17593.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -docs: fix list of telemetry metrics -``` diff --git a/.changelog/17831.txt b/.changelog/17831.txt deleted file mode 100644 index 2833bda1d5765..0000000000000 --- a/.changelog/17831.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ca: Vault CA provider config no longer requires root_pki_path for secondary datacenters -``` diff --git a/.github/workflows/bot-auto-approve.yaml b/.github/workflows/bot-auto-approve.yaml index 66bbba45287e4..2b652388999c2 100644 --- a/.github/workflows/bot-auto-approve.yaml +++ b/.github/workflows/bot-auto-approve.yaml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest if: github.actor == 'hc-github-team-consul-core' steps: - - uses: hmarr/auto-approve-action@v3 # TSCCR: no entry for repository "hmarr/auto-approve-action" + - uses: hmarr/auto-approve-action@v3 with: review-message: "Auto approved Consul Bot automated PR" github-token: ${{ secrets.MERGE_APPROVE_TOKEN }} diff --git a/.github/workflows/broken-link-check.yml b/.github/workflows/broken-link-check.yml index a1ca4731d72e7..b7c89ff3e75dc 100644 --- a/.github/workflows/broken-link-check.yml +++ b/.github/workflows/broken-link-check.yml @@ -12,11 +12,11 @@ jobs: linkChecker: runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v3 - name: Run lychee link checker id: lychee - uses: lycheeverse/lychee-action@v1.6.1 # TSCCR: no entry for repository "lycheeverse/lychee-action" + uses: lycheeverse/lychee-action@v1.6.1 with: args: ./website/content/docs/ --base https://developer.hashicorp.com/ --exclude-all-private --exclude '\.(svg|gif|jpg|png)' --exclude 'manage\.auth0\.com' --accept 403 --max-concurrency=24 --no-progress --verbose # Fail GitHub action when broken links are found? 
@@ -26,7 +26,7 @@ jobs: - name: Create GitHub Issue From lychee output file if: env.lychee_exit_code != 0 - uses: peter-evans/create-issue-from-file@v4 # TSCCR: no entry for repository "peter-evans/create-issue-from-file" + uses: peter-evans/create-issue-from-file@v4 with: title: Link Checker Report content-filepath: ./lychee/out.md diff --git a/.github/workflows/build-artifacts.yml b/.github/workflows/build-artifacts.yml index 57e2eba8e4aec..2e87d767f5e46 100644 --- a/.github/workflows/build-artifacts.yml +++ b/.github/workflows/build-artifacts.yml @@ -13,7 +13,7 @@ permissions: contents: read env: - GOPRIVATE: github.com/hashicorp # Required for enterprise deps + GOPRIVATE: github.com/hashicorp jobs: setup: @@ -25,7 +25,7 @@ jobs: compute-large: ${{ steps.setup-outputs.outputs.compute-large }} compute-xl: ${{ steps.setup-outputs.outputs.compute-xl }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - id: setup-outputs name: Setup outputs run: ./.github/scripts/get_runner_classes.sh @@ -56,14 +56,14 @@ jobs: kv/data/github/${{ github.repository }}/dockerhub username | DOCKERHUB_USERNAME; kv/data/github/${{ github.repository }}/dockerhub token | DOCKERHUB_TOKEN; - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: ENT specific step as we need to set elevated GitHub permissions. - name: Setup Git if: ${{ endsWith(github.repository, '-enterprise') }} run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version-file: 'go.mod' @@ -78,17 +78,17 @@ jobs: echo "GITHUB_BUILD_URL=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" >> $GITHUB_ENV - name: Set up Docker Buildx - uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # v2.5.0 + uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # pin@v2.4.1 # NOTE: conditional specific logic as we store secrets in Vault in ENT and use GHA secrets in OSS. 
- name: Login to Docker Hub - uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0 + uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # pin@v2.1.0 with: username: ${{ endsWith(github.repository, '-enterprise') && steps.secrets.outputs.DOCKERHUB_USERNAME || secrets.DOCKERHUB_USERNAME }} password: ${{ endsWith(github.repository, '-enterprise') && steps.secrets.outputs.DOCKERHUB_TOKEN || secrets.DOCKERHUB_TOKEN }} - name: Docker build and push - uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v4.0.0 + uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # pin@v4.0.0 with: context: ./bin file: ./build-support/docker/Consul-Dev.dockerfile diff --git a/.github/workflows/build-distros.yml b/.github/workflows/build-distros.yml index 8b88345d2ee28..6f5722a82ab93 100644 --- a/.github/workflows/build-distros.yml +++ b/.github/workflows/build-distros.yml @@ -15,7 +15,6 @@ permissions: env: GOTAGS: ${{ endsWith(github.repository, '-enterprise') && 'consulent' || '' }} - GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: setup: @@ -27,7 +26,7 @@ jobs: compute-large: ${{ steps.setup-outputs.outputs.compute-large }} compute-xl: ${{ steps.setup-outputs.outputs.compute-xl }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - id: setup-outputs name: Setup outputs run: ./.github/scripts/get_runner_classes.sh @@ -50,14 +49,14 @@ jobs: XC_OS: "freebsd linux windows" runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - name: Setup Git if: ${{ endsWith(github.repository, '-enterprise') }} run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version-file: 'go.mod' - name: Build @@ -74,14 +73,14 @@ jobs: XC_OS: "darwin freebsd linux solaris windows" runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - name: Setup Git if: ${{ endsWith(github.repository, '-enterprise') }} run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version-file: 'go.mod' - name: Build @@ -99,7 +98,7 @@ jobs: CGO_ENABLED: 1 GOOS: linux steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
- name: Setup Git @@ -107,7 +106,7 @@ jobs: run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version-file: 'go.mod' - run: | diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 9186f12bfe258..66b5a14a739c0 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -14,7 +14,6 @@ on: env: PKG_NAME: consul METADATA: oss - GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: set-product-version: @@ -26,7 +25,7 @@ jobs: pre-version: ${{ steps.set-product-version.outputs.prerelease-product-version }} shared-ldflags: ${{ steps.shared-ldflags.outputs.shared-ldflags }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - name: set product version id: set-product-version uses: hashicorp/actions-set-product-version@v1 @@ -64,7 +63,7 @@ jobs: filepath: ${{ steps.generate-metadata-file.outputs.filepath }} steps: - name: 'Checkout directory' - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - name: Generate metadata file id: generate-metadata-file uses: hashicorp/actions-generate-metadata@v1 @@ -72,7 +71,7 @@ jobs: version: ${{ needs.set-product-version.outputs.product-version }} product: ${{ env.PKG_NAME }} - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # pin@3.1.2 with: name: metadata.json path: ${{ steps.generate-metadata-file.outputs.filepath }} @@ -96,10 +95,10 @@ jobs: name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - name: Setup with node and yarn - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # pin@v3.6.0 with: node-version: '14' cache: 'yarn' @@ -161,13 +160,13 @@ jobs: echo "RPM_PACKAGE=$(basename out/*.rpm)" >> $GITHUB_ENV echo "DEB_PACKAGE=$(basename out/*.deb)" >> $GITHUB_ENV - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # pin@3.1.2 if: ${{ matrix.goos == 'linux' }} with: name: ${{ env.RPM_PACKAGE }} path: out/${{ env.RPM_PACKAGE }} - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # pin@3.1.2 if: ${{ matrix.goos == 'linux' }} with: name: ${{ env.DEB_PACKAGE }} @@ -185,10 +184,10 @@ jobs: name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - name: Setup with node and yarn - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # pin@v3.6.0 with: node-version: '14' cache: 'yarn' @@ -236,7 +235,7 @@ jobs: 
version: ${{needs.set-product-version.outputs.product-version}} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # Strip everything but MAJOR.MINOR from the version string and add a `-dev` suffix # This naming convention will be used ONLY for per-commit dev images @@ -270,7 +269,7 @@ jobs: version: ${{needs.set-product-version.outputs.product-version}} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - uses: hashicorp/actions-docker-build@v1 with: version: ${{env.version}} @@ -290,7 +289,7 @@ jobs: version: ${{needs.set-product-version.outputs.product-version}} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # Strip everything but MAJOR.MINOR from the version string and add a `-dev` suffix # This naming convention will be used ONLY for per-commit dev images @@ -327,15 +326,15 @@ jobs: name: Verify ${{ matrix.arch }} linux binary steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - name: Download ${{ matrix.arch }} zip - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # pin@v3.0.2 with: name: ${{ env.zip_name }} - name: Set up QEMU - uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0 + uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # pin@v2.1.0 if: ${{ matrix.arch == 'arm' || matrix.arch == 'arm64' }} with: # this should be a comma-separated string as opposed to an array @@ -357,10 +356,10 @@ jobs: name: Verify amd64 darwin binary steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - name: Download amd64 darwin zip - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # pin@v3.0.2 with: name: ${{ env.zip_name }} @@ -384,7 +383,7 @@ jobs: name: Verify ${{ matrix.arch }} debian package steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - name: Set package version run: | @@ -395,12 +394,12 @@ jobs: echo "pkg_name=consul_${{ env.pkg_version }}-1_${{ matrix.arch }}.deb" >> $GITHUB_ENV - name: Download workflow artifacts - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # pin@v3.0.2 with: name: ${{ env.pkg_name }} - name: Set up QEMU - uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0 + uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # pin@v2.1.0 with: platforms: all @@ -421,7 +420,7 @@ jobs: name: Verify ${{ matrix.arch }} rpm steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - name: Set package version run: | @@ -432,12 +431,12 @@ jobs: echo "pkg_name=consul-${{ env.pkg_version 
}}-1.${{ matrix.arch }}.rpm" >> $GITHUB_ENV - name: Download workflow artifacts - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # pin@v3.0.2 with: name: ${{ env.pkg_name }} - name: Set up QEMU - uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0 + uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # pin@v2.1.0 with: platforms: all diff --git a/.github/workflows/changelog-checker.yml b/.github/workflows/changelog-checker.yml index e6147e787aa30..d00717e2f0492 100644 --- a/.github/workflows/changelog-checker.yml +++ b/.github/workflows/changelog-checker.yml @@ -22,7 +22,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # by default the checkout action doesn't checkout all branches diff --git a/.github/workflows/embedded-asset-checker.yml b/.github/workflows/embedded-asset-checker.yml index ed2dc4eb950d7..4bb07771bd68f 100644 --- a/.github/workflows/embedded-asset-checker.yml +++ b/.github/workflows/embedded-asset-checker.yml @@ -20,7 +20,7 @@ jobs: if: "! ( contains(github.event.pull_request.labels.*.name, 'pr/update-ui-assets') || github.event.pull_request.user.login == 'hc-github-team-consul-core' )" runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # by default the checkout action doesn't checkout all branches diff --git a/.github/workflows/frontend.yml b/.github/workflows/frontend.yml index 460709b37df60..5eab231c65a54 100644 --- a/.github/workflows/frontend.yml +++ b/.github/workflows/frontend.yml @@ -23,7 +23,7 @@ jobs: compute-large: ${{ steps.setup-outputs.outputs.compute-large }} compute-xl: ${{ steps.setup-outputs.outputs.compute-xl }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - id: setup-outputs name: Setup outputs run: ./.github/scripts/get_runner_classes.sh @@ -35,9 +35,9 @@ jobs: run: working-directory: ui steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # pin@v3.6.0 with: node-version: '16' @@ -55,9 +55,9 @@ jobs: needs: setup runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # pin@v3.6.0 with: node-version: '16' @@ -84,9 +84,9 @@ jobs: CONSUL_NSPACES_ENABLED: ${{ endsWith(github.repository, '-enterprise') && 1 || 0 }} # NOTE: this should be 1 in ENT. 
JOBS: 2 # limit parallelism for broccoli-babel-transpiler steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # pin@v3.6.0 with: node-version: '16' @@ -94,7 +94,7 @@ jobs: run: npm install -g yarn - name: Install Chrome - uses: browser-actions/setup-chrome@c485fa3bab6be59dce18dbc18ef6ab7cbc8ff5f1 # v1.2.0 + uses: browser-actions/setup-chrome@29abc1a83d1d71557708563b4bc962d0f983a376 # pin@v1.2.1 - name: Install dependencies working-directory: ui diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 831271f6f8328..787f92560ec16 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -21,7 +21,6 @@ permissions: env: TEST_RESULTS: /tmp/test-results - GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: setup: @@ -33,7 +32,7 @@ jobs: compute-large: ${{ steps.setup-outputs.outputs.compute-large }} compute-xl: ${{ steps.setup-outputs.outputs.compute-xl }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - id: setup-outputs name: Setup outputs run: ./.github/scripts/get_runner_classes.sh @@ -51,14 +50,14 @@ jobs: check-generated-protobuf: needs: - setup - runs-on: ${{ fromJSON(needs.setup.outputs.compute-medium) }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - name: Setup Git if: ${{ endsWith(github.repository, '-enterprise') }} run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version-file: 'go.mod' - run: make proto-tools @@ -81,12 +80,12 @@ jobs: - setup runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - name: Setup Git if: ${{ endsWith(github.repository, '-enterprise') }} run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version-file: 'go.mod' - run: make --always-make deep-copy @@ -104,12 +103,12 @@ jobs: - setup runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
- name: Setup Git if: ${{ endsWith(github.repository, '-enterprise') }} run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version-file: 'go.mod' - run: go install github.com/reillywatson/enumcover/cmd/enumcover@master && enumcover ./... @@ -122,11 +121,11 @@ jobs: - setup runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - name: Setup Git run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version-file: 'go.mod' - run: make lint-container-test-deps @@ -139,12 +138,12 @@ jobs: - setup runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - name: Setup Git if: ${{ endsWith(github.repository, '-enterprise') }} run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version-file: 'go.mod' - run: go install github.com/hashicorp/lint-consul-retry@master && lint-consul-retry diff --git a/.github/workflows/issue-comment-created.yml b/.github/workflows/issue-comment-created.yml index 228ac41aa76d5..01e7e13f8bc44 100644 --- a/.github/workflows/issue-comment-created.yml +++ b/.github/workflows/issue-comment-created.yml @@ -11,8 +11,8 @@ jobs: triage: runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - - uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1.3.0 + - uses: actions/checkout@v2 + - uses: actions-ecosystem/action-remove-labels@v1 with: labels: | waiting-reply diff --git a/.github/workflows/jira-issues.yaml b/.github/workflows/jira-issues.yaml index 6e9b2b9e959b8..d595e5f5af8c6 100644 --- a/.github/workflows/jira-issues.yaml +++ b/.github/workflows/jira-issues.yaml @@ -16,7 +16,7 @@ jobs: name: Jira Community Issue sync steps: - name: Login - uses: atlassian/gajira-login@ca13f8850ea309cf44a6e4e0c49d9aa48ac3ca4c # v3 + uses: atlassian/gajira-login@v3.0.0 env: JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }} JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }} @@ -40,7 +40,7 @@ jobs: - name: Create ticket if an issue is filed, or if PR not by a team member is opened if: github.event.action == 'opened' - uses: tomhjp/gh-action-jira-create@3ed1789cad3521292e591a7cfa703215ec1348bf # v0.2.1 + uses: tomhjp/gh-action-jira-create@v0.2.1 with: project: NET issuetype: "${{ steps.set-ticket-type.outputs.TYPE }}" @@ -60,7 +60,7 @@ jobs: # Education Jira - name: Create ticket in 
Education board an issue is filed, or if PR not by a team member is opened if: github.event.action == 'opened' && contains(github.event.issue.labels.*.name, 'type/docs') - uses: tomhjp/gh-action-jira-create@3ed1789cad3521292e591a7cfa703215ec1348bf # v0.2.1 + uses: tomhjp/gh-action-jira-create@v0.2.1 with: project: CE issuetype: "${{ steps.set-ticket-type.outputs.TYPE }}" @@ -77,28 +77,28 @@ jobs: - name: Search if: github.event.action != 'opened' id: search - uses: tomhjp/gh-action-jira-search@04700b457f317c3e341ce90da5a3ff4ce058f2fa # v0.2.2 + uses: tomhjp/gh-action-jira-search@v0.2.2 with: # cf[10089] is Issue Link (use JIRA API to retrieve) jql: 'issuetype = "${{ steps.set-ticket-type.outputs.TYPE }}" and cf[10089] = "${{ github.event.issue.html_url || github.event.pull_request.html_url }}"' - name: Sync comment if: github.event.action == 'created' && steps.search.outputs.issue - uses: tomhjp/gh-action-jira-comment@6eb6b9ead70221916b6badd118c24535ed220bd9 # v0.2.0 + uses: tomhjp/gh-action-jira-comment@v0.2.0 with: issue: ${{ steps.search.outputs.issue }} comment: "${{ github.actor }} ${{ github.event.review.state || 'commented' }}:\n\n${{ github.event.comment.body || github.event.review.body }}\n\n${{ github.event.comment.html_url || github.event.review.html_url }}" - name: Close ticket if: ( github.event.action == 'closed' || github.event.action == 'deleted' ) && steps.search.outputs.issue - uses: atlassian/gajira-transition@38fc9cd61b03d6a53dd35fcccda172fe04b36de3 # v3 + uses: atlassian/gajira-transition@v3.0.1 with: issue: ${{ steps.search.outputs.issue }} transition: "Closed" - name: Reopen ticket if: github.event.action == 'reopened' && steps.search.outputs.issue - uses: atlassian/gajira-transition@38fc9cd61b03d6a53dd35fcccda172fe04b36de3 # v3 + uses: atlassian/gajira-transition@v3.0.1 with: issue: ${{ steps.search.outputs.issue }} transition: "To Do" diff --git a/.github/workflows/jira-pr.yaml b/.github/workflows/jira-pr.yaml index e18559a022b35..9bce26588ebdd 100644 --- a/.github/workflows/jira-pr.yaml +++ b/.github/workflows/jira-pr.yaml @@ -14,7 +14,7 @@ jobs: name: Jira sync steps: - name: Login - uses: atlassian/gajira-login@ca13f8850ea309cf44a6e4e0c49d9aa48ac3ca4c # v3 + uses: atlassian/gajira-login@v3.0.0 env: JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }} JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }} @@ -59,7 +59,7 @@ jobs: - name: Create ticket if an issue is filed, or if PR not by a team member is opened if: ( github.event.action == 'opened' && steps.is-team-member.outputs.MESSAGE == 'false' ) - uses: tomhjp/gh-action-jira-create@3ed1789cad3521292e591a7cfa703215ec1348bf # v0.2.1 + uses: tomhjp/gh-action-jira-create@v0.2.1 with: project: NET issuetype: "${{ steps.set-ticket-type.outputs.TYPE }}" @@ -79,7 +79,7 @@ jobs: # Education Jira - name: Create ticket in Education board an issue is filed, or if PR not by a team member is opened if: github.event.action == 'opened' && steps.is-team-member.outputs.MESSAGE == 'false' && contains(github.event.issue.labels.*.name, 'type/docs') - uses: tomhjp/gh-action-jira-create@3ed1789cad3521292e591a7cfa703215ec1348bf # v0.2.1 + uses: tomhjp/gh-action-jira-create@v0.2.1 with: project: CE issuetype: "${{ steps.set-ticket-type.outputs.TYPE }}" @@ -91,28 +91,28 @@ jobs: - name: Search if: github.event.action != 'opened' id: search - uses: tomhjp/gh-action-jira-search@04700b457f317c3e341ce90da5a3ff4ce058f2fa # v0.2.2 + uses: tomhjp/gh-action-jira-search@v0.2.2 with: # cf[10089] is Issue Link (use JIRA API to retrieve) jql: 'issuetype = "${{ 
steps.set-ticket-type.outputs.TYPE }}" and cf[10089] = "${{ github.event.issue.html_url || github.event.pull_request.html_url }}"' - name: Sync comment if: github.event.action == 'created' && steps.search.outputs.issue - uses: tomhjp/gh-action-jira-comment@6eb6b9ead70221916b6badd118c24535ed220bd9 # v0.2.0 + uses: tomhjp/gh-action-jira-comment@v0.2.0 with: issue: ${{ steps.search.outputs.issue }} comment: "${{ github.actor }} ${{ github.event.review.state || 'commented' }}:\n\n${{ github.event.comment.body || github.event.review.body }}\n\n${{ github.event.comment.html_url || github.event.review.html_url }}" - name: Close ticket if: ( github.event.action == 'closed' || github.event.action == 'deleted' ) && steps.search.outputs.issue - uses: atlassian/gajira-transition@38fc9cd61b03d6a53dd35fcccda172fe04b36de3 # v3 + uses: atlassian/gajira-transition@v3.0.1 with: issue: ${{ steps.search.outputs.issue }} transition: "Closed" - name: Reopen ticket if: github.event.action == 'reopened' && steps.search.outputs.issue - uses: atlassian/gajira-transition@38fc9cd61b03d6a53dd35fcccda172fe04b36de3 # v3 + uses: atlassian/gajira-transition@v3.0.1 with: issue: ${{ steps.search.outputs.issue }} transition: "To Do" diff --git a/.github/workflows/nightly-test-1.16.x.yaml b/.github/workflows/nightly-test-1.12.x.yaml similarity index 75% rename from .github/workflows/nightly-test-1.16.x.yaml rename to .github/workflows/nightly-test-1.12.x.yaml index 98a1f364b69ea..0f016075e261a 100644 --- a/.github/workflows/nightly-test-1.16.x.yaml +++ b/.github/workflows/nightly-test-1.12.x.yaml @@ -1,28 +1,27 @@ # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: MPL-2.0 -name: Nightly Test 1.16.x +name: Nightly Test 1.12.x on: schedule: - cron: '0 4 * * *' workflow_dispatch: {} env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition - BRANCH: "release/1.16.x" - BRANCH_NAME: "release-1.16.x" # Used for naming artifacts - GOPRIVATE: github.com/hashicorp # Required for enterprise deps + EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition + BRANCH: "release/1.12.x" + BRANCH_NAME: "release-1.12.x" # Used for naming artifacts jobs: frontend-test-workspace-node: runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -49,12 +48,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 0 steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -71,7 +70,7 @@ jobs: run: make build-ci - name: Upload OSS Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-oss-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -88,12 +87,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: 
actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -105,7 +104,7 @@ jobs: run: make deps - name: Download OSS Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-oss-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -121,12 +120,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 1 steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -143,7 +142,7 @@ jobs: run: make build-ci - name: Upload ENT Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -160,12 +159,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -177,7 +176,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -191,12 +190,12 @@ jobs: runs-on: ubuntu-latest needs: [frontend-build-ent] steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -208,7 +207,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -224,7 +223,7 @@ jobs: steps: - name: Slack Notification id: slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@v1.19 with: payload: | { diff --git a/.github/workflows/nightly-test-1.13.x.yaml b/.github/workflows/nightly-test-1.13.x.yaml index de852c9114c56..51a1226b29bec 100644 --- a/.github/workflows/nightly-test-1.13.x.yaml +++ b/.github/workflows/nightly-test-1.13.x.yaml @@ -8,21 +8,20 @@ on: workflow_dispatch: {} env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition + EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition BRANCH: "release/1.13.x" - BRANCH_NAME: "release-1.13.x" # Used for naming artifacts - GOPRIVATE: github.com/hashicorp # Required for enterprise deps + BRANCH_NAME: "release-1.13.x" # 
Used for naming artifacts jobs: frontend-test-workspace-node: runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -49,12 +48,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 0 steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -71,7 +70,7 @@ jobs: run: make build-ci - name: Upload OSS Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-oss-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -88,12 +87,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -105,7 +104,7 @@ jobs: run: make deps - name: Download OSS Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-oss-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -121,12 +120,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 1 steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -143,7 +142,7 @@ jobs: run: make build-ci - name: Upload ENT Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -160,12 +159,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -177,7 +176,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -191,12 +190,12 @@ jobs: runs-on: ubuntu-latest needs: [frontend-build-ent] steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: 
actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -208,7 +207,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -224,7 +223,7 @@ jobs: steps: - name: Slack Notification id: slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@v1.19 with: payload: | { diff --git a/.github/workflows/nightly-test-1.14.x.yaml b/.github/workflows/nightly-test-1.14.x.yaml index 1f319b4bd3ec8..86f48c37a144b 100644 --- a/.github/workflows/nightly-test-1.14.x.yaml +++ b/.github/workflows/nightly-test-1.14.x.yaml @@ -8,21 +8,20 @@ on: workflow_dispatch: {} env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition + EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition BRANCH: "release/1.14.x" - BRANCH_NAME: "release-1.14.x" # Used for naming artifacts - GOPRIVATE: github.com/hashicorp # Required for enterprise deps + BRANCH_NAME: "release-1.14.x" # Used for naming artifacts jobs: frontend-test-workspace-node: runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -49,12 +48,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 0 steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -71,7 +70,7 @@ jobs: run: make build-ci - name: Upload OSS Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-oss-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -88,12 +87,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -105,7 +104,7 @@ jobs: run: make deps - name: Download OSS Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-oss-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -121,12 +120,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 1 steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: 
actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -143,7 +142,7 @@ jobs: run: make build-ci - name: Upload ENT Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -160,12 +159,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -177,7 +176,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -191,12 +190,12 @@ jobs: runs-on: ubuntu-latest needs: [frontend-build-ent] steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -208,7 +207,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -224,7 +223,7 @@ jobs: steps: - name: Slack Notification id: slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@v1.19 with: payload: | { diff --git a/.github/workflows/nightly-test-1.15.x.yaml b/.github/workflows/nightly-test-1.15.x.yaml index d41cf84a62479..7fdc9247be724 100644 --- a/.github/workflows/nightly-test-1.15.x.yaml +++ b/.github/workflows/nightly-test-1.15.x.yaml @@ -8,21 +8,20 @@ on: workflow_dispatch: {} env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition + EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition BRANCH: "release/1.15.x" - BRANCH_NAME: "release-1.15.x" # Used for naming artifacts - GOPRIVATE: github.com/hashicorp # Required for enterprise deps + BRANCH_NAME: "release-1.15.x" # Used for naming artifacts jobs: frontend-test-workspace-node: runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -49,12 +48,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 0 steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: 
actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -71,7 +70,7 @@ jobs: run: make build-ci - name: Upload OSS Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-oss-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -88,12 +87,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -105,7 +104,7 @@ jobs: run: make deps - name: Download OSS Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-oss-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -121,12 +120,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 1 steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -143,7 +142,7 @@ jobs: run: make build-ci - name: Upload ENT Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -160,12 +159,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -177,7 +176,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -191,12 +190,12 @@ jobs: runs-on: ubuntu-latest needs: [frontend-build-ent] steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -208,7 +207,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -224,7 +223,7 @@ jobs: steps: - name: Slack Notification id: slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@v1.19 with: payload: | { diff --git 
a/.github/workflows/nightly-test-main.yaml b/.github/workflows/nightly-test-main.yaml index 13048656b6b0f..3fc316a1a3549 100644 --- a/.github/workflows/nightly-test-main.yaml +++ b/.github/workflows/nightly-test-main.yaml @@ -8,21 +8,20 @@ on: workflow_dispatch: {} env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition + EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition BRANCH: "main" - BRANCH_NAME: "main" # Used for naming artifacts - GOPRIVATE: github.com/hashicorp # Required for enterprise deps + BRANCH_NAME: "main" # Used for naming artifacts jobs: frontend-test-workspace-node: runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -49,12 +48,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 0 steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -71,7 +70,7 @@ jobs: run: make build-ci - name: Upload OSS Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-oss-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -88,12 +87,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -105,7 +104,7 @@ jobs: run: make deps - name: Download OSS Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-oss-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -121,12 +120,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 1 steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -143,7 +142,7 @@ jobs: run: make build-ci - name: Upload ENT Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -160,12 +159,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: 
actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -177,7 +176,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -191,12 +190,12 @@ jobs: runs-on: ubuntu-latest needs: [frontend-build-ent] steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -208,7 +207,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -224,7 +223,7 @@ jobs: steps: - name: Slack Notification id: slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@v1.19 with: payload: | { diff --git a/.github/workflows/oss-merge-trigger.yml b/.github/workflows/oss-merge-trigger.yml index 9146f7bc22148..4a4fdaa208e3d 100644 --- a/.github/workflows/oss-merge-trigger.yml +++ b/.github/workflows/oss-merge-trigger.yml @@ -8,7 +8,7 @@ on: - closed branches: - main - - release/** + - 'release/*.*.x' jobs: trigger-oss-merge: @@ -26,4 +26,4 @@ jobs: curl -H "Authorization: token $GH_PAT" \ -H 'Accept: application/json' \ -d "{\"event_type\": \"oss-merge\", \"client_payload\": {\"git-ref\": \"${GIT_REF}\", \"git-sha\": \"${GIT_SHA}\", \"git-actor\": \"${GIT_ACTOR}\" }}" \ - "https://api.github.com/repos/hashicorp/consul-enterprise/dispatches" + "https://api.github.com/repos/hashicorp/consul-enterprise/dispatches" \ No newline at end of file diff --git a/.github/workflows/pr-labeler.yml b/.github/workflows/pr-labeler.yml index b14b27183fb79..a29cadcb685b4 100644 --- a/.github/workflows/pr-labeler.yml +++ b/.github/workflows/pr-labeler.yml @@ -10,7 +10,7 @@ jobs: triage: runs-on: ubuntu-latest steps: - - uses: actions/labeler@0776a679364a9a16110aac8d0f40f5e11009e327 # v4.0.4 + - uses: actions/labeler@main with: repo-token: "${{ secrets.GITHUB_TOKEN }}" configuration-path: .github/pr-labeler.yml diff --git a/.github/workflows/pr-metrics-test-checker.yml b/.github/workflows/pr-metrics-test-checker.yml index 0d79aa39714b2..a73f4fbb3ff5a 100644 --- a/.github/workflows/pr-metrics-test-checker.yml +++ b/.github/workflows/pr-metrics-test-checker.yml @@ -14,7 +14,7 @@ jobs: if: "! 
( contains(github.event.pull_request.labels.*.name, 'pr/no-metrics-test') || github.event.pull_request.user.login == 'hc-github-team-consul-core' )" runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 name: "checkout repo" with: ref: ${{ github.event.pull_request.head.sha }} diff --git a/.github/workflows/reusable-check-go-mod.yml b/.github/workflows/reusable-check-go-mod.yml index 868594168662f..2078b0c3217d6 100644 --- a/.github/workflows/reusable-check-go-mod.yml +++ b/.github/workflows/reusable-check-go-mod.yml @@ -18,12 +18,12 @@ jobs: runs-on: ${{ fromJSON(inputs.runs-on) }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - name: Setup Git if: ${{ endsWith(inputs.repository-name, '-enterprise') }} run: git config --global url."https://${{ secrets.elevated-github-token }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version-file: 'go.mod' - run: go mod tidy diff --git a/.github/workflows/reusable-dev-build.yml b/.github/workflows/reusable-dev-build.yml index 3ca661398506e..d134508584c06 100644 --- a/.github/workflows/reusable-dev-build.yml +++ b/.github/workflows/reusable-dev-build.yml @@ -25,12 +25,12 @@ jobs: build: runs-on: ${{ fromJSON(inputs.runs-on) }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - name: Setup Git if: ${{ endsWith(inputs.repository-name, '-enterprise') }} run: git config --global url."https://${{ secrets.elevated-github-token }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version-file: 'go.mod' - name: Build @@ -38,7 +38,7 @@ jobs: GOARCH: ${{ inputs.goarch }} run: make dev # save dev build to pass to downstream jobs - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # pin@v3.1.2 with: name: ${{inputs.uploaded-binary-name}} path: ./bin/consul diff --git a/.github/workflows/reusable-lint.yml b/.github/workflows/reusable-lint.yml index 9a9a26f0267e1..f7032f9866633 100644 --- a/.github/workflows/reusable-lint.yml +++ b/.github/workflows/reusable-lint.yml @@ -20,7 +20,6 @@ on: env: GOTAGS: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" GOARCH: ${{inputs.go-arch}} - GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: lint: @@ -37,17 +36,17 @@ jobs: fail-fast: true name: lint ${{ matrix.directory }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
- name: Setup Git if: ${{ endsWith(inputs.repository-name, '-enterprise') }} run: git config --global url."https://${{ secrets.elevated-github-token }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version-file: 'go.mod' - run: go env - name: lint-${{ matrix.directory }} - uses: golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5 # v3.4.0 + uses: golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5 # pin@v3.4.0 with: working-directory: ${{ matrix.directory }} version: v1.51.1 diff --git a/.github/workflows/reusable-unit-split.yml b/.github/workflows/reusable-unit-split.yml index e2da1920967e0..3b9eb489590aa 100644 --- a/.github/workflows/reusable-unit-split.yml +++ b/.github/workflows/reusable-unit-split.yml @@ -51,7 +51,6 @@ env: TOTAL_RUNNERS: ${{inputs.runner-count}} CONSUL_LICENSE: ${{secrets.consul-license}} GOTAGS: ${{ inputs.go-tags}} - GOPRIVATE: github.com/hashicorp # Required for enterprise deps DATADOG_API_KEY: ${{secrets.datadog-api-key}} jobs: @@ -60,8 +59,8 @@ jobs: outputs: package-matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version-file: 'go.mod' - id: set-matrix @@ -83,12 +82,12 @@ jobs: ulimit -Sa echo "Hard limits" ulimit -Ha - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
- name: Setup Git if: ${{ endsWith(inputs.repository-name, '-enterprise') }} run: git config --global url."https://${{ secrets.elevated-github-token }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version-file: 'go.mod' cache: true @@ -97,7 +96,7 @@ jobs: working-directory: ${{inputs.directory}} run: go mod download - name: Download consul - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # pin@v3.0.2 with: name: ${{inputs.uploaded-binary-name}} path: ${{inputs.directory}} @@ -164,11 +163,11 @@ jobs: DD_ENV: ci run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" ${{env.TEST_RESULTS}}/gotestsum-report.xml - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # pin@v3.1.2 with: name: test-results path: ${{env.TEST_RESULTS}} - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # pin@v3.1.2 with: name: jsonfile path: /tmp/jsonfile diff --git a/.github/workflows/reusable-unit.yml b/.github/workflows/reusable-unit.yml index 3f7ffa277412d..e7caaae8d6a1f 100644 --- a/.github/workflows/reusable-unit.yml +++ b/.github/workflows/reusable-unit.yml @@ -46,19 +46,18 @@ env: GOARCH: ${{inputs.go-arch}} CONSUL_LICENSE: ${{secrets.consul-license}} GOTAGS: ${{ inputs.go-tags}} - GOPRIVATE: github.com/hashicorp # Required for enterprise deps DATADOG_API_KEY: ${{secrets.datadog-api-key}} jobs: go-test: runs-on: ${{ fromJSON(inputs.runs-on) }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
- name: Setup Git if: ${{ endsWith(inputs.repository-name, '-enterprise') }} run: git config --global url."https://${{ secrets.elevated-github-token }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version-file: 'go.mod' cache: true @@ -67,7 +66,7 @@ jobs: working-directory: ${{inputs.directory}} run: go mod download - name: Download consul - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # pin@v3.0.2 with: name: ${{inputs.uploaded-binary-name}} path: ${{inputs.directory}} @@ -132,11 +131,11 @@ jobs: DD_ENV: ci run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" ${{env.TEST_RESULTS}}/gotestsum-report.xml - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # pin@v3.1.2 with: name: test-results path: ${{env.TEST_RESULTS}} - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # pin@v3.1.2 with: name: jsonfile path: /tmp/jsonfile diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index ff07a961a4e48..f3da6d422b6b1 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -12,7 +12,7 @@ jobs: permissions: pull-requests: write steps: - - uses: actions/stale@1160a2240286f5da8ec72b1c0816ce2481aabf84 # v8.0.0 + - uses: actions/stale@v4 with: days-before-stale: -1 days-before-close: -1 diff --git a/.github/workflows/test-integrations.yml b/.github/workflows/test-integrations.yml index 641533012db41..38b6a44cbe018 100644 --- a/.github/workflows/test-integrations.yml +++ b/.github/workflows/test-integrations.yml @@ -23,7 +23,6 @@ env: CONSUL_BINARY_UPLOAD_NAME: consul-bin # strip the hashicorp/ off the front of github.repository for consul CONSUL_LATEST_IMAGE_NAME: ${{ endsWith(github.repository, '-enterprise') && github.repository || 'consul' }} - GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: setup: @@ -36,7 +35,7 @@ jobs: compute-xl: ${{ steps.runners.outputs.compute-xl }} enterprise: ${{ steps.runners.outputs.enterprise }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 - id: runners run: .github/scripts/get_runner_classes.sh @@ -63,13 +62,13 @@ jobs: nomad-version: ['v1.3.3', 'v1.2.10', 'v1.1.16'] steps: - name: Checkout Nomad - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 with: repository: hashicorp/nomad ref: ${{ matrix.nomad-version }} - name: Install Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0 with: go-version-file: 'go.mod' @@ -143,14 +142,14 @@ jobs: env: VAULT_BINARY_VERSION: ${{ matrix.vault-version }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
- name: Setup Git if: ${{ endsWith(github.repository, '-enterprise') }} run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0 with: go-version-file: 'go.mod' @@ -233,7 +232,7 @@ jobs: outputs: envoy-matrix: ${{ steps.set-matrix.outputs.envoy-matrix }} steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 - name: Generate Envoy Job Matrix id: set-matrix env: @@ -282,8 +281,8 @@ jobs: XDS_TARGET: ${{ matrix.xds-target }} AWS_LAMBDA_REGION: us-west-2 steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0 with: go-version-file: 'go.mod' @@ -296,7 +295,7 @@ jobs: run: chmod +x ./bin/consul - name: Set up Docker Buildx - uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # v2.5.0 + uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2.4.1 - name: Docker build run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile ./bin @@ -365,12 +364,8 @@ jobs: env: ENVOY_VERSION: "1.25.4" steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - - name: Setup Git - if: ${{ endsWith(github.repository, '-enterprise') }} - run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0 with: go-version-file: 'go.mod' - run: go env @@ -480,12 +475,8 @@ jobs: CONSUL_LATEST_VERSION: ${{ matrix.consul-version }} ENVOY_VERSION: "1.24.6" steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
- - name: Setup Git - if: ${{ endsWith(github.repository, '-enterprise') }} - run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0 with: go-version-file: 'go.mod' - run: go env diff --git a/.github/workflows/verify-envoy-version.yml b/.github/workflows/verify-envoy-version.yml index 069a281763f26..d097e335d37b2 100644 --- a/.github/workflows/verify-envoy-version.yml +++ b/.github/workflows/verify-envoy-version.yml @@ -18,7 +18,7 @@ jobs: verify-envoy-version: runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: actions/checkout@v2 with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # by default the checkout action doesn't checkout all branches diff --git a/CHANGELOG.md b/CHANGELOG.md index 14435d09bb930..ff307a926983a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,100 +71,6 @@ BUG FIXES: * ui: fixes ui tests run on CI [[GH-16428](https://github.com/hashicorp/consul/issues/16428)] * xds: Fixed a bug where modifying ACLs on a token being actively used for an xDS connection caused all xDS updates to fail. [[GH-17566](https://github.com/hashicorp/consul/issues/17566)] -## 1.15.4 (June 26, 2023) -FEATURES: - -* cli: `consul operator raft list-peers` command shows the number of commits each follower is trailing the leader by to aid in troubleshooting. [[GH-17582](https://github.com/hashicorp/consul/issues/17582)] -* server: **(Enterprise Only)** allow automatic license utilization reporting. [[GH-5102](https://github.com/hashicorp/consul/issues/5102)] - -IMPROVEMENTS: - -* connect: update supported envoy versions to 1.22.11, 1.23.9, 1.24.7, 1.25.6 [[GH-17545](https://github.com/hashicorp/consul/issues/17545)] -* debug: change default setting of consul debug command. now default duration is 5ms and default log level is 'TRACE' [[GH-17596](https://github.com/hashicorp/consul/issues/17596)] -* fix metric names in /docs/agent/telemetry [[GH-17577](https://github.com/hashicorp/consul/issues/17577)] -* gateway: Change status condition reason for invalid certificate on a listener from "Accepted" to "ResolvedRefs". [[GH-17115](https://github.com/hashicorp/consul/issues/17115)] -* systemd: set service type to notify. [[GH-16845](https://github.com/hashicorp/consul/issues/16845)] - -BUG FIXES: - -* cache: fix a few minor goroutine leaks in leaf certs and the agent cache [[GH-17636](https://github.com/hashicorp/consul/issues/17636)] -* docs: fix list of telemetry metrics [[GH-17593](https://github.com/hashicorp/consul/issues/17593)] -* gateways: **(Enterprise only)** Fixed a bug in API gateways where gateway configuration objects in non-default partitions did not reconcile properly. [[GH-17581](https://github.com/hashicorp/consul/issues/17581)] -* gateways: Fixed a bug in API gateways where binding a route that only targets a service imported from a peer results - in the programmed gateway having no routes. [[GH-17609](https://github.com/hashicorp/consul/issues/17609)] -* gateways: Fixed a bug where API gateways were not being taken into account in determining xDS rate limits. 
[[GH-17631](https://github.com/hashicorp/consul/issues/17631)] -* http: fixed API endpoint `PUT /acl/token/:AccessorID` (update token), no longer requires `AccessorID` in the request body. Web UI can now update tokens. [[GH-17739](https://github.com/hashicorp/consul/issues/17739)] -* namespaces: **(Enterprise only)** fixes a bug where agent health checks stop syncing for all services on a node if the namespace of any service has been removed from the server. -* namespaces: **(Enterprise only)** fixes a bug where namespaces are stuck in a deferred deletion state indefinitely under some conditions. - Also fixes the Consul query metadata present in the HTTP headers of the namespace read and list endpoints. -* peering: Fix a bug that caused server agents to continue cleaning up peering resources even after loss of leadership. [[GH-17483](https://github.com/hashicorp/consul/issues/17483)] -* xds: Fixed a bug where modifying ACLs on a token being actively used for an xDS connection caused all xDS updates to fail. [[GH-17566](https://github.com/hashicorp/consul/issues/17566)] - -## 1.14.8 (June 26, 2023) - -SECURITY: - -* Update to UBI base image to 9.2. [[GH-17513](https://github.com/hashicorp/consul/issues/17513)] - -FEATURES: - -* cli: `consul operator raft list-peers` command shows the number of commits each follower is trailing the leader by to aid in troubleshooting. [[GH-17582](https://github.com/hashicorp/consul/issues/17582)] -* server: **(Enterprise Only)** allow automatic license utilization reporting. [[GH-5102](https://github.com/hashicorp/consul/issues/5102)] - -IMPROVEMENTS: - -* connect: update supported envoy versions to 1.21.6, 1.22.11, 1.23.9, 1.24.7 [[GH-17547](https://github.com/hashicorp/consul/issues/17547)] -* debug: change default setting of consul debug command. now default duration is 5ms and default log level is 'TRACE' [[GH-17596](https://github.com/hashicorp/consul/issues/17596)] -* fix metric names in /docs/agent/telemetry [[GH-17577](https://github.com/hashicorp/consul/issues/17577)] -* peering: gRPC queries for TrustBundleList, TrustBundleRead, PeeringList, and PeeringRead now support blocking semantics, - reducing network and CPU demand. - The HTTP APIs for Peering List and Read have been updated to support blocking. [[GH-17426](https://github.com/hashicorp/consul/issues/17426)] -* raft: Remove expensive reflection from raft/mesh hot path [[GH-16552](https://github.com/hashicorp/consul/issues/16552)] -* systemd: set service type to notify. [[GH-16845](https://github.com/hashicorp/consul/issues/16845)] - -BUG FIXES: - -* cache: fix a few minor goroutine leaks in leaf certs and the agent cache [[GH-17636](https://github.com/hashicorp/consul/issues/17636)] -* connect: reverts #17317 fix that caused a downstream error for Ingress/Mesh/Terminating GWs when their respective config entry does not already exist. [[GH-17541](https://github.com/hashicorp/consul/issues/17541)] -* namespaces: **(Enterprise only)** fixes a bug where agent health checks stop syncing for all services on a node if the namespace of any service has been removed from the server. -* namespaces: **(Enterprise only)** fixes a bug where namespaces are stuck in a deferred deletion state indefinitely under some conditions. - Also fixes the Consul query metadata present in the HTTP headers of the namespace read and list endpoints. -* namespaces: adjusts the return type from HTTP list API to return the `api` module representation of a namespace. 
- This fixes an error with the `consul namespace list` command when a namespace has a deferred deletion timestamp. -* peering: Fix a bug that caused server agents to continue cleaning up peering resources even after loss of leadership. [[GH-17483](https://github.com/hashicorp/consul/issues/17483)] -* peering: Fix issue where modifying the list of exported services did not correctly replicate changes for services that exist in a non-default namespace. [[GH-17456](https://github.com/hashicorp/consul/issues/17456)] - -## 1.13.9 (June 26, 2023) -BREAKING CHANGES: - -* connect: Disable peering by default in connect proxies for Consul 1.13. This change was made to prevent inefficient polling - queries from having a negative impact on server performance. Peering in Consul 1.13 is an experimental feature and is not - recommended for use in production environments. If you still wish to use the experimental peering feature, ensure - [`peering.enabled = true`](https://developer.hashicorp.com/consul/docs/v1.13.x/agent/config/config-files#peering_enabled) - is set on all clients and servers. [[GH-17731](https://github.com/hashicorp/consul/issues/17731)] - -SECURITY: - -* Update to UBI base image to 9.2. [[GH-17513](https://github.com/hashicorp/consul/issues/17513)] - -FEATURES: - -* server: **(Enterprise Only)** allow automatic license utilization reporting. [[GH-5102](https://github.com/hashicorp/consul/issues/5102)] - -IMPROVEMENTS: - -* debug: change default setting of consul debug command. now default duration is 5ms and default log level is 'TRACE' [[GH-17596](https://github.com/hashicorp/consul/issues/17596)] -* systemd: set service type to notify. [[GH-16845](https://github.com/hashicorp/consul/issues/16845)] - -BUG FIXES: - -* cache: fix a few minor goroutine leaks in leaf certs and the agent cache [[GH-17636](https://github.com/hashicorp/consul/issues/17636)] -* namespaces: **(Enterprise only)** fixes a bug where namespaces are stuck in a deferred deletion state indefinitely under some conditions. - Also fixes the Consul query metadata present in the HTTP headers of the namespace read and list endpoints. -* namespaces: adjusts the return type from HTTP list API to return the `api` module representation of a namespace. - This fixes an error with the `consul namespace list` command when a namespace has a deferred deletion timestamp. -* peering: Fix a bug that caused server agents to continue cleaning up peering resources even after loss of leadership. [[GH-17483](https://github.com/hashicorp/consul/issues/17483)] - ## 1.16.0-rc1 (June 12, 2023) BREAKING CHANGES: diff --git a/GNUmakefile b/GNUmakefile index 79080311c48bd..ca80741ae4925 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -3,7 +3,6 @@ SHELL = bash - GO_MODULES := $(shell find . 
-name go.mod -exec dirname {} \; | grep -v "proto-gen-rpc-glue/e2e" | sort) ### @@ -73,7 +72,6 @@ CI_DEV_DOCKER_NAMESPACE?=hashicorpdev CI_DEV_DOCKER_IMAGE_NAME?=consul CI_DEV_DOCKER_WORKDIR?=bin/ ################ -CONSUL_VERSION?=$(shell cat version/VERSION) TEST_MODCACHE?=1 TEST_BUILDCACHE?=1 @@ -190,11 +188,8 @@ dev-docker: linux dev-build @docker buildx use default && docker buildx build -t 'consul:local' -t '$(CONSUL_DEV_IMAGE)' \ --platform linux/$(GOARCH) \ --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \ - --label org.opencontainers.image.version=$(CONSUL_VERSION) \ - --label version=$(CONSUL_VERSION) \ --load \ -f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/ - docker tag 'consul:local' '$(CONSUL_COMPAT_TEST_IMAGE):local' check-remote-dev-image-env: ifndef REMOTE_DEV_IMAGE @@ -213,8 +208,6 @@ remote-docker: check-remote-dev-image-env @docker buildx use consul-builder && docker buildx build -t '$(REMOTE_DEV_IMAGE)' \ --platform linux/amd64,linux/arm64 \ --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \ - --label org.opencontainers.image.version=$(CONSUL_VERSION) \ - --label version=$(CONSUL_VERSION) \ --push \ -f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/ @@ -358,17 +351,16 @@ lint/%: @echo "--> Running enumcover ($*)" @cd $* && GOWORK=off enumcover ./... -# check that the test-container module only imports allowlisted packages -# from the root consul module. Generally we don't want to allow these imports. -# In a few specific instances though it is okay to import test definitions and -# helpers from some of the packages in the root module. .PHONY: lint-container-test-deps lint-container-test-deps: @echo "--> Checking container tests for bad dependencies" - @cd test/integration/consul-container && \ - $(CURDIR)/build-support/scripts/check-allowed-imports.sh \ - github.com/hashicorp/consul \ - internal/catalog/catalogtest + @cd test/integration/consul-container && ( \ + found="$$(go list -m all | grep -c '^github.com/hashicorp/consul ')" ; \ + if [[ "$$found" != "0" ]]; then \ + echo "test/integration/consul-container: This project should not depend on the root consul module" >&2 ; \ + exit 1 ; \ + fi \ + ) # Build the static web ui inside a Docker container. For local testing only; do not commit these assets. 
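[Editorial note, not part of the diff] The new `lint-container-test-deps` recipe above replaces the deleted check-allowed-imports.sh script with an inline `go list -m all | grep` guard. For orientation only, here is a minimal Go sketch of the same check; the directory path is the one named in the Makefile target, and the program shape (a tiny standalone `main`) is purely illustrative.

```go
// Illustrative sketch: fail if the consul-container test module depends on
// the root consul module, mirroring the Makefile's `go list -m all | grep` guard.
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	dir := "./test/integration/consul-container" // path taken from the Makefile target

	cmd := exec.Command("go", "list", "-m", "all")
	cmd.Dir = dir

	var out bytes.Buffer
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		fmt.Fprintf(os.Stderr, "go list failed: %v\n", err)
		os.Exit(1)
	}

	scanner := bufio.NewScanner(&out)
	for scanner.Scan() {
		// `go list -m all` prints one module per line; a dependency on the root
		// module shows up as "github.com/hashicorp/consul <version or replace>".
		if strings.HasPrefix(scanner.Text(), "github.com/hashicorp/consul ") {
			fmt.Fprintln(os.Stderr, dir+": this project should not depend on the root consul module")
			os.Exit(1)
		}
	}
}
```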
ui: ui-docker diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 8057487b2b45d..f9e02f8f11ad2 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -11,12 +11,16 @@ import ( "strings" "time" - "github.com/hashicorp/go-bexpr" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" + "github.com/mitchellh/hashstructure" + + "github.com/hashicorp/consul/envoyextensions/xdscommon" + "github.com/hashicorp/consul/version" + + "github.com/hashicorp/go-bexpr" "github.com/hashicorp/serf/coordinate" "github.com/hashicorp/serf/serf" - "github.com/mitchellh/hashstructure" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -28,13 +32,11 @@ import ( "github.com/hashicorp/consul/agent/structs" token_store "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/envoyextensions/xdscommon" "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/logging" "github.com/hashicorp/consul/logging/monitor" "github.com/hashicorp/consul/types" - "github.com/hashicorp/consul/version" ) type Self struct { diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index c465b687a8800..9f4210ac892a5 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -21,6 +21,10 @@ import ( "time" "github.com/armon/go-metrics" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/version" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" "github.com/hashicorp/serf/serf" @@ -40,14 +44,12 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" tokenStore "github.com/hashicorp/consul/agent/token" - "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/envoyextensions/xdscommon" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/types" - "github.com/hashicorp/consul/version" ) func createACLTokenWithAgentReadPolicy(t *testing.T, srv *HTTPHandlers) string { diff --git a/agent/config/builder.go b/agent/config/builder.go index 98bac1711cace..5d191ce8b3ac3 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -1290,10 +1290,6 @@ func (b *builder) validate(rt RuntimeConfig) error { "1 and 63 bytes.", rt.NodeName) } - if err := rt.StructLocality().Validate(); err != nil { - return fmt.Errorf("locality is invalid: %s", err) - } - if ipaddr.IsAny(rt.AdvertiseAddrLAN.IP) { return fmt.Errorf("Advertise address cannot be 0.0.0.0, :: or [::]") } @@ -1473,7 +1469,7 @@ func (b *builder) validate(rt RuntimeConfig) error { return err } case structs.VaultCAProvider: - if _, err := ca.ParseVaultCAConfig(rt.ConnectCAConfig, rt.PrimaryDatacenter == rt.Datacenter); err != nil { + if _, err := ca.ParseVaultCAConfig(rt.ConnectCAConfig); err != nil { return err } case structs.AWSCAProvider: @@ -2653,10 +2649,10 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error return c, errors.New("verify_outgoing is not valid in the tls.grpc stanza") } - // Similarly, only the internal RPC and defaults configuration honor VerifyServerHostname + // Similarly, only the internal RPC configuration honors VerifyServerHostname // so we call it out here too. 
- if t.GRPC.VerifyServerHostname != nil || t.HTTPS.VerifyServerHostname != nil { - return c, errors.New("verify_server_hostname is only valid in the tls.defaults and tls.internal_rpc stanzas") + if t.Defaults.VerifyServerHostname != nil || t.GRPC.VerifyServerHostname != nil || t.HTTPS.VerifyServerHostname != nil { + return c, errors.New("verify_server_hostname is only valid in the tls.internal_rpc stanza") } // And UseAutoCert right now only applies to external gRPC interface. @@ -2706,11 +2702,8 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error } mapCommon("internal_rpc", t.InternalRPC, &c.InternalRPC) + c.InternalRPC.VerifyServerHostname = boolVal(t.InternalRPC.VerifyServerHostname) - c.InternalRPC.VerifyServerHostname = boolVal(t.Defaults.VerifyServerHostname) - if t.InternalRPC.VerifyServerHostname != nil { - c.InternalRPC.VerifyServerHostname = boolVal(t.InternalRPC.VerifyServerHostname) - } // Setting only verify_server_hostname is documented to imply verify_outgoing. // If it doesn't then we risk sending communication over plain TCP when we // documented it as forcing TLS for RPCs. Enforce this here rather than in diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index b18a631624844..c4d598c10fc3d 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -1038,13 +1038,6 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { }, }, }) - run(t, testCase{ - desc: "locality invalid", - args: []string{`-data-dir=` + dataDir}, - json: []string{`{"locality": {"zone": "us-west-1a"}}`}, - hcl: []string{`locality { zone = "us-west-1a" }`}, - expectedErr: "locality is invalid: zone cannot be set without region", - }) run(t, testCase{ desc: "client addr and ports == 0", args: []string{`-data-dir=` + dataDir}, @@ -2736,44 +2729,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { } } `}, - expected: func(rt *RuntimeConfig) { - rt.DataDir = dataDir - rt.TLS.InternalRPC.VerifyServerHostname = true - rt.TLS.InternalRPC.VerifyOutgoing = true - }, - }) - run(t, testCase{ - desc: "verify_server_hostname in the defaults stanza and internal_rpc", - args: []string{ - `-data-dir=` + dataDir, - }, - hcl: []string{` - tls { - defaults { - verify_server_hostname = false - }, - internal_rpc { - verify_server_hostname = true - } - } - `}, - json: []string{` - { - "tls": { - "defaults": { - "verify_server_hostname": false - }, - "internal_rpc": { - "verify_server_hostname": true - } - } - } - `}, - expected: func(rt *RuntimeConfig) { - rt.DataDir = dataDir - rt.TLS.InternalRPC.VerifyServerHostname = true - rt.TLS.InternalRPC.VerifyOutgoing = true - }, + expectedErr: "verify_server_hostname is only valid in the tls.internal_rpc stanza", }) run(t, testCase{ desc: "verify_server_hostname in the grpc stanza", @@ -2796,7 +2752,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { } } `}, - expectedErr: "verify_server_hostname is only valid in the tls.defaults and tls.internal_rpc stanza", + expectedErr: "verify_server_hostname is only valid in the tls.internal_rpc stanza", }) run(t, testCase{ desc: "verify_server_hostname in the https stanza", @@ -2819,7 +2775,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { } } `}, - expectedErr: "verify_server_hostname is only valid in the tls.defaults and tls.internal_rpc stanza", + expectedErr: "verify_server_hostname is only valid in the tls.internal_rpc stanza", }) run(t, testCase{ desc: "translated keys", @@ -5760,74 +5716,6 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { 
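[Editorial note, not part of the diff] The lines removed from buildTLSConfig above implemented a small default-plus-override pattern: `tls.defaults.verify_server_hostname` seeded the value and `tls.internal_rpc.verify_server_hostname`, when set, overrode it. The sketch below restates that precedence in isolation; the struct and `boolVal` helper are simplified stand-ins for the config builder's real types, shown only to make the `*bool` "unset vs. explicitly false" handling explicit.

```go
// Illustrative sketch of the defaults-then-override precedence removed above.
package main

import "fmt"

// tlsStanza carries the optional flag as *bool so "unset" is distinct from "false".
type tlsStanza struct {
	VerifyServerHostname *bool
}

// boolVal is a local stand-in for the builder's helper: nil means false.
func boolVal(b *bool) bool {
	if b == nil {
		return false
	}
	return *b
}

// resolveVerifyServerHostname starts from tls.defaults and lets
// tls.internal_rpc override it only when the field was actually set.
func resolveVerifyServerHostname(defaults, internalRPC tlsStanza) bool {
	v := boolVal(defaults.VerifyServerHostname)
	if internalRPC.VerifyServerHostname != nil {
		v = boolVal(internalRPC.VerifyServerHostname)
	}
	return v
}

func main() {
	f, tr := false, true
	fmt.Println(resolveVerifyServerHostname(tlsStanza{&f}, tlsStanza{&tr}))  // override wins: true
	fmt.Println(resolveVerifyServerHostname(tlsStanza{&tr}, tlsStanza{nil})) // default applies: true
	fmt.Println(resolveVerifyServerHostname(tlsStanza{&tr}, tlsStanza{&f}))  // explicit false overrides: false
}
```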
rt.TLS.InternalRPC.VerifyOutgoing = true }, }) - run(t, testCase{ - desc: "tls.defaults.verify_server_hostname implies tls.internal_rpc.verify_outgoing", - args: []string{ - `-data-dir=` + dataDir, - }, - json: []string{` - { - "tls": { - "defaults": { - "verify_server_hostname": true - } - } - } - `}, - hcl: []string{` - tls { - defaults { - verify_server_hostname = true - } - } - `}, - expected: func(rt *RuntimeConfig) { - rt.DataDir = dataDir - - rt.TLS.Domain = "consul." - rt.TLS.NodeName = "thehostname" - - rt.TLS.InternalRPC.VerifyServerHostname = true - rt.TLS.InternalRPC.VerifyOutgoing = true - }, - }) - run(t, testCase{ - desc: "tls.internal_rpc.verify_server_hostname overwrites tls.defaults.verify_server_hostname", - args: []string{ - `-data-dir=` + dataDir, - }, - json: []string{` - { - "tls": { - "defaults": { - "verify_server_hostname": false - }, - "internal_rpc": { - "verify_server_hostname": true - } - } - } - `}, - hcl: []string{` - tls { - defaults { - verify_server_hostname = false - }, - internal_rpc { - verify_server_hostname = true - } - } - `}, - expected: func(rt *RuntimeConfig) { - rt.DataDir = dataDir - - rt.TLS.Domain = "consul." - rt.TLS.NodeName = "thehostname" - - rt.TLS.InternalRPC.VerifyServerHostname = true - rt.TLS.InternalRPC.VerifyOutgoing = true - }, - }) run(t, testCase{ desc: "tls.grpc.use_auto_cert defaults to false", args: []string{ diff --git a/agent/connect/ca/provider_test.go b/agent/connect/ca/provider_test.go index 1ff4af397767d..b7ed9e29b412d 100644 --- a/agent/connect/ca/provider_test.go +++ b/agent/connect/ca/provider_test.go @@ -113,7 +113,7 @@ func TestStructs_CAConfiguration_MsgpackEncodeDecode(t *testing.T) { TLSSkipVerify: true, }, parseFunc: func(t *testing.T, raw map[string]interface{}) interface{} { - config, err := ParseVaultCAConfig(raw, true) + config, err := ParseVaultCAConfig(raw) require.NoError(t, err) return config }, diff --git a/agent/connect/ca/provider_vault.go b/agent/connect/ca/provider_vault.go index 89350d87df3e4..00a598d92dea3 100644 --- a/agent/connect/ca/provider_vault.go +++ b/agent/connect/ca/provider_vault.go @@ -101,7 +101,7 @@ func vaultTLSConfig(config *structs.VaultCAProviderConfig) *vaultapi.TLSConfig { // Configure sets up the provider using the given configuration. // Configure supports being called multiple times to re-configure the provider. 
func (v *VaultProvider) Configure(cfg ProviderConfig) error { - config, err := ParseVaultCAConfig(cfg.RawConfig, v.isPrimary) + config, err := ParseVaultCAConfig(cfg.RawConfig) if err != nil { return err } @@ -192,11 +192,11 @@ func (v *VaultProvider) Configure(cfg ProviderConfig) error { } func (v *VaultProvider) ValidateConfigUpdate(prevRaw, nextRaw map[string]interface{}) error { - prev, err := ParseVaultCAConfig(prevRaw, v.isPrimary) + prev, err := ParseVaultCAConfig(prevRaw) if err != nil { return fmt.Errorf("failed to parse existing CA config: %w", err) } - next, err := ParseVaultCAConfig(nextRaw, v.isPrimary) + next, err := ParseVaultCAConfig(nextRaw) if err != nil { return fmt.Errorf("failed to parse new CA config: %w", err) } @@ -800,7 +800,7 @@ func (v *VaultProvider) Cleanup(providerTypeChange bool, otherConfig map[string] v.Stop() if !providerTypeChange { - newConfig, err := ParseVaultCAConfig(otherConfig, v.isPrimary) + newConfig, err := ParseVaultCAConfig(otherConfig) if err != nil { return err } @@ -900,7 +900,7 @@ func (v *VaultProvider) autotidyIssuers(path string) (bool, string) { return tidySet, errStr } -func ParseVaultCAConfig(raw map[string]interface{}, isPrimary bool) (*structs.VaultCAProviderConfig, error) { +func ParseVaultCAConfig(raw map[string]interface{}) (*structs.VaultCAProviderConfig, error) { config := structs.VaultCAProviderConfig{ CommonCAProviderConfig: defaultCommonConfig(), } @@ -931,10 +931,10 @@ func ParseVaultCAConfig(raw map[string]interface{}, isPrimary bool) (*structs.Va return nil, fmt.Errorf("only one of Vault token or Vault auth method can be provided, but not both") } - if isPrimary && config.RootPKIPath == "" { + if config.RootPKIPath == "" { return nil, fmt.Errorf("must provide a valid path to a root PKI backend") } - if config.RootPKIPath != "" && !strings.HasSuffix(config.RootPKIPath, "/") { + if !strings.HasSuffix(config.RootPKIPath, "/") { config.RootPKIPath += "/" } diff --git a/agent/connect/ca/provider_vault_test.go b/agent/connect/ca/provider_vault_test.go index 87dc1a04fe7a0..b0e341fe91eed 100644 --- a/agent/connect/ca/provider_vault_test.go +++ b/agent/connect/ca/provider_vault_test.go @@ -60,7 +60,6 @@ func TestVaultCAProvider_ParseVaultCAConfig(t *testing.T) { cases := map[string]struct { rawConfig map[string]interface{} expConfig *structs.VaultCAProviderConfig - isPrimary bool expError string }{ "no token and no auth method provided": { @@ -71,26 +70,15 @@ func TestVaultCAProvider_ParseVaultCAConfig(t *testing.T) { rawConfig: map[string]interface{}{"Token": "test", "AuthMethod": map[string]interface{}{"Type": "test"}}, expError: "only one of Vault token or Vault auth method can be provided, but not both", }, - "primary no root PKI path": { - rawConfig: map[string]interface{}{"Token": "test", "IntermediatePKIPath": "test"}, - isPrimary: true, + "no root PKI path": { + rawConfig: map[string]interface{}{"Token": "test"}, expError: "must provide a valid path to a root PKI backend", }, - "secondary no root PKI path": { - rawConfig: map[string]interface{}{"Token": "test", "IntermediatePKIPath": "test"}, - isPrimary: false, - expConfig: &structs.VaultCAProviderConfig{ - CommonCAProviderConfig: defaultCommonConfig(), - Token: "test", - IntermediatePKIPath: "test/", - }, - }, "no root intermediate path": { rawConfig: map[string]interface{}{"Token": "test", "RootPKIPath": "test"}, expError: "must provide a valid path for the intermediate PKI backend", }, "adds a slash to RootPKIPath and IntermediatePKIPath": { - isPrimary: true, 
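[Editorial note, not part of the diff] With the isPrimary flag dropped from ParseVaultCAConfig above, RootPKIPath is required unconditionally and both PKI paths are normalized to end in a slash. The sketch below restates that validation on its own; the struct is a trimmed stand-in for the real structs.VaultCAProviderConfig, the error strings are the ones visible in the hunks, and the example mount names are made up.

```go
// Illustrative sketch of the required-path and trailing-slash handling in
// ParseVaultCAConfig, using a trimmed stand-in config type.
package main

import (
	"fmt"
	"strings"
)

type vaultCAConfig struct {
	RootPKIPath         string
	IntermediatePKIPath string
}

// validateAndNormalize requires both PKI mounts and appends a trailing slash
// so later path concatenation against Vault mounts stays well formed.
func validateAndNormalize(cfg *vaultCAConfig) error {
	if cfg.RootPKIPath == "" {
		return fmt.Errorf("must provide a valid path to a root PKI backend")
	}
	if !strings.HasSuffix(cfg.RootPKIPath, "/") {
		cfg.RootPKIPath += "/"
	}

	if cfg.IntermediatePKIPath == "" {
		return fmt.Errorf("must provide a valid path for the intermediate PKI backend")
	}
	if !strings.HasSuffix(cfg.IntermediatePKIPath, "/") {
		cfg.IntermediatePKIPath += "/"
	}
	return nil
}

func main() {
	cfg := &vaultCAConfig{RootPKIPath: "connect_root", IntermediatePKIPath: "connect_inter/"}
	if err := validateAndNormalize(cfg); err != nil {
		fmt.Println("invalid config:", err)
		return
	}
	fmt.Println(cfg.RootPKIPath, cfg.IntermediatePKIPath) // connect_root/ connect_inter/
}
```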
rawConfig: map[string]interface{}{"Token": "test", "RootPKIPath": "test", "IntermediatePKIPath": "test"}, expConfig: &structs.VaultCAProviderConfig{ CommonCAProviderConfig: defaultCommonConfig(), @@ -103,7 +91,7 @@ func TestVaultCAProvider_ParseVaultCAConfig(t *testing.T) { for name, c := range cases { t.Run(name, func(t *testing.T) { - config, err := ParseVaultCAConfig(c.rawConfig, c.isPrimary) + config, err := ParseVaultCAConfig(c.rawConfig) if c.expError != "" { require.EqualError(t, err, c.expError) } else { diff --git a/agent/dns.go b/agent/dns.go index 5804dc97dd8ef..cb1e3c310d0cd 100644 --- a/agent/dns.go +++ b/agent/dns.go @@ -1055,7 +1055,7 @@ func (d *DNSServer) trimDomain(query string) string { longer, shorter = shorter, longer } - if strings.HasSuffix(query, "."+strings.TrimLeft(longer, ".")) { + if strings.HasSuffix(query, longer) { return strings.TrimSuffix(query, longer) } return strings.TrimSuffix(query, shorter) diff --git a/agent/dns_test.go b/agent/dns_test.go index ef5364964dd35..46a7e758c7f1b 100644 --- a/agent/dns_test.go +++ b/agent/dns_test.go @@ -7071,45 +7071,6 @@ func TestDNS_AltDomains_Overlap(t *testing.T) { } } -func TestDNS_AltDomain_DCName_Overlap(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - // this tests the DC name overlap with the consul domain/alt-domain - // we should get response when DC suffix is a prefix of consul alt-domain - t.Parallel() - a := NewTestAgent(t, ` - datacenter = "dc-test" - node_name = "test-node" - alt_domain = "test.consul." - `) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc-test") - - questions := []string{ - "test-node.node.dc-test.consul.", - "test-node.node.dc-test.test.consul.", - } - - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeA) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - require.Len(t, in.Answer, 1) - - aRec, ok := in.Answer[0].(*dns.A) - require.True(t, ok) - require.Equal(t, aRec.A.To4().String(), "127.0.0.1") - } -} - func TestDNS_PreparedQuery_AllowStale(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") diff --git a/agent/proxycfg/mesh_gateway.go b/agent/proxycfg/mesh_gateway.go index 80aa75b78317f..f2fee37d46719 100644 --- a/agent/proxycfg/mesh_gateway.go +++ b/agent/proxycfg/mesh_gateway.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/consul/acl" + cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/leafcert" "github.com/hashicorp/consul/agent/proxycfg/internal/watch" diff --git a/agent/proxycfg/proxycfg.deepcopy.go b/agent/proxycfg/proxycfg.deepcopy.go index d6f11319169ad..5b9d9ce3e7e08 100644 --- a/agent/proxycfg/proxycfg.deepcopy.go +++ b/agent/proxycfg/proxycfg.deepcopy.go @@ -13,10 +13,6 @@ import ( // DeepCopy generates a deep copy of *ConfigSnapshot func (o *ConfigSnapshot) DeepCopy() *ConfigSnapshot { var cp ConfigSnapshot = *o - if o.ServiceLocality != nil { - cp.ServiceLocality = new(structs.Locality) - *cp.ServiceLocality = *o.ServiceLocality - } if o.ServiceMeta != nil { cp.ServiceMeta = make(map[string]string, len(o.ServiceMeta)) for k2, v2 := range o.ServiceMeta { diff --git a/agent/proxycfg/snapshot.go b/agent/proxycfg/snapshot.go index 1d06e5fd8c9c6..00e501f6a9b2c 100644 --- a/agent/proxycfg/snapshot.go +++ b/agent/proxycfg/snapshot.go @@ -925,7 +925,6 @@ func IngressListenerKeyFromListener(l structs.IngressListener) 
IngressListenerKe type ConfigSnapshot struct { Kind structs.ServiceKind Service string - ServiceLocality *structs.Locality ProxyID ProxyID Address string Port int diff --git a/agent/proxycfg/state.go b/agent/proxycfg/state.go index 7bbb7f7b87c5c..55ba287ef1f25 100644 --- a/agent/proxycfg/state.go +++ b/agent/proxycfg/state.go @@ -126,7 +126,6 @@ type serviceInstance struct { taggedAddresses map[string]structs.ServiceAddress proxyCfg structs.ConnectProxyConfig token string - locality *structs.Locality } func copyProxyConfig(ns *structs.NodeService) (structs.ConnectProxyConfig, error) { @@ -247,7 +246,6 @@ func newServiceInstanceFromNodeService(id ProxyID, ns *structs.NodeService, toke return serviceInstance{ kind: ns.Kind, service: ns.Service, - locality: ns.Locality, proxyID: id, address: ns.Address, port: ns.Port, @@ -307,7 +305,6 @@ func newConfigSnapshotFromServiceInstance(s serviceInstance, config stateConfig) return ConfigSnapshot{ Kind: s.kind, Service: s.service, - ServiceLocality: s.locality, ProxyID: s.proxyID, Address: s.address, Port: s.port, diff --git a/agent/structs/config_entry.go b/agent/structs/config_entry.go index 1b433502541ed..c18a8013b6d45 100644 --- a/agent/structs/config_entry.go +++ b/agent/structs/config_entry.go @@ -574,7 +574,7 @@ func (e *ProxyConfigEntry) UnmarshalBinary(data []byte) error { // into a concrete type. // // There is an 'api' variation of this in -// command/helpers/helpers.go:newDecodeConfigEntry +// command/config/write/config_write.go:newDecodeConfigEntry func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) { var entry ConfigEntry diff --git a/agent/structs/config_entry_inline_certificate.go b/agent/structs/config_entry_inline_certificate.go index de11f2c950456..17ffa9082b6fd 100644 --- a/agent/structs/config_entry_inline_certificate.go +++ b/agent/structs/config_entry_inline_certificate.go @@ -58,7 +58,6 @@ func (e *InlineCertificateConfigEntry) Validate() error { if privateKeyBlock == nil { return errors.New("failed to parse private key PEM") } - err = validateKeyLength(privateKeyBlock) if err != nil { return err diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 59385fa5ba448..f56dd8f6aad16 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -1480,10 +1480,6 @@ func (s *NodeService) IsGateway() bool { func (s *NodeService) Validate() error { var result error - if err := s.Locality.Validate(); err != nil { - result = multierror.Append(result, err) - } - if s.Kind == ServiceKindConnectProxy { if s.Port == 0 && s.SocketPath == "" { result = multierror.Append(result, fmt.Errorf("Port or SocketPath must be set for a %s", s.Kind)) @@ -2095,18 +2091,6 @@ func (csn *CheckServiceNode) CanRead(authz acl.Authorizer) acl.EnforcementDecisi return acl.Allow } -func (csn *CheckServiceNode) Locality() *Locality { - if csn.Service != nil && csn.Service.Locality != nil { - return csn.Service.Locality - } - - if csn.Node != nil && csn.Node.Locality != nil { - return csn.Node.Locality - } - - return nil -} - type CheckServiceNodes []CheckServiceNode func (csns CheckServiceNodes) DeepCopy() CheckServiceNodes { @@ -3128,15 +3112,3 @@ func (l *Locality) GetRegion() string { } return l.Region } - -func (l *Locality) Validate() error { - if l == nil { - return nil - } - - if l.Region == "" && l.Zone != "" { - return fmt.Errorf("zone cannot be set without region") - } - - return nil -} diff --git a/agent/structs/structs_test.go b/agent/structs/structs_test.go index 668f5fb08faed..6d887da9ac776 100644 --- 
a/agent/structs/structs_test.go +++ b/agent/structs/structs_test.go @@ -592,43 +592,6 @@ func TestStructs_ServiceNode_Conversions(t *testing.T) { } } -func TestStructs_Locality_Validate(t *testing.T) { - type testCase struct { - locality *Locality - err string - } - cases := map[string]testCase{ - "nil": { - nil, - "", - }, - "region only": { - &Locality{Region: "us-west-1"}, - "", - }, - "region and zone": { - &Locality{Region: "us-west-1", Zone: "us-west-1a"}, - "", - }, - "zone only": { - &Locality{Zone: "us-west-1a"}, - "zone cannot be set without region", - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - err := tc.locality.Validate() - if tc.err == "" { - require.NoError(t, err) - } else { - require.Error(t, err) - require.Contains(t, err.Error(), tc.err) - } - }) - } -} - func TestStructs_NodeService_ValidateMeshGateway(t *testing.T) { type testCase struct { Modify func(*NodeService) @@ -1189,13 +1152,6 @@ func TestStructs_NodeService_ValidateConnectProxy(t *testing.T) { }, "", }, - { - "connect-proxy: invalid locality", - func(x *NodeService) { - x.Locality = &Locality{Zone: "bad"} - }, - "zone cannot be set without region", - }, } for _, tc := range cases { diff --git a/agent/xds/endpoints.go b/agent/xds/endpoints.go index aef2dc31c9f08..ad0397133666c 100644 --- a/agent/xds/endpoints.go +++ b/agent/xds/endpoints.go @@ -135,9 +135,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg. endpoints, ok := cfgSnap.ConnectProxy.PreparedQueryEndpoints[uid] if ok { la := makeLoadAssignment( - cfgSnap, clusterName, - nil, []loadAssignmentEndpointGroup{ {Endpoints: endpoints}, }, @@ -160,9 +158,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg. endpoints, ok := cfgSnap.ConnectProxy.DestinationGateways.Get(uid) if ok { la := makeLoadAssignment( - cfgSnap, name, - nil, []loadAssignmentEndpointGroup{ {Endpoints: endpoints}, }, @@ -228,9 +224,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.C clusterName := connect.GatewaySNI(key.Datacenter, key.Partition, cfgSnap.Roots.TrustDomain) la := makeLoadAssignment( - cfgSnap, clusterName, - nil, []loadAssignmentEndpointGroup{ {Endpoints: endpoints}, }, @@ -245,9 +239,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.C clusterName := cfgSnap.ServerSNIFn(key.Datacenter, "") la := makeLoadAssignment( - cfgSnap, clusterName, - nil, []loadAssignmentEndpointGroup{ {Endpoints: endpoints}, }, @@ -417,9 +409,7 @@ func (s *ResourceGenerator) endpointsFromServicesAndResolvers( for subsetName, groups := range clusterEndpoints { clusterName := connect.ServiceSNI(svc.Name, subsetName, svc.NamespaceOrDefault(), svc.PartitionOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain) la := makeLoadAssignment( - cfgSnap, clusterName, - nil, groups, cfgSnap.Locality, ) @@ -454,9 +444,7 @@ func (s *ResourceGenerator) makeEndpointsForOutgoingPeeredServices( groups := []loadAssignmentEndpointGroup{{Endpoints: serviceGroup.Nodes, OnlyPassing: false}} la := makeLoadAssignment( - cfgSnap, clusterName, - nil, groups, // Use an empty key here so that it never matches. This will force the mesh gateway to always // reference the remote mesh gateway's wan addr. 
@@ -618,9 +606,7 @@ func (s *ResourceGenerator) makeUpstreamLoadAssignmentForPeerService( return la, nil } la = makeLoadAssignment( - cfgSnap, clusterName, - nil, []loadAssignmentEndpointGroup{ {Endpoints: localGw}, }, @@ -640,9 +626,7 @@ func (s *ResourceGenerator) makeUpstreamLoadAssignmentForPeerService( return nil, nil } la = makeLoadAssignment( - cfgSnap, clusterName, - nil, []loadAssignmentEndpointGroup{ {Endpoints: endpoints}, }, @@ -772,9 +756,7 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain( } la := makeLoadAssignment( - cfgSnap, clusterName, - ti.PrioritizeByLocality, []loadAssignmentEndpointGroup{endpointGroup}, gatewayKey, ) @@ -860,7 +842,7 @@ type loadAssignmentEndpointGroup struct { OverrideHealth envoy_core_v3.HealthStatus } -func makeLoadAssignment(cfgSnap *proxycfg.ConfigSnapshot, clusterName string, policy *structs.DiscoveryPrioritizeByLocality, endpointGroups []loadAssignmentEndpointGroup, localKey proxycfg.GatewayKey) *envoy_endpoint_v3.ClusterLoadAssignment { +func makeLoadAssignment(clusterName string, endpointGroups []loadAssignmentEndpointGroup, localKey proxycfg.GatewayKey) *envoy_endpoint_v3.ClusterLoadAssignment { cla := &envoy_endpoint_v3.ClusterLoadAssignment{ ClusterName: clusterName, Endpoints: make([]*envoy_endpoint_v3.LocalityLbEndpoints, 0, len(endpointGroups)), @@ -874,46 +856,35 @@ func makeLoadAssignment(cfgSnap *proxycfg.ConfigSnapshot, clusterName string, po } } - var priority uint32 - - for _, endpointGroup := range endpointGroups { - endpointsByLocality, err := groupedEndpoints(cfgSnap.ServiceLocality, policy, endpointGroup.Endpoints) - - if err != nil { - continue - } - - for _, endpoints := range endpointsByLocality { - es := make([]*envoy_endpoint_v3.LbEndpoint, 0, len(endpointGroup.Endpoints)) - - for _, ep := range endpoints { - // TODO (mesh-gateway) - should we respect the translate_wan_addrs configuration here or just always use the wan for cross-dc? - _, addr, port := ep.BestAddress(!localKey.Matches(ep.Node.Datacenter, ep.Node.PartitionOrDefault())) - healthStatus, weight := calculateEndpointHealthAndWeight(ep, endpointGroup.OnlyPassing) + for priority, endpointGroup := range endpointGroups { + endpoints := endpointGroup.Endpoints + es := make([]*envoy_endpoint_v3.LbEndpoint, 0, len(endpoints)) - if endpointGroup.OverrideHealth != envoy_core_v3.HealthStatus_UNKNOWN { - healthStatus = endpointGroup.OverrideHealth - } + for _, ep := range endpoints { + // TODO (mesh-gateway) - should we respect the translate_wan_addrs configuration here or just always use the wan for cross-dc? 
+ _, addr, port := ep.BestAddress(!localKey.Matches(ep.Node.Datacenter, ep.Node.PartitionOrDefault())) + healthStatus, weight := calculateEndpointHealthAndWeight(ep, endpointGroup.OnlyPassing) - endpoint := &envoy_endpoint_v3.Endpoint{ - Address: makeAddress(addr, port), - } - es = append(es, &envoy_endpoint_v3.LbEndpoint{ - HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{ - Endpoint: endpoint, - }, - HealthStatus: healthStatus, - LoadBalancingWeight: makeUint32Value(weight), - }) + if endpointGroup.OverrideHealth != envoy_core_v3.HealthStatus_UNKNOWN { + healthStatus = endpointGroup.OverrideHealth } - cla.Endpoints = append(cla.Endpoints, &envoy_endpoint_v3.LocalityLbEndpoints{ - Priority: priority, - LbEndpoints: es, + endpoint := &envoy_endpoint_v3.Endpoint{ + Address: makeAddress(addr, port), + } + es = append(es, &envoy_endpoint_v3.LbEndpoint{ + HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{ + Endpoint: endpoint, + }, + HealthStatus: healthStatus, + LoadBalancingWeight: makeUint32Value(weight), }) - - priority++ } + + cla.Endpoints = append(cla.Endpoints, &envoy_endpoint_v3.LocalityLbEndpoints{ + Priority: uint32(priority), + LbEndpoints: es, + }) } return cla diff --git a/agent/xds/endpoints_test.go b/agent/xds/endpoints_test.go index eee35103aa618..ebdd06aa41e23 100644 --- a/agent/xds/endpoints_test.go +++ b/agent/xds/endpoints_test.go @@ -101,7 +101,6 @@ func Test_makeLoadAssignment(t *testing.T) { tests := []struct { name string clusterName string - locality *structs.Locality endpoints []loadAssignmentEndpointGroup want *envoy_endpoint_v3.ClusterLoadAssignment }{ @@ -212,24 +211,11 @@ func Test_makeLoadAssignment(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := makeLoadAssignment( - &proxycfg.ConfigSnapshot{ServiceLocality: tt.locality}, tt.clusterName, - nil, tt.endpoints, proxycfg.GatewayKey{Datacenter: "dc1"}, ) require.Equal(t, tt.want, got) - - if tt.locality == nil { - got := makeLoadAssignment( - &proxycfg.ConfigSnapshot{ServiceLocality: &structs.Locality{Region: "us-west-1", Zone: "us-west-1a"}}, - tt.clusterName, - nil, - tt.endpoints, - proxycfg.GatewayKey{Datacenter: "dc1"}, - ) - require.Equal(t, tt.want, got) - } }) } } diff --git a/agent/xds/failover_policy.go b/agent/xds/failover_policy.go index 77839a37cfeb8..5edcae914d52c 100644 --- a/agent/xds/failover_policy.go +++ b/agent/xds/failover_policy.go @@ -27,8 +27,6 @@ type targetInfo struct { // Region is the region from the failover target's Locality. nil means the // target is in the local Consul cluster. Region *string - - PrioritizeByLocality *structs.DiscoveryPrioritizeByLocality } type discoChainTargetGroup struct { @@ -89,7 +87,7 @@ func (s *ResourceGenerator) mapDiscoChainTargets(cfgSnap *proxycfg.ConfigSnapsho var sni, rootPEMs string var spiffeIDs []string targetUID := proxycfg.NewUpstreamIDFromTargetID(tid) - ti := targetInfo{TargetID: tid, PrioritizeByLocality: target.PrioritizeByLocality} + ti := targetInfo{TargetID: tid} configureTLS := true if forMeshGateway { diff --git a/agent/xds/locality_policy.go b/agent/xds/locality_policy.go deleted file mode 100644 index d2dd977f1ae74..0000000000000 --- a/agent/xds/locality_policy.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
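[Editorial note, not part of the diff] The rewritten makeLoadAssignment loop above drops the locality grouping, so each loadAssignmentEndpointGroup maps straight onto one locality-endpoints entry whose priority is simply its index in the slice. Below is a simplified sketch of that per-group priority assignment; it uses hypothetical local types instead of the Envoy protobufs and structs.CheckServiceNodes, purely to show the indexing.

```go
// Illustrative sketch: endpoint group i becomes Envoy priority i.
package main

import "fmt"

// endpoint and endpointGroup are hypothetical, simplified shapes standing in
// for the real check-service-node and loadAssignmentEndpointGroup types.
type endpoint struct {
	Addr string
	Port int
}

type endpointGroup struct {
	Endpoints []endpoint
}

// localityEndpoints stands in for envoy LocalityLbEndpoints: a priority plus
// the addresses served at that priority.
type localityEndpoints struct {
	Priority  uint32
	Addresses []string
}

func makeLoadAssignmentSketch(groups []endpointGroup) []localityEndpoints {
	out := make([]localityEndpoints, 0, len(groups))
	for priority, group := range groups {
		addrs := make([]string, 0, len(group.Endpoints))
		for _, ep := range group.Endpoints {
			addrs = append(addrs, fmt.Sprintf("%s:%d", ep.Addr, ep.Port))
		}
		out = append(out, localityEndpoints{Priority: uint32(priority), Addresses: addrs})
	}
	return out
}

func main() {
	groups := []endpointGroup{
		{Endpoints: []endpoint{{Addr: "10.0.0.1", Port: 8080}}}, // primary targets -> priority 0
		{Endpoints: []endpoint{{Addr: "10.1.0.1", Port: 8080}}}, // failover targets -> priority 1
	}
	for _, le := range makeLoadAssignmentSketch(groups) {
		fmt.Println(le.Priority, le.Addresses)
	}
}
```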
-// SPDX-License-Identifier: MPL-2.0 - -package xds - -import ( - "fmt" - - "github.com/hashicorp/consul/agent/structs" -) - -func groupedEndpoints(locality *structs.Locality, policy *structs.DiscoveryPrioritizeByLocality, csns structs.CheckServiceNodes) ([]structs.CheckServiceNodes, error) { - switch { - case policy == nil || policy.Mode == "" || policy.Mode == "none": - return []structs.CheckServiceNodes{csns}, nil - case policy.Mode == "failover": - return prioritizeByLocalityFailover(locality, csns), nil - default: - return nil, fmt.Errorf("unexpected priortize-by-locality mode %q", policy.Mode) - } -} diff --git a/agent/xds/locality_policy_oss.go b/agent/xds/locality_policy_oss.go deleted file mode 100644 index 16147aeb0c0d9..0000000000000 --- a/agent/xds/locality_policy_oss.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -//go:build !consulent -// +build !consulent - -package xds - -import ( - "github.com/hashicorp/consul/agent/structs" -) - -func prioritizeByLocalityFailover(locality *structs.Locality, csns structs.CheckServiceNodes) []structs.CheckServiceNodes { - return nil -} diff --git a/api/go.mod b/api/go.mod index 335a6df7ce13b..ddc961f8bd74c 100644 --- a/api/go.mod +++ b/api/go.mod @@ -6,7 +6,7 @@ replace github.com/hashicorp/consul/sdk => ../sdk require ( github.com/google/go-cmp v0.5.9 - github.com/hashicorp/consul/sdk v0.13.1 + github.com/hashicorp/consul/sdk v0.14.0-rc1 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-hclog v1.5.0 github.com/hashicorp/go-rootcerts v1.0.2 diff --git a/api/go.sum b/api/go.sum index fd85203e346fc..b0041f05248ad 100644 --- a/api/go.sum +++ b/api/go.sum @@ -43,6 +43,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/hashicorp/consul/sdk v0.14.0-rc1 h1:PuETOfN0uxl28i0Pq6rK7TBCrIl7psMbL0YTSje4KvM= +github.com/hashicorp/consul/sdk v0.14.0-rc1/go.mod h1:gHYeuDa0+0qRAD6Wwr6yznMBvBwHKoxSBoW5l73+saE= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= diff --git a/api/watch/funcs_test.go b/api/watch/funcs_test.go index 4bd79a59c14f9..91318009ceac9 100644 --- a/api/watch/funcs_test.go +++ b/api/watch/funcs_test.go @@ -1196,110 +1196,6 @@ func TestChecksWatch_Filter(t *testing.T) { } } -func TestChecksWatch_Filter_by_ServiceNameStatus(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - s.WaitForSerfCheck(t) - - var ( - wakeups [][]*api.HealthCheck - notifyCh = make(chan struct{}) - ) - - plan := mustParse(t, `{"type":"checks", "filter":"ServiceName == bar and Status == critical"}`) - plan.Handler = func(idx uint64, raw interface{}) { - if raw == nil { - return // ignore - } - v, ok := raw.([]*api.HealthCheck) - if !ok { - return // ignore - } - wakeups = append(wakeups, v) - notifyCh <- struct{}{} - } - - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - if err := plan.Run(s.HTTPAddr); err != nil { - t.Errorf("err: %v", err) - } - }() - defer plan.Stop() - - // Wait for first wakeup. 
- <-notifyCh - { - catalog := c.Catalog() - - // we don't want to find this one - reg := &api.CatalogRegistration{ - Node: "foo", - Address: "1.1.1.1", - Datacenter: "dc1", - Service: &api.AgentService{ - ID: "foo", - Service: "foo", - Tags: []string{"a"}, - }, - Check: &api.AgentCheck{ - Node: "foo", - CheckID: "foo", - Name: "foo", - Status: api.HealthPassing, - ServiceID: "foo", - }, - } - if _, err := catalog.Register(reg, nil); err != nil { - t.Fatalf("err: %v", err) - } - - // we want to find this one - reg = &api.CatalogRegistration{ - Node: "bar", - Address: "2.2.2.2", - Datacenter: "dc1", - Service: &api.AgentService{ - ID: "bar", - Service: "bar", - Tags: []string{"a", "b"}, - }, - Check: &api.AgentCheck{ - Node: "bar", - CheckID: "bar", - Name: "bar", - Status: api.HealthCritical, - ServiceID: "bar", - }, - } - if _, err := catalog.Register(reg, nil); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Wait for second wakeup. - <-notifyCh - - plan.Stop() - wg.Wait() - - require.Len(t, wakeups, 2) - - { - v := wakeups[0] - require.Len(t, v, 0) - } - { - v := wakeups[1] - require.Len(t, v, 1) - require.Equal(t, "bar", v[0].CheckID) - } -} - func TestEventWatch(t *testing.T) { t.Parallel() c, s := makeClient(t) diff --git a/build-support/scripts/check-allowed-imports.sh b/build-support/scripts/check-allowed-imports.sh deleted file mode 100755 index fb0280e6ff08c..0000000000000 --- a/build-support/scripts/check-allowed-imports.sh +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -readonly SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" -readonly SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" -readonly SOURCE_DIR="$(dirname "$(dirname "${SCRIPT_DIR}")")" -readonly FN_DIR="$(dirname "${SCRIPT_DIR}")/functions" - -source "${SCRIPT_DIR}/functions.sh" - - -set -uo pipefail - -usage() { -cat <<-EOF -Usage: ${SCRIPT_NAME} [...] - -Description: - Verifies that only the specified packages may be imported from the given module - -Options: - -h | --help Print this help text. -EOF -} - -function err_usage { - err "$1" - err "" - err "$(usage)" -} - -function main { - local module_root="" - declare -a allowed_packages=() - while test $# -gt 0 - do - case "$1" in - -h | --help ) - usage - return 0 - ;; - * ) - if test -z "$module_root" - then - module_root="$1" - else - allowed_packages+="$1" - fi - shift - esac - done - - # If we could guarantee this ran with bash 4.2+ then the final argument could - # be just ${allowed_packages[@]}. However that with older versions of bash - # in combination with set -u causes bash to emit errors about using unbound - # variables when no allowed packages have been specified (i.e. the module should - # generally be disallowed with no exceptions). This syntax is very strange - # but seems to be the prescribed workaround I found. - check_imports "$module_root" ${allowed_packages[@]+"${allowed_packages[@]}"} - return $? -} - -function check_imports { - local module_root="$1" - shift - local allowed_packages="$@" - - module_imports=$( go list -test -f '{{join .TestImports "\n"}}' ./... | grep "$module_root" | sort | uniq) - module_test_imports=$( go list -test -f '{{join .TestImports "\n"}}' ./... | grep "$module_root" | sort | uniq) - - any_error=0 - - for imp in $module_imports - do - is_import_allowed "$imp" "$module_root" $allowed_packages - allowed=$? 
- - if test $any_error -ne 1 - then - any_error=$allowed - fi - done - - if test $any_error -eq 1 - then - echo "Only the following direct imports are allowed from module $module_root:" - for pkg in $allowed_packages - do - echo " * $pkg" - done - fi - - return $any_error -} - -function is_import_allowed { - local pkg_import=$1 - shift - local module_root=$1 - shift - local allowed_packages="$@" - - # check if the import path is a part of the module we are restricting imports for - if test "$( go list -f '{{.Module.Path}}' $pkg_import)" != "$module_root" - then - return 0 - fi - - for pkg in $allowed_packages - do - if test "${module_root}/$pkg" == "$pkg_import" - then - return 0 - fi - done - - err "Import of package $pkg_import is not allowed" - return 1 -} - -main "$@" -exit $? \ No newline at end of file diff --git a/build-support/scripts/protobuf.sh b/build-support/scripts/protobuf.sh index f7b8ce5594870..420d66d6a11bd 100755 --- a/build-support/scripts/protobuf.sh +++ b/build-support/scripts/protobuf.sh @@ -72,10 +72,6 @@ function main { status "Generated gRPC rate limit mapping file" - generate_protoset_file - - status "Generated protoset file" - return 0 } @@ -156,11 +152,5 @@ function generate_rate_limit_mappings { } } -function generate_protoset_file { - local pkg_dir="${SOURCE_DIR}/pkg" - mkdir -p "$pkg_dir" - print_run buf build -o "${pkg_dir}/consul.protoset" -} - main "$@" exit $? diff --git a/command/config/write/config_write.go b/command/config/write/config_write.go index d8e8aff20ad8d..d6a0c188b8fa1 100644 --- a/command/config/write/config_write.go +++ b/command/config/write/config_write.go @@ -7,12 +7,17 @@ import ( "flag" "fmt" "io" + "time" + "github.com/hashicorp/go-multierror" "github.com/mitchellh/cli" + "github.com/mitchellh/mapstructure" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/config" "github.com/hashicorp/consul/command/flags" "github.com/hashicorp/consul/command/helpers" + "github.com/hashicorp/consul/lib/decode" ) func New(ui cli.Ui) *cmd { @@ -104,6 +109,67 @@ func (c *cmd) Run(args []string) int { return 0 } +// There is a 'structs' variation of this in +// agent/structs/config_entry.go:DecodeConfigEntry +func newDecodeConfigEntry(raw map[string]interface{}) (api.ConfigEntry, error) { + var entry api.ConfigEntry + + kindVal, ok := raw["Kind"] + if !ok { + kindVal, ok = raw["kind"] + } + if !ok { + return nil, fmt.Errorf("Payload does not contain a kind/Kind key at the top level") + } + + if kindStr, ok := kindVal.(string); ok { + newEntry, err := api.MakeConfigEntry(kindStr, "") + if err != nil { + return nil, err + } + entry = newEntry + } else { + return nil, fmt.Errorf("Kind value in payload is not a string") + } + + var md mapstructure.Metadata + decodeConf := &mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + decode.HookWeakDecodeFromSlice, + decode.HookTranslateKeys, + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToTimeHookFunc(time.RFC3339), + ), + Metadata: &md, + Result: &entry, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decodeConf) + if err != nil { + return nil, err + } + + if err := decoder.Decode(raw); err != nil { + return nil, err + } + + for _, k := range md.Unused { + switch k { + case "kind", "Kind": + // The kind field is used to determine the target, but doesn't need + // to exist on the target. 
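As a standalone illustration of the decoding pattern used by newDecodeConfigEntry above: a weakly typed mapstructure decode with a string-to-duration hook, followed by a check of Metadata.Unused for unknown keys. ExampleEntry and decodeExample are made-up names for this sketch.

```go
// Sketch of the decode-with-hooks-and-unused-key-check pattern shown above.
package decodesketch

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

type ExampleEntry struct {
	Kind    string
	Name    string
	Timeout time.Duration
}

func decodeExample(raw map[string]interface{}) (*ExampleEntry, error) {
	var entry ExampleEntry
	var md mapstructure.Metadata

	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
		Metadata:         &md,
		Result:           &entry,
		WeaklyTypedInput: true,
	})
	if err != nil {
		return nil, err
	}
	if err := decoder.Decode(raw); err != nil {
		return nil, err
	}

	// Any key left in md.Unused did not map onto a field of ExampleEntry.
	for _, k := range md.Unused {
		return nil, fmt.Errorf("invalid config key %q", k)
	}
	return &entry, nil
}
```

A call like decodeExample(map[string]interface{}{"Kind": "service-defaults", "Name": "web", "Timeout": "5s"}) would yield a five-second Timeout, while any unrecognized key would produce an error.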
+ continue + } + err = multierror.Append(err, fmt.Errorf("invalid config key %q", k)) + } + if err != nil { + return nil, err + } + + return entry, nil +} + func (c *cmd) Synopsis() string { return synopsis } diff --git a/docs/README.md b/docs/README.md index 8bebb848c9b11..d3483710b33bd 100644 --- a/docs/README.md +++ b/docs/README.md @@ -40,7 +40,6 @@ Also see the [FAQ](./faq.md). 1. [Integration Tests](../test/integration/connect/envoy/README.md) 1. [Upgrade Tests](../test/integration/consul-container/test/upgrade/README.md) -1. [Remote Debugging Integration Tests](../test/integration/consul-container/test/debugging.md) ## Important Directories diff --git a/envoyextensions/go.mod b/envoyextensions/go.mod index 6a6128fa6cee8..e426b50365de9 100644 --- a/envoyextensions/go.mod +++ b/envoyextensions/go.mod @@ -6,8 +6,8 @@ replace github.com/hashicorp/consul/api => ../api require ( github.com/envoyproxy/go-control-plane v0.11.0 - github.com/hashicorp/consul/api v1.20.0 - github.com/hashicorp/consul/sdk v0.13.1 + github.com/hashicorp/consul/api v1.22.0-rc1 + github.com/hashicorp/consul/sdk v0.14.0-rc1 github.com/hashicorp/go-hclog v1.5.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-version v1.2.1 @@ -30,7 +30,6 @@ require ( github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/serf v0.10.1 // indirect - github.com/kr/pretty v0.3.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -40,6 +39,5 @@ require ( golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/sys v0.8.0 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/envoyextensions/go.sum b/envoyextensions/go.sum index 52d5f9ed00c22..929a26218e652 100644 --- a/envoyextensions/go.sum +++ b/envoyextensions/go.sum @@ -24,7 +24,6 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -62,8 +61,10 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY= -github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0= +github.com/hashicorp/consul/api v1.22.0-rc1 h1:ePmGqndeMgaI38KUbSA/CqTzeEAIogXyWnfNJzglo70= +github.com/hashicorp/consul/api v1.22.0-rc1/go.mod 
h1:wtduXtbAqSGtBdi3tyA5SSAYGAG51rBejV9SEUBciMY= +github.com/hashicorp/consul/sdk v0.14.0-rc1 h1:PuETOfN0uxl28i0Pq6rK7TBCrIl7psMbL0YTSje4KvM= +github.com/hashicorp/consul/sdk v0.14.0-rc1/go.mod h1:gHYeuDa0+0qRAD6Wwr6yznMBvBwHKoxSBoW5l73+saE= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -108,13 +109,10 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -169,8 +167,7 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -265,11 +262,8 @@ google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cn google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod 
h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/internal/catalog/catalogtest/run_test.go b/internal/catalog/catalogtest/run_test.go index defaad2a16d65..7c17052d8246d 100644 --- a/internal/catalog/catalogtest/run_test.go +++ b/internal/catalog/catalogtest/run_test.go @@ -37,8 +37,3 @@ func TestControllers_Integration(t *testing.T) { client := runInMemResourceServiceAndControllers(t, catalog.DefaultControllerDependencies()) RunCatalogV1Alpha1IntegrationTest(t, client) } - -func TestControllers_Lifecycle(t *testing.T) { - client := runInMemResourceServiceAndControllers(t, catalog.DefaultControllerDependencies()) - RunCatalogV1Alpha1LifecycleIntegrationTest(t, client) -} diff --git a/internal/catalog/catalogtest/test_integration_v1alpha1.go b/internal/catalog/catalogtest/test_integration_v1alpha1.go index 19be6d7a48462..8a7f4cd9a2488 100644 --- a/internal/catalog/catalogtest/test_integration_v1alpha1.go +++ b/internal/catalog/catalogtest/test_integration_v1alpha1.go @@ -698,7 +698,6 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S } func verifyServiceEndpoints(t *testing.T, c *rtest.Client, id *pbresource.ID, expected *pbcatalog.ServiceEndpoints) { - t.Helper() c.WaitForResourceState(t, id, func(t rtest.T, res *pbresource.Resource) { var actual pbcatalog.ServiceEndpoints err := res.Data.UnmarshalTo(&actual) diff --git a/internal/catalog/catalogtest/test_lifecycle_v1alpha1.go b/internal/catalog/catalogtest/test_lifecycle_v1alpha1.go deleted file mode 100644 index d7529a6ec48c8..0000000000000 --- a/internal/catalog/catalogtest/test_lifecycle_v1alpha1.go +++ /dev/null @@ -1,706 +0,0 @@ -package catalogtest - -import ( - "testing" - - "github.com/hashicorp/consul/internal/catalog" - rtest "github.com/hashicorp/consul/internal/resource/resourcetest" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/sdk/testutil" -) - -// RunCatalogV1Alpha1LifecycleIntegrationTest intends to excercise functionality of -// managing catalog resources over their normal lifecycle where they will be modified -// several times, change state etc. -func RunCatalogV1Alpha1LifecycleIntegrationTest(t *testing.T, client pbresource.ResourceServiceClient) { - t.Helper() - - testutil.RunStep(t, "node-lifecycle", func(t *testing.T) { - RunCatalogV1Alpha1NodeLifecycleIntegrationTest(t, client) - }) - - testutil.RunStep(t, "workload-lifecycle", func(t *testing.T) { - RunCatalogV1Alpha1WorkloadLifecycleIntegrationTest(t, client) - }) - - testutil.RunStep(t, "endpoints-lifecycle", func(t *testing.T) { - RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t, client) - }) -} - -// RunCatalogV1Alpha1NodeLifecycleIntegrationTest verifies correct functionality of -// the node-health controller. 
This test will exercise the following behaviors: -// -// * Creating a Node without associated HealthStatuses will mark the node as passing -// * Associating a HealthStatus with a Node will cause recomputation of the Health -// * Changing HealthStatus to a worse health will cause recomputation of the Health -// * Changing HealthStatus to a better health will cause recomputation of the Health -// * Deletion of associated HealthStatuses will recompute the Health (back to passing) -// * Deletion of the node will cause deletion of associated health statuses -func RunCatalogV1Alpha1NodeLifecycleIntegrationTest(t *testing.T, client pbresource.ResourceServiceClient) { - c := rtest.NewClient(client) - - nodeName := "test-lifecycle" - nodeHealthName := "test-lifecycle-node-status" - - // initial node creation - node := rtest.Resource(catalog.NodeV1Alpha1Type, nodeName). - WithData(t, &pbcatalog.Node{ - Addresses: []*pbcatalog.NodeAddress{ - {Host: "172.16.2.3"}, - {Host: "198.18.2.3", External: true}, - }, - }). - Write(t, c) - - // wait for the node health controller to mark the node as healthy - c.WaitForStatusCondition(t, node.Id, - catalog.NodeHealthStatusKey, - catalog.NodeHealthConditions[pbcatalog.Health_HEALTH_PASSING]) - - // Its easy enough to simply repeatedly set the health status and it proves - // that going both from better to worse health and worse to better all - // happen as expected. We leave the health in a warning state to allow for - // the subsequent health status deletion to cause the health to go back - // to passing. - healthChanges := []pbcatalog.Health{ - pbcatalog.Health_HEALTH_PASSING, - pbcatalog.Health_HEALTH_WARNING, - pbcatalog.Health_HEALTH_CRITICAL, - pbcatalog.Health_HEALTH_MAINTENANCE, - pbcatalog.Health_HEALTH_CRITICAL, - pbcatalog.Health_HEALTH_WARNING, - pbcatalog.Health_HEALTH_PASSING, - pbcatalog.Health_HEALTH_WARNING, - } - - // This will be set within the loop and used afterwards to delete the health status - var nodeHealth *pbresource.Resource - - // Iterate through the various desired health statuses, updating - // a HealthStatus resource owned by the node and waiting for - // reconciliation at each point - for _, health := range healthChanges { - // update the health check - nodeHealth = setHealthStatus(t, c, node.Id, nodeHealthName, health) - - // wait for reconciliation to kick in and put the node into the right - // health status. - c.WaitForStatusCondition(t, node.Id, - catalog.NodeHealthStatusKey, - catalog.NodeHealthConditions[health]) - } - - // now delete the health status and ensure things go back to passing - c.MustDelete(t, nodeHealth.Id) - - // wait for the node health controller to mark the node as healthy - c.WaitForStatusCondition(t, node.Id, - catalog.NodeHealthStatusKey, - catalog.NodeHealthConditions[pbcatalog.Health_HEALTH_PASSING]) - - // Add the health status back once more, the actual status doesn't matter. - // It just must be owned by the node so that we can show cascading - // deletions of owned health statuses working. - healthStatus := setHealthStatus(t, c, node.Id, nodeHealthName, pbcatalog.Health_HEALTH_CRITICAL) - - // Delete the node and wait for the health status to be deleted. - c.MustDelete(t, node.Id) - c.WaitForDeletion(t, healthStatus.Id) -} - -// RunCatalogV1Alpha1WorkloadLifecycleIntegrationTest verifies correct functionality of -// the workload-health controller. 
This test will exercise the following behaviors: -// -// - Associating a workload with a node causes recomputation of the health and takes -// into account the nodes health -// - Modifying the workloads associated node causes health recomputation and takes into -// account the new nodes health -// - Removal of the node association causes recomputation of health and for no node health -// to be taken into account. -// - Creating a workload without associated health statuses or node association will -// be marked passing -// - Creating a workload without associated health statuses but with a node will -// inherit its health from the node. -// - Changing HealthStatus to a worse health will cause recompuation of the Health -// - Changing HealthStatus to a better health will cause recompuation of the Health -// - Overall health is computed as the worst health amongst the nodes health and all -// of the workloads associated HealthStatuses -// - Deletion of the workload will cause deletion of all associated health statuses. -func RunCatalogV1Alpha1WorkloadLifecycleIntegrationTest(t *testing.T, client pbresource.ResourceServiceClient) { - c := rtest.NewClient(client) - testutil.RunStep(t, "nodeless-workload", func(t *testing.T) { - runV1Alpha1NodelessWorkloadLifecycleIntegrationTest(t, c) - }) - - testutil.RunStep(t, "node-associated-workload", func(t *testing.T) { - runV1Alpha1NodeAssociatedWorkloadLifecycleIntegrationTest(t, c) - }) -} - -// runV1Alpha1NodelessWorkloadLifecycleIntegrationTest verifies correct functionality of -// the workload-health controller for workloads without node associations. In particular -// the following behaviors are being tested -// -// - Creating a workload without associated health statuses or node association will -// be marked passing -// - Changing HealthStatus to a worse health will cause recompuation of the Health -// - Changing HealthStatus to a better health will cause recompuation of the Health -// - Deletion of associated HealthStatus for a nodeless workload will be set back to passing -// - Deletion of the workload will cause deletion of all associated health statuses. -func runV1Alpha1NodelessWorkloadLifecycleIntegrationTest(t *testing.T, c *rtest.Client) { - workloadName := "test-lifecycle-workload" - workloadHealthName := "test-lifecycle-workload-status" - - // create a workload without a node association or health statuses yet - workload := rtest.Resource(catalog.WorkloadV1Alpha1Type, workloadName). - WithData(t, &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "198.18.9.8"}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, - }, - Identity: "test-lifecycle", - }). - Write(t, c) - - // wait for the workload health controller to mark the workload as healthy - c.WaitForStatusCondition(t, workload.Id, - catalog.WorkloadHealthStatusKey, - catalog.WorkloadHealthConditions[pbcatalog.Health_HEALTH_PASSING]) - - // We may not need to iterate through all of these states but its easy - // enough and quick enough to do so. The general rationale is that we - // should move through changing the workloads associated health status - // in this progression. We can prove that moving from better to worse - // health or worse to better both function correctly. 
- healthChanges := []pbcatalog.Health{ - pbcatalog.Health_HEALTH_PASSING, - pbcatalog.Health_HEALTH_WARNING, - pbcatalog.Health_HEALTH_CRITICAL, - pbcatalog.Health_HEALTH_MAINTENANCE, - pbcatalog.Health_HEALTH_CRITICAL, - pbcatalog.Health_HEALTH_WARNING, - pbcatalog.Health_HEALTH_PASSING, - pbcatalog.Health_HEALTH_WARNING, - } - - var workloadHealth *pbresource.Resource - // Iterate through the various desired health statuses, updating - // a HealthStatus resource owned by the workload and waiting for - // reconciliation at each point - for _, health := range healthChanges { - // update the health status - workloadHealth = setHealthStatus(t, c, workload.Id, workloadHealthName, health) - - // wait for reconciliation to kick in and put the workload into - // the right health status. - c.WaitForStatusCondition(t, workload.Id, - catalog.WorkloadHealthStatusKey, - catalog.WorkloadHealthConditions[health]) - } - - // Now delete the health status, things should go back to passing status - c.MustDelete(t, workloadHealth.Id) - - // ensure the workloads health went back to passing - c.WaitForStatusCondition(t, workload.Id, - catalog.WorkloadHealthStatusKey, - catalog.WorkloadHealthConditions[pbcatalog.Health_HEALTH_PASSING]) - - // Reset the workload health. The actual health is irrelevant, we just want it - // to exist to provde that Health Statuses get deleted along with the workload - // when its deleted. - workloadHealth = setHealthStatus(t, c, workload.Id, workloadHealthName, pbcatalog.Health_HEALTH_WARNING) - - // Delete the workload and wait for the HealthStatus to also be deleted - c.MustDelete(t, workload.Id) - c.WaitForDeletion(t, workloadHealth.Id) -} - -// runV1Alpha1NodeAssociatedWorkloadLifecycleIntegrationTest verifies correct functionality of -// the workload-health controller. This test will exercise the following behaviors: -// -// - Associating a workload with a node causes recomputation of the health and takes -// into account the nodes health -// - Modifying the workloads associated node causes health recomputation and takes into -// account the new nodes health -// - Removal of the node association causes recomputation of health and for no node health -// to be taken into account. -// - Creating a workload without associated health statuses but with a node will -// inherit its health from the node. -// - Overall health is computed as the worst health amongst the nodes health and all -// of the workloads associated HealthStatuses -func runV1Alpha1NodeAssociatedWorkloadLifecycleIntegrationTest(t *testing.T, c *rtest.Client) { - workloadName := "test-lifecycle" - workloadHealthName := "test-lifecycle" - nodeName1 := "test-lifecycle-1" - nodeName2 := "test-lifecycle-2" - nodeHealthName1 := "test-lifecycle-node-1" - nodeHealthName2 := "test-lifecycle-node-2" - - // Insert a some nodes to link the workloads to at various points throughout the test - node1 := rtest.Resource(catalog.NodeV1Alpha1Type, nodeName1). - WithData(t, &pbcatalog.Node{ - Addresses: []*pbcatalog.NodeAddress{{Host: "172.17.9.10"}}, - }). - Write(t, c) - node2 := rtest.Resource(catalog.NodeV1Alpha1Type, nodeName2). - WithData(t, &pbcatalog.Node{ - Addresses: []*pbcatalog.NodeAddress{{Host: "172.17.9.11"}}, - }). - Write(t, c) - - // Set some non-passing health statuses for those nodes. Using non-passing will make - // it easy to see that changing a passing workloads node association appropriately - // impacts the overall workload health. 
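The comments above describe overall workload health as the worst of the node's health and every associated HealthStatus. A minimal sketch of that reduction, using a local stand-in Health type rather than the real pbcatalog enum:

```go
// Minimal sketch of the "worst health wins" rule described in the comments
// above. The Health type and its ordering are local stand-ins for illustration.
package healthsketch

type Health int

const (
	HealthPassing Health = iota
	HealthWarning
	HealthCritical
	HealthMaintenance
)

// worstHealth returns the most severe health among the node's health and all
// of the workload's HealthStatuses; with no statuses it is just the node health.
func worstHealth(nodeHealth Health, statuses ...Health) Health {
	worst := nodeHealth
	for _, h := range statuses {
		if h > worst {
			worst = h
		}
	}
	return worst
}
```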
- setHealthStatus(t, c, node1.Id, nodeHealthName1, pbcatalog.Health_HEALTH_CRITICAL) - setHealthStatus(t, c, node2.Id, nodeHealthName2, pbcatalog.Health_HEALTH_WARNING) - - // Add the workload but don't immediately associate with any node. - workload := rtest.Resource(catalog.WorkloadV1Alpha1Type, workloadName). - WithData(t, &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "198.18.9.8"}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, - }, - Identity: "test-lifecycle", - }). - Write(t, c) - - // wait for the workload health controller to mark the workload as healthy - c.WaitForStatusCondition(t, workload.Id, - catalog.WorkloadHealthStatusKey, - catalog.WorkloadHealthConditions[pbcatalog.Health_HEALTH_PASSING]) - - // now modify the workload to associate it with node 1 (currently with CRITICAL health) - workload = rtest.ResourceID(workload.Id). - WithData(t, &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{{Host: "198.18.9.8"}}, - Ports: map[string]*pbcatalog.WorkloadPort{"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}}, - Identity: "test-lifecycle", - // this is the only difference from the previous write - NodeName: node1.Id.Name, - }). - Write(t, c) - - // wait for the workload health controller to mark the workload as critical (due to node 1 having critical health) - c.WaitForStatusCondition(t, workload.Id, - catalog.WorkloadHealthStatusKey, - catalog.WorkloadAndNodeHealthConditions[pbcatalog.Health_HEALTH_PASSING][pbcatalog.Health_HEALTH_CRITICAL]) - - // Now reassociate the workload with node 2. This should cause recalculation of its health into the warning state - workload = rtest.ResourceID(workload.Id). - WithData(t, &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{{Host: "198.18.9.8"}}, - Ports: map[string]*pbcatalog.WorkloadPort{"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}}, - Identity: "test-lifecycle", - // this is the only difference from the previous write - NodeName: node2.Id.Name, - }). - Write(t, c) - - // Wait for the workload health controller to mark the workload as warning (due to node 2 having warning health) - c.WaitForStatusCondition(t, workload.Id, - catalog.WorkloadHealthStatusKey, - catalog.WorkloadAndNodeHealthConditions[pbcatalog.Health_HEALTH_PASSING][pbcatalog.Health_HEALTH_WARNING]) - - // Delete the node, this should cause the health to be recalculated as critical because the node association - // is broken. - c.MustDelete(t, node2.Id) - - // Wait for the workload health controller to mark the workload as critical due to the missing node - c.WaitForStatusCondition(t, workload.Id, - catalog.WorkloadHealthStatusKey, - catalog.WorkloadAndNodeHealthConditions[pbcatalog.Health_HEALTH_PASSING][pbcatalog.Health_HEALTH_CRITICAL]) - - // Now fixup the node association to point at node 1 - workload = rtest.ResourceID(workload.Id). - WithData(t, &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{{Host: "198.18.9.8"}}, - Ports: map[string]*pbcatalog.WorkloadPort{"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}}, - Identity: "test-lifecycle", - // this is the only difference from the previous write - NodeName: node1.Id.Name, - }). 
- Write(t, c) - - // Also set node 1 health down to WARNING - setHealthStatus(t, c, node1.Id, nodeHealthName1, pbcatalog.Health_HEALTH_WARNING) - - // Wait for the workload health controller to mark the workload as warning (due to node 1 having warning health now) - c.WaitForStatusCondition(t, workload.Id, - catalog.WorkloadHealthStatusKey, - catalog.WorkloadAndNodeHealthConditions[pbcatalog.Health_HEALTH_PASSING][pbcatalog.Health_HEALTH_WARNING]) - - // Now add a critical workload health check to ensure that both node and workload health are accounted for. - setHealthStatus(t, c, workload.Id, workloadHealthName, pbcatalog.Health_HEALTH_CRITICAL) - - // Wait for the workload health to be recomputed and put into the critical status. - c.WaitForStatusCondition(t, workload.Id, - catalog.WorkloadHealthStatusKey, - catalog.WorkloadAndNodeHealthConditions[pbcatalog.Health_HEALTH_CRITICAL][pbcatalog.Health_HEALTH_WARNING]) - - // Reset the workloads health to passing. We expect the overall health to go back to warning - setHealthStatus(t, c, workload.Id, workloadHealthName, pbcatalog.Health_HEALTH_PASSING) - c.WaitForStatusCondition(t, workload.Id, - catalog.WorkloadHealthStatusKey, - catalog.WorkloadAndNodeHealthConditions[pbcatalog.Health_HEALTH_PASSING][pbcatalog.Health_HEALTH_WARNING]) - - // Remove the node association and wait for the health to go back to passing - workload = rtest.ResourceID(workload.Id). - WithData(t, &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{{Host: "198.18.9.8"}}, - Ports: map[string]*pbcatalog.WorkloadPort{"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}}, - Identity: "test-lifecycle", - }). - Write(t, c) - c.WaitForStatusCondition(t, workload.Id, - catalog.WorkloadHealthStatusKey, - catalog.WorkloadHealthConditions[pbcatalog.Health_HEALTH_PASSING]) -} - -// RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest verifies the correct functionality of -// the endpoints controller. This test will exercise the following behaviors: -// -// * Services without a selector get marked with status indicating their endpoints are unmanaged -// * Services with a selector get marked with status indicating their endpoints are managed -// * Deleting a service will delete the associated endpoints (regardless of them being managed or not) -// * Moving from managed to unmanaged endpoints will delete the managed endpoints -// * Moving from unmanaged to managed endpoints will overwrite any previous endpoints. -// * A service with a selector that matches no workloads will still have the endpoints object written. -// * Adding ports to a service will recalculate the endpoints -// * Removing ports from a service will recalculate the endpoints -// * Changing the workload will recalculate the endpoints (ports, addresses, or health) -func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pbresource.ResourceServiceClient) { - c := rtest.NewClient(client) - serviceName := "test-lifecycle" - - // Create the service without a selector. We should not see endpoints generated but we should see the - // status updated to note endpoints are not being managed. - service := rtest.Resource(catalog.ServiceV1Alpha1Type, serviceName). - WithData(t, &pbcatalog.Service{ - Ports: []*pbcatalog.ServicePort{{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}}, - }). 
- Write(t, c) - - // Wait to ensure the status is updated accordingly - c.WaitForStatusCondition(t, service.Id, catalog.EndpointsStatusKey, catalog.EndpointsStatusConditionUnmanaged) - - // Verify that no endpoints were created. - endpointsID := rtest.Resource(catalog.ServiceEndpointsV1Alpha1Type, serviceName).ID() - c.RequireResourceNotFound(t, endpointsID) - - // Add some empty endpoints (type validations enforce that they are owned by the service) - rtest.ResourceID(endpointsID). - WithData(t, &pbcatalog.ServiceEndpoints{}). - WithOwner(service.Id). - Write(t, c) - - // Now delete the service and ensure that they are cleaned up. - c.MustDelete(t, service.Id) - c.WaitForDeletion(t, endpointsID) - - // Add some workloads to eventually select by the service - - // api-1 has all ports (http, grpc and mesh). It also has a mixture of Addresses - // that select individual ports and one that selects all ports implicitly - api1 := rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-1"). - WithData(t, &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "127.0.0.1"}, - {Host: "::1", Ports: []string{"grpc"}}, - {Host: "127.0.0.2", Ports: []string{"http"}}, - {Host: "172.17.1.1", Ports: []string{"mesh"}}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, - "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, - "grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, - }, - Identity: "api", - }). - Write(t, c) - - // api-2 has only grpc and mesh ports. It also has a mixture of Addresses that - // select individual ports and one that selects all ports implicitly - api2 := rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-2"). - WithData(t, &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "127.0.0.1"}, - {Host: "::1", Ports: []string{"grpc"}}, - {Host: "172.17.1.2", Ports: []string{"mesh"}}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, - "grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, - }, - Identity: "api", - }). - Write(t, c) - - // api-3 has the mesh and HTTP ports. It also has a mixture of Addresses that - // select individual ports and one that selects all ports. - api3 := rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-3"). - WithData(t, &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "127.0.0.1"}, - {Host: "172.17.1.3", Ports: []string{"mesh"}}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, - "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, - }, - Identity: "api", - }). - Write(t, c) - - // Now create a service with unmanaged endpoints again - service = rtest.Resource(catalog.ServiceV1Alpha1Type, serviceName). - WithData(t, &pbcatalog.Service{ - Ports: []*pbcatalog.ServicePort{{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}}, - }). - Write(t, c) - - // Inject the endpoints resource. We want to prove that transition from unmanaged to - // managed endpoints results in overwriting of the old endpoints - rtest.ResourceID(endpointsID). 
- WithData(t, &pbcatalog.ServiceEndpoints{ - Endpoints: []*pbcatalog.Endpoint{ - { - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "198.18.1.1", External: true}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "http": {Port: 443, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, - }, - HealthStatus: pbcatalog.Health_HEALTH_PASSING, - }, - }, - }). - WithOwner(service.Id). - Write(t, c) - - // Wait to ensure the status is updated accordingly - c.WaitForStatusCondition(t, service.Id, catalog.EndpointsStatusKey, catalog.EndpointsStatusConditionUnmanaged) - - // Now move the service to having managed endpoints - service = rtest.ResourceID(service.Id). - WithData(t, &pbcatalog.Service{ - Workloads: &pbcatalog.WorkloadSelector{Names: []string{"bar"}}, - Ports: []*pbcatalog.ServicePort{{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}}, - }). - Write(t, c) - - // Verify that this status is updated to show this service as having managed endpoints - c.WaitForStatusCondition(t, service.Id, catalog.EndpointsStatusKey, catalog.EndpointsStatusConditionManaged) - - // Verify that the service endpoints are created. In this case they will be empty - verifyServiceEndpoints(t, c, endpointsID, &pbcatalog.ServiceEndpoints{}) - - // Rewrite the service to select the API workloads - just select the singular port for now - service = rtest.ResourceID(service.Id). - WithData(t, &pbcatalog.Service{ - Workloads: &pbcatalog.WorkloadSelector{Prefixes: []string{"api-"}}, - Ports: []*pbcatalog.ServicePort{{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}}, - }). - Write(t, c) - - // Wait for the status to be updated. The condition itself will remain unchanged but we are waiting for - // the generations to match to know that the endpoints would have been regenerated - c.WaitForStatusCondition(t, service.Id, catalog.EndpointsStatusKey, catalog.EndpointsStatusConditionManaged) - - // ensure that api-1 and api-3 are selected but api-2 is excluded due to not having the desired port - verifyServiceEndpoints(t, c, endpointsID, &pbcatalog.ServiceEndpoints{ - Endpoints: []*pbcatalog.Endpoint{ - { - TargetRef: api1.Id, - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "127.0.0.1", Ports: []string{"http"}}, - {Host: "127.0.0.2", Ports: []string{"http"}}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, - }, - HealthStatus: pbcatalog.Health_HEALTH_PASSING, - }, - { - TargetRef: api3.Id, - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "127.0.0.1", Ports: []string{"http"}}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, - }, - HealthStatus: pbcatalog.Health_HEALTH_PASSING, - }, - }, - }) - - // Rewrite the service to select the API workloads - changing from selecting the HTTP port to the gRPC port - service = rtest.ResourceID(service.Id). - WithData(t, &pbcatalog.Service{ - Workloads: &pbcatalog.WorkloadSelector{Prefixes: []string{"api-"}}, - Ports: []*pbcatalog.ServicePort{{TargetPort: "grpc", Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}}, - }). - Write(t, c) - - // Wait for the status to be updated. 
The condition itself will remain unchanged but we are waiting for - // the generations to match to know that the endpoints would have been regenerated - c.WaitForStatusCondition(t, service.Id, catalog.EndpointsStatusKey, catalog.EndpointsStatusConditionManaged) - - // Check that the endpoints were generated as expected - verifyServiceEndpoints(t, c, endpointsID, &pbcatalog.ServiceEndpoints{ - Endpoints: []*pbcatalog.Endpoint{ - { - TargetRef: api1.Id, - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "127.0.0.1", Ports: []string{"grpc"}}, - {Host: "::1", Ports: []string{"grpc"}}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, - }, - HealthStatus: pbcatalog.Health_HEALTH_PASSING, - }, - { - TargetRef: api2.Id, - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "127.0.0.1", Ports: []string{"grpc"}}, - {Host: "::1", Ports: []string{"grpc"}}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, - }, - HealthStatus: pbcatalog.Health_HEALTH_PASSING, - }, - }, - }) - - // Update the service to change the ports used. This should result in the workload being removed - // from the endpoints - rtest.ResourceID(api2.Id). - WithData(t, &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "127.0.0.1"}, - {Host: "::1", Ports: []string{"http"}}, - {Host: "172.17.1.2", Ports: []string{"mesh"}}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, - "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, - }, - Identity: "api", - }). - Write(t, c) - - // Verify that api-2 was removed from the service endpoints as it no longer has a grpc port - verifyServiceEndpoints(t, c, endpointsID, &pbcatalog.ServiceEndpoints{ - Endpoints: []*pbcatalog.Endpoint{ - { - TargetRef: api1.Id, - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "127.0.0.1", Ports: []string{"grpc"}}, - {Host: "::1", Ports: []string{"grpc"}}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, - }, - HealthStatus: pbcatalog.Health_HEALTH_PASSING, - }, - }, - }) - - // Remove the ::1 address from workload api1 which should result in recomputing endpoints - rtest.ResourceID(api1.Id). - WithData(t, &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "127.0.0.1"}, - {Host: "172.17.1.1", Ports: []string{"mesh"}}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, - "grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, - }, - Identity: "api", - }). 
- Write(t, c) - - // Verify that api-1 had its addresses modified appropriately - verifyServiceEndpoints(t, c, endpointsID, &pbcatalog.ServiceEndpoints{ - Endpoints: []*pbcatalog.Endpoint{ - { - TargetRef: api1.Id, - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "127.0.0.1", Ports: []string{"grpc"}}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, - }, - HealthStatus: pbcatalog.Health_HEALTH_PASSING, - }, - }, - }) - - // Add a failing health status to the api1 workload to force recomputation of endpoints - setHealthStatus(t, c, api1.Id, "api-failed", pbcatalog.Health_HEALTH_CRITICAL) - - // Verify that api-1 within the endpoints has the expected health - verifyServiceEndpoints(t, c, endpointsID, &pbcatalog.ServiceEndpoints{ - Endpoints: []*pbcatalog.Endpoint{ - { - TargetRef: api1.Id, - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "127.0.0.1", Ports: []string{"grpc"}}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, - }, - HealthStatus: pbcatalog.Health_HEALTH_CRITICAL, - }, - }, - }) - - // Move the service to being unmanaged. We should see the ServiceEndpoints being removed. - service = rtest.ResourceID(service.Id). - WithData(t, &pbcatalog.Service{ - Ports: []*pbcatalog.ServicePort{{TargetPort: "grpc", Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}}, - }). - Write(t, c) - - // Wait for the endpoints controller to inform us that the endpoints are not being managed - c.WaitForStatusCondition(t, service.Id, catalog.EndpointsStatusKey, catalog.EndpointsStatusConditionUnmanaged) - // Ensure that the managed endpoints were deleted - c.WaitForDeletion(t, endpointsID) - - // Put the service back into managed mode. - service = rtest.ResourceID(service.Id). - WithData(t, &pbcatalog.Service{ - Workloads: &pbcatalog.WorkloadSelector{Prefixes: []string{"api-"}}, - Ports: []*pbcatalog.ServicePort{{TargetPort: "grpc", Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}}, - }). - Write(t, c) - - // Wait for the service endpoints to be regenerated - c.WaitForStatusCondition(t, service.Id, catalog.EndpointsStatusKey, catalog.EndpointsStatusConditionManaged) - c.RequireResourceExists(t, endpointsID) - - // Now delete the service and ensure that the endpoints eventually are deleted as well - c.MustDelete(t, service.Id) - c.WaitForDeletion(t, endpointsID) - -} - -func setHealthStatus(t *testing.T, client *rtest.Client, owner *pbresource.ID, name string, health pbcatalog.Health) *pbresource.Resource { - return rtest.Resource(catalog.HealthStatusV1Alpha1Type, name). - WithData(t, &pbcatalog.HealthStatus{ - Type: "synthetic", - Status: health, - }). - WithOwner(owner). 
- Write(t, client) -} diff --git a/internal/catalog/exports.go b/internal/catalog/exports.go index e0373bf7079bc..61247091be1cf 100644 --- a/internal/catalog/exports.go +++ b/internal/catalog/exports.go @@ -5,9 +5,6 @@ package catalog import ( "github.com/hashicorp/consul/internal/catalog/internal/controllers" - "github.com/hashicorp/consul/internal/catalog/internal/controllers/endpoints" - "github.com/hashicorp/consul/internal/catalog/internal/controllers/nodehealth" - "github.com/hashicorp/consul/internal/catalog/internal/controllers/workloadhealth" "github.com/hashicorp/consul/internal/catalog/internal/mappers/nodemapper" "github.com/hashicorp/consul/internal/catalog/internal/mappers/selectiontracker" "github.com/hashicorp/consul/internal/catalog/internal/types" @@ -43,21 +40,6 @@ var ( HealthStatusV1Alpha1Type = types.HealthStatusV1Alpha1Type HealthChecksV1Alpha1Type = types.HealthChecksV1Alpha1Type DNSPolicyV1Alpha1Type = types.DNSPolicyV1Alpha1Type - - // Controller Statuses - NodeHealthStatusKey = nodehealth.StatusKey - NodeHealthStatusConditionHealthy = nodehealth.StatusConditionHealthy - NodeHealthConditions = nodehealth.Conditions - - WorkloadHealthStatusKey = workloadhealth.StatusKey - WorkloadHealthStatusConditionHealthy = workloadhealth.StatusConditionHealthy - WorkloadHealthConditions = workloadhealth.WorkloadConditions - WorkloadAndNodeHealthConditions = workloadhealth.NodeAndWorkloadConditions - - EndpointsStatusKey = endpoints.StatusKey - EndpointsStatusConditionEndpointsManaged = endpoints.StatusConditionEndpointsManaged - EndpointsStatusConditionManaged = endpoints.ConditionManaged - EndpointsStatusConditionUnmanaged = endpoints.ConditionUnmanaged ) // RegisterTypes adds all resource types within the "catalog" API group diff --git a/internal/resource/authz_oss.go b/internal/resource/authz_oss.go deleted file mode 100644 index 014318f22897d..0000000000000 --- a/internal/resource/authz_oss.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -//go:build !consulent -// +build !consulent - -package resource - -import ( - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -// AuthorizerContext builds an ACL AuthorizerContext for the given tenancy. 
-func AuthorizerContext(t *pbresource.Tenancy) *acl.AuthorizerContext { - return &acl.AuthorizerContext{Peer: t.PeerName} -} diff --git a/internal/resource/resourcetest/builder.go b/internal/resource/resourcetest/builder.go index 749ff4fea27e4..7355f38824ec1 100644 --- a/internal/resource/resourcetest/builder.go +++ b/internal/resource/resourcetest/builder.go @@ -1,16 +1,11 @@ package resourcetest import ( - "strings" + "context" - "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/sdk/testutil" - "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/oklog/ulid/v2" "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/known/anypb" @@ -42,14 +37,6 @@ func Resource(rtype *pbresource.Type, name string) *resourceBuilder { } } -func ResourceID(id *pbresource.ID) *resourceBuilder { - return &resourceBuilder{ - resource: &pbresource.Resource{ - Id: id, - }, - } -} - func (b *resourceBuilder) WithData(t T, data protoreflect.ProtoMessage) *resourceBuilder { t.Helper() @@ -121,37 +108,22 @@ func (b *resourceBuilder) ID() *pbresource.ID { func (b *resourceBuilder) Write(t T, client pbresource.ResourceServiceClient) *pbresource.Resource { t.Helper() - ctx := testutil.TestContext(t) - res := b.resource - var rsp *pbresource.WriteResponse - var err error - - // Retry any writes where the error is a UID mismatch and the UID was not specified. This is indicative - // of using a follower to rewrite an object who is not perfectly in-sync with the leader. - retry.Run(t, func(r *retry.R) { - rsp, err = client.Write(ctx, &pbresource.WriteRequest{ - Resource: res, - }) - - if err == nil || res.Id.Uid != "" || status.Code(err) != codes.FailedPrecondition { - return - } - - if strings.Contains(err.Error(), storage.ErrWrongUid.Error()) { - r.Fatalf("resource write failed due to uid mismatch - most likely a transient issue when talking to a non-leader") - } else { - // other errors are unexpected and should cause an immediate failure - r.Stop(err) - } + rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{ + Resource: res, }) + require.NoError(t, err) + if !b.dontCleanup { - id := proto.Clone(rsp.Resource.Id).(*pbresource.ID) - id.Uid = "" - t.Cleanup(func() { - NewClient(client).MustDelete(t, id) + cleaner, ok := t.(CleanupT) + require.True(t, ok, "T does not implement a Cleanup method and cannot be used with automatic resource cleanup") + cleaner.Cleanup(func() { + _, err := client.Delete(context.Background(), &pbresource.DeleteRequest{ + Id: rsp.Resource.Id, + }) + require.NoError(t, err) }) } @@ -164,7 +136,7 @@ func (b *resourceBuilder) Write(t T, client pbresource.ResourceServiceClient) *p ObservedGeneration: rsp.Resource.Generation, Conditions: original.Conditions, } - _, err := client.WriteStatus(ctx, &pbresource.WriteStatusRequest{ + _, err := client.WriteStatus(context.Background(), &pbresource.WriteStatusRequest{ Id: rsp.Resource.Id, Key: key, Status: status, @@ -172,7 +144,7 @@ func (b *resourceBuilder) Write(t T, client pbresource.ResourceServiceClient) *p require.NoError(t, err) } - readResp, err := client.Read(ctx, &pbresource.ReadRequest{ + readResp, err := client.Read(context.Background(), &pbresource.ReadRequest{ Id: rsp.Resource.Id, }) diff --git a/internal/resource/resourcetest/client.go 
b/internal/resource/resourcetest/client.go index 5047406d0585d..dab5b03c3adbe 100644 --- a/internal/resource/resourcetest/client.go +++ b/internal/resource/resourcetest/client.go @@ -1,13 +1,12 @@ package resourcetest import ( - "fmt" + "context" "math/rand" "time" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/require" "golang.org/x/exp/slices" @@ -36,14 +35,11 @@ func (client *Client) SetRetryerConfig(timeout time.Duration, wait time.Duration } func (client *Client) retry(t T, fn func(r *retry.R)) { - t.Helper() retryer := &retry.Timer{Timeout: client.timeout, Wait: client.wait} retry.RunWith(retryer, t, fn) } func (client *Client) PublishResources(t T, resources []*pbresource.Resource) { - ctx := testutil.TestContext(t) - // Randomize the order of insertion. Generally insertion order shouldn't matter as the // controllers should eventually converge on the desired state. The exception to this // is that you cannot insert resources with owner refs before the resource they are @@ -78,17 +74,12 @@ func (client *Client) PublishResources(t T, resources []*pbresource.Resource) { } t.Logf("Writing resource %s with type %s", res.Id.Name, resource.ToGVK(res.Id.Type)) - rsp, err := client.Write(ctx, &pbresource.WriteRequest{ + _, err := client.Write(context.Background(), &pbresource.WriteRequest{ Resource: res, }) require.NoError(t, err) - id := rsp.Resource.Id - t.Cleanup(func() { - client.MustDelete(t, id) - }) - - // track the number of resources published + // track the number o published += 1 written = append(written, res.Id) } @@ -110,7 +101,7 @@ func (client *Client) PublishResources(t T, resources []*pbresource.Resource) { func (client *Client) RequireResourceNotFound(t T, id *pbresource.ID) { t.Helper() - rsp, err := client.Read(testutil.TestContext(t), &pbresource.ReadRequest{Id: id}) + rsp, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: id}) require.Error(t, err) require.Equal(t, codes.NotFound, status.Code(err)) require.Nil(t, rsp) @@ -119,7 +110,7 @@ func (client *Client) RequireResourceNotFound(t T, id *pbresource.ID) { func (client *Client) RequireResourceExists(t T, id *pbresource.ID) *pbresource.Resource { t.Helper() - rsp, err := client.Read(testutil.TestContext(t), &pbresource.ReadRequest{Id: id}) + rsp, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: id}) require.NoError(t, err, "error reading %s with type %s", id.Name, resource.ToGVK(id.Type)) require.NotNil(t, rsp) return rsp.Resource @@ -190,7 +181,7 @@ func (client *Client) WaitForStatusCondition(t T, id *pbresource.ID, statusKey s var res *pbresource.Resource client.retry(t, func(r *retry.R) { - res = client.RequireStatusConditionForCurrentGen(r, id, statusKey, condition) + res = client.RequireStatusConditionForCurrentGen(t, id, statusKey, condition) }) return res @@ -218,14 +209,6 @@ func (client *Client) WaitForResourceState(t T, id *pbresource.ID, verify func(T return res } -func (client *Client) WaitForDeletion(t T, id *pbresource.ID) { - t.Helper() - - client.retry(t, func(r *retry.R) { - client.RequireResourceNotFound(r, id) - }) -} - // ResolveResourceID will read the specified resource and returns its full ID. // This is mainly useful to get the ID with the Uid filled out. 
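The removed WaitForDeletion and MustDelete helpers above follow a common polling pattern from the SDK retry package. A minimal sketch of the same idea, with an illustrative waitForDeletion name and arbitrary timeout values:

```go
// Sketch: retry a read until the resource service reports NotFound, as the
// removed WaitForDeletion helper did.
package retrysketch

import (
	"context"
	"testing"
	"time"

	"github.com/hashicorp/consul/proto-public/pbresource"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func waitForDeletion(t *testing.T, client pbresource.ResourceServiceClient, id *pbresource.ID) {
	t.Helper()

	retryer := &retry.Timer{Timeout: 10 * time.Second, Wait: 100 * time.Millisecond}
	retry.RunWith(retryer, t, func(r *retry.R) {
		_, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: id})
		if status.Code(err) != codes.NotFound {
			r.Fatalf("resource %q still exists (err=%v)", id.Name, err)
		}
	})
}
```

retry.RunWith keeps invoking the function until an attempt stops failing or the timer expires, so the NotFound check simply fails the attempt while the resource still exists.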
func (client *Client) ResolveResourceID(t T, id *pbresource.ID) *pbresource.ID { @@ -233,24 +216,3 @@ func (client *Client) ResolveResourceID(t T, id *pbresource.ID) *pbresource.ID { return client.RequireResourceExists(t, id).Id } - -func (client *Client) MustDelete(t T, id *pbresource.ID) { - t.Helper() - ctx := testutil.TestContext(t) - - client.retry(t, func(r *retry.R) { - _, err := client.Delete(ctx, &pbresource.DeleteRequest{Id: id}) - if status.Code(err) == codes.NotFound { - return - } - - // codes.Aborted indicates a CAS failure and that the delete request should - // be retried. Anything else should be considered an unrecoverable error. - if err != nil && status.Code(err) != codes.Aborted { - r.Stop(fmt.Errorf("failed to delete the resource: %w", err)) - return - } - - require.NoError(r, err) - }) -} diff --git a/internal/resource/resourcetest/testing.go b/internal/resource/resourcetest/testing.go index 1c774082b3692..d02b70da9d039 100644 --- a/internal/resource/resourcetest/testing.go +++ b/internal/resource/resourcetest/testing.go @@ -9,5 +9,9 @@ type T interface { Errorf(format string, args ...interface{}) Fatalf(format string, args ...interface{}) FailNow() +} + +type CleanupT interface { + T Cleanup(func()) } diff --git a/proto-public/pbmesh/v1alpha1/connection.pb.go b/proto-public/pbmesh/v1alpha1/connection.pb.go index 65fa3ba329dd5..5edc0ee76d1d7 100644 --- a/proto-public/pbmesh/v1alpha1/connection.pb.go +++ b/proto-public/pbmesh/v1alpha1/connection.pb.go @@ -23,50 +23,50 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -type BalanceConnections int32 +type BalanceInboundConnections int32 const ( // buf:lint:ignore ENUM_ZERO_VALUE_SUFFIX - BalanceConnections_BALANCE_CONNECTIONS_DEFAULT BalanceConnections = 0 - BalanceConnections_BALANCE_CONNECTIONS_EXACT BalanceConnections = 1 + BalanceInboundConnections_BALANCE_INBOUND_CONNECTIONS_DEFAULT BalanceInboundConnections = 0 + BalanceInboundConnections_BALANCE_INBOUND_CONNECTIONS_EXACT BalanceInboundConnections = 1 ) -// Enum value maps for BalanceConnections. +// Enum value maps for BalanceInboundConnections. 
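To show the rename in context, a small sketch constructing an InboundConnectionsConfig with the new BalanceInboundConnections enum; the values are arbitrary examples.

```go
// Sketch using the renamed enum and field from the generated types above.
package meshsketch

import (
	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
)

func exampleInboundConnections() *pbmesh.InboundConnectionsConfig {
	return &pbmesh.InboundConnectionsConfig{
		MaxInboundConnections: 1024,
		// Previously BalanceConnections_BALANCE_CONNECTIONS_EXACT; renamed in
		// the generated code shown in this hunk.
		BalanceInboundConnections: pbmesh.BalanceInboundConnections_BALANCE_INBOUND_CONNECTIONS_EXACT,
	}
}
```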
var ( - BalanceConnections_name = map[int32]string{ - 0: "BALANCE_CONNECTIONS_DEFAULT", - 1: "BALANCE_CONNECTIONS_EXACT", + BalanceInboundConnections_name = map[int32]string{ + 0: "BALANCE_INBOUND_CONNECTIONS_DEFAULT", + 1: "BALANCE_INBOUND_CONNECTIONS_EXACT", } - BalanceConnections_value = map[string]int32{ - "BALANCE_CONNECTIONS_DEFAULT": 0, - "BALANCE_CONNECTIONS_EXACT": 1, + BalanceInboundConnections_value = map[string]int32{ + "BALANCE_INBOUND_CONNECTIONS_DEFAULT": 0, + "BALANCE_INBOUND_CONNECTIONS_EXACT": 1, } ) -func (x BalanceConnections) Enum() *BalanceConnections { - p := new(BalanceConnections) +func (x BalanceInboundConnections) Enum() *BalanceInboundConnections { + p := new(BalanceInboundConnections) *p = x return p } -func (x BalanceConnections) String() string { +func (x BalanceInboundConnections) String() string { return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (BalanceConnections) Descriptor() protoreflect.EnumDescriptor { +func (BalanceInboundConnections) Descriptor() protoreflect.EnumDescriptor { return file_pbmesh_v1alpha1_connection_proto_enumTypes[0].Descriptor() } -func (BalanceConnections) Type() protoreflect.EnumType { +func (BalanceInboundConnections) Type() protoreflect.EnumType { return &file_pbmesh_v1alpha1_connection_proto_enumTypes[0] } -func (x BalanceConnections) Number() protoreflect.EnumNumber { +func (x BalanceInboundConnections) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use BalanceConnections.Descriptor instead. -func (BalanceConnections) EnumDescriptor() ([]byte, []int) { +// Deprecated: Use BalanceInboundConnections.Descriptor instead. +func (BalanceInboundConnections) EnumDescriptor() ([]byte, []int) { return file_pbmesh_v1alpha1_connection_proto_rawDescGZIP(), []int{0} } @@ -130,8 +130,8 @@ type InboundConnectionsConfig struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - MaxInboundConnections uint64 `protobuf:"varint,12,opt,name=max_inbound_connections,json=maxInboundConnections,proto3" json:"max_inbound_connections,omitempty"` - BalanceInboundConnections BalanceConnections `protobuf:"varint,13,opt,name=balance_inbound_connections,json=balanceInboundConnections,proto3,enum=hashicorp.consul.mesh.v1alpha1.BalanceConnections" json:"balance_inbound_connections,omitempty"` + MaxInboundConnections uint64 `protobuf:"varint,12,opt,name=max_inbound_connections,json=maxInboundConnections,proto3" json:"max_inbound_connections,omitempty"` + BalanceInboundConnections BalanceInboundConnections `protobuf:"varint,13,opt,name=balance_inbound_connections,json=balanceInboundConnections,proto3,enum=hashicorp.consul.mesh.v1alpha1.BalanceInboundConnections" json:"balance_inbound_connections,omitempty"` } func (x *InboundConnectionsConfig) Reset() { @@ -173,11 +173,11 @@ func (x *InboundConnectionsConfig) GetMaxInboundConnections() uint64 { return 0 } -func (x *InboundConnectionsConfig) GetBalanceInboundConnections() BalanceConnections { +func (x *InboundConnectionsConfig) GetBalanceInboundConnections() BalanceInboundConnections { if x != nil { return x.BalanceInboundConnections } - return BalanceConnections_BALANCE_CONNECTIONS_DEFAULT + return BalanceInboundConnections_BALANCE_INBOUND_CONNECTIONS_DEFAULT } var File_pbmesh_v1alpha1_connection_proto protoreflect.FileDescriptor @@ -194,43 +194,45 @@ var file_pbmesh_v1alpha1_connection_proto_rawDesc = []byte{ 0x75, 0x74, 0x4d, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 
0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x4d, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x18, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, + 0x4d, 0x73, 0x22, 0xcd, 0x01, 0x0a, 0x18, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x72, 0x0a, 0x1b, 0x62, 0x61, 0x6c, 0x61, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x79, 0x0a, 0x1b, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x68, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x19, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2a, 0x54, 0x0a, 0x12, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x1f, 0x0a, 0x1b, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, - 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, - 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x43, 0x4f, - 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, - 0x01, 0x42, 0x97, 0x02, 0x0a, 0x22, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x45, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, - 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, - 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x6d, 0x65, 0x73, 0x68, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x4d, 0xaa, 0x02, 0x1e, 0x48, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x4d, 0x65, 0x73, 0x68, - 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x1e, 0x48, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, - 0x68, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x2a, 0x48, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 
0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, - 0x73, 0x68, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x4d, 0x65, 0x73, - 0x68, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x19, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2a, 0x6b, 0x0a, 0x19, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x49, 0x6e, 0x62, + 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x27, 0x0a, 0x23, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x49, 0x4e, 0x42, 0x4f, 0x55, + 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x44, + 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x25, 0x0a, 0x21, 0x42, 0x41, 0x4c, 0x41, + 0x4e, 0x43, 0x45, 0x5f, 0x49, 0x4e, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, + 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x01, 0x42, + 0x97, 0x02, 0x0a, 0x22, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, + 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x3b, 0x6d, 0x65, 0x73, 0x68, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0xa2, 0x02, 0x03, 0x48, 0x43, 0x4d, 0xaa, 0x02, 0x1e, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x2e, 0x56, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x1e, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, 0x5c, + 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x2a, 0x48, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, + 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x4d, 0x65, 0x73, 0x68, 0x3a, + 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -248,12 +250,12 @@ func file_pbmesh_v1alpha1_connection_proto_rawDescGZIP() []byte { var file_pbmesh_v1alpha1_connection_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_pbmesh_v1alpha1_connection_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_pbmesh_v1alpha1_connection_proto_goTypes = []interface{}{ - 
(BalanceConnections)(0), // 0: hashicorp.consul.mesh.v1alpha1.BalanceConnections + (BalanceInboundConnections)(0), // 0: hashicorp.consul.mesh.v1alpha1.BalanceInboundConnections (*ConnectionConfig)(nil), // 1: hashicorp.consul.mesh.v1alpha1.ConnectionConfig (*InboundConnectionsConfig)(nil), // 2: hashicorp.consul.mesh.v1alpha1.InboundConnectionsConfig } var file_pbmesh_v1alpha1_connection_proto_depIdxs = []int32{ - 0, // 0: hashicorp.consul.mesh.v1alpha1.InboundConnectionsConfig.balance_inbound_connections:type_name -> hashicorp.consul.mesh.v1alpha1.BalanceConnections + 0, // 0: hashicorp.consul.mesh.v1alpha1.InboundConnectionsConfig.balance_inbound_connections:type_name -> hashicorp.consul.mesh.v1alpha1.BalanceInboundConnections 1, // [1:1] is the sub-list for method output_type 1, // [1:1] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name diff --git a/proto-public/pbmesh/v1alpha1/connection.proto b/proto-public/pbmesh/v1alpha1/connection.proto index 1d054e503b30d..8a1f4f0e7c575 100644 --- a/proto-public/pbmesh/v1alpha1/connection.proto +++ b/proto-public/pbmesh/v1alpha1/connection.proto @@ -12,11 +12,11 @@ message ConnectionConfig { message InboundConnectionsConfig { uint64 max_inbound_connections = 12; - BalanceConnections balance_inbound_connections = 13; + BalanceInboundConnections balance_inbound_connections = 13; } -enum BalanceConnections { +enum BalanceInboundConnections { // buf:lint:ignore ENUM_ZERO_VALUE_SUFFIX - BALANCE_CONNECTIONS_DEFAULT = 0; - BALANCE_CONNECTIONS_EXACT = 1; + BALANCE_INBOUND_CONNECTIONS_DEFAULT = 0; + BALANCE_INBOUND_CONNECTIONS_EXACT = 1; } diff --git a/proto-public/pbmesh/v1alpha1/upstreams.pb.go b/proto-public/pbmesh/v1alpha1/upstreams.pb.go index 93b151a3366d3..575fe43006e46 100644 --- a/proto-public/pbmesh/v1alpha1/upstreams.pb.go +++ b/proto-public/pbmesh/v1alpha1/upstreams.pb.go @@ -432,11 +432,11 @@ type UpstreamConfig struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ConnectTimeoutMs uint64 `protobuf:"varint,2,opt,name=connect_timeout_ms,json=connectTimeoutMs,proto3" json:"connect_timeout_ms,omitempty"` - Limits *UpstreamLimits `protobuf:"bytes,3,opt,name=limits,proto3" json:"limits,omitempty"` - PassiveHealthCheck *PassiveHealthCheck `protobuf:"bytes,4,opt,name=passive_health_check,json=passiveHealthCheck,proto3" json:"passive_health_check,omitempty"` - BalanceOutboundConnections BalanceConnections `protobuf:"varint,5,opt,name=balance_outbound_connections,json=balanceOutboundConnections,proto3,enum=hashicorp.consul.mesh.v1alpha1.BalanceConnections" json:"balance_outbound_connections,omitempty"` - MeshGatewayMode MeshGatewayMode `protobuf:"varint,6,opt,name=mesh_gateway_mode,json=meshGatewayMode,proto3,enum=hashicorp.consul.mesh.v1alpha1.MeshGatewayMode" json:"mesh_gateway_mode,omitempty"` + ConnectTimeoutMs uint64 `protobuf:"varint,2,opt,name=connect_timeout_ms,json=connectTimeoutMs,proto3" json:"connect_timeout_ms,omitempty"` + Limits *UpstreamLimits `protobuf:"bytes,3,opt,name=limits,proto3" json:"limits,omitempty"` + PassiveHealthCheck *PassiveHealthCheck `protobuf:"bytes,4,opt,name=passive_health_check,json=passiveHealthCheck,proto3" json:"passive_health_check,omitempty"` + BalanceInboundConnections BalanceInboundConnections `protobuf:"varint,5,opt,name=balance_inbound_connections,json=balanceInboundConnections,proto3,enum=hashicorp.consul.mesh.v1alpha1.BalanceInboundConnections" json:"balance_inbound_connections,omitempty"` + MeshGatewayMode MeshGatewayMode 
`protobuf:"varint,6,opt,name=mesh_gateway_mode,json=meshGatewayMode,proto3,enum=hashicorp.consul.mesh.v1alpha1.MeshGatewayMode" json:"mesh_gateway_mode,omitempty"` } func (x *UpstreamConfig) Reset() { @@ -492,11 +492,11 @@ func (x *UpstreamConfig) GetPassiveHealthCheck() *PassiveHealthCheck { return nil } -func (x *UpstreamConfig) GetBalanceOutboundConnections() BalanceConnections { +func (x *UpstreamConfig) GetBalanceInboundConnections() BalanceInboundConnections { if x != nil { - return x.BalanceOutboundConnections + return x.BalanceInboundConnections } - return BalanceConnections_BALANCE_CONNECTIONS_DEFAULT + return BalanceInboundConnections_BALANCE_INBOUND_CONNECTIONS_DEFAULT } func (x *UpstreamConfig) GetMeshGatewayMode() MeshGatewayMode { @@ -740,7 +740,7 @@ var file_pbmesh_v1alpha1_upstreams_proto_rawDesc = []byte{ 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, 0x0a, 0x0b, - 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x22, 0xbf, 0x03, 0x0a, 0x0e, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x22, 0xc4, 0x03, 0x0a, 0x0e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x6e, @@ -755,60 +755,60 @@ var file_pbmesh_v1alpha1_upstreams_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x12, 0x70, 0x61, 0x73, 0x73, 0x69, 0x76, 0x65, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x74, 0x0a, 0x1c, 0x62, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x32, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x79, 0x0a, 0x1b, 0x62, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x39, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x19, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5b, 0x0a, 0x11, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x2f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 
0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x1a, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x75, 0x74, - 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x5b, 0x0a, 0x11, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, - 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x68, 0x61, + 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x4d, 0x6f, 0x64, + 0x65, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x4d, 0x6f, + 0x64, 0x65, 0x22, 0xa3, 0x01, 0x0a, 0x0e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, + 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, + 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x6d, 0x61, + 0x78, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, + 0x12, 0x36, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x12, 0x50, 0x61, 0x73, + 0x73, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, + 0x35, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x61, + 0x78, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x65, 0x6e, 0x66, + 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x35, 0x78, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x65, 0x6e, + 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x76, 0x65, 0x35, 0x78, 0x78, 0x42, 0x96, 0x02, 0x0a, 0x22, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, - 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x65, 0x73, - 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0f, 0x6d, 0x65, - 0x73, 0x68, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0xa3, 0x01, - 0x0a, 0x0e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x43, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 
0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x6d, 0x61, 0x78, - 0x5f, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x50, 0x65, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x6d, - 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x6d, 0x61, - 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x12, 0x50, 0x61, 0x73, 0x73, 0x69, 0x76, 0x65, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, - 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x46, 0x61, 0x69, 0x6c, - 0x75, 0x72, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x35, 0x78, - 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, - 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x35, 0x78, 0x78, - 0x42, 0x96, 0x02, 0x0a, 0x22, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, - 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, - 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x3b, 0x6d, 0x65, 0x73, 0x68, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0xa2, 0x02, 0x03, 0x48, 0x43, 0x4d, 0xaa, 0x02, 0x1e, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, - 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x2e, 0x56, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x1e, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, 0x5c, - 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x2a, 0x48, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, - 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, - 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x4d, 0x65, 0x73, 0x68, 0x3a, - 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x65, 0x73, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 
0x68, 0x61, 0x31, 0x42, 0x0e, 0x55, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x45, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x6d, 0x65, 0x73, 0x68, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x4d, 0xaa, 0x02, 0x1e, 0x48, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x4d, + 0x65, 0x73, 0x68, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x1e, 0x48, + 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, + 0x4d, 0x65, 0x73, 0x68, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x2a, + 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x5c, 0x4d, 0x65, 0x73, 0x68, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, + 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x21, 0x48, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, + 0x4d, 0x65, 0x73, 0x68, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -835,7 +835,7 @@ var file_pbmesh_v1alpha1_upstreams_proto_goTypes = []interface{}{ (*PassiveHealthCheck)(nil), // 7: hashicorp.consul.mesh.v1alpha1.PassiveHealthCheck (*v1alpha1.WorkloadSelector)(nil), // 8: hashicorp.consul.catalog.v1alpha1.WorkloadSelector (*pbresource.ID)(nil), // 9: hashicorp.consul.resource.ID - (BalanceConnections)(0), // 10: hashicorp.consul.mesh.v1alpha1.BalanceConnections + (BalanceInboundConnections)(0), // 10: hashicorp.consul.mesh.v1alpha1.BalanceInboundConnections (MeshGatewayMode)(0), // 11: hashicorp.consul.mesh.v1alpha1.MeshGatewayMode (*durationpb.Duration)(nil), // 12: google.protobuf.Duration } @@ -853,7 +853,7 @@ var file_pbmesh_v1alpha1_upstreams_proto_depIdxs = []int32{ 5, // 10: hashicorp.consul.mesh.v1alpha1.PreparedQueryUpstream.upstream_config:type_name -> hashicorp.consul.mesh.v1alpha1.UpstreamConfig 6, // 11: hashicorp.consul.mesh.v1alpha1.UpstreamConfig.limits:type_name -> hashicorp.consul.mesh.v1alpha1.UpstreamLimits 7, // 12: hashicorp.consul.mesh.v1alpha1.UpstreamConfig.passive_health_check:type_name -> hashicorp.consul.mesh.v1alpha1.PassiveHealthCheck - 10, // 13: hashicorp.consul.mesh.v1alpha1.UpstreamConfig.balance_outbound_connections:type_name -> hashicorp.consul.mesh.v1alpha1.BalanceConnections + 10, // 13: hashicorp.consul.mesh.v1alpha1.UpstreamConfig.balance_inbound_connections:type_name -> hashicorp.consul.mesh.v1alpha1.BalanceInboundConnections 11, // 14: hashicorp.consul.mesh.v1alpha1.UpstreamConfig.mesh_gateway_mode:type_name -> hashicorp.consul.mesh.v1alpha1.MeshGatewayMode 12, // 15: hashicorp.consul.mesh.v1alpha1.PassiveHealthCheck.interval:type_name -> google.protobuf.Duration 16, // [16:16] is the sub-list for method output_type diff --git a/proto-public/pbmesh/v1alpha1/upstreams.proto b/proto-public/pbmesh/v1alpha1/upstreams.proto index c1f444e9ccc27..9239bac774b8e 100644 --- a/proto-public/pbmesh/v1alpha1/upstreams.proto +++ b/proto-public/pbmesh/v1alpha1/upstreams.proto @@ -61,7 +61,7 @@ 
message UpstreamConfig { uint64 connect_timeout_ms = 2; UpstreamLimits limits = 3; PassiveHealthCheck passive_health_check = 4; - BalanceConnections balance_outbound_connections = 5; + BalanceInboundConnections balance_inbound_connections = 5; MeshGatewayMode mesh_gateway_mode = 6; } diff --git a/proto/private/prototest/testing.go b/proto/private/prototest/testing.go index b423478155d11..28341012afa6d 100644 --- a/proto/private/prototest/testing.go +++ b/proto/private/prototest/testing.go @@ -100,5 +100,5 @@ func AssertContainsElement[V any](t TestingT, list []V, element V, opts ...cmp.O } } - t.Fatalf("assertion failed: list does not contain element\n--- list\n%+v\n--- element: %+v", list, element) + t.Fatalf("assertion failed: list does not contain element\n--- list\n%#v\n--- element: %#v", list, element) } diff --git a/sdk/testutil/context.go b/sdk/testutil/context.go index 47ff794c96c6d..257f205aa298e 100644 --- a/sdk/testutil/context.go +++ b/sdk/testutil/context.go @@ -5,14 +5,10 @@ package testutil import ( "context" + "testing" ) -type CleanerT interface { - Helper() - Cleanup(func()) -} - -func TestContext(t CleanerT) context.Context { +func TestContext(t *testing.T) context.Context { t.Helper() ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) diff --git a/sdk/testutil/retry/counter.go b/sdk/testutil/retry/counter.go deleted file mode 100644 index 96a37ab9d2fcf..0000000000000 --- a/sdk/testutil/retry/counter.go +++ /dev/null @@ -1,23 +0,0 @@ -package retry - -import "time" - -// Counter repeats an operation a given number of -// times and waits between subsequent operations. -type Counter struct { - Count int - Wait time.Duration - - count int -} - -func (r *Counter) Continue() bool { - if r.count == r.Count { - return false - } - if r.count > 0 { - time.Sleep(r.Wait) - } - r.count++ - return true -} diff --git a/sdk/testutil/retry/retry.go b/sdk/testutil/retry/retry.go index af468460d592a..30045f0c629d6 100644 --- a/sdk/testutil/retry/retry.go +++ b/sdk/testutil/retry/retry.go @@ -53,8 +53,6 @@ type R struct { // and triggers t.FailNow() done bool output []string - - cleanups []func() } func (r *R) Logf(format string, args ...interface{}) { @@ -67,41 +65,6 @@ func (r *R) Log(args ...interface{}) { func (r *R) Helper() {} -// Cleanup register a function to be run to cleanup resources that -// were allocated during the retry attempt. These functions are executed -// after a retry attempt. If they panic, it will not stop further retry -// attempts but will be cause for the overall test failure. -func (r *R) Cleanup(fn func()) { - r.cleanups = append(r.cleanups, fn) -} - -func (r *R) runCleanup() { - - // Make sure that if a cleanup function panics, - // we still run the remaining cleanup functions. - defer func() { - err := recover() - if err != nil { - r.Stop(fmt.Errorf("error when performing test cleanup: %v", err)) - } - if len(r.cleanups) > 0 { - r.runCleanup() - } - }() - - for len(r.cleanups) > 0 { - var cleanup func() - if len(r.cleanups) > 0 { - last := len(r.cleanups) - 1 - cleanup = r.cleanups[last] - r.cleanups = r.cleanups[:last] - } - if cleanup != nil { - cleanup() - } - } -} - // runFailed is a sentinel value to indicate that the func itself // didn't panic, rather that `FailNow` was called. type runFailed struct{} @@ -227,7 +190,6 @@ func run(r Retryer, t Failer, f func(r *R)) { // run f(rr), but if recover yields a runFailed value, we know // FailNow was called. 
func() { - defer rr.runCleanup() defer func() { if p := recover(); p != nil && p != (runFailed{}) { panic(p) @@ -254,6 +216,16 @@ func DefaultFailer() *Timer { return &Timer{Timeout: 7 * time.Second, Wait: 25 * time.Millisecond} } +// TwoSeconds repeats an operation for two seconds and waits 25ms in between. +func TwoSeconds() *Timer { + return &Timer{Timeout: 2 * time.Second, Wait: 25 * time.Millisecond} +} + +// ThreeTimes repeats an operation three times and waits 25ms in between. +func ThreeTimes() *Counter { + return &Counter{Count: 3, Wait: 25 * time.Millisecond} +} + // Retryer provides an interface for repeating operations // until they succeed or an exit condition is met. type Retryer interface { @@ -261,3 +233,47 @@ type Retryer interface { // returns false to indicate retrying should stop. Continue() bool } + +// Counter repeats an operation a given number of +// times and waits between subsequent operations. +type Counter struct { + Count int + Wait time.Duration + + count int +} + +func (r *Counter) Continue() bool { + if r.count == r.Count { + return false + } + if r.count > 0 { + time.Sleep(r.Wait) + } + r.count++ + return true +} + +// Timer repeats an operation for a given amount +// of time and waits between subsequent operations. +type Timer struct { + Timeout time.Duration + Wait time.Duration + + // stop is the timeout deadline. + // TODO: Next()? + // Set on the first invocation of Next(). + stop time.Time +} + +func (r *Timer) Continue() bool { + if r.stop.IsZero() { + r.stop = time.Now().Add(r.Timeout) + return true + } + if time.Now().After(r.stop) { + return false + } + time.Sleep(r.Wait) + return true +} diff --git a/sdk/testutil/retry/retry_test.go b/sdk/testutil/retry/retry_test.go index 77bc2d4d9f96b..1f7eda7b31338 100644 --- a/sdk/testutil/retry/retry_test.go +++ b/sdk/testutil/retry/retry_test.go @@ -128,69 +128,6 @@ func TestRunWith(t *testing.T) { }) } -func TestCleanup(t *testing.T) { - t.Run("basic", func(t *testing.T) { - ft := &fakeT{} - cleanupsExecuted := 0 - RunWith(&Counter{Count: 2, Wait: time.Millisecond}, ft, func(r *R) { - r.Cleanup(func() { - cleanupsExecuted += 1 - }) - }) - - require.Equal(t, 0, ft.fails) - require.Equal(t, 1, cleanupsExecuted) - }) - t.Run("cleanup-panic-recovery", func(t *testing.T) { - ft := &fakeT{} - cleanupsExecuted := 0 - RunWith(&Counter{Count: 2, Wait: time.Millisecond}, ft, func(r *R) { - r.Cleanup(func() { - cleanupsExecuted += 1 - }) - - r.Cleanup(func() { - cleanupsExecuted += 1 - panic(fmt.Errorf("fake test error")) - }) - - r.Cleanup(func() { - cleanupsExecuted += 1 - }) - - // test is successful but should fail due to the cleanup panicing - }) - - require.Equal(t, 3, cleanupsExecuted) - require.Equal(t, 1, ft.fails) - require.Contains(t, ft.out[0], "fake test error") - }) - - t.Run("cleanup-per-retry", func(t *testing.T) { - ft := &fakeT{} - iter := 0 - cleanupsExecuted := 0 - RunWith(&Counter{Count: 3, Wait: time.Millisecond}, ft, func(r *R) { - if cleanupsExecuted != iter { - r.Stop(fmt.Errorf("cleanups not executed between retries")) - return - } - iter += 1 - - r.Cleanup(func() { - cleanupsExecuted += 1 - }) - - r.FailNow() - }) - - require.Equal(t, 3, cleanupsExecuted) - // ensure that r.Stop hadn't been called. 
If it was then we would - // have log output - require.Len(t, ft.out, 0) - }) -} - type fakeT struct { fails int out []string diff --git a/sdk/testutil/retry/timer.go b/sdk/testutil/retry/timer.go deleted file mode 100644 index a26593ddd72e0..0000000000000 --- a/sdk/testutil/retry/timer.go +++ /dev/null @@ -1,37 +0,0 @@ -package retry - -import "time" - -// TwoSeconds repeats an operation for two seconds and waits 25ms in between. -func TwoSeconds() *Timer { - return &Timer{Timeout: 2 * time.Second, Wait: 25 * time.Millisecond} -} - -// ThreeTimes repeats an operation three times and waits 25ms in between. -func ThreeTimes() *Counter { - return &Counter{Count: 3, Wait: 25 * time.Millisecond} -} - -// Timer repeats an operation for a given amount -// of time and waits between subsequent operations. -type Timer struct { - Timeout time.Duration - Wait time.Duration - - // stop is the timeout deadline. - // TODO: Next()? - // Set on the first invocation of Next(). - stop time.Time -} - -func (r *Timer) Continue() bool { - if r.stop.IsZero() { - r.stop = time.Now().Add(r.Timeout) - return true - } - if time.Now().After(r.stop) { - return false - } - time.Sleep(r.Wait) - return true -} diff --git a/test/integration/consul-container/go.mod b/test/integration/consul-container/go.mod index 9eba92ca55fb1..72eae4884f67c 100644 --- a/test/integration/consul-container/go.mod +++ b/test/integration/consul-container/go.mod @@ -11,7 +11,6 @@ require ( github.com/hashicorp/consul v0.0.0-00010101000000-000000000000 github.com/hashicorp/consul/api v1.22.0-rc1 github.com/hashicorp/consul/envoyextensions v0.3.0-rc1 - github.com/hashicorp/consul/proto-public v0.4.0-rc1 github.com/hashicorp/consul/sdk v0.14.0-rc1 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-multierror v1.1.1 @@ -37,7 +36,6 @@ require ( github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/armon/go-metrics v0.4.1 // indirect - github.com/armon/go-radix v1.0.0 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 // indirect github.com/containerd/containerd v1.7.1 // indirect @@ -51,7 +49,6 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/btree v1.0.1 // indirect - github.com/google/go-cmp v0.5.9 // indirect github.com/google/uuid v1.3.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect @@ -60,7 +57,6 @@ require ( github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/memberlist v0.5.0 // indirect github.com/imdario/mergo v0.3.15 // indirect github.com/itchyny/timefmt-go v0.1.4 // indirect @@ -68,7 +64,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect - github.com/miekg/dns v1.1.50 // indirect + github.com/miekg/dns v1.1.41 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -76,7 +72,6 @@ require ( github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/morikuni/aec v1.0.0 // indirect - github.com/oklog/ulid/v2 
v2.1.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc3 // indirect github.com/opencontainers/runc v1.1.7 // indirect @@ -87,19 +82,20 @@ require ( golang.org/x/crypto v0.1.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/net v0.10.0 // indirect - golang.org/x/sync v0.2.0 // indirect golang.org/x/sys v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect + golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.9.1 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + gotest.tools/v3 v3.4.0 // indirect ) -replace ( - github.com/hashicorp/consul => ../../.. - github.com/hashicorp/consul/api => ../../../api - github.com/hashicorp/consul/envoyextensions => ../../../envoyextensions - github.com/hashicorp/consul/proto-public => ../../../proto-public - github.com/hashicorp/consul/sdk => ../../../sdk -) +replace github.com/hashicorp/consul/api => ../../../api + +replace github.com/hashicorp/consul/sdk => ../../../sdk + +replace github.com/hashicorp/consul => ../../.. + +replace github.com/hashicorp/consul/envoyextensions => ../../../envoyextensions diff --git a/test/integration/consul-container/go.sum b/test/integration/consul-container/go.sum index 02a74ddbe68ef..1e33534485dbe 100644 --- a/test/integration/consul-container/go.sum +++ b/test/integration/consul-container/go.sum @@ -14,7 +14,6 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/datadog-go v4.8.2+incompatible h1:qbcKSx29aBLD+5QLvlQZlGmRMF/FfGqFLFev/1TDzRo= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek= @@ -27,25 +26,18 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= -github.com/aws/aws-sdk-go v1.44.289 h1:5CVEjiHFvdiVlKPBzv0rjG4zH/21W/onT18R5AH/qx0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod 
h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= @@ -106,16 +98,12 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/hashicorp/consul-net-rpc v0.0.0-20221205195236-156cfab66a69 h1:wzWurXrxfSyG1PHskIZlfuXlTSCj1Tsyatp9DtaasuY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-bexpr v0.1.2 h1:ijMXI4qERbzxbCnkxmfUtwMyjrrk3y+Vt0MxojNCbBs= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= @@ -124,7 +112,6 @@ github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVH github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-memdb v1.3.4 h1:XSL3NR682X/cVk2IeV0d70N4DZ9ljI885xAEU8IoK3c= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod 
h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -133,13 +120,11 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.6.7 h1:8/CAEZt/+F7kR7GevNHulKkUjLht3CPmn7egmhieNKo= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -150,18 +135,12 @@ github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 h1:n9J0rwVWXDpNd5iZnwY7w4WZyq53/rROeI7OVvLW8Ok= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/raft v1.5.0 h1:uNs9EfJ4FwiArZRxxfd/dQ5d33nV31/CdCHArH89hT8= -github.com/hashicorp/raft-autopilot v0.1.6 h1:C1q3RNF2FfXNZfHWbvVAu0QixaQK8K5pX4O5lh+9z4I= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/itchyny/gojq v0.12.9 h1:biKpbKwMxVYhCU1d6mR7qMr3f0Hn9F5k5YykCVb3gmM= @@ -200,20 +179,16 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 
h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.14.0 h1:/x0XQ6h+3U3nAyk1yx+bHPURrKa9sVVvYbuqZ7pIAtI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452 h1:hOY53G+kBFhbYFpRVxHl5eS7laP6B1+Cq+Z9Dry1iMU= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -232,8 +207,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU= -github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8= @@ -246,7 +219,6 @@ github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -259,19 +231,15 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang 
v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -283,28 +251,22 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 h1:xzABM9let0HLLqFypcxvLmlvEciCHL7+Lv+4vwZqecI= github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569/go.mod h1:2Ly+NIftZN4de9zRmENdYbvPQeaVIYKWpLFStLFEBgI= github.com/testcontainers/testcontainers-go v0.20.1 h1:mK15UPJ8c5P+NsQKmkqzs/jMdJt6JMs5vlw2y4j92c0= 
github.com/testcontainers/testcontainers-go v0.20.1/go.mod h1:zb+NOlCQBkZ7RQp4QI+YMIHyO2CQ/qsXzNF5eLJ24SY= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -321,7 +283,6 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -336,9 +297,7 @@ golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -350,7 +309,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -367,10 +325,9 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -388,6 +345,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -397,7 +355,7 @@ golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -431,5 +389,6 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/test/integration/consul-container/libs/cluster/agent.go b/test/integration/consul-container/libs/cluster/agent.go index 
2de346406d9e9..6753fd8c017e1 100644 --- a/test/integration/consul-container/libs/cluster/agent.go +++ b/test/integration/consul-container/libs/cluster/agent.go @@ -8,7 +8,6 @@ import ( "io" "github.com/testcontainers/testcontainers-go" - "google.golang.org/grpc" "github.com/hashicorp/consul/api" @@ -37,7 +36,6 @@ type Agent interface { Upgrade(ctx context.Context, config Config) error Exec(ctx context.Context, cmd []string) (string, error) DataDir() string - GetGRPCConn() *grpc.ClientConn } // Config is a set of configurations required to create a Agent diff --git a/test/integration/consul-container/libs/cluster/container.go b/test/integration/consul-container/libs/cluster/container.go index a371404bafe09..7ed88b0d824f5 100644 --- a/test/integration/consul-container/libs/cluster/container.go +++ b/test/integration/consul-container/libs/cluster/container.go @@ -8,7 +8,6 @@ import ( "encoding/json" "fmt" "io" - "net/url" "os" "path/filepath" "strconv" @@ -16,14 +15,11 @@ import ( goretry "github.com/avast/retry-go" dockercontainer "github.com/docker/docker/api/types/container" - "github.com/docker/go-connections/nat" "github.com/hashicorp/go-multierror" "github.com/otiai10/copy" "github.com/pkg/errors" "github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go/wait" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "github.com/hashicorp/consul/api" @@ -62,8 +58,6 @@ type consulContainerNode struct { clientCACertFile string ip string - grpcConn *grpc.ClientConn - nextAdminPortOffset int nextConnectPortOffset int @@ -178,8 +172,7 @@ func NewConsulContainer(ctx context.Context, config Config, cluster *Cluster, po clientAddr string clientCACertFile string - info AgentInfo - grpcConn *grpc.ClientConn + info AgentInfo ) debugURI := "" if utils.Debug { @@ -243,28 +236,6 @@ func NewConsulContainer(ctx context.Context, config Config, cluster *Cluster, po info.CACertFile = clientCACertFile } - // TODO: Support gRPC+TLS port. 
- if pc.Ports.GRPC > 0 { - port, err := nat.NewPort("tcp", strconv.Itoa(pc.Ports.GRPC)) - if err != nil { - return nil, fmt.Errorf("failed to parse gRPC TLS port: %w", err) - } - endpoint, err := podContainer.PortEndpoint(ctx, port, "tcp") - if err != nil { - return nil, fmt.Errorf("failed to get gRPC TLS endpoint: %w", err) - } - url, err := url.Parse(endpoint) - if err != nil { - return nil, fmt.Errorf("failed to parse gRPC endpoint URL: %w", err) - } - conn, err := grpc.Dial(url.Host, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return nil, fmt.Errorf("failed to dial gRPC connection: %w", err) - } - deferClean.Add(func() { _ = conn.Close() }) - grpcConn = conn - } - ip, err := podContainer.ContainerIP(ctx) if err != nil { return nil, err @@ -311,7 +282,6 @@ func NewConsulContainer(ctx context.Context, config Config, cluster *Cluster, po name: name, ip: ip, info: info, - grpcConn: grpcConn, } if httpPort > 0 || httpsPort > 0 { @@ -406,10 +376,6 @@ func (c *consulContainerNode) GetClient() *api.Client { return c.client } -func (c *consulContainerNode) GetGRPCConn() *grpc.ClientConn { - return c.grpcConn -} - // NewClient returns an API client by making a new one based on the provided token // - updateDefault: if true update the default client func (c *consulContainerNode) NewClient(token string, updateDefault bool) (*api.Client, error) { @@ -542,10 +508,6 @@ func (c *consulContainerNode) terminate(retainPod bool, skipFuncs bool) error { continue } } - - // if the pod is retained and therefore the IP then the grpc conn - // should handle reconnecting so there is no reason to close it. - c.closeGRPC() } var merr error @@ -567,16 +529,6 @@ func (c *consulContainerNode) terminate(retainPod bool, skipFuncs bool) error { return merr } -func (c *consulContainerNode) closeGRPC() error { - if c.grpcConn != nil { - if err := c.grpcConn.Close(); err != nil { - return err - } - c.grpcConn = nil - } - return nil -} - func (c *consulContainerNode) DataDir() string { return c.dataDir } @@ -613,7 +565,6 @@ func newContainerRequest(config Config, opts containerOpts, ports ...int) (podRe ExposedPorts: []string{ "8500/tcp", // Consul HTTP API "8501/tcp", // Consul HTTPs API - "8502/tcp", // Consul gRPC API "8443/tcp", // Envoy Gateway Listener diff --git a/test/integration/consul-container/libs/cluster/network.go b/test/integration/consul-container/libs/cluster/network.go index 6e170b3dabc19..e0ee10f4e35ff 100644 --- a/test/integration/consul-container/libs/cluster/network.go +++ b/test/integration/consul-container/libs/cluster/network.go @@ -20,7 +20,6 @@ func createNetwork(t TestingT, name string) (testcontainers.Network, error) { Name: name, Attachable: true, CheckDuplicate: true, - SkipReaper: isRYUKDisabled(), }, } first := true diff --git a/test/integration/consul-container/libs/service/helpers.go b/test/integration/consul-container/libs/service/helpers.go index 70624bf001d8b..ac254b846ae73 100644 --- a/test/integration/consul-container/libs/service/helpers.go +++ b/test/integration/consul-container/libs/service/helpers.go @@ -46,7 +46,6 @@ type ServiceOpts struct { Checks Checks Connect SidecarService Namespace string - Locality *api.Locality } // createAndRegisterStaticServerAndSidecar register the services and launch static-server containers @@ -120,7 +119,6 @@ func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent, serviceOpts Namespace: serviceOpts.Namespace, Meta: serviceOpts.Meta, Check: &agentCheck, - Locality: serviceOpts.Locality, } return 
createAndRegisterStaticServerAndSidecar(node, serviceOpts.HTTPPort, serviceOpts.GRPCPort, req, containerArgs...) } diff --git a/test/integration/consul-container/libs/utils/docker.go b/test/integration/consul-container/libs/utils/docker.go index 6be46d91aee10..109205855cd5a 100644 --- a/test/integration/consul-container/libs/utils/docker.go +++ b/test/integration/consul-container/libs/utils/docker.go @@ -9,9 +9,6 @@ import ( "io" "os" "os/exec" - "strings" - - "github.com/hashicorp/go-version" ) // DockerExec simply shell out to the docker CLI binary on your host. @@ -19,18 +16,6 @@ func DockerExec(args []string, stdout io.Writer) error { return cmdExec("docker", "docker", args, stdout, "") } -// DockerImageVersion retrieves the value of the org.opencontainers.image.version label from the specified image. -func DockerImageVersion(imageName string) (*version.Version, error) { - var b strings.Builder - err := cmdExec("docker", "docker", []string{"image", "inspect", "--format", `{{index .Config.Labels "org.opencontainers.image.version"}}`, imageName}, &b, "") - if err != nil { - return nil, err - } - output := b.String() - - return version.NewVersion(strings.TrimSpace(output)) -} - func cmdExec(name, binary string, args []string, stdout io.Writer, dir string) error { if binary == "" { panic("binary named " + name + " was not detected") diff --git a/test/integration/consul-container/test/catalog/catalog_test.go b/test/integration/consul-container/test/catalog/catalog_test.go deleted file mode 100644 index 8520e5a647e80..0000000000000 --- a/test/integration/consul-container/test/catalog/catalog_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package catalog - -import ( - "testing" - - "github.com/stretchr/testify/require" - - libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster" - libtopology "github.com/hashicorp/consul/test/integration/consul-container/libs/topology" - - "github.com/hashicorp/consul/internal/catalog/catalogtest" - pbresource "github.com/hashicorp/consul/proto-public/pbresource" -) - -func TestCatalog(t *testing.T) { - t.Parallel() - - cluster, _, _ := libtopology.NewCluster(t, &libtopology.ClusterConfig{ - NumServers: 3, - BuildOpts: &libcluster.BuildOptions{Datacenter: "dc1"}, - Cmd: `-hcl=experiments=["resource-apis"]`, - }) - - followers, err := cluster.Followers() - require.NoError(t, err) - client := pbresource.NewResourceServiceClient(followers[0].GetGRPCConn()) - - t.Run("one-shot", func(t *testing.T) { - catalogtest.RunCatalogV1Alpha1IntegrationTest(t, client) - }) - - t.Run("lifecycle", func(t *testing.T) { - catalogtest.RunCatalogV1Alpha1LifecycleIntegrationTest(t, client) - }) -} diff --git a/test/integration/consul-container/test/debugging.md b/test/integration/consul-container/test/debugging.md deleted file mode 100644 index 2957b520ac074..0000000000000 --- a/test/integration/consul-container/test/debugging.md +++ /dev/null @@ -1,78 +0,0 @@ -# Remote Debugging Integration Tests - -- [Introduction](#introduction) - - [How it works](#how-it-works) -- [Getting Started](#getting-started) - - [Prerequisites](#prerequisites) - - [Running Upgrade integration tests](#debugging-integration-tests) - - [Building images](#building-images) - - [Remote debugging using GoLand](#remote-debugging-using-goland) - - -## Introduction - -Remote debugging integration tests allows you to attach your debugger to the consul container and debug go code running on that container. 
- -### How it works -The `dev-docker-dbg` Make target will build consul docker container that has the following: -- [delve (dlv) debugger](https://github.com/go-delve/delve) installed. -- a port exposed on the container that allows a debugger from your development environment to connect and attach to the consul process and debug it remotely. -- logs out the host and port information so that you have the information needed to connect to the port. - -The integration tests have been modified to expose the `--debug` flag that will switch the test from using a `consul:local` image that can be built using `make dev-docker` to using the `consul-dbg:local` image that was built from `make dev-docker-dbg`. - -The test is run in debug mode with a breakpoint set to just after the cluster is created and you can retrieve the port information. From there, you can set up a remote debugging session that connects to this port. - -## Getting Started -### Prerequisites -To run/debug integration tests locally, the following tools are required on your machine: -- Install [Go](https://go.dev/) (the version should match that of our CI config's Go image). -- Install [`Makefile`](https://www.gnu.org/software/make/manual/make.html). -- Install [`Docker`](https://docs.docker.com/get-docker/) required to run tests locally. - -### Debugging integration tests -#### Building images -- Build a consul image with dlv installed and a port exposed that the debugger can attach to. - ``` - make dev-docker-dbg - ``` -- Build a consul-envoy container image from the consul root directory that is required for testing but not for debugging. - ``` - docker build -t consul-envoy:target-version --build-arg CONSUL_IMAGE=consul:local --build-arg ENVOY_VERSION=1.24.6 -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets - ``` - -#### Remote debugging using GoLand -(For additional information, see [GoLand's documentation on remote debugging](https://www.jetbrains.com/help/go/attach-to-running-go-processes-with-debugger.html#attach-to-a-process-on-a-remote-machine).) -##### Set up the Debug Configuration for your test -- Create the configuration for debugging the test. (You may have to debug the test once so GoLand creates the configuration for you.) -- Go to `Run > Edit Configurations` and select the appropriate configuration. -- Add `--debug` to `Program arguments` and click OK. - - isolated -##### Obtain the debug port of your container -(This is required every time a test is debugged.) - -- Put a breakpoint in the test that you are running right after the cluster has been created. This should be on the line after the call to `topology.NewCluster()`. -- Debug the test and wait for the debug session to stop on the breakpoint in the test. -- In the Debug window, search for `debug info` on the Console tab and note the host and port. - - isolated -- Go to `Run > Edit Configurations` and add a `Go Remote` configuration with the host and port that your test has exposed. Click OK. - - isolated -- Debug the configuration that you just created. Verify that it shows as connected in the `Debugger` of this configuration in the `Debug` window. - - isolated -##### Debug the consul backend -- Set an appropriate breakpoint in the backend code of the endpoint that your test will call and that you wish to debug. -- Go to the test debugging tab for the integration test in the `Debug` window and `Resume Program`. 
- - isolated -- The remote debugging session should stop on the breakpoint, and you can freely debug the code path. - - isolated - -#### Remote debugging using VSCode -(For additional information, see [VSCode's documentation on remote debugging](https://github.com/golang/vscode-go/blob/master/docs/debugging.md#remote-debugging).) - -[comment]: <> (TODO: Openly looking for someone to add VSCode specific instructions.) diff --git a/test/integration/consul-container/test/ratelimit/ratelimit_test.go b/test/integration/consul-container/test/ratelimit/ratelimit_test.go index e3aa20e5ba841..18258c2ab8db4 100644 --- a/test/integration/consul-container/test/ratelimit/ratelimit_test.go +++ b/test/integration/consul-container/test/ratelimit/ratelimit_test.go @@ -32,6 +32,8 @@ const ( // - logs for exceeding func TestServerRequestRateLimit(t *testing.T) { + t.Parallel() + type action struct { function func(client *api.Client) error rateLimitOperation string @@ -50,7 +52,6 @@ func TestServerRequestRateLimit(t *testing.T) { mode string } - // getKV and putKV are net/RPC calls getKV := action{ function: func(client *api.Client) error { _, _, err := client.KV().Get("foo", &api.QueryOptions{}) @@ -98,13 +99,13 @@ func TestServerRequestRateLimit(t *testing.T) { action: putKV, expectedErrorMsg: "", expectExceededLog: true, - expectMetric: true, + expectMetric: false, }, { action: getKV, expectedErrorMsg: "", expectExceededLog: true, - expectMetric: true, + expectMetric: false, }, }, }, @@ -126,13 +127,10 @@ func TestServerRequestRateLimit(t *testing.T) { expectMetric: true, }, }, - }, - } + }} for _, tc := range testCases { - tc := tc t.Run(tc.description, func(t *testing.T) { - t.Parallel() clusterConfig := &libtopology.ClusterConfig{ NumServers: 1, NumClients: 0, @@ -146,9 +144,12 @@ func TestServerRequestRateLimit(t *testing.T) { ApplyDefaultProxySettings: false, } - cluster, client := setupClusterAndClient(t, clusterConfig, true) + cluster, _, _ := libtopology.NewCluster(t, clusterConfig) defer terminate(t, cluster) + client, err := cluster.GetClient(nil, true) + require.NoError(t, err) + // perform actions and validate returned errors to client for _, op := range tc.operations { err := op.action.function(client) @@ -164,14 +165,22 @@ func TestServerRequestRateLimit(t *testing.T) { // doing this in a separate loop so we can perform actions, allow metrics // and logs to collect and then assert on each. for _, op := range tc.operations { - timer := &retry.Timer{Timeout: 15 * time.Second, Wait: 500 * time.Millisecond} + timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} retry.RunWith(timer, t, func(r *retry.R) { - checkForMetric(t, cluster, op.action.rateLimitOperation, op.action.rateLimitType, tc.mode, op.expectMetric) + // validate metrics + metricsInfo, err := client.Agent().Metrics() + // TODO(NET-1978): currently returns NaN error + // require.NoError(t, err) + if metricsInfo != nil && err == nil { + if op.expectMetric { + checkForMetric(r, metricsInfo, op.action.rateLimitOperation, op.action.rateLimitType, tc.mode) + } + } // validate logs // putting this last as there are cases where logs // were not present in consumer when assertion was made. 
- checkLogsForMessage(t, clusterConfig.LogConsumer.Msgs, + checkLogsForMessage(r, clusterConfig.LogConsumer.Msgs, fmt.Sprintf("[DEBUG] agent.server.rpc-rate-limit: RPC exceeded allowed rate limit: rpc=%s", op.action.rateLimitOperation), op.action.rateLimitOperation, "exceeded", op.expectExceededLog) @@ -181,65 +190,43 @@ func TestServerRequestRateLimit(t *testing.T) { } } -func setupClusterAndClient(t *testing.T, config *libtopology.ClusterConfig, isServer bool) (*libcluster.Cluster, *api.Client) { - cluster, _, _ := libtopology.NewCluster(t, config) - - client, err := cluster.GetClient(nil, isServer) - require.NoError(t, err) - - return cluster, client -} +func checkForMetric(t *retry.R, metricsInfo *api.MetricsInfo, operationName string, expectedLimitType string, expectedMode string) { + const counterName = "consul.rpc.rate_limit.exceeded" -func checkForMetric(t *testing.T, cluster *libcluster.Cluster, operationName string, expectedLimitType string, expectedMode string, expectMetric bool) { - // validate metrics - server, err := cluster.GetClient(nil, true) - require.NoError(t, err) - metricsInfo, err := server.Agent().Metrics() - // TODO(NET-1978): currently returns NaN error - // require.NoError(t, err) - if metricsInfo != nil && err == nil { - if expectMetric { - const counterName = "consul.rpc.rate_limit.exceeded" - - var counter api.SampledValue - for _, c := range metricsInfo.Counters { - if c.Name == counterName { - counter = c - break - } - } - require.NotEmptyf(t, counter.Name, "counter not found: %s", counterName) + var counter api.SampledValue + for _, c := range metricsInfo.Counters { + if c.Name == counterName { + counter = c + break + } + } + require.NotEmptyf(t, counter.Name, "counter not found: %s", counterName) - operation, ok := counter.Labels["op"] - require.True(t, ok) + operation, ok := counter.Labels["op"] + require.True(t, ok) - limitType, ok := counter.Labels["limit_type"] - require.True(t, ok) + limitType, ok := counter.Labels["limit_type"] + require.True(t, ok) - mode, ok := counter.Labels["mode"] - require.True(t, ok) + mode, ok := counter.Labels["mode"] + require.True(t, ok) - if operation == operationName { - require.GreaterOrEqual(t, counter.Count, 1) - require.Equal(t, expectedLimitType, limitType) - require.Equal(t, expectedMode, mode) - } - } + if operation == operationName { + require.GreaterOrEqual(t, counter.Count, 1) + require.Equal(t, expectedLimitType, limitType) + require.Equal(t, expectedMode, mode) } } -func checkLogsForMessage(t *testing.T, logs []string, msg string, operationName string, logType string, logShouldExist bool) { - if logShouldExist { - found := false - for _, log := range logs { - if strings.Contains(log, msg) { - found = true - break - } +func checkLogsForMessage(t *retry.R, logs []string, msg string, operationName string, logType string, logShouldExist bool) { + found := false + for _, log := range logs { + if strings.Contains(log, msg) { + found = true + break } - expectedLog := fmt.Sprintf("%s log check failed for: %s. Log expected: %t", logType, operationName, logShouldExist) - require.Equal(t, logShouldExist, found, expectedLog) } + require.Equal(t, logShouldExist, found, fmt.Sprintf("%s log check failed for: %s. 
Log expected: %t", logType, operationName, logShouldExist)) } func terminate(t *testing.T, cluster *libcluster.Cluster) { diff --git a/test/integration/consul-container/test/upgrade/catalog/catalog_test.go b/test/integration/consul-container/test/upgrade/catalog/catalog_test.go deleted file mode 100644 index ef2de3edeb24a..0000000000000 --- a/test/integration/consul-container/test/upgrade/catalog/catalog_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package catalog - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/internal/catalog/catalogtest" - "github.com/hashicorp/consul/proto-public/pbresource" - libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster" - "github.com/hashicorp/consul/test/integration/consul-container/libs/topology" - "github.com/hashicorp/consul/test/integration/consul-container/libs/utils" - "github.com/hashicorp/go-version" -) - -var minCatalogResourceVersion = version.Must(version.NewVersion("v1.16.0")) - -const ( - versionUndetermined = ` -Cannot determine the actual version the starting image represents. -Scrutinze test failures to ensure that the starting version should -actually be able to be used for creating the initial data set. - ` -) - -func maybeSkipUpgradeTest(t *testing.T, minVersion *version.Version) { - t.Helper() - - image := utils.DockerImage(utils.GetLatestImageName(), utils.LatestVersion) - latestVersion, err := utils.DockerImageVersion(image) - - if latestVersion != nil && latestVersion.LessThan(minVersion) { - t.Skipf("Upgrade test isn't applicable with version %q as the starting version", latestVersion.String()) - } - - if err != nil || latestVersion == nil { - t.Log(versionUndetermined) - } -} - -// Test upgrade a cluster of latest version to the target version and ensure that the catalog still -// functions properly. 
Note -func TestCatalogUpgrade(t *testing.T) { - maybeSkipUpgradeTest(t, minCatalogResourceVersion) - t.Parallel() - - const numServers = 1 - buildOpts := &libcluster.BuildOptions{ - ConsulImageName: utils.GetLatestImageName(), - ConsulVersion: utils.LatestVersion, - Datacenter: "dc1", - InjectAutoEncryption: true, - } - - cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{ - NumServers: 1, - BuildOpts: buildOpts, - ApplyDefaultProxySettings: true, - Cmd: `-hcl=experiments=["resource-apis"]`, - }) - - client := cluster.APIClient(0) - - libcluster.WaitForLeader(t, cluster, client) - libcluster.WaitForMembers(t, client, numServers) - - leader, err := cluster.Leader() - require.NoError(t, err) - rscClient := pbresource.NewResourceServiceClient(leader.GetGRPCConn()) - - // Initialize some data - catalogtest.PublishCatalogV1Alpha1IntegrationTestData(t, rscClient) - - // upgrade the cluster to the Target version - t.Logf("initiating standard upgrade to version=%q", utils.TargetVersion) - err = cluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), utils.TargetVersion) - - require.NoError(t, err) - libcluster.WaitForLeader(t, cluster, client) - libcluster.WaitForMembers(t, client, numServers) - - catalogtest.VerifyCatalogV1Alpha1IntegrationTestResults(t, rscClient) -} diff --git a/test/integration/consul-container/test/util/test_debug_breakpoint_hit.png b/test/integration/consul-container/test/util/test_debug_breakpoint_hit.png deleted file mode 100644 index 2eae03da3b905..0000000000000 Binary files a/test/integration/consul-container/test/util/test_debug_breakpoint_hit.png and /dev/null differ diff --git a/test/integration/consul-container/test/util/test_debug_configuration.png b/test/integration/consul-container/test/util/test_debug_configuration.png deleted file mode 100644 index 8fa19ba939985..0000000000000 Binary files a/test/integration/consul-container/test/util/test_debug_configuration.png and /dev/null differ diff --git a/test/integration/consul-container/test/util/test_debug_info.png b/test/integration/consul-container/test/util/test_debug_info.png deleted file mode 100644 index a177999c0d95a..0000000000000 Binary files a/test/integration/consul-container/test/util/test_debug_info.png and /dev/null differ diff --git a/test/integration/consul-container/test/util/test_debug_remote_configuration.png b/test/integration/consul-container/test/util/test_debug_remote_configuration.png deleted file mode 100644 index 01b14eada6d9a..0000000000000 Binary files a/test/integration/consul-container/test/util/test_debug_remote_configuration.png and /dev/null differ diff --git a/test/integration/consul-container/test/util/test_debug_remote_connected.png b/test/integration/consul-container/test/util/test_debug_remote_connected.png deleted file mode 100644 index 52fc905ef29af..0000000000000 Binary files a/test/integration/consul-container/test/util/test_debug_remote_connected.png and /dev/null differ diff --git a/test/integration/consul-container/test/util/test_debug_resume_program.png b/test/integration/consul-container/test/util/test_debug_resume_program.png deleted file mode 100644 index 99c2899019bb5..0000000000000 Binary files a/test/integration/consul-container/test/util/test_debug_resume_program.png and /dev/null differ diff --git a/tlsutil/config.go b/tlsutil/config.go index a52d6b6ad829b..5cdaf7633eca5 100644 --- a/tlsutil/config.go +++ b/tlsutil/config.go @@ -857,23 +857,10 @@ func (c *Configurator) IncomingHTTPSConfig() *tls.Config { return config } -// 
OutgoingTLSConfigForCheck creates a client *tls.Config for executing checks. -// It is RECOMMENDED that the serverName be left unspecified. The crypto/tls -// client will deduce the ServerName (for SNI) from the check address unless -// it's an IP (RFC 6066, Section 3). However, there are two instances where -// supplying a serverName is useful: -// -// 1. When the check address is an IP, a serverName can be supplied for SNI. -// Note: setting serverName will also override the hostname used to verify -// the certificate presented by the server being checked. -// -// 2. When the hostname in the check address won't be present in the SAN -// (Subject Alternative Name) field of the certificate presented by the -// server being checked. Note: setting serverName will also override the -// ServerName used for SNI. -// -// Setting skipVerify will disable verification of the server's certificate -// chain and hostname, which is generally not suitable for production use. +// OutgoingTLSConfigForCheck generates a *tls.Config for outgoing TLS connections +// for checks. This function is separated because there is an extra flag to +// consider for checks. EnableAgentTLSForChecks and InsecureSkipVerify has to +// be checked for checks. func (c *Configurator) OutgoingTLSConfigForCheck(skipVerify bool, serverName string) *tls.Config { c.log("OutgoingTLSConfigForCheck") @@ -888,9 +875,13 @@ func (c *Configurator) OutgoingTLSConfigForCheck(skipVerify bool, serverName str } } + if serverName == "" { + serverName = c.serverNameOrNodeName() + } config := c.internalRPCTLSConfig(false) config.InsecureSkipVerify = skipVerify config.ServerName = serverName + return config } diff --git a/tlsutil/config_test.go b/tlsutil/config_test.go index 721198afe83bf..30ebd62c206b7 100644 --- a/tlsutil/config_test.go +++ b/tlsutil/config_test.go @@ -1376,7 +1376,7 @@ func TestConfigurator_OutgoingTLSConfigForCheck(t *testing.T) { }, }, { - name: "agent tls, default consul server name, no override", + name: "agent tls, default server name", conf: func() (*Configurator, error) { return NewConfigurator(Config{ InternalRPC: ProtocolConfig{ @@ -1389,11 +1389,11 @@ func TestConfigurator_OutgoingTLSConfigForCheck(t *testing.T) { }, expected: &tls.Config{ MinVersion: tls.VersionTLS12, - ServerName: "", + ServerName: "servername", }, }, { - name: "agent tls, skip verify, consul node name for server name, no override", + name: "agent tls, skip verify, node name for server name", conf: func() (*Configurator, error) { return NewConfigurator(Config{ InternalRPC: ProtocolConfig{ @@ -1407,7 +1407,7 @@ func TestConfigurator_OutgoingTLSConfigForCheck(t *testing.T) { expected: &tls.Config{ InsecureSkipVerify: true, MinVersion: tls.VersionTLS12, - ServerName: "", + ServerName: "nodename", }, }, { diff --git a/troubleshoot/go.mod b/troubleshoot/go.mod index 1a6ca3559a639..1b9c0e274b57d 100644 --- a/troubleshoot/go.mod +++ b/troubleshoot/go.mod @@ -14,8 +14,8 @@ exclude ( require ( github.com/envoyproxy/go-control-plane v0.11.0 github.com/envoyproxy/go-control-plane/xdsmatcher v0.0.0-20230524161521-aaaacbfbe53e - github.com/hashicorp/consul/api v1.20.0 - github.com/hashicorp/consul/envoyextensions v0.1.2 + github.com/hashicorp/consul/api v1.22.0-rc1 + github.com/hashicorp/consul/envoyextensions v0.3.0-rc1 github.com/stretchr/testify v1.8.3 google.golang.org/protobuf v1.30.0 ) @@ -43,7 +43,6 @@ require ( github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 
github.com/prometheus/client_model v0.3.0 // indirect - github.com/rogpeppe/go-internal v1.10.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/net v0.10.0 // indirect diff --git a/troubleshoot/go.sum b/troubleshoot/go.sum index dc482f3d5ecc4..a76178464c684 100644 --- a/troubleshoot/go.sum +++ b/troubleshoot/go.sum @@ -161,7 +161,11 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY= +github.com/hashicorp/consul/api v1.22.0-rc1 h1:ePmGqndeMgaI38KUbSA/CqTzeEAIogXyWnfNJzglo70= +github.com/hashicorp/consul/api v1.22.0-rc1/go.mod h1:wtduXtbAqSGtBdi3tyA5SSAYGAG51rBejV9SEUBciMY= +github.com/hashicorp/consul/envoyextensions v0.3.0-rc1 h1:weclrwjvLeX+vxPOyo4b4dCDxSpnDl60Z9K16nnCVnI= +github.com/hashicorp/consul/envoyextensions v0.3.0-rc1/go.mod h1:ckxoPHMiWXAe6dhyxmKsX1XqO4KTV64KWIyTu44z8UI= +github.com/hashicorp/consul/sdk v0.14.0-rc1 h1:PuETOfN0uxl28i0Pq6rK7TBCrIl7psMbL0YTSje4KvM= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -209,8 +213,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -271,8 +275,6 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -584,8 +586,8 @@ google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw 
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/ui/packages/consul-ui/translations/routes/en-us.yaml b/ui/packages/consul-ui/translations/routes/en-us.yaml index 129686384002d..da76128e88ea0 100644 --- a/ui/packages/consul-ui/translations/routes/en-us.yaml +++ b/ui/packages/consul-ui/translations/routes/en-us.yaml @@ -152,7 +152,7 @@ dc:
{items, select, 0 {Services must be exported from one peer to another to enable service communication across two peers. There don't seem to be any services imported from {name} yet, or you may not have services:read permissions to access to this view.} - other {No services were found matching that search, or you may not have access to view the services you are searching for.} + other {No services were found matching that search, or you may not have access to view the services you are searching for.} }
exported: @@ -162,7 +162,7 @@ dc:
{items, select, 0 {Services must be exported from one peer to another to enable service communication across two peers. There don't seem to be any services exported to {name} yet, or you may not have services:read permissions to access to this view.} - other {No services were found matching that search, or you may not have access to view the services you are searching for.} + other {No services were found matching that search, or you may not have access to view the services you are searching for.} }
diff --git a/version/VERSION b/version/VERSION index ee8855caa4a79..bdae8d41643d2 100644 --- a/version/VERSION +++ b/version/VERSION @@ -1 +1 @@ -1.17.0-dev +1.16.1-dev diff --git a/website/content/commands/debug.mdx b/website/content/commands/debug.mdx index 1514158ff9074..bebbe955a294b 100644 --- a/website/content/commands/debug.mdx +++ b/website/content/commands/debug.mdx @@ -80,7 +80,7 @@ information when `debug` is running. By default, it captures all information. | `members` | A list of all the WAN and LAN members in the cluster. | | `metrics` | Metrics from the in-memory metrics endpoint in the target, captured at the interval. | | `logs` | `TRACE` level logs for the target agent, captured for the duration. | -| `pprof` | Golang heap, CPU, goroutine, and trace profiling. CPU and traces are captured for `duration` in a single file while heap and goroutine are separate snapshots for each `interval`. This information is not retrieved unless [`enable_debug`](/consul/docs/agent/config/config-files#enable_debug) is set to `true` on the target agent or ACLs are enabled and an ACL token with `operator:read` is provided. | +| `pprof` | Golang heap, CPU, goroutine, and trace profiling. CPU and traces are captured for `duration` in a single file while heap and goroutine are separate snapshots for each `interval`. This information is not retrieved unless [`enable_debug`](/consul/docs/agent/config/config-files#enable_debug) is set to `true` on the target agent or ACLs are enable and an ACL token with `operator:read` is provided. | ## Examples diff --git a/website/content/commands/watch.mdx b/website/content/commands/watch.mdx index 806864dae9539..da32cdefdc4d0 100644 --- a/website/content/commands/watch.mdx +++ b/website/content/commands/watch.mdx @@ -53,11 +53,6 @@ or optionally provided. There is more documentation on watch - `-type` - Watch type. Required, one of "`key`, `keyprefix`, `services`, `nodes`, `service`, `checks`, or `event`. -- `-filter=` - Expression to use for filtering the results. Optional for - `checks` `nodes`, `services`, and `service` type. - See the [`/catalog/nodes` API documentation](/consul/api-docs/catalog#filtering) for a - description of what is filterable. - #### API Options @include 'http_api_options_client.mdx' diff --git a/website/content/docs/agent/config/config-files.mdx b/website/content/docs/agent/config/config-files.mdx index 8d46b63bd0ce6..4183a5a7d2135 100644 --- a/website/content/docs/agent/config/config-files.mdx +++ b/website/content/docs/agent/config/config-files.mdx @@ -472,7 +472,8 @@ Refer to the [formatting specification](https://golang.org/pkg/time/#ParseDurati that match a registering service instance. If it finds any, the agent will merge the centralized defaults with the service instance configuration. This allows for things like service protocol or proxy configuration to be defined centrally and inherited by any affected service registrations. This defaults to `false` in versions of Consul prior to 1.9.0, and defaults to `true` in Consul 1.9.0 and later. -- `enable_debug` (boolean, default is `false`): When set to `true`, enables Consul to report additional debugging information, including runtime profiling (`pprof`) data. This setting is only required for clusters without ACL [enabled](#acl_enabled). If you change this setting, you must restart the agent for the change to take effect. +- `enable_debug` When set, enables some additional debugging features. 
Currently, this is only used to + access runtime profiling HTTP endpoints, which are available with an `operator:read` ACL regardless of the value of `enable_debug`. - `enable_script_checks` Equivalent to the [`-enable-script-checks` command-line flag](/consul/docs/agent/config/cli-flags#_enable_script_checks). @@ -2094,12 +2095,6 @@ specially crafted certificate signed by the CA can be used to gain full access t * `TLSv1_2` (default) * `TLSv1_3` - - `verify_server_hostname` ((#tls_internal_rpc_verify_server_hostname)) When - set to true, Consul verifies the TLS certificate presented by the servers - match the hostname `server..`. By default this is false, - and Consul does not verify the hostname of the certificate, only that it - is signed by a trusted CA. - **WARNING: TLS 1.1 and lower are generally considered less secure and should not be used if possible.** @@ -2207,7 +2202,7 @@ specially crafted certificate signed by the CA can be used to gain full access t only way to enforce that no client can communicate with a server unencrypted is to also enable `verify_incoming` which requires client certificates too. - - `verify_server_hostname` Overrides [tls.defaults.verify_server_hostname](#tls_defaults_verify_server_hostname). When + - `verify_server_hostname` ((#tls_internal_rpc_verify_server_hostname)) When set to true, Consul verifies the TLS certificate presented by the servers match the hostname `server..`. By default this is false, and Consul does not verify the hostname of the certificate, only that it @@ -2291,6 +2286,9 @@ tls { ca_file = "/etc/pki/tls/certs/ca-bundle.crt" verify_incoming = true verify_outgoing = true + } + + internal_rpc { verify_server_hostname = true } } @@ -2319,7 +2317,9 @@ tls { "cert_file": "/etc/pki/tls/certs/my.crt", "ca_file": "/etc/pki/tls/certs/ca-bundle.crt", "verify_incoming": true, - "verify_outgoing": true, + "verify_outgoing": true + }, + "internal_rpc": { "verify_server_hostname": true } } diff --git a/website/content/docs/agent/index.mdx b/website/content/docs/agent/index.mdx index b5a06b39e6409..ec68e0a1ce1fa 100644 --- a/website/content/docs/agent/index.mdx +++ b/website/content/docs/agent/index.mdx @@ -276,6 +276,9 @@ tls { ca_file = "/consul/config/certs/consul-agent-ca.pem" cert_file = "/consul/config/certs/dc1-server-consul-0.pem" key_file = "/consul/config/certs/dc1-server-consul-0-key.pem" + } + + internal_rpc { verify_server_hostname = true } } diff --git a/website/content/docs/agent/limits/usage/limit-request-rates-from-ips.mdx b/website/content/docs/agent/limits/usage/limit-request-rates-from-ips.mdx index ae7c5e769ce5f..58e7479012869 100644 --- a/website/content/docs/agent/limits/usage/limit-request-rates-from-ips.mdx +++ b/website/content/docs/agent/limits/usage/limit-request-rates-from-ips.mdx @@ -10,7 +10,7 @@ This topic describes how to configure RPC and gRPC traffic rate limits for sourc -This feature requires Consul Enterprise. Refer to the [feature compatibility matrix](/consul/docs/enterprise#consul-enterprise-feature-availability) for additional information. +This feature requires Consul Enterprise. Refer to the [feature compatibility matrix](/consul/docs/v1.16.x/enterprise#consul-enterprise-feature-availability) for additional information. 
diff --git a/website/content/docs/agent/rpc.mdx b/website/content/docs/agent/rpc.mdx new file mode 100644 index 0000000000000..bdff4a05fc5e1 --- /dev/null +++ b/website/content/docs/agent/rpc.mdx @@ -0,0 +1,260 @@ +--- +layout: docs +page_title: Legacy RPC Protocol +description: >- + Consul agents originally could be controlled through the RPC protocol. This feature was deprecated in version 0.8 in favor of the HTTP API. Learn about agent RPC interactions and how they worked. +--- + +# RPC Protocol + +~> The RPC Protocol is deprecated and support was removed in Consul +0.8. Please use the [HTTP API](/consul/api-docs), which has +support for all features of the RPC Protocol. + +The Consul agent provides a complete RPC mechanism that can +be used to control the agent programmatically. This RPC +mechanism is the same one used by the CLI but can be +used by other applications to easily leverage the power +of Consul without directly embedding. + +It is important to note that the RPC protocol does not support +all the same operations as the [HTTP API](/consul/api-docs). + +## Implementation Details + +The RPC protocol is implemented using [MsgPack](http://msgpack.org/) +over TCP. This choice was driven by the fact that all operating +systems support TCP, and MsgPack provides a fast serialization format +that is broadly available across languages. + +All RPC requests have a request header, and some requests have +a request body. The request header looks like: + +```javascript +{ + "Command": "Handshake", + "Seq": 0 +} +``` + +All responses have a response header, and some may contain +a response body. The response header looks like: + +```javascript +{ + "Seq": 0, + "Error": "" +} +``` + +The `Command` in the request is used to specify what command the server should +run, and the `Seq` is used to track the request. Responses are +tagged with the same `Seq` as the request. This allows for some +concurrency on the server side as requests are not purely FIFO. +Thus, the `Seq` value should not be re-used between commands. +All responses may be accompanied by an error. + +Possible commands include: + +- handshake - Initializes the connection and sets the version +- force-leave - Removes a failed node from the cluster +- join - Requests Consul join another node +- members-lan - Returns the list of LAN members +- members-wan - Returns the list of WAN members +- monitor - Starts streaming logs over the connection +- stop - Stops streaming logs +- leave - Instructs the Consul agent to perform a graceful leave and shutdown +- stats - Provides various debugging statistics +- reload - Triggers a configuration reload + +Each command is documented below along with any request or +response body that is applicable. + +### handshake + +This command is used to initialize an RPC connection. As it informs +the server which version the client is using, handshake MUST be the +first command sent. + +The request header must be followed by a handshake body, like: + +```javascript +{ + "Version": 1 +} +``` + +The body specifies the IPC version being used; however, only version +1 is currently supported. This is to ensure backwards compatibility +in the future. + +There is no special response body, but the client should wait for the +response and check for an error. + +### force-leave + +This command is used to remove failed nodes from a cluster. It takes +the following body: + +```javascript +{ + "Node": "failed-node-name" +} +``` + +There is no special response body. 
+ +### join + +This command is used to join an existing cluster using one or more known nodes. +It takes the following body: + +```javascript +{ + "Existing": [ + "192.168.0.1:6000", + "192.168.0.2:6000" + ], + "WAN": false +} +``` + +The `Existing` nodes are each contacted, and `WAN` controls if we are adding a +WAN member or LAN member. LAN members are expected to be in the same datacenter +and should be accessible at relatively low latencies. WAN members are expected to +be operating in different datacenters with relatively high access latencies. It is +important that only agents running in "server" mode are able to join nodes over the +WAN. + +The response contains both a header and body. The body looks like: + +```javascript +{ + "Num": 2 +} +``` + +'Num' indicates the number of nodes successfully joined. + +### members-lan + +This command is used to return all the known LAN members and associated +information. All agents will respond to this command. + +There is no request body, but the response looks like: + +```javascript +{ + "Members": [ + { + "Name": "TestNode" + "Addr": [127, 0, 0, 1], + "Port": 5000, + "Tags": { + "role": "test" + }, + "Status": "alive", + "ProtocolMin": 0, + "ProtocolMax": 3, + "ProtocolCur": 2, + "DelegateMin": 0, + "DelegateMax": 1, + "DelegateCur": 1, + }, + ... + ] +} +``` + +### members-wan + +This command is used to return all the known WAN members and associated +information. Only agents in server mode will respond to this command. + +There is no request body, and the response is the same as `members-lan` + +### monitor + +The monitor command subscribes the channel to log messages from the Agent. + +The request looks like: + +```javascript +{ + "LogLevel": "DEBUG" +} +``` + +This subscribes the client to all messages of at least DEBUG level. + +The server will respond with a standard response header indicating if the monitor +was successful. If so, any future logs will be sent and tagged with +the same `Seq` as in the `monitor` request. + +Assume we issued the previous monitor command with `"Seq": 50`. We may start +getting messages like: + +```javascript +{ + "Seq": 50, + "Error": "" +} + +{ + "Log": "2013/12/03 13:06:53 [INFO] agent: Received event: member-join" +} +``` + +It is important to realize that these messages are sent asynchronously +and not in response to any command. If a client is streaming +commands, there may be logs streamed while a client is waiting for a +response to a command. This is why the `Seq` must be used to pair requests +with their corresponding responses. + +The client can only be subscribed to at most a single monitor instance. +To stop streaming, the `stop` command is used. + +### stop + +This command stops a monitor. + +The request looks like: + +```javascript +{ + "Stop": 50 +} +``` + +This unsubscribes the client from the monitor with `Seq` value of 50. + +There is no response body. + +### leave + +This command is used to trigger a graceful leave and shutdown. +There is no request body or response body. + +### stats + +This command provides debug information. There is no request body, and the +response body looks like: + +```javascript +{ + "agent": { + "check_monitors": 0, + ... + }, + "consul: { + "server": "true", + ... + }, + ... +} +``` + +### reload + +This command is used to trigger a reload of configurations. +There is no request body or response body. 
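The restored `rpc.mdx` above specifies the framing of the deprecated agent RPC protocol: a MsgPack-encoded request header carrying `Command` and `Seq`, optionally followed by a body, with responses tagged by the same `Seq`. The following is a minimal, hedged sketch of a client for that framing — assuming the legacy RPC listener on `127.0.0.1:8400` and the `github.com/hashicorp/go-msgpack/codec` package — and it is only meaningful against Consul releases prior to 0.8, where the protocol still existed.

```go
// Sketch of a client for the deprecated agent RPC protocol described above.
// Assumptions: the legacy RPC listener at 127.0.0.1:8400 and the
// github.com/hashicorp/go-msgpack/codec MsgPack library. Illustrative only.
package main

import (
	"fmt"
	"net"

	"github.com/hashicorp/go-msgpack/codec"
)

// requestHeader and responseHeader mirror the header shapes shown in the doc.
type requestHeader struct {
	Command string
	Seq     uint64
}

type responseHeader struct {
	Seq   uint64
	Error string
}

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:8400")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	handle := &codec.MsgpackHandle{}
	enc := codec.NewEncoder(conn, handle)
	dec := codec.NewDecoder(conn, handle)

	// The handshake MUST be the first command: header, then handshake body.
	if err := enc.Encode(&requestHeader{Command: "handshake", Seq: 0}); err != nil {
		panic(err)
	}
	if err := enc.Encode(map[string]int{"Version": 1}); err != nil {
		panic(err)
	}
	var resp responseHeader
	if err := dec.Decode(&resp); err != nil || resp.Error != "" {
		panic(fmt.Sprintf("handshake failed: %v %q", err, resp.Error))
	}

	// members-lan has no request body; its response carries a body after the header.
	if err := enc.Encode(&requestHeader{Command: "members-lan", Seq: 1}); err != nil {
		panic(err)
	}
	if err := dec.Decode(&resp); err != nil || resp.Error != "" {
		panic(fmt.Sprintf("members-lan failed: %v %q", err, resp.Error))
	}
	var body map[string]interface{}
	if err := dec.Decode(&body); err != nil {
		panic(err)
	}
	fmt.Printf("LAN members: %v\n", body["Members"])
}
```

Note how each response is paired with its request through `Seq`, as the protocol requires when commands and asynchronous log messages share one connection.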
diff --git a/website/content/docs/agent/telemetry.mdx b/website/content/docs/agent/telemetry.mdx index eae1c1aa4239d..29d22bfcc59d4 100644 --- a/website/content/docs/agent/telemetry.mdx +++ b/website/content/docs/agent/telemetry.mdx @@ -480,8 +480,10 @@ These metrics are used to monitor the health of the Consul servers. | `consul.raft.leader.dispatchNumLogs` | Measures the number of logs committed to disk in a batch. | logs | gauge | | `consul.raft.logstore.verifier.checkpoints_written` | Counts the number of checkpoint entries written to the LogStore. | checkpoints | counter | | `consul.raft.logstore.verifier.dropped_reports` | Counts how many times the verifier routine was still busy when the next checksum came in and so verification for a range was skipped. If you see this happen, consider increasing the interval between checkpoints with [`raft_logstore.verification.interval`](/consul/docs/agent/config/config-files#raft_logstore_verification) | reports dropped | counter | -| `consul.raft.logstore.verifier.ranges_verified` | Counts the number of log ranges for which a verification report has been completed. Refer to [Monitor Raft metrics and logs for WAL](/consul/docs/agent/wal-logstore/monitoring) for more information. | log ranges verifications | counter | -| `consul.raft.logstore.verifier.read_checksum_failures` | Counts the number of times a range of logs between two check points contained at least one disk corruption. Refer to [Monitor Raft metrics and logs for WAL](/consul/docs/agent/wal-logstore/monitoring) for more information. | disk corruptions | counter | +| `consul.raft.logstore.verifier.ranges_verified` | Counts the number of log ranges for which a verification report has been completed. Refer to [Monitor Raft metrics and logs for WAL +](/consul/docs/agent/wal-logstore/monitoring) for more information. | log ranges verifications | counter | +| `consul.raft.logstore.verifier.read_checksum_failures` | Counts the number of times a range of logs between two check points contained at least one disk corruption. Refer to [Monitor Raft metrics and logs for WAL +](/consul/docs/agent/wal-logstore/monitoring) for more information. | disk corruptions | counter | | `consul.raft.logstore.verifier.write_checksum_failures` | Counts the number of times a follower has a different checksum to the leader at the point where it writes to the log. This could be caused by either a disk-corruption on the leader (unlikely) or some other corruption of the log entries in-flight. | in-flight corruptions | counter | | `consul.raft.leader.lastContact` | Measures the time since the leader was last able to contact the follower nodes when checking its leader lease. It can be used as a measure for how stable the Raft timing is and how close the leader is to timing out its lease.The lease timeout is 500 ms times the [`raft_multiplier` configuration](/consul/docs/agent/config/config-files#raft_multiplier), so this telemetry value should not be getting close to that configured value, otherwise the Raft timing is marginal and might need to be tuned, or more powerful servers might be needed. See the [Server Performance](/consul/docs/install/performance) guide for more details. | ms | timer | | `consul.raft.leader.oldestLogAge` | The number of milliseconds since the _oldest_ log in the leader's log store was written. 
This can be important for replication health where write rate is high and the snapshot is large as followers may be unable to recover from a restart if restoring takes longer than the minimum value for the current leader. Compare this with `consul.raft.fsm.lastRestoreDuration` and `consul.raft.rpc.installSnapshot` to monitor. In normal usage this gauge value will grow linearly over time until a snapshot completes on the leader and the log is truncated. Note: this metric won't be emitted until the leader writes a snapshot. After an upgrade to Consul 1.10.0 it won't be emitted until the oldest log was written after the upgrade. | ms | gauge | diff --git a/website/content/docs/connect/ca/index.mdx b/website/content/docs/connect/ca/index.mdx index c49e07516fae6..13cc56c72d35d 100644 --- a/website/content/docs/connect/ca/index.mdx +++ b/website/content/docs/connect/ca/index.mdx @@ -21,7 +21,7 @@ support for using [Vault as a CA](/consul/docs/connect/ca/vault). With Vault, the root certificate and private key material remain with the Vault cluster. -## CA and Certificate relationship +### CA and Certificate relationship This diagram shows the relationship between the CA certificates in a Consul primary datacenter and a secondary Consul datacenter. @@ -34,22 +34,9 @@ services. - the Leaf Cert Client Agent is created by auto-encrypt and auto-config. It is used by client agents for HTTP API TLS, and for mTLS for RPC requests to servers. -Any secondary datacenters use their CA provider to generate an intermediate certificate -signing request (CSR) to be signed by the primary root CA. They receive an intermediate -CA certificate, which is used to sign leaf certificates in the secondary datacenter. - -You can use different providers across primary and secondary datacenters. -For example, an operator may use a Vault CA provider for extra security in the primary -datacenter but choose to use the built-in CA provider in the secondary datacenter, which -may not have a reachable Vault cluster. The following table compares the built-in and Vault providers. - -## CA Provider Comparison - -| | Consul built-in | Vault | -|------------|------------------------------------|-----------------------------------------------------------------------------------| -| Security | CA private keys are stored on disk | CA private keys are stored in Vault and are never exposed to Consul server agents | -| Resiliency | No dependency on external systems. If Consul is available, it can sign certificates | Dependent on Vault availability | -| Latency | Consul signs certificates locally | A network call to Vault is required to sign certificates | +Any secondary datacenters receive an intermediate certificate, signed by the Primary Root +CA, which is used as the CA certificate to sign leaf certificates in the secondary +datacenter. ## CA Bootstrapping diff --git a/website/content/docs/connect/ca/vault.mdx b/website/content/docs/connect/ca/vault.mdx index 828a6937cae18..ce35744e9242f 100644 --- a/website/content/docs/connect/ca/vault.mdx +++ b/website/content/docs/connect/ca/vault.mdx @@ -7,27 +7,19 @@ description: >- # Vault as a Service Mesh Certificate Authority -You can configure Consul to use [Vault](/vault) as the certificate authority (CA) so that Vault can manage and sign certificates distributed to services in the mesh. -The Vault CA provider uses the [Vault PKI secrets engine](/vault/docs/secrets/pki) to generate and sign certificates. 
+You can configure Consul to use [Vault](https://www.vaultproject.io/) as the certificate authority (CA) so that Vault can manage and sign certificates distributed to services in the mesh. +The Vault CA provider uses the [Vault PKI secrets engine](/vault/docs/secrets/pki) to generate and sign certificates. This page describes how to configure the Vault CA provider. > **Tutorial:** Complete the [Vault as Consul Service Mesh Certification Authority](/consul/tutorials/vault-secure/vault-pki-consul-connect-ca) tutorial for hands-on guidance on how to configure Vault as the Consul service mesh certification authority. ## Requirements -- Vault 0.10.3 or higher - -~> **Compatibility note:** If you use Vault 1.11.0+ as Consul's service mesh CA, versions of Consul released before Dec 13, 2022 will develop an issue with Consul control plane or service mesh communication ([GH-15525](https://github.com/hashicorp/consul/pull/15525)). Use or upgrade to a [Consul version that includes the fix](https://support.hashicorp.com/hc/en-us/articles/11308460105491#01GMC24E6PPGXMRX8DMT4HZYTW) to avoid this problem. - -## Recommendations - - Refer to [Service Mesh Certificate Authority Overview](/consul/docs/connect/ca) for important background information about how Consul manages certificates with configurable CA providers. -- For best performance and resiliency, every datacenter should have a Vault cluster local to its Consul cluster. +- Vault 0.10.3 to 1.10.x. -- If your Consul datacenters are WAN-federated and the secondary datacenter uses Vault Enterprise - [performance secondaries](/vault/docs/enterprise/replication#performance-replication), we recommend - configuring [`local`](/vault/docs/enterprise/replication#local) mounts for their [`intermediate_pki_path`](/consul/docs/connect/ca/vault#intermediatepkipath). +~> **Compatibility note:** If you use Vault 1.11.0+ as Consul's service mesh CA, versions of Consul released before Dec 13, 2022 will develop an issue with Consul control plane or service mesh communication ([GH-15525](https://github.com/hashicorp/consul/pull/15525)). Use or upgrade to a [Consul version that includes the fix](https://support.hashicorp.com/hc/en-us/articles/11308460105491#01GMC24E6PPGXMRX8DMT4HZYTW) to avoid this problem. ## Enable Vault as the CA @@ -36,7 +28,7 @@ and including the required provider configuration options. You can provide the CA configuration in the server agents' configuration file or in the body of a `PUT` request to the [`/connect/ca/configuration`](/consul/api-docs/connect/ca#update-ca-configuration) API endpoint. -Refer to the [Configuration Reference](#configuration-reference) for details about configuration options and for example use cases. +Refer to the [Configuration Reference](#configuration-reference) for details about configuration options and for example use cases. The following example shows the required configurations for a default implementation: @@ -83,7 +75,7 @@ connect { You can specify the following configuration options. Note that a configuration option's name may differ between API calls and the agent configuration file. The first key refers to the option name for use in API calls. -The key after the slash refers to the corresponding option name in the agent configuration file. +The key after the slash refers to the corresponding option name in the agent configuration file. - `Address` / `address` (`string: `) - The address of the Vault server.
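For orientation, the following is a minimal sketch of what the agent configuration described in this reference can look like. It is not the documented default example; the address, token, and PKI paths are placeholders you must replace:

```hcl
# Minimal sketch of a Vault CA provider configuration (placeholder values).
connect {
  enabled     = true
  ca_provider = "vault"
  ca_config {
    address               = "https://vault.example.com:8200"
    token                 = "<vault-token-with-pki-permissions>"
    root_pki_path         = "connect_root"
    intermediate_pki_path = "connect_inter"
  }
}
```

The same settings can be sent to the [`/connect/ca/configuration`](/consul/api-docs/connect/ca#update-ca-configuration) endpoint by using the API key names (`Address`, `Token`, `RootPKIPath`, `IntermediatePKIPath`).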
@@ -112,8 +104,7 @@ The key after the slash refers to the corresponding option name in the agent con Only the authentication related fields (for example, JWT's `path` and `role`) are supported. The optional management fields (for example: `remove_jwt_after_reading`) are not supported. - `RootPKIPath` / `root_pki_path` (`string: `) - The path to - a PKI secrets engine for the root certificate. Required for primary - datacenters. Secondary datacenters do not use this path. + a PKI secrets engine for the root certificate. If the path does not exist, Consul will mount a new PKI secrets engine at the specified path with the @@ -123,6 +114,9 @@ The key after the slash refers to the corresponding option name in the agent con the root certificate TTL was set to 8760 hour, or 1 year, and was not configurable. The root certificate will expire at the end of the specified period. + When WAN Federation is enabled, each secondary datacenter must use the same Vault cluster and share the same `root_pki_path` + with the primary datacenter. + To use an intermediate certificate as the primary CA in Consul, initialize the `RootPKIPath` in Vault with a PEM bundle. The first certificate in the bundle must be the intermediate certificate that Consul will use as the primary CA. @@ -139,10 +133,8 @@ The key after the slash refers to the corresponding option name in the agent con path does not exist, Consul will attempt to mount and configure this automatically. - When WAN federation is enabled, every secondary datacenter that shares a common Vault cluster - must specify a unique `intermediate_pki_path`. If a Vault cluster is not used by more than one Consul datacenter, - then you do not need to specify a unique value for the `intermediate_pki_path`. We still recommend using a - unique `intermediate_pki_path` for each datacenter, however, to improve operational and diagnostic clarity. + When WAN Federation is enabled, every secondary + datacenter must specify a unique `intermediate_pki_path`. - `IntermediatePKINamespace` / `intermediate_pki_namespace` (`string: `) - The absolute namespace that the `IntermediatePKIPath` is in. Setting this parameter overrides the `Namespace` option for the `IntermediatePKIPath`. Introduced in 1.12.3. @@ -250,7 +242,7 @@ Then, attach the following Vault ACL policy to the CA provider's path "//" { capabilities = [ "read" ] } - + path "//root/sign-intermediate" { capabilities = [ "update" ] } @@ -276,7 +268,7 @@ Then, attach the following Vault ACL policy to the CA provider's capabilities = [ "read" ] } ``` - + #### Define a policy for Consul-managed PKI paths ((#consul-managed-pki-paths)) @@ -337,7 +329,7 @@ Then, attach the following Vault ACL policy to the CA provider's capabilities = [ "read" ] } ``` - + #### Additional Vault ACL policies for sensitive operations @@ -348,7 +340,7 @@ following CA provider configuration changes: - Changing the `RootPKIPath` Those configuration modifications trigger a root CA change that requires an -extremely privileged root cross-sign operation. +extremely privileged root cross-sign operation. 
For that operation to succeed, the CA provider's [Vault token](#token) or [auth method](#authmethod) must contain the following rule: diff --git a/website/content/docs/connect/config-entries/control-plane-request-limit.mdx b/website/content/docs/connect/config-entries/control-plane-request-limit.mdx index 21b06f6533c13..7d36b127bbd9f 100644 --- a/website/content/docs/connect/config-entries/control-plane-request-limit.mdx +++ b/website/content/docs/connect/config-entries/control-plane-request-limit.mdx @@ -10,7 +10,7 @@ This topic describes the configuration options for the `control-plane-request-li -This feature requires Consul Enterprise. Refer to the [feature compatibility matrix](/consul/docs/enterprise#consul-enterprise-feature-availability) for additional information. +This feature requires Consul Enterprise. Refer to the [feature compatibility matrix](/consul/docs/v1.16.x/enterprise#consul-enterprise-feature-availability) for additional information. diff --git a/website/content/docs/connect/config-entries/service-intentions.mdx b/website/content/docs/connect/config-entries/service-intentions.mdx index 180e3aaabd960..15e41314ba98b 100644 --- a/website/content/docs/connect/config-entries/service-intentions.mdx +++ b/website/content/docs/connect/config-entries/service-intentions.mdx @@ -1567,4 +1567,4 @@ Sources = [ } ``` - \ No newline at end of file + diff --git a/website/content/docs/connect/failover/index.mdx b/website/content/docs/connect/failover/index.mdx index a4c51b7997314..dd1591d469f6a 100644 --- a/website/content/docs/connect/failover/index.mdx +++ b/website/content/docs/connect/failover/index.mdx @@ -21,11 +21,9 @@ The following table compares these strategies in deployments with multiple datac | Failover Strategy | Supports WAN Federation | Supports Cluster Peering | Multi-Datacenter Failover Strength | Multi-Datacenter Usage Scenario | | :---------------: | :---------------------: | :----------------------: | :--------------------------------- | :------------------------------ | | `Failover` stanza | ✅ | ✅ | Enables more granular logic for failover targeting | Configuring failover for a single service or service subset, especially for testing or debugging purposes | -| Prepared query | ✅ | ❌ | Central policies that can automatically target the nearest datacenter | WAN-federated deployments where a primary datacenter is configured. | +| Prepared query | ✅ | ❌ | Central policies that can automatically target the nearest datacenter | WAN-federated deployments where a primary datacenter is configured. Prepared queries are not replicated over peer connections. | | Sameness groups | ❌ | ✅ | Group size changes without edits to existing member configurations | Cluster peering deployments with consistently named services and namespaces | -Although cluster peering connections support the [`Failover` field of the prepared query request schema](/consul/api-docs/query#failover) when using Consul's service discovery features to [perform dynamic DNS queries](/consul/docs/services/discovery/dns-dynamic-lookups), they do not support prepared queries for service mesh failover scenarios. - ### Failover configurations for a service mesh with a single datacenter You can implement a service resolver configuration entry and specify a pool of failover service instances that other services can exchange messages with when the primary service becomes unhealthy or unreachable. 
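For illustration, a sketch of such a service resolver, written here in the Kubernetes CRD form and assuming a hypothetical `api` service that should fail over to healthy instances in a `dc2` datacenter, might look like the following; the equivalent `service-resolver` configuration entry can also be written in HCL or JSON:

```yaml
# Hypothetical example: route every subset of "api" to dc2 when local instances are unhealthy.
apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceResolver
metadata:
  name: api
spec:
  failover:
    '*':
      datacenters: ['dc2']
```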
We recommend adopting this strategy as a minimum baseline when implementing Consul service mesh and layering additional failover strategies to build resilience into your application network. @@ -34,9 +32,9 @@ Refer to the [`Failover` configuration ](/consul/docs/connect/config-entries/ser ### Failover configuration for WAN-federated datacenters -If your network has multiple Consul datacenters that are WAN-federated, you can configure your applications to look for failover services with prepared queries. [Prepared queries](/consul/api-docs/) are configurations that enable you to define complex service discovery lookups. This strategy hinges on the secondary datacenter containing service instances that have the same name and residing in the same namespace as their counterparts in the primary datacenter. +If your network has multiple Consul datacenters that are WAN-federated, you can configure your applications to look for failover services with prepared queries. [Prepared queries](/consul/api-docs/) are configurations that enable you to define complex service discovery lookups. This strategy hinges on the secondary datacenter containing service instances that have the same name and residing in the same namespace as their counterparts in the primary datacenter. -Refer to the [Automate geo-failover with prepared queries tutorial](/consul/tutorials/developer-discovery/automate-geo-failover) for additional information. +Refer to the [Automate geo-failover with prepared queries tutorial](/consul/tutorials/developer-discovery/automate-geo-failover) for additional information. ### Failover configuration for peered clusters and partitions diff --git a/website/content/docs/enterprise/index.mdx b/website/content/docs/enterprise/index.mdx index 273be253a0d21..3295ccc9504ad 100644 --- a/website/content/docs/enterprise/index.mdx +++ b/website/content/docs/enterprise/index.mdx @@ -86,7 +86,7 @@ Available Enterprise features per Consul form and license include: | [Redundancy Zones](/consul/docs/enterprise/redundancy) | Not applicable | Yes | With Global Visibility, Routing, and Scale module | | [Sameness Groups](/consul/docs/connect/config-entries/samenes-group) | No | Yes | N/A | | [Sentinel for KV](/consul/docs/enterprise/sentinel) | All tiers | Yes | With Governance and Policy module | -| [Server request rate limits per source IP](/consul/docs/agent/limits/usage/limit-request-rates-from-ips) | All tiers | Yes | With Governance and Policy module | +| [Server request rate limits per source IP](/consul/docs/v1.16.x/agent/limits/usage/limit-request-rates-from-ips) | All tiers | Yes | With Governance and Policy module | [HashiCorp Cloud Platform (HCP) Consul]: https://cloud.hashicorp.com/products/consul @@ -114,7 +114,7 @@ Consul Enterprise feature availability can change depending on your server and c | [Redundancy Zones](/consul/docs/enterprise/redundancy) | ✅ | ✅ | ✅ | | [Sameness Groups](/consul/docs/connect/config-entries/samenes-group) | ✅ | ✅ | ✅ | | [Sentinel ](/consul/docs/enterprise/sentinel) | ✅ | ✅ | ✅ | -| [Server request rate limits per source IP](/consul/docs/agent/limits/usage/limit-request-rates-from-ips) | ✅ | ✅ | ✅ | +| [Server request rate limits per source IP](/consul/docs/v1.16.x/agent/limits/usage/limit-request-rates-from-ips) | ✅ | ✅ | ✅ | @@ -134,7 +134,7 @@ Consul Enterprise feature availability can change depending on your server and c | [Redundancy Zones](/consul/docs/enterprise/redundancy) | ❌ | ❌ | ❌ | | [Sameness Groups](/consul/docs/connect/config-entries/samenes-group) | ✅ | 
✅ | ✅ | | [Sentinel ](/consul/docs/enterprise/sentinel) | ✅ | ✅ | ✅ | -| [Server request rate limits per source IP](/consul/docs/agent/limits/usage/limit-request-rates-from-ips) | ✅ | ✅ | ✅ | +| [Server request rate limits per source IP](/consul/docs/v1.16.x/agent/limits/usage/limit-request-rates-from-ips) | ✅ | ✅ | ✅ | @@ -154,7 +154,7 @@ Consul Enterprise feature availability can change depending on your server and c | [Redundancy Zones](/consul/docs/enterprise/redundancy) | n/a | n/a | n/a | | [Sameness Groups](/consul/docs/connect/config-entries/samenes-group) | ✅ | ✅ | ✅ | | [Sentinel ](/consul/docs/enterprise/sentinel) | ✅ | ✅ | ✅ | -| [Server request rate limits per source IP](/consul/docs/agent/limits/usage/limit-request-rates-from-ips) | ✅ | ✅ | ✅ | +| [Server request rate limits per source IP](/consul/docs/v1.16.x/agent/limits/usage/limit-request-rates-from-ips) | ✅ | ✅ | ✅ | \ No newline at end of file diff --git a/website/content/docs/k8s/connect/cluster-peering/tech-specs.mdx b/website/content/docs/k8s/connect/cluster-peering/tech-specs.mdx index cfe4ba7aebc5d..2d27a4f369fc9 100644 --- a/website/content/docs/k8s/connect/cluster-peering/tech-specs.mdx +++ b/website/content/docs/k8s/connect/cluster-peering/tech-specs.mdx @@ -41,7 +41,7 @@ Refer to the following example Helm configuration: ```yaml global: name: consul - image: "hashicorp/consul:1.14.1" + image: "hashicorp/consul:1.16.0" peering: enabled: true tls: @@ -166,4 +166,4 @@ If ACLs are enabled, you must add tokens to grant the following permissions: - Grant `service:write` permissions to services that define mesh gateways in their server definition. - Grant `service:read` permissions for all services on the partition. -- Grant `mesh:write` permissions to the mesh gateways that participate in cluster peering connections. This permission allows a leaf certificate to be issued for mesh gateways to terminate TLS sessions for HTTP requests. \ No newline at end of file +- Grant `mesh:write` permissions to the mesh gateways that participate in cluster peering connections. This permission allows a leaf certificate to be issued for mesh gateways to terminate TLS sessions for HTTP requests. diff --git a/website/content/docs/k8s/connect/cluster-peering/usage/establish-peering.mdx b/website/content/docs/k8s/connect/cluster-peering/usage/establish-peering.mdx index 167d4fdcecede..375886132e501 100644 --- a/website/content/docs/k8s/connect/cluster-peering/usage/establish-peering.mdx +++ b/website/content/docs/k8s/connect/cluster-peering/usage/establish-peering.mdx @@ -48,7 +48,7 @@ After you provision a Kubernetes cluster and set up your kubeconfig file to mana $ export CLUSTER2_CONTEXT= ``` -### Update the Helm chart +### Install Consul using Helm and configure peering over mesh gateways To use cluster peering with Consul on Kubernetes deployments, update the Helm chart with [the required values](/consul/docs/k8s/connect/cluster-peering/tech-specs#helm-requirements). After updating the Helm chart, you can use the `consul-k8s` CLI to apply `values.yaml` to each cluster. 
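The following `values.yaml` sketch is consistent with the Helm requirements referenced above; treat the linked technical specifications as the authoritative list of required values and adjust the image tag for your target release:

```yaml
# Minimal sketch of peering-related Helm values (adjust versions for your environment).
global:
  name: consul
  image: "hashicorp/consul:1.16.0"
  peering:
    enabled: true
  tls:
    enabled: true
meshGateway:
  enabled: true
  replicas: 1
```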
@@ -59,7 +59,7 @@ To use cluster peering with Consul on Kubernetes deployments, update the Helm ch ``` ```shell-session - $ helm install ${HELM_RELEASE_NAME1} hashicorp/consul --create-namespace --namespace consul --version "1.0.1" --values values.yaml --set global.datacenter=dc1 --kube-context $CLUSTER1_CONTEXT + $ helm install ${HELM_RELEASE_NAME1} hashicorp/consul --create-namespace --namespace consul --version "1.2.0" --values values.yaml --set global.datacenter=dc1 --kube-context $CLUSTER1_CONTEXT ``` 1. In `cluster-02`, run the following commands: @@ -69,9 +69,11 @@ To use cluster peering with Consul on Kubernetes deployments, update the Helm ch ``` ```shell-session - $ helm install ${HELM_RELEASE_NAME2} hashicorp/consul --create-namespace --namespace consul --version "1.0.1" --values values.yaml --set global.datacenter=dc2 --kube-context $CLUSTER2_CONTEXT + $ helm install ${HELM_RELEASE_NAME2} hashicorp/consul --create-namespace --namespace consul --version "1.2.0" --values values.yaml --set global.datacenter=dc2 --kube-context $CLUSTER2_CONTEXT ``` +1. For both clusters apply the `Mesh` configuration entry values provided in [Mesh Gateway Specifications](/consul/docs/k8s/connect/cluster-peering/tech-specs#mesh-gateway-specifications) to allow establishing peering connections over mesh gateways. + ### Configure the mesh gateway mode for traffic between services In Kubernetes deployments, you can configure mesh gateways to use `local` mode so that a service dialing a service in a remote peer dials the remote mesh gateway instead of the local mesh gateway. To configure the mesh gateway mode so that this traffic always leaves through the local mesh gateway, you can use the `ProxyDefaults` CRD. @@ -452,4 +454,4 @@ For Consul Enterprise, the permissions apply to all imported services in the ser Refer to [Reading servers](/consul/docs/connect/config-entries/exported-services#reading-services) in the `exported-services` configuration entry documentation for example rules. -For additional information about how to configure and use ACLs, refer to [ACLs system overview](/consul/docs/security/acl). \ No newline at end of file +For additional information about how to configure and use ACLs, refer to [ACLs system overview](/consul/docs/security/acl). diff --git a/website/content/docs/k8s/connect/index.mdx b/website/content/docs/k8s/connect/index.mdx index 57096a4a29a8e..8f45e2ab176cb 100644 --- a/website/content/docs/k8s/connect/index.mdx +++ b/website/content/docs/k8s/connect/index.mdx @@ -35,10 +35,10 @@ When transparent proxy mode is enabled, all service-to-service traffic is requir The following configurations are examples for registering workloads on Kubernetes into Consul's service mesh in different scenarios. Each scenario provides an example Kubernetes manifest to demonstrate how to use Consul's service mesh with a specific Kubernetes workload type. 
-- [Kubernetes Pods running as a deployment](#kubernetes-pods-running-as-a-deployment) -- [Connecting to mesh-enabled Services](#connecting-to-mesh-enabled-services) -- [Kubernetes Jobs](#kubernetes-jobs) -- [Kubernetes Pods with multiple ports](#kubernetes-pods-with-multiple-ports) + - [Kubernetes Pods running as a deployment](#kubernetes-pods-running-as-a-deployment) + - [Connecting to mesh-enabled Services](#connecting-to-mesh-enabled-services) + - [Kubernetes Jobs](#kubernetes-jobs) + - [Kubernetes Pods with multiple ports](#kubernetes-pods-with-multiple-ports) #### Kubernetes Pods running as a deployment @@ -106,6 +106,7 @@ of establishing connections to our previous example "static-server" service. The connection to this static text service happens over an authorized and encrypted connection via service mesh. + ```yaml @@ -266,7 +267,8 @@ NAME COMPLETIONS DURATION AGE test-job 1/1 30s 4m31s ``` -In addition, based on the logs emitted by the pod you can verify that the proxy was shut down before the Job completed. +In addition, based on the logs emitted by the pod you can verify that the proxy was shut down before the Job completed. + ```shell-session $ kubectl logs test-job-49st7 -c test-job @@ -382,7 +384,7 @@ The service account on the pod spec for the deployment should be set to the firs serviceAccountName: web ``` -The following deployment example demonstrates the required annotations for the manifest. In addition, the previous YAML manifests can also be combined into a single manifest for easier deployment. +The following deployment example demonstrates the required annotations for the manifest. In addition, the previous YAML manifests can also be combined into a single manifest for easier deployment. diff --git a/website/content/docs/k8s/helm.mdx b/website/content/docs/k8s/helm.mdx index c4f639b2792be..d56729db24591 100644 --- a/website/content/docs/k8s/helm.mdx +++ b/website/content/docs/k8s/helm.mdx @@ -20,27 +20,22 @@ with Consul. Use these links to navigate to a particular top-level stanza. -- [Helm Chart Reference](#helm-chart-reference) - - [Top-Level Stanzas](#top-level-stanzas) - - [All Values](#all-values) - - [`global`](#h-global) - - [`server`](#h-server) - - [`externalServers`](#h-externalservers) - - [`client`](#h-client) - - [`dns`](#h-dns) - - [`ui`](#h-ui) - - [`syncCatalog`](#h-synccatalog) - - [`connectInject`](#h-connectinject) - - [`meshGateway`](#h-meshgateway) - - [`ingressGateways`](#h-ingressgateways) - - [`terminatingGateways`](#h-terminatinggateways) - - [`apiGateway`](#h-apigateway) - - [`webhookCertManager`](#h-webhookcertmanager) - - [`prometheus`](#h-prometheus) - - [`tests`](#h-tests) - - [`telemetryCollector`](#h-telemetrycollector) - - [Helm Chart Examples](#helm-chart-examples) - - [Customizing the Helm Chart](#customizing-the-helm-chart) +- [`global`](#h-global) +- [`server`](#h-server) +- [`externalServers`](#h-externalservers) +- [`client`](#h-client) +- [`dns`](#h-dns) +- [`ui`](#h-ui) +- [`syncCatalog`](#h-synccatalog) +- [`connectInject`](#h-connectinject) +- [`meshGateway`](#h-meshgateway) +- [`ingressGateways`](#h-ingressgateways) +- [`terminatingGateways`](#h-terminatinggateways) +- [`apiGateway`](#h-apigateway) +- [`webhookCertManager`](#h-webhookcertmanager) +- [`prometheus`](#h-prometheus) +- [`tests`](#h-tests) +- [`telemetryCollector`](#h-telemetrycollector) ## All Values @@ -64,7 +59,7 @@ Use these links to navigate to a particular top-level stanza. the prefix will be `-consul`. 
- `domain` ((#v-global-domain)) (`string: consul`) - The domain Consul will answer DNS queries for - (Refer to [`-domain`](https://developer.hashicorp.com/consul/docs/agent/config/cli-flags#_domain)) and the domain services synced from + (Refer to [`-domain`](/consul/docs/agent/config/cli-flags#_domain)) and the domain services synced from Consul into Kubernetes will have, e.g. `service-name.service.consul`. - `peering` ((#v-global-peering)) - Configures the Cluster Peering feature. Requires Consul v1.14+ and Consul-K8s v1.0.0+. @@ -125,7 +120,7 @@ Use these links to navigate to a particular top-level stanza. - `secretsBackend` ((#v-global-secretsbackend)) - secretsBackend is used to configure Vault as the secrets backend for the Consul on Kubernetes installation. The Vault cluster needs to have the Kubernetes Auth Method, KV2 and PKI secrets engines enabled and have necessary secrets, policies and roles created prior to installing Consul. - Refer to [Vault as the Secrets Backend](https://developer.hashicorp.com/consul/docs/k8s/deployment-configurations/vault) + Refer to [Vault as the Secrets Backend](/consul/docs/k8s/deployment-configurations/vault) documentation for full instructions. The Vault cluster _must_ not have the Consul cluster installed by this Helm chart as its storage backend @@ -212,11 +207,11 @@ Use these links to navigate to a particular top-level stanza. - `secretKey` ((#v-global-secretsbackend-vault-ca-secretkey)) (`string: ""`) - The key within the Kubernetes or Vault secret that holds the Vault CA certificate. - - `connectCA` ((#v-global-secretsbackend-vault-connectca)) - Configuration for the Vault service mesh CA provider. + - `connectCA` ((#v-global-secretsbackend-vault-connectca)) - Configuration for the Vault Connect CA provider. The provider will be configured to use the Vault Kubernetes auth method and therefore requires the role provided by `global.secretsBackend.vault.consulServerRole` to have permissions to the root and intermediate PKI paths. - Please refer to [Vault ACL policies](https://developer.hashicorp.com/consul/docs/connect/ca/vault#vault-acl-policies) + Please refer to [Vault ACL policies](/consul/docs/connect/ca/vault#vault-acl-policies) documentation for information on how to configure the Vault policies. - `address` ((#v-global-secretsbackend-vault-connectca-address)) (`string: ""`) - The address of the Vault server. @@ -224,13 +219,13 @@ Use these links to navigate to a particular top-level stanza. - `authMethodPath` ((#v-global-secretsbackend-vault-connectca-authmethodpath)) (`string: kubernetes`) - The mount path of the Kubernetes auth method in Vault. - `rootPKIPath` ((#v-global-secretsbackend-vault-connectca-rootpkipath)) (`string: ""`) - The path to a PKI secrets engine for the root certificate. - For more details, please refer to [Vault service mesh CA configuration](https://developer.hashicorp.com/consul/docs/connect/ca/vault#rootpkipath). + For more details, please refer to [Vault Connect CA configuration](/consul/docs/connect/ca/vault#rootpkipath). - `intermediatePKIPath` ((#v-global-secretsbackend-vault-connectca-intermediatepkipath)) (`string: ""`) - The path to a PKI secrets engine for the generated intermediate certificate. - For more details, please refer to [Vault service mesh CA configuration](https://developer.hashicorp.com/consul/docs/connect/ca/vault#intermediatepkipath). + For more details, please refer to [Vault Connect CA configuration](/consul/docs/connect/ca/vault#intermediatepkipath). 
- - `additionalConfig` ((#v-global-secretsbackend-vault-connectca-additionalconfig)) (`string: {}`) - Additional service mesh CA configuration in JSON format. - Please refer to [Vault service mesh CA configuration](https://developer.hashicorp.com/consul/docs/connect/ca/vault#configuration) + - `additionalConfig` ((#v-global-secretsbackend-vault-connectca-additionalconfig)) (`string: {}`) - Additional Connect CA configuration in JSON format. + Please refer to [Vault Connect CA configuration](/consul/docs/connect/ca/vault#configuration) for all configuration options available for that provider. Example: @@ -251,20 +246,20 @@ Use these links to navigate to a particular top-level stanza. - `caCert` ((#v-global-secretsbackend-vault-connectinject-cacert)) - Configuration to the Vault Secret that Kubernetes uses on Kubernetes pod creation, deletion, and update, to get CA certificates - used issued from vault to send webhooks to the connect inject. + used issued from vault to send webhooks to the ConnectInject. - `secretName` ((#v-global-secretsbackend-vault-connectinject-cacert-secretname)) (`string: null`) - The Vault secret path that contains the CA certificate for - connect inject webhooks. + Connect Inject webhooks. - `tlsCert` ((#v-global-secretsbackend-vault-connectinject-tlscert)) - Configuration to the Vault Secret that Kubernetes uses on Kubernetes pod creation, deletion, and update, to get TLS certificates - used issued from vault to send webhooks to the connect inject. + used issued from vault to send webhooks to the ConnectInject. - `secretName` ((#v-global-secretsbackend-vault-connectinject-tlscert-secretname)) (`string: null`) - The Vault secret path that issues TLS certificates for connect inject webhooks. - `gossipEncryption` ((#v-global-gossipencryption)) - Configures Consul's gossip encryption key. - (Refer to [`-encrypt`](https://developer.hashicorp.com/consul/docs/agent/config/cli-flags#_encrypt)). + (Refer to [`-encrypt`](/consul/docs/agent/config/cli-flags#_encrypt)). By default, gossip encryption is not enabled. The gossip encryption key may be set automatically or manually. The recommended method is to automatically generate the key. To automatically generate and set a gossip encryption key, set autoGenerate to true. @@ -295,17 +290,17 @@ Use these links to navigate to a particular top-level stanza. - `recursors` ((#v-global-recursors)) (`array: []`) - A list of addresses of upstream DNS servers that are used to recursively resolve DNS queries. These values are given as `-recursor` flags to Consul servers and clients. - Refer to [`-recursor`](https://developer.hashicorp.com/consul/docs/agent/config/cli-flags#_recursor) for more details. + Refer to [`-recursor`](/consul/docs/agent/config/cli-flags#_recursor) for more details. If this is an empty array (the default), then Consul DNS will only resolve queries for the Consul top level domain (by default `.consul`). - - `tls` ((#v-global-tls)) - Enables [TLS](https://developer.hashicorp.com/consul/tutorials/security/tls-encryption-secure) + - `tls` ((#v-global-tls)) - Enables [TLS](/consul/tutorials/security/tls-encryption-secure) across the cluster to verify authenticity of the Consul servers and clients. Requires Consul v1.4.1+. - `enabled` ((#v-global-tls-enabled)) (`boolean: false`) - If true, the Helm chart will enable TLS for Consul servers and clients and all consul-k8s-control-plane components, as well as generate certificate authority (optional) and server and client certificates. 
- This setting is required for [Cluster Peering](https://developer.hashicorp.com/consul/docs/connect/cluster-peering/k8s). + This setting is required for [Cluster Peering](/consul/docs/connect/cluster-peering/k8s). - `enableAutoEncrypt` ((#v-global-tls-enableautoencrypt)) (`boolean: false`) - If true, turns on the auto-encrypt feature on clients and servers. It also switches consul-k8s-control-plane components to retrieve the CA from the servers @@ -322,7 +317,7 @@ Use these links to navigate to a particular top-level stanza. - `verify` ((#v-global-tls-verify)) (`boolean: true`) - If true, `verify_outgoing`, `verify_server_hostname`, and `verify_incoming` for internal RPC communication will be set to `true` for Consul servers and clients. Set this to false to incrementally roll out TLS on an existing Consul cluster. - Please refer to [TLS on existing clusters](https://developer.hashicorp.com/consul/docs/k8s/operations/tls-on-existing-cluster) + Please refer to [TLS on existing clusters](/consul/docs/k8s/operations/tls-on-existing-cluster) for more details. - `httpsOnly` ((#v-global-tls-httpsonly)) (`boolean: true`) - If true, the Helm chart will configure Consul to disable the HTTP port on @@ -410,6 +405,23 @@ Use these links to navigate to a particular top-level stanza. - `secretKey` ((#v-global-acls-replicationtoken-secretkey)) (`string: null`) - The key within the Kubernetes or Vault secret that holds the replication token. + - `resources` ((#v-global-acls-resources)) (`map`) - The resource requests (CPU, memory, etc.) for the server-acl-init and server-acl-init-cleanup pods. + This should be a YAML map corresponding to a Kubernetes + [`ResourceRequirements``](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#resourcerequirements-v1-core) + object. + + Example: + + ```yaml + resources: + requests: + memory: '200Mi' + cpu: '100m' + limits: + memory: '200Mi' + cpu: '100m' + ``` + - `partitionToken` ((#v-global-acls-partitiontoken)) - partitionToken references a Vault secret containing the ACL token to be used in non-default partitions. This value should only be provided in the default partition and only when setting the `global.secretsBackend.vault.enabled` value to true. @@ -475,7 +487,7 @@ Use these links to navigate to a particular top-level stanza. This address must be reachable from the Consul servers in the primary datacenter. This auth method will be used to provision ACL tokens for Consul components and is different from the one used by the Consul Service Mesh. - Please refer to the [Kubernetes Auth Method documentation](https://developer.hashicorp.com/consul/docs/security/acl/auth-methods/kubernetes). + Please refer to the [Kubernetes Auth Method documentation](/consul/docs/security/acl/auth-methods/kubernetes). You can retrieve this value from your `kubeconfig` by running: @@ -602,7 +614,7 @@ Use these links to navigate to a particular top-level stanza. Consul server agents. - `replicas` ((#v-server-replicas)) (`integer: 1`) - The number of server agents to run. This determines the fault tolerance of - the cluster. Please refer to the [deployment table](https://developer.hashicorp.com/consul/docs/architecture/consensus#deployment-table) + the cluster. Please refer to the [deployment table](/consul/docs/architecture/consensus#deployment-table) for more information. - `bootstrapExpect` ((#v-server-bootstrapexpect)) (`int: null`) - The number of servers that are expected to be running. 
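To make the relationship between these two settings concrete, the following sketch (assumed values, not chart defaults) configures a three-server cluster, which tolerates the loss of one server according to the deployment table linked above:

```yaml
server:
  # Three servers tolerate one server failure.
  replicas: 3
  # Wait for all three servers before bootstrapping the Raft cluster.
  bootstrapExpect: 3
```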
@@ -641,7 +653,7 @@ Use these links to navigate to a particular top-level stanza. Vault Secrets backend: If you are using Vault as a secrets backend, a Vault Policy must be created which allows `["create", "update"]` capabilities on the PKI issuing endpoint, which is usually of the form `pki/issue/consul-server`. - Complete [this tutorial](https://developer.hashicorp.com/consul/tutorials/vault-secure/vault-pki-consul-secure-tls) + Complete [this tutorial](/consul/tutorials/vault-secure/vault-pki-consul-secure-tls) to learn how to generate a compatible certificate. Note: when using TLS, both the `server.serverCert` and `global.tls.caCert` which points to the CA endpoint of this PKI engine must be provided. @@ -681,18 +693,18 @@ Use these links to navigate to a particular top-level stanza. storage classes, the PersistentVolumeClaims would need to be manually created. A `null` value will use the Kubernetes cluster's default StorageClass. If a default StorageClass does not exist, you will need to create one. - Refer to the [Read/Write Tuning](https://developer.hashicorp.com/consul/docs/install/performance#read-write-tuning) + Refer to the [Read/Write Tuning](/consul/docs/install/performance#read-write-tuning) section of the Server Performance Requirements documentation for considerations around choosing a performant storage class. - ~> **Note:** The [Reference Architecture](https://developer.hashicorp.com/consul/tutorials/production-deploy/reference-architecture#hardware-sizing-for-consul-servers) + ~> **Note:** The [Reference Architecture](/consul/tutorials/production-deploy/reference-architecture#hardware-sizing-for-consul-servers) contains best practices and recommendations for selecting suitable hardware sizes for your Consul servers. - - `connect` ((#v-server-connect)) (`boolean: true`) - This will enable/disable [service mesh](https://developer.hashicorp.com/consul/docs/connect). Setting this to true + - `connect` ((#v-server-connect)) (`boolean: true`) - This will enable/disable [Connect](/consul/docs/connect). Setting this to true _will not_ automatically secure pod communication, this setting will only enable usage of the feature. Consul will automatically initialize - a new CA and set of certificates. Additional service mesh settings can be configured + a new CA and set of certificates. Additional Connect settings can be configured by setting the `server.extraConfig` value. - `serviceAccount` ((#v-server-serviceaccount)) @@ -716,10 +728,10 @@ Use these links to navigate to a particular top-level stanza. ```yaml resources: requests: - memory: '100Mi' + memory: '200Mi' cpu: '100m' limits: - memory: '100Mi' + memory: '200Mi' cpu: '100m' ``` @@ -741,7 +753,7 @@ Use these links to navigate to a particular top-level stanza. control a rolling update of Consul server agents. This value specifies the [partition](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions) for performing a rolling update. Please read the linked Kubernetes - and [Upgrade Consul](https://developer.hashicorp.com/consul/docs/k8s/upgrade#upgrading-consul-servers) + and [Upgrade Consul](/consul/docs/k8s/upgrade#upgrading-consul-servers) documentation for more information. - `disruptionBudget` ((#v-server-disruptionbudget)) - This configures the [`PodDisruptionBudget`](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) @@ -757,7 +769,7 @@ Use these links to navigate to a particular top-level stanza. 
--set 'server.disruptionBudget.maxUnavailable=0'` flag to the helm chart installation command because of a limitation in the Helm templating language. - - `extraConfig` ((#v-server-extraconfig)) (`string: {}`) - A raw string of extra [JSON configuration](https://developer.hashicorp.com/consul/docs/agent/config/config-files) for Consul + - `extraConfig` ((#v-server-extraconfig)) (`string: {}`) - A raw string of extra [JSON configuration](/consul/docs/agent/config/config-files) for Consul servers. This will be saved as-is into a ConfigMap that is read by the Consul server agents. This can be used to add additional configuration that isn't directly exposed by the chart. @@ -934,18 +946,18 @@ Use these links to navigate to a particular top-level stanza. it could be used to configure custom consul parameters. - `snapshotAgent` ((#v-server-snapshotagent)) - Values for setting up and running - [snapshot agents](https://developer.hashicorp.com/consul/commands/snapshot/agent) + [snapshot agents](/consul/commands/snapshot/agent) within the Consul clusters. They run as a sidecar with Consul servers. - `enabled` ((#v-server-snapshotagent-enabled)) (`boolean: false`) - If true, the chart will install resources necessary to run the snapshot agent. - `interval` ((#v-server-snapshotagent-interval)) (`string: 1h`) - Interval at which to perform snapshots. - Refer to [`interval`](https://developer.hashicorp.com/consul/commands/snapshot/agent#interval) + Refer to [`interval`](/consul/commands/snapshot/agent#interval) - `configSecret` ((#v-server-snapshotagent-configsecret)) - A Kubernetes or Vault secret that should be manually created to contain the entire config to be used on the snapshot agent. This is the preferred method of configuration since there are usually storage - credentials present. Please refer to the [Snapshot agent config](https://developer.hashicorp.com/consul/commands/snapshot/agent#config-file-options) + credentials present. Please refer to the [Snapshot agent config](/consul/commands/snapshot/agent#config-file-options) for details. - `secretName` ((#v-server-snapshotagent-configsecret-secretname)) (`string: null`) - The name of the Kubernetes secret or Vault secret path that holds the snapshot agent config. @@ -966,6 +978,87 @@ Use these links to navigate to a particular top-level stanza. ... ``` + - `limits` ((#v-server-limits)) - Settings for potentially limiting timeouts, rate limiting on clients as well + as servers, and other settings to limit exposure to too many requests, requests + waiting for too long, and other runtime considerations. + + - `requestLimits` ((#v-server-limits-requestlimits)) - This object specifies configurations that limit the rate of RPC and gRPC + requests on the Consul server. Limiting the rate of gRPC and RPC requests + also limits HTTP requests to the Consul server. + Refer to [`request_limits`](/consul/docs/agent/config/config-files#request_limits) for more information. + + - `mode` ((#v-server-limits-requestlimits-mode)) (`string: disabled`) - Setting for disabling or enabling rate limiting. If not disabled, it + enforces the action that will occur when `readRate` + or `writeRate` is exceeded. The default value of "disabled" will + prevent any rate limiting from occurring. A value of "enforce" will block + the request from processing by returning an error. A value of + "permissive" will not block the request and will allow the request to + continue processing.
+ + - `readRate` ((#v-server-limits-requestlimits-readrate)) (`integer: -1`) - Setting that controls how frequently RPC, gRPC, and HTTP + queries are allowed to happen. In any large enough time interval, rate + limiter limits the rate to RequestLimitsReadRate tokens per second. + + See https://en.wikipedia.org/wiki/Token_bucket for more about token + buckets. + + - `writeRate` ((#v-server-limits-requestlimits-writerate)) (`integer: -1`) - Setting that controls how frequently RPC, gRPC, and HTTP + writes are allowed to happen. In any large enough time interval, rate + limiter limits the rate to RequestLimitsWriteRate tokens per second. + + See https://en.wikipedia.org/wiki/Token_bucket for more about token + buckets. + + - `auditLogs` ((#v-server-auditlogs)) - Added in Consul 1.8, the audit object allow users to enable auditing + and configure a sink and filters for their audit logs. Please refer to + [audit logs](/consul/docs/enterprise/audit-logging) documentation + for further information. + + - `enabled` ((#v-server-auditlogs-enabled)) (`boolean: false`) - Controls whether Consul logs out each time a user performs an operation. + global.acls.manageSystemACLs must be enabled to use this feature. + + - `sinks` ((#v-server-auditlogs-sinks)) (`array`) - A single entry of the sink object provides configuration for the destination to which Consul + will log auditing events. + + Example: + + ```yaml + sinks: + - name: My Sink + type: file + format: json + path: /tmp/audit.json + delivery_guarantee: best-effort + rotate_duration: 24h + rotate_max_files: 15 + rotate_bytes: 25165824 + + ``` + + The sink object supports the following keys: + + - `name` - Name of the sink. + + - `type` - Type specifies what kind of sink this is. Currently only file sinks are available + + - `format` - Format specifies what format the events will be emitted with. Currently only `json` + events are emitted. + + - `path` - The directory and filename to write audit events to. + + - `delivery_guarantee` - Specifies the rules governing how audit events are written. Consul + only supports `best-effort` event delivery. + + - `mode` - The permissions to set on the audit log files. + + - `rotate_duration` - Specifies the interval by which the system rotates to a new log file. + At least one of `rotate_duration` or `rotate_bytes` must be configured to enable audit logging. + + - `rotate_bytes` - Specifies how large an individual log file can grow before Consul rotates to a new file. + At least one of rotate_bytes or rotate_duration must be configured to enable audit logging. + + - `rotate_max_files` - Defines the limit that Consul should follow before it deletes old log files. + ### externalServers ((#h-externalservers)) - `externalServers` ((#v-externalservers)) - Configuration for Consul servers when the servers are running outside of Kubernetes. @@ -1003,7 +1096,7 @@ Use these links to navigate to a particular top-level stanza. - `k8sAuthMethodHost` ((#v-externalservers-k8sauthmethodhost)) (`string: null`) - If you are setting `global.acls.manageSystemACLs` and `connectInject.enabled` to true, set `k8sAuthMethodHost` to the address of the Kubernetes API server. This address must be reachable from the Consul servers. - Please refer to the [Kubernetes Auth Method documentation](https://developer.hashicorp.com/consul/docs/security/acl/auth-methods/kubernetes). + Please refer to the [Kubernetes Auth Method documentation](/consul/docs/security/acl/auth-methods/kubernetes). 
You could retrieve this value from your `kubeconfig` by running: @@ -1026,7 +1119,7 @@ Use these links to navigate to a particular top-level stanza. - `image` ((#v-client-image)) (`string: null`) - The name of the Docker image (including any tag) for the containers running Consul client agents. - - `join` ((#v-client-join)) (`array: null`) - A list of valid [`-retry-join` values](https://developer.hashicorp.com/consul/docs/agent/config/cli-flags#_retry_join). + - `join` ((#v-client-join)) (`array: null`) - A list of valid [`-retry-join` values](/consul/docs/agent/config/cli-flags#_retry_join). If this is `null` (default), then the clients will attempt to automatically join the server cluster running within Kubernetes. This means that with `server.enabled` set to true, clients will automatically @@ -1044,10 +1137,10 @@ Use these links to navigate to a particular top-level stanza. - `grpc` ((#v-client-grpc)) (`boolean: true`) - If true, agents will enable their GRPC listener on port 8502 and expose it to the host. This will use slightly more resources, but is - required for service mesh. + required for Connect. - `nodeMeta` ((#v-client-nodemeta)) - nodeMeta specifies an arbitrary metadata key/value pair to associate with the node - (refer to [`-node-meta`](https://developer.hashicorp.com/consul/docs/agent/config/cli-flags#_node_meta)) + (refer to [`-node-meta`](/consul/docs/agent/config/cli-flags#_node_meta)) - `pod-name` ((#v-client-nodemeta-pod-name)) (`string: ${HOSTNAME}`) @@ -1091,7 +1184,7 @@ Use these links to navigate to a particular top-level stanza. - `tlsInit` ((#v-client-containersecuritycontext-tlsinit)) (`map`) - The tls-init initContainer - - `extraConfig` ((#v-client-extraconfig)) (`string: {}`) - A raw string of extra [JSON configuration](https://developer.hashicorp.com/consul/docs/agent/config/config-files) for Consul + - `extraConfig` ((#v-client-extraconfig)) (`string: {}`) - A raw string of extra [JSON configuration](/consul/docs/agent/config/config-files) for Consul clients. This will be saved as-is into a ConfigMap that is read by the Consul client agents. This can be used to add additional configuration that isn't directly exposed by the chart. @@ -1245,7 +1338,7 @@ Use these links to navigate to a particular top-level stanza. - `enabled` ((#v-dns-enabled)) (`boolean: -`) - - `enableRedirection` ((#v-dns-enableredirection)) (`boolean: -`) - If true, services using Consul service mesh will use Consul DNS + - `enableRedirection` ((#v-dns-enableredirection)) (`boolean: -`) - If true, services using Consul Connect will use Consul DNS for default DNS resolution. The DNS lookups fall back to the nameserver IPs listed in /etc/resolv.conf if not found in Consul. @@ -1357,16 +1450,16 @@ Use these links to navigate to a particular top-level stanza. will inherit from `global.metrics.enabled` value. - `provider` ((#v-ui-metrics-provider)) (`string: prometheus`) - Provider for metrics. Refer to - [`metrics_provider`](https://developer.hashicorp.com/consul/docs/agent/config/config-files#ui_config_metrics_provider) + [`metrics_provider`](/consul/docs/agent/config/config-files#ui_config_metrics_provider) This value is only used if `ui.enabled` is set to true. - `baseURL` ((#v-ui-metrics-baseurl)) (`string: http://prometheus-server`) - baseURL is the URL of the prometheus server, usually the service URL. This value is only used if `ui.enabled` is set to true. 
- - `dashboardURLTemplates` ((#v-ui-dashboardurltemplates)) - Corresponds to [`dashboard_url_templates`](https://developer.hashicorp.com/consul/docs/agent/config/config-files#ui_config_dashboard_url_templates) + - `dashboardURLTemplates` ((#v-ui-dashboardurltemplates)) - Corresponds to [`dashboard_url_templates`](/consul/docs/agent/config/config-files#ui_config_dashboard_url_templates) configuration. - - `service` ((#v-ui-dashboardurltemplates-service)) (`string: ""`) - Sets [`dashboardURLTemplates.service`](https://developer.hashicorp.com/consul/docs/agent/config/config-files#ui_config_dashboard_url_templates_service). + - `service` ((#v-ui-dashboardurltemplates-service)) (`string: ""`) - Sets [`dashboardURLTemplates.service`](/consul/docs/agent/config/config-files#ui_config_dashboard_url_templates_service). ### syncCatalog ((#h-synccatalog)) @@ -1386,7 +1479,7 @@ Use these links to navigate to a particular top-level stanza. to run the sync program. - `default` ((#v-synccatalog-default)) (`boolean: true`) - If true, all valid services in K8S are - synced by default. If false, the service must be [annotated](https://developer.hashicorp.com/consul/docs/k8s/service-sync#enable-and-disable-sync) + synced by default. If false, the service must be [annotated](/consul/docs/k8s/service-sync#enable-and-disable-sync) properly to sync. In either case an annotation can override the default. @@ -1568,9 +1661,9 @@ Use these links to navigate to a particular top-level stanza. ### connectInject ((#h-connectinject)) -- `connectInject` ((#v-connectinject)) - Configures the automatic service mesh sidecar injector. +- `connectInject` ((#v-connectinject)) - Configures the automatic Connect sidecar injector. - - `enabled` ((#v-connectinject-enabled)) (`boolean: true`) - True if you want to enable service mesh sidecar injection. Set to "-" to inherit from + - `enabled` ((#v-connectinject-enabled)) (`boolean: true`) - True if you want to enable connect injection. Set to "-" to inherit from global.enabled. - `replicas` ((#v-connectinject-replicas)) (`integer: 1`) - The number of deployment replicas. @@ -1579,14 +1672,14 @@ Use these links to navigate to a particular top-level stanza. - `default` ((#v-connectinject-default)) (`boolean: false`) - If true, the injector will inject the Connect sidecar into all pods by default. Otherwise, pods must specify the - [injection annotation](https://developer.hashicorp.com/consul/docs/k8s/connect#consul-hashicorp-com-connect-inject) - to opt-in to service mesh sidecar injection. If this is true, pods can use the same annotation + [injection annotation](/consul/docs/k8s/connect#consul-hashicorp-com-connect-inject) + to opt-in to Connect injection. If this is true, pods can use the same annotation to explicitly opt-out of injection. - `transparentProxy` ((#v-connectinject-transparentproxy)) - Configures Transparent Proxy for Consul Service mesh services. Using this feature requires Consul 1.10.0-beta1+. - - `defaultEnabled` ((#v-connectinject-transparentproxy-defaultenabled)) (`boolean: true`) - If true, then all Consul service mesh will run with transparent proxy enabled by default, + - `defaultEnabled` ((#v-connectinject-transparentproxy-defaultenabled)) (`boolean: true`) - If true, then all Consul Service mesh will run with transparent proxy enabled by default, i.e. we enforce that all traffic within the pod will go through the proxy. This value is overridable via the "consul.hashicorp.com/transparent-proxy" pod annotation. 
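As a sketch of that per-pod override, a hypothetical workload can keep sidecar injection but opt out of transparent proxy while the chart-wide default remains enabled:

```yaml
# Hypothetical pod template metadata: disables transparent proxy for this pod only,
# overriding connectInject.transparentProxy.defaultEnabled=true.
metadata:
  annotations:
    consul.hashicorp.com/connect-inject: 'true'
    consul.hashicorp.com/transparent-proxy: 'false'
```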
@@ -1613,6 +1706,64 @@ Use these links to navigate to a particular top-level stanza. - `minAvailable` ((#v-connectinject-disruptionbudget-minavailable)) (`integer: null`) - The minimum number of available pods. Takes precedence over maxUnavailable if set. + - `apiGateway` ((#v-connectinject-apigateway)) - Configuration settings for the Consul API Gateway integration. + + - `manageExternalCRDs` ((#v-connectinject-apigateway-manageexternalcrds)) (`boolean: true`) - Enables Consul on Kubernetes to manage the CRDs used for Gateway API. + Setting this to true will install the CRDs used for the Gateway API when Consul on Kubernetes is installed. + These CRDs can clash with existing Gateway API CRDs if they are already installed in your cluster. + If this setting is false, you will need to install the Gateway API CRDs manually. + + - `managedGatewayClass` ((#v-connectinject-apigateway-managedgatewayclass)) - Configuration settings for the GatewayClass installed by Consul on Kubernetes. + + - `nodeSelector` ((#v-connectinject-apigateway-managedgatewayclass-nodeselector)) (`string: null`) - This value defines [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) + labels for gateway pod assignment, formatted as a multi-line string. + + Example: + + ```yaml + nodeSelector: | + beta.kubernetes.io/arch: amd64 + ``` + + - `tolerations` ((#v-connectinject-apigateway-managedgatewayclass-tolerations)) (`string: null`) - Toleration settings for gateway pods created with the managed gateway class. + This should be a multi-line string matching the + [Tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) array in a Pod spec. + + - `serviceType` ((#v-connectinject-apigateway-managedgatewayclass-servicetype)) (`string: LoadBalancer`) - This value defines the type of Service created for gateways (e.g. LoadBalancer, ClusterIP) + + - `copyAnnotations` ((#v-connectinject-apigateway-managedgatewayclass-copyannotations)) - Configuration settings for annotations to be copied from the Gateway to other child resources. + + - `service` ((#v-connectinject-apigateway-managedgatewayclass-copyannotations-service)) (`string: null`) - This value defines a list of annotations to be copied from the Gateway to the Service created, formatted as a multi-line string. + + Example: + + ```yaml + service: + annotations: | + - external-dns.alpha.kubernetes.io/hostname + ``` + + - `deployment` ((#v-connectinject-apigateway-managedgatewayclass-deployment)) - This value defines the number of pods to deploy for each Gateway as well as a min and max number of pods for all Gateways + + - `defaultInstances` ((#v-connectinject-apigateway-managedgatewayclass-deployment-defaultinstances)) (`integer: 1`) + + - `maxInstances` ((#v-connectinject-apigateway-managedgatewayclass-deployment-maxinstances)) (`integer: 1`) + + - `minInstances` ((#v-connectinject-apigateway-managedgatewayclass-deployment-mininstances)) (`integer: 1`) + + - `serviceAccount` ((#v-connectinject-apigateway-serviceaccount)) - Configuration for the ServiceAccount created for the api-gateway component + + - `annotations` ((#v-connectinject-apigateway-serviceaccount-annotations)) (`string: null`) - This value defines additional annotations for the client service account. This should be formatted as a multi-line + string. 
+ + ```yaml + annotations: | + "sample/annotation1": "foo" + "sample/annotation2": "bar" + ``` + + - `resources` ((#v-connectinject-apigateway-resources)) (`map`) - The resource settings for Pods handling traffic for Gateway API. + - `cni` ((#v-connectinject-cni)) - Configures consul-cni plugin for Consul Service mesh services - `enabled` ((#v-connectinject-cni-enabled)) (`boolean: false`) - If true, then all traffic redirection setup uses the consul-cni plugin. @@ -1681,7 +1832,7 @@ Use these links to navigate to a particular top-level stanza. persistent: true ``` - - `metrics` ((#v-connectinject-metrics)) - Configures metrics for services in the Consul service mesh. All values are overridable + - `metrics` ((#v-connectinject-metrics)) - Configures metrics for Consul Connect services. All values are overridable via annotations on a per-pod basis. - `defaultEnabled` ((#v-connectinject-metrics-defaultenabled)) (`string: -`) - If true, the connect-injector will automatically @@ -1690,14 +1841,14 @@ Use these links to navigate to a particular top-level stanza. metrics will depend on whether metrics merging is enabled: - If metrics merging is enabled: the consul-dataplane will run a merged metrics server - combining Envoy sidecar and mesh service metrics, + combining Envoy sidecar and Connect service metrics, i.e. if your service exposes its own Prometheus metrics. - If metrics merging is disabled: the listener will just expose Envoy sidecar metrics. This will inherit from `global.metrics.enabled`. - `defaultEnableMerging` ((#v-connectinject-metrics-defaultenablemerging)) (`boolean: false`) - Configures the consul-dataplane to run a merged metrics server - to combine and serve both Envoy and mesh service metrics. + to combine and serve both Envoy and Connect service metrics. This feature is available only in Consul v1.10.0 or greater. - `defaultMergedMetricsPort` ((#v-connectinject-metrics-defaultmergedmetricsport)) (`integer: 20100`) - Configures the port at which the consul-dataplane will listen on to return @@ -1763,13 +1914,13 @@ Use these links to navigate to a particular top-level stanza. - `requests` ((#v-connectinject-resources-requests)) - - `memory` ((#v-connectinject-resources-requests-memory)) (`string: 50Mi`) - Recommended production default: 500Mi + - `memory` ((#v-connectinject-resources-requests-memory)) (`string: 200Mi`) - Recommended production default: 500Mi - `cpu` ((#v-connectinject-resources-requests-cpu)) (`string: 50m`) - Recommended production default: 250m - `limits` ((#v-connectinject-resources-limits)) - - `memory` ((#v-connectinject-resources-limits-memory)) (`string: 50Mi`) - Recommended production default: 500Mi + - `memory` ((#v-connectinject-resources-limits-memory)) (`string: 200Mi`) - Recommended production default: 500Mi - `cpu` ((#v-connectinject-resources-limits-cpu)) (`string: 50m`) - Recommended production default: 250m @@ -1798,13 +1949,13 @@ Use these links to navigate to a particular top-level stanza. namespace-label: label-value ``` - - `k8sAllowNamespaces` ((#v-connectinject-k8sallownamespaces)) (`array: ["*"]`) - List of k8s namespaces to allow service mesh sidecar + - `k8sAllowNamespaces` ((#v-connectinject-k8sallownamespaces)) (`array: ["*"]`) - List of k8s namespaces to allow Connect sidecar injection in. If a k8s namespace is not included or is listed in `k8sDenyNamespaces`, pods in that k8s namespace will not be injected even if they are explicitly annotated. Use `["*"]` to automatically allow all k8s namespaces. 
For example, `["namespace1", "namespace2"]` will only allow pods in the k8s - namespaces `namespace1` and `namespace2` to have service mesh sidecars injected + namespaces `namespace1` and `namespace2` to have Connect sidecars injected and registered with Consul. All other k8s namespaces will be ignored. To deny all namespaces, set this to `[]`. @@ -1813,7 +1964,7 @@ Use these links to navigate to a particular top-level stanza. `namespaceSelector` takes precedence over both since it is applied first. `kube-system` and `kube-public` are never injected, even if included here. - - `k8sDenyNamespaces` ((#v-connectinject-k8sdenynamespaces)) (`array: []`) - List of k8s namespaces that should not allow service mesh + - `k8sDenyNamespaces` ((#v-connectinject-k8sdenynamespaces)) (`array: []`) - List of k8s namespaces that should not allow Connect sidecar injection. This list takes precedence over `k8sAllowNamespaces`. `*` is not supported because then nothing would be allowed to be injected. @@ -1869,8 +2020,8 @@ Use these links to navigate to a particular top-level stanza. If set to an empty string all service accounts can log in. This only has effect if ACLs are enabled. - Refer to Auth methods [Binding rules](https://developer.hashicorp.com/consul/docs/security/acl/auth-methods#binding-rules) - and [Trusted identiy attributes](https://developer.hashicorp.com/consul/docs/security/acl/auth-methods/kubernetes#trusted-identity-attributes) + Refer to Auth methods [Binding rules](/consul/docs/security/acl/auth-methods#binding-rules) + and [Trusted identiy attributes](/consul/docs/security/acl/auth-methods/kubernetes#trusted-identity-attributes) for more details. Requires Consul >= v1.5. @@ -1878,7 +2029,7 @@ Use these links to navigate to a particular top-level stanza. auth method for Connect inject, set this to the name of your auth method. - `aclInjectToken` ((#v-connectinject-aclinjecttoken)) - Refers to a Kubernetes secret that you have created that contains - an ACL token for your Consul cluster which allows the connect injector the correct + an ACL token for your Consul cluster which allows the Connect injector the correct permissions. This is only needed if Consul namespaces and ACLs are enabled on the Consul cluster and you are not setting `global.acls.manageSystemACLs` to `true`. @@ -1922,7 +2073,26 @@ Use these links to navigate to a particular top-level stanza. - `cpu` ((#v-connectinject-sidecarproxy-resources-limits-cpu)) (`string: null`) - Recommended production default: 100m - - `initContainer` ((#v-connectinject-initcontainer)) (`map`) - The resource settings for the connect injected init container. If null, the resources + - `lifecycle` ((#v-connectinject-sidecarproxy-lifecycle)) (`map`) - Set default lifecycle management configuration for sidecar proxy. 
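      As a hedged sketch only — mirroring the default values listed below rather than prescribing a configuration — the stanza could be expressed in a values file as:

      ```yaml
      connectInject:
        sidecarProxy:
          lifecycle:
            # Chart defaults shown; adjust per environment.
            defaultEnabled: true
            defaultEnableShutdownDrainListeners: true
            defaultShutdownGracePeriodSeconds: 30
            defaultGracefulPort: 20600
            defaultGracefulShutdownPath: /graceful_shutdown
      ```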
+ These settings can be overridden on a per-pod basis via these annotations: + + - `consul.hashicorp.com/enable-sidecar-proxy-lifecycle` + - `consul.hashicorp.com/enable-sidecar-proxy-shutdown-drain-listeners` + - `consul.hashicorp.com/sidecar-proxy-lifecycle-shutdown-grace-period-seconds` + - `consul.hashicorp.com/sidecar-proxy-lifecycle-graceful-port` + - `consul.hashicorp.com/sidecar-proxy-lifecycle-graceful-shutdown-path` + + - `defaultEnabled` ((#v-connectinject-sidecarproxy-lifecycle-defaultenabled)) (`boolean: true`) + + - `defaultEnableShutdownDrainListeners` ((#v-connectinject-sidecarproxy-lifecycle-defaultenableshutdowndrainlisteners)) (`boolean: true`) + + - `defaultShutdownGracePeriodSeconds` ((#v-connectinject-sidecarproxy-lifecycle-defaultshutdowngraceperiodseconds)) (`integer: 30`) + + - `defaultGracefulPort` ((#v-connectinject-sidecarproxy-lifecycle-defaultgracefulport)) (`integer: 20600`) + + - `defaultGracefulShutdownPath` ((#v-connectinject-sidecarproxy-lifecycle-defaultgracefulshutdownpath)) (`string: /graceful_shutdown`) + + - `initContainer` ((#v-connectinject-initcontainer)) (`map`) - The resource settings for the Connect injected init container. If null, the resources won't be set for the initContainer. The defaults are optimized for developer instances of Kubernetes, however they should be tweaked with the recommended defaults as shown below to speed up service registration times. @@ -1942,11 +2112,11 @@ Use these links to navigate to a particular top-level stanza. ### meshGateway ((#h-meshgateway)) -- `meshGateway` ((#v-meshgateway)) - [Mesh Gateways](https://developer.hashicorp.com/consul/docs/connect/gateways/mesh-gateway) enable Consul service mesh to work across Consul datacenters. +- `meshGateway` ((#v-meshgateway)) - [Mesh Gateways](/consul/docs/connect/gateways/mesh-gateway) enable Consul Connect to work across Consul datacenters. - - `enabled` ((#v-meshgateway-enabled)) (`boolean: false`) - If [mesh gateways](https://developer.hashicorp.com/consul/docs/connect/gateways/mesh-gateway) are enabled, a Deployment will be created that runs - gateways and Consul service mesh will be configured to use gateways. - This setting is required for [cluster peering](https://developer.hashicorp.com/consul/docs/connect/cluster-peering/k8s). + - `enabled` ((#v-meshgateway-enabled)) (`boolean: false`) - If [mesh gateways](/consul/docs/connect/gateways/mesh-gateway) are enabled, a Deployment will be created that runs + gateways and Consul Connect will be configured to use gateways. + This setting is required for [Cluster Peering](/consul/docs/connect/cluster-peering/k8s). Requirements: consul 1.6.0+ if using `global.acls.manageSystemACLs``. - `replicas` ((#v-meshgateway-replicas)) (`integer: 1`) - Number of replicas for the Deployment. @@ -2110,8 +2280,7 @@ Use these links to navigate to a particular top-level stanza. for a specific gateway. Requirements: consul >= 1.8.0 - - `enabled` ((#v-ingressgateways-enabled)) (`boolean: false`) - Enable ingress gateway deployment. Requires `connectInject.enabled=true` - and `client.enabled=true`. + - `enabled` ((#v-ingressgateways-enabled)) (`boolean: false`) - Enable ingress gateway deployment. Requires `connectInject.enabled=true`. - `defaults` ((#v-ingressgateways-defaults)) - Defaults sets default values for all gateway fields. With the exception of annotations, defining any of these values in the `gateways` list @@ -2228,7 +2397,7 @@ Use these links to navigate to a particular top-level stanza. `defaults`. 
Values defined here override the defaults except in the case of annotations where both will be applied. - - `name` ((#v-ingressgateways-gateways-name)) (`string: ingress-gateway`) + - `name` ((#v-ingressgateways-gateways-name)) (`string: ingress-gateway`) ### terminatingGateways ((#h-terminatinggateways)) @@ -2240,8 +2409,7 @@ Use these links to navigate to a particular top-level stanza. for a specific gateway. Requirements: consul >= 1.8.0 - - `enabled` ((#v-terminatinggateways-enabled)) (`boolean: false`) - Enable terminating gateway deployment. Requires `connectInject.enabled=true` - and `client.enabled=true`. + - `enabled` ((#v-terminatinggateways-enabled)) (`boolean: false`) - Enable terminating gateway deployment. Requires `connectInject.enabled=true`. - `defaults` ((#v-terminatinggateways-defaults)) - Defaults sets default values for all gateway fields. With the exception of annotations, defining any of these values in the `gateways` list @@ -2344,11 +2512,12 @@ Use these links to navigate to a particular top-level stanza. `defaults`. Values defined here override the defaults except in the case of annotations where both will be applied. - - `name` ((#v-terminatinggateways-gateways-name)) (`string: terminating-gateway`) + - `name` ((#v-terminatinggateways-gateways-name)) (`string: terminating-gateway`) ### apiGateway ((#h-apigateway)) -- `apiGateway` ((#v-apigateway)) - Configuration settings for the Consul API Gateway integration +- `apiGateway` ((#v-apigateway)) - [DEPRECATED] Use connectInject.apiGateway instead. This stanza will be removed with the release of Consul 1.17 + Configuration settings for the Consul API Gateway integration - `enabled` ((#v-apigateway-enabled)) (`boolean: false`) - When true the helm chart will install the Consul API Gateway controller diff --git a/website/content/docs/k8s/l7-traffic/failover-tproxy.mdx b/website/content/docs/k8s/l7-traffic/failover-tproxy.mdx index 81bdc5e8672db..c08d8c3ac5d83 100644 --- a/website/content/docs/k8s/l7-traffic/failover-tproxy.mdx +++ b/website/content/docs/k8s/l7-traffic/failover-tproxy.mdx @@ -18,7 +18,7 @@ Complete the following steps to configure failover service instances in Consul o ## Requirements -- `consul-k8s` v1.2.0-beta1 or newer. +- `consul-k8s` v1.2.0 or newer. - Consul service mesh must be enabled. Refer to [How does Consul Service Mesh Work on Kubernetes](/consul/docs/k8s/connect). - Proxies must be configured to run in transparent proxy mode. - To query virtual DNS names, you must use Consul DNS. diff --git a/website/content/docs/k8s/l7-traffic/route-to-virtual-services.mdx b/website/content/docs/k8s/l7-traffic/route-to-virtual-services.mdx index c8951d356cf6b..0852f81196fe4 100644 --- a/website/content/docs/k8s/l7-traffic/route-to-virtual-services.mdx +++ b/website/content/docs/k8s/l7-traffic/route-to-virtual-services.mdx @@ -20,7 +20,7 @@ Complete the following steps to configure failover service instances in Consul o ## Requirements -- `consul-k8s` v1.2.0-beta1 or newer. +- `consul-k8s` v1.2.0 or newer. - Consul service mesh must be enabled. Refer to [How does Consul service mesh work on Kubernetes](/consul/docs/k8s/connect). - Proxies must be configured to run in transparent proxy mode. - To query virtual DNS names, you must use Consul DNS. @@ -119,4 +119,4 @@ You can query the KubeDNS if the real and virtual services are in the same Kuber http://virtual-api..svc.cluster.local ``` -Note that you cannot use KubeDNS if a corresponding Kubernetes service and pod do not exist. 
\ No newline at end of file +Note that you cannot use KubeDNS if a corresponding Kubernetes service and pod do not exist. diff --git a/website/content/docs/release-notes/consul/v1_16_x.mdx b/website/content/docs/release-notes/consul/v1_16_x.mdx index 616104a7094f2..fc3d002e0b67d 100644 --- a/website/content/docs/release-notes/consul/v1_16_x.mdx +++ b/website/content/docs/release-notes/consul/v1_16_x.mdx @@ -23,7 +23,7 @@ We are pleased to announce the following Consul updates. - [Route traffic to virtual services](/consul/docs/k8s/l7-traffic/route-to-virtual-services) - [Configure failover services](/consul/docs/k8s/l7-traffic/failover-tproxy). -- **Granular server-side rate limits (Enterprise):** You can now set limits per source IP address. The following steps describe the general process for setting global read and write rate limits: +- **Granular server-side rate limits:** You can now set limits per source IP address in Consul Enterprise. The following steps describe the general process for setting global read and write rate limits: 1. Set arbitrary limits to begin understanding the upper boundary of RPC and gRPC loads in your network. Refer to [Initialize rate limit settings](/consul/docs/agent/limits/usage/init-rate-limits) for additional information. 1. Monitor the metrics and logs and readjust the initial configurations as necessary. Refer to [Monitor rate limit data](/consul/docs/agent/limits/usage/monitor-rate-limits) diff --git a/website/content/docs/security/security-models/core.mdx b/website/content/docs/security/security-models/core.mdx index 2b6bb0515d718..92a5c1ac91c24 100644 --- a/website/content/docs/security/security-models/core.mdx +++ b/website/content/docs/security/security-models/core.mdx @@ -128,6 +128,9 @@ environment and adapt these configurations accordingly. ca_file = "consul-agent-ca.pem" cert_file = "dc1-server-consul-0.pem" key_file = "dc1-server-consul-0-key.pem" + } + + internal_rpc { verify_server_hostname = true } } @@ -145,6 +148,9 @@ environment and adapt these configurations accordingly. verify_incoming = false verify_outgoing = true ca_file = "consul-agent-ca.pem" + } + + internal_rpc { verify_server_hostname = true } } diff --git a/website/content/docs/services/configuration/checks-configuration-reference.mdx b/website/content/docs/services/configuration/checks-configuration-reference.mdx index c0d3e24cfde6b..fee071de51b0a 100644 --- a/website/content/docs/services/configuration/checks-configuration-reference.mdx +++ b/website/content/docs/services/configuration/checks-configuration-reference.mdx @@ -35,8 +35,8 @@ Specify health check options in the `check` block. To register two or more heath | `h2ping` | String value that specifies the HTTP2 endpoint, including port number, to send HTTP2 requests to. |
  • H2ping
  • | | `h2ping_use_tls` | Boolean value that enables TLS for H2ping checks when set to `true`. |
  • H2ping
  • | | `http` | String value that specifies an HTTP endpoint to send requests to. |
  • HTTP
  • | -| `tls_server_name` | String value that specifies the server name used to verify the hostname on the returned certificates unless `tls_skip_verify` is given. Also included in the client's handshake to support SNI. It is recommended that this field be left unspecified. The TLS client will deduce the server name for SNI from the check address unless it's an IP ([RFC 6066, Section 3](https://tools.ietf.org/html/rfc6066#section-3)). There are two common circumstances where supplying a `tls_server_name` can be beneficial:
  • When the check address is an IP, `tls_server_name` can be specified for SNI. Note: setting `tls_server_name` will also override the hostname used to verify the certificate presented by the server being checked.
  • When the hostname in the check address won't be present in the SAN (Subject Alternative Name) field of the certificate presented by the server being checked. Note: setting `tls_server_name` will also override the hostname used for SNI.
  • |
  • HTTP
  • H2Ping
  • gRPC
  • | -| `tls_skip_verify` | Boolean value that determines if the check verifies the chain and hostname of the certificate that the server presents. Set to `true` to disable verification. We recommend setting to `false` for production use. Default is `false`. |
  • HTTP
  • H2Ping
  • gRPC
  • | +| `tls_server_name` | String value that specifies the server name used to verify the hostname on the certificate returned by the server and sent as the SNI host during the TLS handshake. Defaults to the hostname in the address specified in the `http` field. Certificate verification can be disabled entirely with `tls_skip_verify`. |
  • HTTP
  • | +| `tls_skip_verify` | Boolean value that disables TLS certificate verification for HTTP checks when set to `true`. Default is `false`. |
  • HTTP
  • | | `method` | String value that specifies the request method to send during HTTP checks. Default is `GET`. |
  • HTTP
  • | | `header` | Object that specifies header fields to send in HTTP check requests. Each header specified in `header` object contains a list of string values. |
  • HTTP
  • | | `body` | String value that contains JSON attributes to send in HTTP check requests. You must escape the quotation marks around the keys and values for each attribute. |
  • HTTP
  • | diff --git a/website/content/docs/services/configuration/services-configuration-reference.mdx b/website/content/docs/services/configuration/services-configuration-reference.mdx index 4614a4b268089..95f01e16ff73b 100644 --- a/website/content/docs/services/configuration/services-configuration-reference.mdx +++ b/website/content/docs/services/configuration/services-configuration-reference.mdx @@ -404,7 +404,7 @@ String value that specifies the namespace in which to register the service. Refe ## Multiple service definitions -You can define multiple services in a single definition file in the `services` block. This enables you register multiple services in a single command. Note that the HTTP API does not support the `services` block. +You can define multiple services in a single definition file in the `servcies` block. This enables you register multiple services in a single command. Note that the HTTP API does not support the `services` block. diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 7149dfebb2994..277050600cb85 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -410,19 +410,39 @@ }, { "title": "API gateway", - "href": "/consul/docs/connect/gateways/api-gateway/configuration/api-gateway" + "href": "/consul/docs/connect/gateways/api-gateway/configuration/api-gateway", + "badge": { + "text": "BETA", + "type": "outlined", + "color": "neutral" + } }, { "title": "HTTP route", - "href": "/consul/docs/connect/gateways/api-gateway/configuration/http-route" + "href": "/consul/docs/connect/gateways/api-gateway/configuration/http-route", + "badge": { + "text": "BETA", + "type": "outlined", + "color": "neutral" + } }, { "title": "TCP route", - "href": "/consul/docs/connect/gateways/api-gateway/configuration/tcp-route" + "href": "/consul/docs/connect/gateways/api-gateway/configuration/tcp-route", + "badge": { + "text": "BETA", + "type": "outlined", + "color": "neutral" + } }, { "title": "Inline certificate", - "href": "/consul/docs/connect/gateways/api-gateway/configuration/inline-certificate" + "href": "/consul/docs/connect/gateways/api-gateway/configuration/inline-certificate", + "badge": { + "text": "BETA", + "type": "outlined", + "color": "neutral" + } }, { "title": "Ingress gateway", @@ -502,7 +522,7 @@ { "title": "Delegate authorization to external services", "path": "connect/proxies/envoy-extensions/usage/ext-authz" - }, + }, { "title": "Run Lua scripts in Envoy proxies", "path": "connect/proxies/envoy-extensions/usage/lua" @@ -518,8 +538,7 @@ { "title": "Run WebAssembly plug-ins in Envoy proxies", "path": "connect/proxies/envoy-extensions/usage/wasm" - } - ] + } ] }, { "title": "Configuration", @@ -527,16 +546,16 @@ { "title": "External authorization", "path": "connect/proxies/envoy-extensions/configuration/ext-authz" - }, + }, { "title": "Property override", "path": "connect/proxies/envoy-extensions/configuration/property-override" - }, + }, { "title": "WebAssembly", "path": "connect/proxies/envoy-extensions/configuration/wasm" } - ] + ] } ] }, @@ -639,6 +658,11 @@ }, { "title": "API Gateways", + "badge": { + "text": "BETA", + "type": "outlined", + "color": "neutral" + }, "routes": [ { "title": "Overview", @@ -1046,7 +1070,7 @@ { "title": "Limit traffic rates from source IP addresses", "path": "agent/limits/usage/limit-request-rates-from-ips" - } + } ] }, { @@ -1067,6 +1091,11 @@ "title": "Sentinel", "path": "agent/sentinel" }, + { + "title": "RPC", + "path": "agent/rpc", + "hidden": true + }, { "title": 
"Experimental WAL LogStore", "routes": [